├── .codecov.yml
├── .gitattributes
├── .github
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── CI.yaml
│ ├── PyPI.yaml
│ └── codeql.yml
├── .gitignore
├── .lgtm.yml
├── CITATION.cff
├── CODE_OF_CONDUCT.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── colab_setup.sh
├── devtools
├── README.md
├── conda-envs
│ └── test_env.yaml
├── legacy-miniconda-setup
│ └── before_install.sh
└── scripts
│ └── create_conda_env.py
├── docs
├── Makefile
├── README.md
├── _static
│ └── README.md
├── _templates
│ ├── README.md
│ └── custom-class-template.rst
├── api.rst
├── api_core.rst
├── api_cvs.rst
├── api_data.rst
├── api_explain.rst
├── api_utils.rst
├── autosummary
│ ├── mlcolvar.core.loss.AutocorrelationLoss.rst
│ ├── mlcolvar.core.loss.ELBOGaussiansLoss.rst
│ ├── mlcolvar.core.loss.FisherDiscriminantLoss.rst
│ ├── mlcolvar.core.loss.MSELoss.rst
│ ├── mlcolvar.core.loss.ReduceEigenvaluesLoss.rst
│ ├── mlcolvar.core.loss.TDALoss.rst
│ ├── mlcolvar.core.nn.FeedForward.rst
│ ├── mlcolvar.core.stats.LDA.rst
│ ├── mlcolvar.core.stats.PCA.rst
│ ├── mlcolvar.core.stats.Stats.rst
│ ├── mlcolvar.core.stats.TICA.rst
│ ├── mlcolvar.core.transform.Normalization.rst
│ ├── mlcolvar.core.transform.Transform.rst
│ ├── mlcolvar.core.transform.descriptors.CoordinationNumbers.rst
│ ├── mlcolvar.core.transform.descriptors.EigsAdjMat.rst
│ ├── mlcolvar.core.transform.descriptors.MultipleDescriptors.rst
│ ├── mlcolvar.core.transform.descriptors.PairwiseDistances.rst
│ ├── mlcolvar.core.transform.descriptors.TorsionalAngle.rst
│ ├── mlcolvar.core.transform.tools.ContinuousHistogram.rst
│ ├── mlcolvar.core.transform.tools.Normalization.rst
│ ├── mlcolvar.core.transform.tools.SwitchingFunctions.rst
│ ├── mlcolvar.cvs.AutoEncoderCV.rst
│ ├── mlcolvar.cvs.BaseCV.rst
│ ├── mlcolvar.cvs.Committor.rst
│ ├── mlcolvar.cvs.DeepLDA.rst
│ ├── mlcolvar.cvs.DeepTDA.rst
│ ├── mlcolvar.cvs.DeepTICA.rst
│ ├── mlcolvar.cvs.MultiTaskCV.rst
│ ├── mlcolvar.cvs.RegressionCV.rst
│ ├── mlcolvar.cvs.VariationalAutoEncoderCV.rst
│ ├── mlcolvar.data.DictDataset.rst
│ ├── mlcolvar.data.DictLoader.rst
│ ├── mlcolvar.data.DictModule.rst
│ ├── mlcolvar.explain.lasso.lasso_classification.rst
│ ├── mlcolvar.explain.lasso.lasso_regression.rst
│ ├── mlcolvar.explain.lasso.plot_lasso_classification.rst
│ ├── mlcolvar.explain.lasso.plot_lasso_regression.rst
│ ├── mlcolvar.explain.sensitivity.plot_sensitivity.rst
│ ├── mlcolvar.explain.sensitivity.sensitivity_analysis.rst
│ ├── mlcolvar.utils.fes.compute_fes.rst
│ ├── mlcolvar.utils.io.create_dataset_from_files.rst
│ ├── mlcolvar.utils.io.load_dataframe.rst
│ ├── mlcolvar.utils.plot.plot_features_distribution.rst
│ ├── mlcolvar.utils.plot.plot_metrics.rst
│ ├── mlcolvar.utils.timelagged.create_timelagged_dataset.rst
│ └── mlcolvar.utils.trainer.MetricsCallback.rst
├── conf.py
├── contributing.rst
├── examples.rst
├── examples_experiments.rst
├── images
│ ├── logo_name_black_big.png
│ ├── logo_name_black_small.png
│ ├── logo_name_white.png
│ └── logo_plain.png
├── index.rst
├── installation.rst
├── make.bat
├── notebooks
│ ├── .gitignore
│ ├── examples
│ │ ├── .gitignore
│ │ ├── ex_DeepLDA.ipynb
│ │ ├── ex_DeepTICA.ipynb
│ │ ├── ex_TPI-DeepTDA.ipynb
│ │ ├── ex_committor.ipynb
│ │ ├── ex_stateinterpreter.ipynb
│ │ └── images
│ │ │ ├── ala2-deeplda.png
│ │ │ ├── alanine.png
│ │ │ ├── aldol-P.jpg
│ │ │ ├── aldol-R.jpg
│ │ │ ├── aldol-deeplda.png
│ │ │ └── chignolin.png
│ ├── paper_experiments
│ │ ├── README.md
│ │ ├── input_data
│ │ │ ├── multitask
│ │ │ │ └── mfep.txt
│ │ │ ├── supervised
│ │ │ │ ├── state-0
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── md_input
│ │ │ │ │ ├── md_potential
│ │ │ │ │ └── plumed.dat
│ │ │ │ ├── state-1
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── md_input
│ │ │ │ │ ├── md_potential
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── state-2
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── md_input
│ │ │ │ │ ├── md_potential
│ │ │ │ │ └── plumed.dat
│ │ │ ├── timelagged
│ │ │ │ └── opes-y
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── State.data
│ │ │ │ │ ├── md_input
│ │ │ │ │ ├── md_potential
│ │ │ │ │ └── plumed.dat
│ │ │ └── unsupervised
│ │ │ │ └── unbiased
│ │ │ │ ├── COLVAR
│ │ │ │ ├── md_input
│ │ │ │ ├── md_potential
│ │ │ │ └── plumed.dat
│ │ ├── paper_1_unsupervised.ipynb
│ │ ├── paper_2_supervised.ipynb
│ │ ├── paper_3_timelagged.ipynb
│ │ ├── paper_4_multitask.ipynb
│ │ ├── paper_experiments.zip
│ │ ├── results
│ │ │ ├── multitask
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── State.data
│ │ │ │ │ ├── bck.last.State.data
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ ├── decoder_model.pt
│ │ │ │ ├── decoder_stats
│ │ │ │ │ ├── decoder_model_0.pt
│ │ │ │ │ ├── decoder_model_1.pt
│ │ │ │ │ ├── decoder_model_2.pt
│ │ │ │ │ ├── decoder_model_3.pt
│ │ │ │ │ ├── decoder_model_4.pt
│ │ │ │ │ ├── decoder_model_5.pt
│ │ │ │ │ ├── decoder_model_6.pt
│ │ │ │ │ ├── decoder_model_7.pt
│ │ │ │ │ ├── decoder_model_8.pt
│ │ │ │ │ ├── model_multitask_0.pt
│ │ │ │ │ ├── model_multitask_1.pt
│ │ │ │ │ ├── model_multitask_2.pt
│ │ │ │ │ ├── model_multitask_3.pt
│ │ │ │ │ ├── model_multitask_4.pt
│ │ │ │ │ ├── model_multitask_5.pt
│ │ │ │ │ ├── model_multitask_6.pt
│ │ │ │ │ ├── model_multitask_7.pt
│ │ │ │ │ └── model_multitask_8.pt
│ │ │ │ └── model_multitask.pt
│ │ │ ├── supervised
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── State.data
│ │ │ │ │ ├── bck.last.State.data
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_deepTDA.pt
│ │ │ ├── timelagged
│ │ │ │ └── deepTICA
│ │ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ │ └── model_deepTICA.pt
│ │ │ └── unsupervised
│ │ │ │ ├── iter_0
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_0.pt
│ │ │ │ ├── iter_1
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_1.pt
│ │ │ │ ├── iter_10
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_10.pt
│ │ │ │ ├── iter_11
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_11.pt
│ │ │ │ ├── iter_12
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_12.pt
│ │ │ │ ├── iter_13
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_13.pt
│ │ │ │ ├── iter_14
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_14.pt
│ │ │ │ ├── iter_15
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_15.pt
│ │ │ │ ├── iter_2
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_2.pt
│ │ │ │ ├── iter_3
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_3.pt
│ │ │ │ ├── iter_4
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_4.pt
│ │ │ │ ├── iter_5
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_5.pt
│ │ │ │ ├── iter_6
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_6.pt
│ │ │ │ ├── iter_7
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_7.pt
│ │ │ │ ├── iter_8
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_8.pt
│ │ │ │ ├── iter_9
│ │ │ │ ├── data
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── KERNELS
│ │ │ │ │ ├── input_md-potential.dat
│ │ │ │ │ ├── input_md.dat
│ │ │ │ │ └── plumed.dat
│ │ │ │ └── model_autoencoder_9.pt
│ │ │ │ └── unbiased
│ │ └── utils
│ │ │ └── generate_input.py
│ └── tutorials
│ │ ├── adv_multitask.ipynb
│ │ ├── adv_newcv_scratch.ipynb
│ │ ├── adv_newcv_subclass.ipynb
│ │ ├── adv_preprocessing.ipynb
│ │ ├── adv_transforms.ipynb
│ │ ├── cvs_Autoencoder.ipynb
│ │ ├── cvs_DeepLDA.ipynb
│ │ ├── cvs_DeepTDA.ipynb
│ │ ├── cvs_DeepTICA.ipynb
│ │ ├── cvs_committor.ipynb
│ │ ├── data
│ │ ├── README.md
│ │ ├── muller-brown-3states
│ │ │ ├── biased
│ │ │ │ └── opes-y
│ │ │ │ │ ├── COLVAR
│ │ │ │ │ ├── md_input
│ │ │ │ │ ├── md_potential
│ │ │ │ │ └── plumed.dat
│ │ │ ├── mfep.txt
│ │ │ ├── run-md-plumed.ipynb
│ │ │ └── unbiased
│ │ │ │ ├── high-temp
│ │ │ │ ├── COLVAR
│ │ │ │ ├── md_input
│ │ │ │ ├── md_potential
│ │ │ │ └── plumed.dat
│ │ │ │ ├── state-0
│ │ │ │ ├── COLVAR
│ │ │ │ ├── md_input
│ │ │ │ ├── md_potential
│ │ │ │ └── plumed.dat
│ │ │ │ ├── state-1
│ │ │ │ ├── COLVAR
│ │ │ │ ├── md_input
│ │ │ │ ├── md_potential
│ │ │ │ └── plumed.dat
│ │ │ │ └── state-2
│ │ │ │ ├── COLVAR
│ │ │ │ ├── md_input
│ │ │ │ ├── md_potential
│ │ │ │ └── plumed.dat
│ │ └── muller-brown
│ │ │ ├── biased
│ │ │ ├── explore-x
│ │ │ │ ├── COLVAR
│ │ │ │ ├── md_input
│ │ │ │ ├── md_potential
│ │ │ │ └── plumed.dat
│ │ │ ├── opes-flooding
│ │ │ │ └── combined_ts.dat
│ │ │ ├── opes-x
│ │ │ │ ├── COLVAR
│ │ │ │ ├── md_input
│ │ │ │ ├── md_potential
│ │ │ │ └── plumed.dat
│ │ │ └── opes-y
│ │ │ │ ├── COLVAR
│ │ │ │ ├── md_input
│ │ │ │ ├── md_potential
│ │ │ │ └── plumed.dat
│ │ │ ├── mfep.txt
│ │ │ ├── run-md-plumed.ipynb
│ │ │ └── unbiased
│ │ │ ├── high-temp
│ │ │ ├── COLVAR
│ │ │ ├── md_input
│ │ │ ├── md_potential
│ │ │ └── plumed.dat
│ │ │ ├── state-0
│ │ │ ├── COLVAR
│ │ │ ├── md_input
│ │ │ ├── md_potential
│ │ │ └── plumed.dat
│ │ │ └── state-1
│ │ │ ├── COLVAR
│ │ │ ├── md_input
│ │ │ ├── md_potential
│ │ │ └── plumed.dat
│ │ ├── expl_features_relevances.ipynb
│ │ ├── expl_lasso.ipynb
│ │ ├── images
│ │ ├── OPES_VK.png
│ │ ├── TPI_deepTDA.png
│ │ ├── committor_cv.png
│ │ ├── deepTDAscheme.png
│ │ ├── deeplda.png
│ │ ├── deeptica.png
│ │ ├── graphical_overview_mlcvs.png
│ │ └── lda.png
│ │ ├── intro_1_training.ipynb
│ │ ├── intro_2_data.ipynb
│ │ └── intro_3_loss_optim.ipynb
├── plumed.rst
├── requirements.yaml
├── tutorials.rst
├── tutorials_advanced.rst
├── tutorials_cvs.rst
├── tutorials_explain.rst
└── tutorials_overview.rst
├── mlcolvar
├── .gitignore
├── __init__.py
├── core
│ ├── __init__.py
│ ├── loss
│ │ ├── __init__.py
│ │ ├── autocorrelation.py
│ │ ├── committor_loss.py
│ │ ├── eigvals.py
│ │ ├── elbo.py
│ │ ├── fisher.py
│ │ ├── mse.py
│ │ └── tda_loss.py
│ ├── nn
│ │ ├── __init__.py
│ │ ├── feedforward.py
│ │ └── utils.py
│ ├── stats
│ │ ├── __init__.py
│ │ ├── lda.py
│ │ ├── pca.py
│ │ ├── stats.py
│ │ ├── tica.py
│ │ └── utils.py
│ └── transform
│ │ ├── __init__.py
│ │ ├── descriptors
│ │ ├── __init__.py
│ │ ├── coordination_numbers.py
│ │ ├── eigs_adjacency_matrix.py
│ │ ├── multiple_descriptors.py
│ │ ├── pairwise_distances.py
│ │ ├── torsional_angle.py
│ │ └── utils.py
│ │ ├── tools
│ │ ├── __init__.py
│ │ ├── continuous_hist.py
│ │ ├── normalization.py
│ │ ├── switching_functions.py
│ │ └── utils.py
│ │ ├── transform.py
│ │ └── utils.py
├── cvs
│ ├── __init__.py
│ ├── committor
│ │ ├── __init__.py
│ │ ├── committor.py
│ │ └── utils.py
│ ├── cv.py
│ ├── multitask
│ │ ├── __init__.py
│ │ └── multitask.py
│ ├── supervised
│ │ ├── __init__.py
│ │ ├── deeplda.py
│ │ ├── deeptda.py
│ │ └── regression.py
│ ├── timelagged
│ │ ├── __init__.py
│ │ └── deeptica.py
│ └── unsupervised
│ │ ├── __init__.py
│ │ ├── autoencoder.py
│ │ └── vae.py
├── data
│ ├── __init__.py
│ ├── dataloader.py
│ ├── datamodule.py
│ └── dataset.py
├── explain
│ ├── __init__.py
│ ├── lasso.py
│ └── sensitivity.py
├── py.typed
├── tests
│ ├── data
│ │ ├── mb-mcmc.dat
│ │ ├── state_A.dat
│ │ ├── state_B.dat
│ │ └── state_C.dat
│ ├── test_core_nn_feedforward.py
│ ├── test_core_stats_lda.py
│ ├── test_core_stats_pca.py
│ ├── test_core_stats_tica.py
│ ├── test_core_transform_adjacencymatrix.py
│ ├── test_core_transform_continuoushistogram.py
│ ├── test_core_transform_coordinationnumbers.py
│ ├── test_core_transform_descriptors_utils.py
│ ├── test_core_transform_multipledescriptors.py
│ ├── test_core_transform_normalization.py
│ ├── test_core_transform_pairwisedistances.py
│ ├── test_core_transform_switchingfunctions.py
│ ├── test_core_transform_torsionalangle.py
│ ├── test_core_transform_utils.py
│ ├── test_cvs.py
│ ├── test_cvs_committor.py
│ ├── test_cvs_multitask_multitask.py
│ ├── test_cvs_slowmodes_deeptica.py
│ ├── test_cvs_supervised_deeplda.py
│ ├── test_cvs_supervised_regression.py
│ ├── test_cvs_supervised_tda.py
│ ├── test_cvs_unsupervised_autoencoder.py
│ ├── test_cvs_unsupervised_vae.py
│ ├── test_explain_lasso.py
│ ├── test_explain_sensitivity.py
│ ├── test_utils_data_dataloader.py
│ ├── test_utils_data_datamodule.py
│ ├── test_utils_data_dataset.py
│ ├── test_utils_data_timelagged.py
│ ├── test_utils_fes.py
│ ├── test_utils_io.py
│ ├── test_utils_plot.py
│ └── test_utils_trainer.py
└── utils
│ ├── __init__.py
│ ├── fes.py
│ ├── io.py
│ ├── plot.py
│ ├── timelagged.py
│ └── trainer.py
├── pyproject.toml
├── readthedocs.yml
├── requirements.txt
└── setup.cfg
/.codecov.yml:
--------------------------------------------------------------------------------
1 | # Codecov configuration to make it a bit less noisy
2 | coverage:
3 | status:
4 | patch: false
5 | project:
6 | default:
7 | threshold: 50%
8 | comment:
9 | layout: "header"
10 | require_changes: false
11 | branches: null
12 | behavior: default
13 | flags: null
14 | paths: null
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | mlcolvar/_version.py export-subst
2 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Description
2 | Provide a brief description of the PR's purpose here.
3 |
4 | ## Todos
5 | Notable points that this PR has either accomplished or will accomplish.
6 | - [ ] TODO 1
7 |
8 | ## Questions
9 | - [ ] Question 1
10 |
11 | ## Status
12 | - [ ] Ready to go
--------------------------------------------------------------------------------
/.github/workflows/CI.yaml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | # GitHub has started calling new repo's first branch "main" https://github.com/github/renaming
5 | # The cookiecutter uses the "--initial-branch" flag when it runs git-init
6 | push:
7 | branches:
8 | - "main"
9 | pull_request:
10 | branches:
11 | - "main"
12 | schedule:
13 | # Weekly tests run on main by default:
14 | # Scheduled workflows run on the latest commit on the default or base branch.
15 | # (from https://help.github.com/en/actions/reference/events-that-trigger-workflows#scheduled-events-schedule)
16 | - cron: "0 2 * * 1"
17 |
18 | jobs:
19 | test:
20 | name: Test on ${{ matrix.os }}, Python ${{ matrix.python-version }}
21 | runs-on: ${{ matrix.os }}
22 | strategy:
23 | matrix:
24 | # os: [macOS-latest, ubuntu-latest, windows-latest] # TODO use this when macOS-latest becomes stable again
25 | os: [macOS-13, ubuntu-latest, windows-latest]
26 | python-version: [3.8, 3.9, "3.10"]
27 |
28 | steps:
29 | - uses: actions/checkout@v4
30 |
31 | - name: Additional info about the build
32 | shell: bash
33 | run: |
34 | uname -a
35 | df -h
36 | ulimit -a
37 |
38 | # More info on options: https://github.com/mamba-org/setup-micromamba
39 | - uses: mamba-org/setup-micromamba@v1
40 | with:
41 | micromamba-version: '1.5.10-0'
42 | environment-file: devtools/conda-envs/test_env.yaml
43 | environment-name: test
44 | # channels: conda-forge,defaults
45 | create-args: >-
46 | python=${{ matrix.python-version }}
47 |
48 | - name: Install package
49 | # conda setup requires this special shell
50 | shell: bash -l {0}
51 | run: |
52 | python -m pip install -e . --no-deps
53 | micromamba list
54 |
55 | - name: Run tests
56 | # conda setup requires this special shell
57 | shell: bash -l {0}
58 | run: |
59 | pytest -v --cov=mlcolvar --cov-report=xml --color=yes mlcolvar/tests/
60 |
61 | - name: Run notebook tests
62 | # conda setup requires this special shell
63 | shell: bash -l {0}
64 | if: contains( matrix.os, 'ubuntu' )
65 | run: |
66 | pytest -v --nbmake docs/notebooks/ --ignore=docs/notebooks/tutorials/data/ --cov=mlcolvar --cov-append --cov-report=xml --color=yes
67 |
68 | - name: CodeCov
69 | if: contains( matrix.os, 'ubuntu' )
70 | uses: codecov/codecov-action@v3
71 | with:
72 | token: ${{ secrets.CODECOV_TOKEN }}
73 | file: ./coverage.xml
74 | flags: codecov
75 | name: codecov-${{ matrix.os }}-py${{ matrix.python-version }}
76 |
--------------------------------------------------------------------------------
/.github/workflows/PyPI.yaml:
--------------------------------------------------------------------------------
1 | name: PyPI
2 |
3 | on:
4 | schedule:
5 | # Weekly tests
6 | - cron: "0 2 * * 1"
7 |
8 | jobs:
9 | test:
10 | name: Test on ${{ matrix.os }}, Python ${{ matrix.python-version }}
11 | runs-on: ${{ matrix.os }}
12 | strategy:
13 | matrix:
14 | # os: [macOS-latest, ubuntu-latest, windows-latest] # TODO use this when macOS-latest becomes stable again
15 | # os: [macOS-13, ubuntu-latest, windows-latest] # TODO macOS-13 fails when building scipy with pip
16 | os: [ubuntu-latest, windows-latest]
17 | python-version: [3.8, 3.9, "3.10"]
18 |
19 | steps:
20 | - uses: actions/checkout@v4
21 | with:
22 | fetch-depth: 0
23 |
24 | - name: Checkout latest tag
25 | shell: bash
26 | run: |
27 | TAG=`git describe --tags $(git rev-list --tags --max-count=1)`
28 | echo "Latest tag is: $TAG"
29 | git checkout tags/$TAG
30 |
31 | - name: Additional info about the build
32 | shell: bash
33 | run: |
34 | uname -a
35 | df -h
36 | ulimit -a
37 |
38 | - name: Install package from PyPI
39 | # conda setup requires this special shell
40 | shell: bash -l {0}
41 | run: |
42 | python -m pip install mlcolvar[test]
43 | pip list
44 |
45 | - name: Run tests
46 | # conda setup requires this special shell
47 | shell: bash -l {0}
48 | run: |
49 | pytest -v --cov=mlcolvar --cov-report=xml --color=yes mlcolvar/tests/
50 |
51 | - name: Run notebook tests
52 | # conda setup requires this special shell
53 | shell: bash -l {0}
54 | if: contains( matrix.os, 'ubuntu' )
55 | run: |
56 | pytest -v --nbmake docs/notebooks/ --ignore=docs/notebooks/tutorials/data/ --cov=mlcolvar --cov-append --cov-report=xml --color=yes
57 |
--------------------------------------------------------------------------------
/.github/workflows/codeql.yml:
--------------------------------------------------------------------------------
1 | name: "CodeQL"
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | pull_request:
7 | branches: [ "main" ]
8 | schedule:
9 | - cron: "0 2 * * 1"
10 |
11 | jobs:
12 | analyze:
13 | name: Analyze
14 | runs-on: ubuntu-latest
15 | permissions:
16 | actions: read
17 | contents: read
18 | security-events: write
19 |
20 | strategy:
21 | fail-fast: false
22 | matrix:
23 | language: [ python ]
24 |
25 | steps:
26 | - name: Checkout
27 | uses: actions/checkout@v3
28 |
29 | - name: Initialize CodeQL
30 | uses: github/codeql-action/init@v2
31 | with:
32 | languages: ${{ matrix.language }}
33 | queries: +security-and-quality
34 |
35 | - name: Autobuild
36 | uses: github/codeql-action/autobuild@v2
37 |
38 | - name: Perform CodeQL Analysis
39 | uses: github/codeql-action/analyze@v2
40 | with:
41 | category: "/language:${{ matrix.language }}"
42 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | extra/
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .coverage
43 | .coverage.*
44 | .cache
45 | .pytest_cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | .hypothesis/
50 |
51 | # Translations
52 | *.mo
53 | *.pot
54 |
55 | # Django stuff:
56 | *.log
57 | local_settings.py
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # dotenv
85 | .env
86 |
87 | # virtualenv
88 | .venv
89 | venv/
90 | ENV/
91 |
92 | # Spyder project settings
93 | .spyderproject
94 | .spyproject
95 |
96 | # Rope project settings
97 | .ropeproject
98 |
99 | # mkdocs documentation
100 | /site
101 |
102 | # mypy
103 | .mypy_cache/
104 |
105 | # profraw files from LLVM? Unclear exactly what triggers this
106 | # There are reports this comes from LLVM profiling, but also Xcode 9.
107 | *profraw
108 |
109 | */_version.py
110 |
111 | old_code/
112 | .vscode/
113 | test_notebooks/
114 | lightning_logs/
--------------------------------------------------------------------------------
/.lgtm.yml:
--------------------------------------------------------------------------------
1 | # Configure LGTM for this package
2 |
3 | extraction:
4 | python: # Configure Python
5 | python_setup: # Configure the setup
6 | version: 3 # Specify Version 3
7 | path_classifiers:
8 | library:
9 | - versioneer.py # Set Versioneer.py to an external "library" (3rd party code)
10 | - devtools/*
11 | generated:
12 | - mlcolvar/_version.py
13 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - family-names: "Bonati"
5 | given-names: "Luigi"
6 | orcid: "https://orcid.org/0000-0002-9118-6239"
7 | - family-names: "Trizio"
8 | given-names: "Enrico"
9 | orcid: "https://orcid.org/0000-0003-2042-0232"
10 | - family-names: "Rizzi"
11 | given-names: "Andrea"
12 | orcid: "https://orcid.org/0000-0001-7693-2013"
13 | - family-names: "Parrinello"
14 | given-names: "Michele"
15 | orcid: "https://orcid.org/0000-0001-6550-3272"
16 | title: "mlcolvar"
17 | url: "https://github.com/luigibonati/mlcolvar"
18 | preferred-citation:
19 | type: article
20 | authors:
21 | - family-names: "Bonati"
22 | given-names: "Luigi"
23 | orcid: "https://orcid.org/0000-0002-9118-6239"
24 | - family-names: "Trizio"
25 | given-names: "Enrico"
26 | orcid: "https://orcid.org/0000-0003-2042-0232"
27 | - family-names: "Rizzi"
28 | given-names: "Andrea"
29 | orcid: "https://orcid.org/0000-0001-7693-2013"
30 | - family-names: "Parrinello"
31 | given-names: "Michele"
32 | orcid: "https://orcid.org/0000-0001-6550-3272"
33 | doi: "10.1063/5.0156343"
34 | journal: "Journal of Chemical Physics"
35 | start: 014801
36 | title: "A unified framework for machine learning collective variables for enhanced sampling simulations: mlcolvar"
37 | volume: 159
38 | year: 2023
39 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Luigi Bonati, Enrico Trizio, and Andrea Rizzi
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include MANIFEST.in
3 | include CODE_OF_CONDUCT.md
4 |
5 | graft mlcolvar
6 | global-exclude *.py[cod] __pycache__ *.so
--------------------------------------------------------------------------------
/colab_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This is an environment setup file for the use of mlcolvar (mainly tutorial notebooks, but not only) in Colab
4 |
5 | if test -f "is_colab_set_up.txt"; then
6 | echo This Colab environment is already set up!
7 | else
8 | notebook_type=$1
9 | echo Setting up Colab environment.
10 |
11 | pip install mlcolvar
12 | echo - Installed mlcolvar
13 |
14 | git clone --quiet --depth 1 https://github.com/luigibonati/mlcolvar.git mlcolvar
15 | echo - Cloned mlcolvar from git
16 |
17 | if [ "$notebook_type" == "TUTORIAL" ]; then
18 | cp -r mlcolvar/docs/notebooks/tutorials/data data
19 | echo - Copied tutorials data
20 | elif [ "$notebook_type" == "EXPERIMENT" ]; then
21 | cp -r mlcolvar/docs/notebooks/paper_experiments/input_data input_data
22 | cp -r mlcolvar/docs/notebooks/paper_experiments/results results
23 | cp -r mlcolvar/docs/notebooks/paper_experiments/utils utils
24 | echo - Copied papers_experiments data
25 | elif [ "$notebook_type" == "EXAMPLE" ]; then
26 | echo - No data copied
27 | else
28 | cp -r mlcolvar/docs/notebooks/tutorials/data data
29 | cp -r mlcolvar/docs/notebooks/paper_experiments/input_data input_data
30 | cp -r mlcolvar/docs/notebooks/paper_experiments/results results
31 | cp -r mlcolvar/docs/notebooks/paper_experiments/utils utils
32 | echo - Copied tutorials + papers_experiments data
33 | fi
34 |
35 | rm -r mlcolvar
36 | echo - Removed mlcolvar folder
37 |
38 | echo True > is_colab_set_up.txt
39 | echo The environment is ready, enjoy!
40 | fi
--------------------------------------------------------------------------------
/devtools/conda-envs/test_env.yaml:
--------------------------------------------------------------------------------
1 | name: test
2 | channels:
3 |
4 | - pytorch
5 | - conda-forge
6 |
7 | - defaults
8 | dependencies:
9 | # Base depends
10 | - python
11 | - pip
12 |
13 | # Testing
14 | - pytest
15 | - pytest-cov
16 | - codecov
17 |
18 | # Package dependencies
19 | - numpy<2
20 | - pandas
21 | - pytorch
22 | - pydantic<2
23 | - lightning
24 | - matplotlib
25 | - scikit-learn
26 | - scipy
27 |
28 | # Pip-only installs
29 | - pip:
30 | - KDEpy
31 | - nbmake
32 |
33 |
--------------------------------------------------------------------------------
/devtools/legacy-miniconda-setup/before_install.sh:
--------------------------------------------------------------------------------
1 | # Temporarily change directory to $HOME to install software
2 | pushd .
3 | cd $HOME
4 | # Make sure some level of pip is installed
5 | python -m ensurepip
6 |
7 | # Install Miniconda
8 | if [ "$TRAVIS_OS_NAME" == "osx" ]; then
9 | # Make OSX md5 mimic md5sum from linux, alias does not work
10 | md5sum () {
11 | command md5 -r "$@"
12 | }
13 | MINICONDA=Miniconda3-latest-MacOSX-x86_64.sh
14 | else
15 | MINICONDA=Miniconda3-latest-Linux-x86_64.sh
16 | fi
17 | MINICONDA_HOME=$HOME/miniconda
18 | MINICONDA_MD5=$(wget -qO- https://repo.anaconda.com/miniconda/ | grep -A3 $MINICONDA | sed -n '4p' | sed -n 's/ *<td>\(.*\)<\/td> */\1/p')
19 | wget -q https://repo.anaconda.com/miniconda/$MINICONDA
20 | if [[ $MINICONDA_MD5 != $(md5sum $MINICONDA | cut -d ' ' -f 1) ]]; then
21 | echo "Miniconda MD5 mismatch"
22 | exit 1
23 | fi
24 | bash $MINICONDA -b -p $MINICONDA_HOME
25 |
26 | # Configure miniconda
27 | export PIP_ARGS="-U"
28 | # New to conda >=4.4
29 | echo ". $MINICONDA_HOME/etc/profile.d/conda.sh" >> ~/.bashrc # Source the profile.d file
30 | echo "conda activate" >> ~/.bashrc # Activate conda
31 | source ~/.bashrc # source file to get new commands
32 | #export PATH=$MINICONDA_HOME/bin:$PATH # Old way, should not be needed anymore
33 |
34 | conda config --add channels conda-forge
35 |
36 | conda config --set always_yes yes
37 | conda install conda conda-build jinja2 anaconda-client
38 | conda update --quiet --all
39 |
40 | # Restore original directory
41 | popd
42 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = mlcolvar
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Compiling mlcolvar's Documentation
2 |
3 | The docs for this project are built with [Sphinx](http://www.sphinx-doc.org/en/master/).
4 | To compile the docs, first ensure that Sphinx and the ReadTheDocs theme are installed.
5 |
6 |
7 | ```bash
8 | conda install sphinx sphinx_rtd_theme
9 | pip install sphinx-copybutton furo nbsphinx
10 | ```
11 |
12 | Once installed, you can use the `Makefile` in this directory to compile static HTML pages by
13 | ```bash
14 | make html
15 | ```
16 |
17 | The compiled docs will be in the `_build` directory and can be viewed by opening `index.html` (which may itself
18 | be inside a directory called `html/` depending on what version of Sphinx is installed).
19 |
20 |
21 | A configuration file for [Read The Docs](https://readthedocs.org/) (readthedocs.yaml) is included in the top level of the repository. To use Read the Docs to host your documentation, go to https://readthedocs.org/ and connect this repository. You may need to change your default branch to `main` under Advanced Settings for the project.
22 |
23 | If you would like to use Read The Docs with `autodoc` (included automatically) and your package has dependencies, you will need to include those dependencies in your documentation yaml file (`docs/requirements.yaml`).
24 |
25 |
--------------------------------------------------------------------------------
/docs/_static/README.md:
--------------------------------------------------------------------------------
1 | # Static Doc Directory
2 |
3 | Add any paths that contain custom static files (such as style sheets) here,
4 | relative to the `conf.py` file's directory.
5 | They are copied after the builtin static files,
6 | so a file named "default.css" will overwrite the builtin "default.css".
7 |
8 | The path to this folder is set in the Sphinx `conf.py` file in the line:
9 | ```python
10 | html_static_path = ['_static']
11 | ```
12 |
13 | ## Examples of file to add to this directory
14 | * Custom Cascading Style Sheets
15 | * Custom JavaScript code
16 | * Static logo images
17 |
--------------------------------------------------------------------------------
/docs/_templates/README.md:
--------------------------------------------------------------------------------
1 | # Templates Doc Directory
2 |
3 | Add any paths that contain templates here, relative to
4 | the `conf.py` file's directory.
5 | They are copied after the builtin template files,
6 | so a file named "page.html" will overwrite the builtin "page.html".
7 |
8 | The path to this folder is set in the Sphinx `conf.py` file in the line:
9 | ```python
10 | templates_path = ['_templates']
11 | ```
12 |
13 | ## Examples of file to add to this directory
14 | * HTML extensions of stock pages like `page.html` or `layout.html`
15 |
--------------------------------------------------------------------------------
/docs/_templates/custom-class-template.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline}}
2 |
3 | .. currentmodule:: {{ module }}
4 |
5 | .. autoclass:: {{ objname }}
6 | :members:
7 | :show-inheritance:
8 | :inherited-members: Module,LightningModule
9 |
10 | {% block methods %}
11 | .. automethod:: __init__
12 |
13 | {% if methods %}
14 | .. rubric:: {{ _('Methods') }}
15 |
16 | .. autosummary::
17 | {% for item in methods %}
18 | {%- if item not in inherited_members %}
19 | ~{{ name }}.{{ item }}
20 | {%- endif %}
21 | {%- endfor %}
22 | {% endif %}
23 | {% endblock %}
24 |
25 |
26 | ..
27 | {% block attributes %}
28 | {% if attributes %}
29 | .. rubric:: {{ _('Attributes') }}
30 |
31 | .. autosummary::
32 | {% for item in attributes %}
33 | {%- if item not in inherited_attributes %}
34 | ~{{ name }}.{{ item }}
35 | {%- endif %}
36 | {%- endfor %}
37 | {% endif %}
38 | {% endblock %}
39 |
40 |
41 |
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | Documentation
2 | =============
3 |
4 | .. rubric:: Collective variables
5 |
6 | The ``cvs`` module contains ready-to-use CV classes, grouped by the type of data used for their optimization in the following sub-packages: unsupervised, supervised and time-lagged.
7 |
8 | .. toctree::
9 | :maxdepth: 1
10 | :caption: API:
11 |
12 | api_cvs
13 |
14 | .. rubric:: Core modules
15 |
16 | This module contains the building blocks that are used for the construction of the CV classes. We organized them into the following submodules: ``nn``, ``loss``, ``stats`` and ``transform``.
17 |
18 | .. toctree::
19 | :maxdepth: 1
20 | :caption: API:
21 |
22 | api_core
23 |
24 | .. rubric:: Datasets
25 |
26 | Here we provide PyTorch- and Lightning-compatible classes that simplify and improve the efficiency of data access.
27 |
28 | .. toctree::
29 | :maxdepth: 1
30 | :caption: API:
31 |
32 | api_data
33 |
34 | .. rubric:: Utils
35 |
36 | Miscellaneous tools and helper functions:
37 |
38 | .. toctree::
39 | :maxdepth: 1
40 | :caption: API:
41 |
42 | api_utils
43 |
44 | Tools for inspecting & interpreting the models.
45 |
46 | .. toctree::
47 | :maxdepth: 1
48 | :caption: API:
49 |
50 | api_explain
51 |
--------------------------------------------------------------------------------
/docs/api_core.rst:
--------------------------------------------------------------------------------
1 | Core modules
2 | ------------
3 |
4 | These are the building blocks which are used to construct the CVs.
5 |
6 | .. rubric:: NN
7 |
8 | .. currentmodule:: mlcolvar.core.nn
9 |
10 | .. autosummary::
11 | :toctree: autosummary
12 | :template: custom-class-template.rst
13 |
14 | FeedForward
15 |
16 | .. rubric:: Loss
17 |
18 | .. currentmodule:: mlcolvar.core.loss
19 |
20 | .. autosummary::
21 | :toctree: autosummary
22 | :template: custom-class-template.rst
23 |
24 | MSELoss
25 | ELBOGaussiansLoss
26 | FisherDiscriminantLoss
27 | AutocorrelationLoss
28 | ReduceEigenvaluesLoss
29 | TDALoss
30 |
31 | .. rubric:: Stats
32 |
33 | .. currentmodule:: mlcolvar.core.stats
34 |
35 | .. autosummary::
36 | :toctree: autosummary
37 | :template: custom-class-template.rst
38 |
39 | Stats
40 | PCA
41 | LDA
42 | TICA
43 |
44 | .. rubric:: Transform
45 |
46 | .. currentmodule:: mlcolvar.core.transform
47 |
48 | .. autosummary::
49 | :toctree: autosummary
50 | :template: custom-class-template.rst
51 |
52 | Transform
53 |
54 |
55 | .. rubric:: Transform.descriptors
56 |
57 | .. currentmodule:: mlcolvar.core.transform.descriptors
58 |
59 | .. autosummary::
60 | :toctree: autosummary
61 | :template: custom-class-template.rst
62 |
63 | PairwiseDistances
64 | TorsionalAngle
65 | CoordinationNumbers
66 | EigsAdjMat
67 | MultipleDescriptors
68 |
69 | .. rubric:: Transform.tools
70 |
71 | .. currentmodule:: mlcolvar.core.transform.tools
72 |
73 | .. autosummary::
74 | :toctree: autosummary
75 | :template: custom-class-template.rst
76 |
77 | Normalization
78 | ContinuousHistogram
79 | SwitchingFunctions
--------------------------------------------------------------------------------
/docs/api_cvs.rst:
--------------------------------------------------------------------------------
1 | Collective variables
2 | --------------------
3 |
4 | In this section we report the neural network-based collective variables implemented in the library. Note that the linear statistical methods are implemented in ``mlcolvar.core.stats`` instead.
5 |
6 | .. rubric:: Base class
7 |
8 | All CVs inherit from this base class, which also implements default methods.
9 |
10 | .. currentmodule:: mlcolvar.cvs
11 |
12 | .. autosummary::
13 | :toctree: autosummary
14 | :template: custom-class-template.rst
15 |
16 | BaseCV
17 |
18 | For each specific CV described below, the keys of the expected dataset and the loss function used are reported.
19 |
20 | .. rubric:: Unsupervised learning
21 |
22 | CVs based on the autoencoder architecture. Can be used to reconstruct the original input or an arbitrary reference, with an optional reweighting of the data.
23 |
24 | .. currentmodule:: mlcolvar.cvs
25 |
26 | .. autosummary::
27 | :toctree: autosummary
28 | :template: custom-class-template.rst
29 |
30 | AutoEncoderCV
31 | VariationalAutoEncoderCV
32 |
33 | .. rubric:: Supervised learning
34 |
35 | CVs optimized with supervised learning tasks, either classification or regression.
36 |
37 | .. currentmodule:: mlcolvar.cvs
38 |
39 | .. autosummary::
40 | :toctree: autosummary
41 | :template: custom-class-template.rst
42 |
43 | DeepLDA
44 | DeepTDA
45 | RegressionCV
46 |
47 | .. rubric:: Time-informed learning
48 |
49 | CVs which are optimized on pairs of time-lagged configurations, with optional reweighting of the time-correlation functions.
50 | Note that the autoencoder-related CVs can also fall into this category when the target reference is the time-lagged data.
51 |
52 | .. currentmodule:: mlcolvar.cvs
53 |
54 | .. autosummary::
55 | :toctree: autosummary
56 | :template: custom-class-template.rst
57 |
58 | DeepTICA
59 |
60 | .. rubric:: MultiTask learning
61 |
62 | General framework which allows optimizing a single model with different loss functions, each evaluated on a different dataset.
63 |
64 | .. currentmodule:: mlcolvar.cvs
65 |
66 | .. autosummary::
67 | :toctree: autosummary
68 | :template: custom-class-template.rst
69 |
70 | MultiTaskCV
71 |
72 | Framework for the numerical determination of the committor function based on its variational principle.
73 |
74 | .. rubric:: Committor
75 |
76 | .. currentmodule:: mlcolvar.cvs
77 |
78 | .. autosummary::
79 | :toctree: autosummary
80 | :template: custom-class-template.rst
81 |
82 | Committor
--------------------------------------------------------------------------------
/docs/api_data.rst:
--------------------------------------------------------------------------------
1 | Data
2 | ----
3 |
4 | .. currentmodule:: mlcolvar.data
5 |
6 | This module contains the classes used for handling datasets and for feeding them to the Lightning trainer.
7 |
8 | .. autosummary::
9 | :toctree: autosummary
10 | :template: custom-class-template.rst
11 |
12 | DictDataset
13 | DictLoader
14 | DictModule
--------------------------------------------------------------------------------
/docs/api_explain.rst:
--------------------------------------------------------------------------------
1 | Explain
2 | -------
3 |
4 | .. rubric:: Sensitivity analysis
5 |
6 | Perform sensitivity analysis to identify feature relevances
7 |
8 | .. currentmodule:: mlcolvar.explain.sensitivity
9 |
10 | .. autosummary::
11 | :toctree: autosummary
12 | :template: custom-class-template.rst
13 |
14 | sensitivity_analysis
15 | plot_sensitivity
16 |
17 | .. rubric:: Sparse linear model
18 |
19 | Use sparse models to approximate classification/regression tasks
20 |
21 | .. currentmodule:: mlcolvar.explain.lasso
22 |
23 | .. autosummary::
24 | :toctree: autosummary
25 | :template: custom-class-template.rst
26 |
27 | lasso_classification
28 | lasso_regression
29 | plot_lasso_classification
30 | plot_lasso_regression
31 |
--------------------------------------------------------------------------------
/docs/api_utils.rst:
--------------------------------------------------------------------------------
1 | Utils
2 | -----
3 |
4 | .. rubric:: Input/Output
5 |
6 | Helper functions for loading dataframes (incl. PLUMED files) and directly creating datasets from them.
7 |
8 | .. currentmodule:: mlcolvar.utils.io
9 |
10 | .. autosummary::
11 | :toctree: autosummary
12 | :template: custom-class-template.rst
13 |
14 | load_dataframe
15 | create_dataset_from_files
16 |
17 | .. rubric:: Time-lagged datasets
18 |
19 | Create a dataset of pairs of time-lagged configurations.
20 |
21 | .. currentmodule:: mlcolvar.utils.timelagged
22 |
23 | .. autosummary::
24 | :toctree: autosummary
25 | :template: custom-class-template.rst
26 |
27 | create_timelagged_dataset
28 |
29 | .. rubric:: FES
30 |
31 | .. rubric:: Trainer
32 |
33 | Functions used in conjunction with the Lightning Trainer (e.g. logging, metrics...).
34 |
35 | .. currentmodule:: mlcolvar.utils.trainer
36 |
37 | .. autosummary::
38 | :toctree: autosummary
39 | :template: custom-class-template.rst
40 |
41 | MetricsCallback
42 |
43 | Compute (and plot) the free energy surface along the CVs.
44 |
45 | .. currentmodule:: mlcolvar.utils.fes
46 |
47 | .. autosummary::
48 | :toctree: autosummary
49 | :template: custom-class-template.rst
50 |
51 | compute_fes
52 |
53 | Plotting utils
54 |
55 | .. currentmodule:: mlcolvar.utils.plot
56 |
57 | .. autosummary::
58 | :toctree: autosummary
59 | :template: custom-class-template.rst
60 |
61 | plot_metrics
62 | plot_features_distribution
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.loss.AutocorrelationLoss.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.loss.AutocorrelationLoss
2 | ======================================
3 |
4 | .. currentmodule:: mlcolvar.core.loss
5 |
6 | .. autoclass:: AutocorrelationLoss
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~AutocorrelationLoss.__init__
20 | ~AutocorrelationLoss.forward
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~AutocorrelationLoss.T_destination
33 | ~AutocorrelationLoss.call_super_init
34 | ~AutocorrelationLoss.dump_patches
35 | ~AutocorrelationLoss.training
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.loss.ELBOGaussiansLoss.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.loss.ELBOGaussiansLoss
2 | ====================================
3 |
4 | .. currentmodule:: mlcolvar.core.loss
5 |
6 | .. autoclass:: ELBOGaussiansLoss
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~ELBOGaussiansLoss.forward
20 |
21 |
22 |
23 |
24 | ..
25 |
26 |
27 | .. rubric:: Attributes
28 |
29 | .. autosummary::
30 |
31 | ~ELBOGaussiansLoss.T_destination
32 | ~ELBOGaussiansLoss.call_super_init
33 | ~ELBOGaussiansLoss.dump_patches
34 | ~ELBOGaussiansLoss.training
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.loss.FisherDiscriminantLoss.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.loss.FisherDiscriminantLoss
2 | =========================================
3 |
4 | .. currentmodule:: mlcolvar.core.loss
5 |
6 | .. autoclass:: FisherDiscriminantLoss
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~FisherDiscriminantLoss.__init__
20 | ~FisherDiscriminantLoss.forward
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~FisherDiscriminantLoss.T_destination
33 | ~FisherDiscriminantLoss.call_super_init
34 | ~FisherDiscriminantLoss.dump_patches
35 | ~FisherDiscriminantLoss.training
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.loss.MSELoss.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.loss.MSELoss
2 | ==========================
3 |
4 | .. currentmodule:: mlcolvar.core.loss
5 |
6 | .. autoclass:: MSELoss
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~MSELoss.forward
20 |
21 |
22 |
23 |
24 | ..
25 |
26 |
27 | .. rubric:: Attributes
28 |
29 | .. autosummary::
30 |
31 | ~MSELoss.T_destination
32 | ~MSELoss.call_super_init
33 | ~MSELoss.dump_patches
34 | ~MSELoss.training
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.loss.ReduceEigenvaluesLoss.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.loss.ReduceEigenvaluesLoss
2 | ========================================
3 |
4 | .. currentmodule:: mlcolvar.core.loss
5 |
6 | .. autoclass:: ReduceEigenvaluesLoss
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~ReduceEigenvaluesLoss.__init__
20 | ~ReduceEigenvaluesLoss.forward
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~ReduceEigenvaluesLoss.T_destination
33 | ~ReduceEigenvaluesLoss.call_super_init
34 | ~ReduceEigenvaluesLoss.dump_patches
35 | ~ReduceEigenvaluesLoss.training
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.loss.TDALoss.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.loss.TDALoss
2 | ==========================
3 |
4 | .. currentmodule:: mlcolvar.core.loss
5 |
6 | .. autoclass:: TDALoss
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~TDALoss.__init__
20 | ~TDALoss.forward
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~TDALoss.T_destination
33 | ~TDALoss.call_super_init
34 | ~TDALoss.dump_patches
35 | ~TDALoss.training
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.nn.FeedForward.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.nn.FeedForward
2 | ============================
3 |
4 | .. currentmodule:: mlcolvar.core.nn
5 |
6 | .. autoclass:: FeedForward
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~FeedForward.__init__
20 | ~FeedForward.forward
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~FeedForward.CHECKPOINT_HYPER_PARAMS_KEY
33 | ~FeedForward.CHECKPOINT_HYPER_PARAMS_NAME
34 | ~FeedForward.CHECKPOINT_HYPER_PARAMS_TYPE
35 | ~FeedForward.T_destination
36 | ~FeedForward.automatic_optimization
37 | ~FeedForward.call_super_init
38 | ~FeedForward.current_epoch
39 | ~FeedForward.device
40 | ~FeedForward.dtype
41 | ~FeedForward.dump_patches
42 | ~FeedForward.example_input_array
43 | ~FeedForward.fabric
44 | ~FeedForward.global_rank
45 | ~FeedForward.global_step
46 | ~FeedForward.hparams
47 | ~FeedForward.hparams_initial
48 | ~FeedForward.local_rank
49 | ~FeedForward.logger
50 | ~FeedForward.loggers
51 | ~FeedForward.on_gpu
52 | ~FeedForward.trainer
53 | ~FeedForward.training
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.stats.LDA.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.stats.LDA
2 | =======================
3 |
4 | .. currentmodule:: mlcolvar.core.stats
5 |
6 | .. autoclass:: LDA
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~LDA.__init__
20 | ~LDA.compute
21 | ~LDA.compute_scatter_matrices
22 | ~LDA.extra_repr
23 | ~LDA.forward
24 |
25 |
26 |
27 |
28 | ..
29 |
30 |
31 | .. rubric:: Attributes
32 |
33 | .. autosummary::
34 |
35 | ~LDA.T_destination
36 | ~LDA.call_super_init
37 | ~LDA.dump_patches
38 | ~LDA.training
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.stats.PCA.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.stats.PCA
2 | =======================
3 |
4 | .. currentmodule:: mlcolvar.core.stats
5 |
6 | .. autoclass:: PCA
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~PCA.__init__
20 | ~PCA.compute
21 | ~PCA.extra_repr
22 | ~PCA.forward
23 |
24 |
25 |
26 |
27 | ..
28 |
29 |
30 | .. rubric:: Attributes
31 |
32 | .. autosummary::
33 |
34 | ~PCA.T_destination
35 | ~PCA.call_super_init
36 | ~PCA.cumulative_explained_variance
37 | ~PCA.dump_patches
38 | ~PCA.explained_variance
39 | ~PCA.training
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.stats.Stats.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.stats.Stats
2 | =========================
3 |
4 | .. currentmodule:: mlcolvar.core.stats
5 |
6 | .. autoclass:: Stats
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~Stats.compute
20 | ~Stats.forward
21 | ~Stats.teardown
22 |
23 |
24 |
25 |
26 | ..
27 |
28 |
29 | .. rubric:: Attributes
30 |
31 | .. autosummary::
32 |
33 | ~Stats.T_destination
34 | ~Stats.call_super_init
35 | ~Stats.dump_patches
36 | ~Stats.training
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.stats.TICA.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.stats.TICA
2 | ========================
3 |
4 | .. currentmodule:: mlcolvar.core.stats
5 |
6 | .. autoclass:: TICA
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~TICA.__init__
20 | ~TICA.compute
21 | ~TICA.extra_repr
22 | ~TICA.forward
23 | ~TICA.timescales
24 |
25 |
26 |
27 |
28 | ..
29 |
30 |
31 | .. rubric:: Attributes
32 |
33 | .. autosummary::
34 |
35 | ~TICA.T_destination
36 | ~TICA.call_super_init
37 | ~TICA.dump_patches
38 | ~TICA.training
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.Normalization.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.Normalization
2 | =====================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform
5 |
6 | .. autoclass:: Normalization
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~Normalization.__init__
20 | ~Normalization.extra_repr
21 | ~Normalization.forward
22 | ~Normalization.inverse
23 | ~Normalization.set_custom
24 | ~Normalization.set_from_stats
25 | ~Normalization.setup_from_datamodule
26 |
27 |
28 |
29 |
30 | ..
31 |
32 |
33 | .. rubric:: Attributes
34 |
35 | .. autosummary::
36 |
37 | ~Normalization.T_destination
38 | ~Normalization.call_super_init
39 | ~Normalization.dump_patches
40 | ~Normalization.training
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.Transform.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.Transform
2 | =================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform
5 |
6 | .. autoclass:: Transform
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~Transform.__init__
20 | ~Transform.forward
21 | ~Transform.setup_from_datamodule
22 | ~Transform.teardown
23 |
24 |
25 |
26 |
27 | ..
28 |
29 |
30 | .. rubric:: Attributes
31 |
32 | .. autosummary::
33 |
34 | ~Transform.T_destination
35 | ~Transform.call_super_init
36 | ~Transform.dump_patches
37 | ~Transform.training
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.descriptors.CoordinationNumbers.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.descriptors.CoordinationNumbers
2 | =======================================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform.descriptors
5 |
6 | .. autoclass:: CoordinationNumbers
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~CoordinationNumbers.__init__
20 | ~CoordinationNumbers.compute_coordination_number
21 | ~CoordinationNumbers.forward
22 |
23 |
24 |
25 |
26 | ..
27 |
28 |
29 | .. rubric:: Attributes
30 |
31 | .. autosummary::
32 |
33 | ~CoordinationNumbers.T_destination
34 | ~CoordinationNumbers.call_super_init
35 | ~CoordinationNumbers.dump_patches
36 | ~CoordinationNumbers.training
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.descriptors.EigsAdjMat.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.descriptors.EigsAdjMat
2 | ==============================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform.descriptors
5 |
6 | .. autoclass:: EigsAdjMat
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~EigsAdjMat.__init__
20 | ~EigsAdjMat.compute_adjacency_matrix
21 | ~EigsAdjMat.forward
22 | ~EigsAdjMat.get_eigenvalues
23 |
24 |
25 |
26 |
27 | ..
28 |
29 |
30 | .. rubric:: Attributes
31 |
32 | .. autosummary::
33 |
34 | ~EigsAdjMat.T_destination
35 | ~EigsAdjMat.call_super_init
36 | ~EigsAdjMat.dump_patches
37 | ~EigsAdjMat.training
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.descriptors.MultipleDescriptors.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.descriptors.MultipleDescriptors
2 | =======================================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform.descriptors
5 |
6 | .. autoclass:: MultipleDescriptors
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~MultipleDescriptors.__init__
20 | ~MultipleDescriptors.forward
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~MultipleDescriptors.T_destination
33 | ~MultipleDescriptors.call_super_init
34 | ~MultipleDescriptors.dump_patches
35 | ~MultipleDescriptors.training
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.descriptors.PairwiseDistances.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.descriptors.PairwiseDistances
2 | =====================================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform.descriptors
5 |
6 | .. autoclass:: PairwiseDistances
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~PairwiseDistances.__init__
20 | ~PairwiseDistances.compute_pairwise_distances
21 | ~PairwiseDistances.forward
22 |
23 |
24 |
25 |
26 | ..
27 |
28 |
29 | .. rubric:: Attributes
30 |
31 | .. autosummary::
32 |
33 | ~PairwiseDistances.T_destination
34 | ~PairwiseDistances.call_super_init
35 | ~PairwiseDistances.dump_patches
36 | ~PairwiseDistances.training
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.descriptors.TorsionalAngle.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.descriptors.TorsionalAngle
2 | ==================================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform.descriptors
5 |
6 | .. autoclass:: TorsionalAngle
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~TorsionalAngle.__init__
20 | ~TorsionalAngle.compute_torsional_angle
21 | ~TorsionalAngle.forward
22 |
23 |
24 |
25 |
26 | ..
27 |
28 |
29 | .. rubric:: Attributes
30 |
31 | .. autosummary::
32 |
33 | ~TorsionalAngle.MODES
34 | ~TorsionalAngle.T_destination
35 | ~TorsionalAngle.call_super_init
36 | ~TorsionalAngle.dump_patches
37 | ~TorsionalAngle.training
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.tools.ContinuousHistogram.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.tools.ContinuousHistogram
2 | =================================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform.tools
5 |
6 | .. autoclass:: ContinuousHistogram
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~ContinuousHistogram.__init__
20 | ~ContinuousHistogram.compute_hist
21 | ~ContinuousHistogram.forward
22 |
23 |
24 |
25 |
26 | ..
27 |
28 |
29 | .. rubric:: Attributes
30 |
31 | .. autosummary::
32 |
33 | ~ContinuousHistogram.T_destination
34 | ~ContinuousHistogram.call_super_init
35 | ~ContinuousHistogram.dump_patches
36 | ~ContinuousHistogram.training
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.tools.Normalization.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.tools.Normalization
2 | ===========================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform.tools
5 |
6 | .. autoclass:: Normalization
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~Normalization.__init__
20 | ~Normalization.extra_repr
21 | ~Normalization.forward
22 | ~Normalization.inverse
23 | ~Normalization.set_custom
24 | ~Normalization.set_from_stats
25 | ~Normalization.setup_from_datamodule
26 |
27 |
28 |
29 |
30 | ..
31 |
32 |
33 | .. rubric:: Attributes
34 |
35 | .. autosummary::
36 |
37 | ~Normalization.T_destination
38 | ~Normalization.call_super_init
39 | ~Normalization.dump_patches
40 | ~Normalization.training
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.core.transform.tools.SwitchingFunctions.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.core.transform.tools.SwitchingFunctions
2 | ================================================
3 |
4 | .. currentmodule:: mlcolvar.core.transform.tools
5 |
6 | .. autoclass:: SwitchingFunctions
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~SwitchingFunctions.Fermi_switch
20 | ~SwitchingFunctions.Rational_switch
21 | ~SwitchingFunctions.__init__
22 | ~SwitchingFunctions.forward
23 |
24 |
25 |
26 |
27 | ..
28 |
29 |
30 | .. rubric:: Attributes
31 |
32 | .. autosummary::
33 |
34 | ~SwitchingFunctions.SWITCH_FUNCS
35 | ~SwitchingFunctions.T_destination
36 | ~SwitchingFunctions.call_super_init
37 | ~SwitchingFunctions.dump_patches
38 | ~SwitchingFunctions.training
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.AutoEncoderCV.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.AutoEncoderCV
2 | ==========================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: AutoEncoderCV
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~AutoEncoderCV.__init__
20 | ~AutoEncoderCV.encode_decode
21 | ~AutoEncoderCV.forward_cv
22 | ~AutoEncoderCV.get_decoder
23 | ~AutoEncoderCV.training_step
24 |
25 |
26 |
27 |
28 | ..
29 |
30 |
31 | .. rubric:: Attributes
32 |
33 | .. autosummary::
34 |
35 | ~AutoEncoderCV.BLOCKS
36 | ~AutoEncoderCV.CHECKPOINT_HYPER_PARAMS_KEY
37 | ~AutoEncoderCV.CHECKPOINT_HYPER_PARAMS_NAME
38 | ~AutoEncoderCV.CHECKPOINT_HYPER_PARAMS_TYPE
39 | ~AutoEncoderCV.T_destination
40 | ~AutoEncoderCV.automatic_optimization
41 | ~AutoEncoderCV.call_super_init
42 | ~AutoEncoderCV.current_epoch
43 | ~AutoEncoderCV.device
44 | ~AutoEncoderCV.dtype
45 | ~AutoEncoderCV.dump_patches
46 | ~AutoEncoderCV.example_input_array
47 | ~AutoEncoderCV.fabric
48 | ~AutoEncoderCV.global_rank
49 | ~AutoEncoderCV.global_step
50 | ~AutoEncoderCV.hparams
51 | ~AutoEncoderCV.hparams_initial
52 | ~AutoEncoderCV.local_rank
53 | ~AutoEncoderCV.logger
54 | ~AutoEncoderCV.loggers
55 | ~AutoEncoderCV.n_cvs
56 | ~AutoEncoderCV.on_gpu
57 | ~AutoEncoderCV.optimizer_name
58 | ~AutoEncoderCV.trainer
59 | ~AutoEncoderCV.training
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.BaseCV.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.BaseCV
2 | ===================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: BaseCV
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~BaseCV.__init__
20 | ~BaseCV.configure_optimizers
21 | ~BaseCV.forward
22 | ~BaseCV.forward_cv
23 | ~BaseCV.initialize_blocks
24 | ~BaseCV.initialize_transforms
25 | ~BaseCV.parse_options
26 | ~BaseCV.setup
27 | ~BaseCV.test_step
28 | ~BaseCV.validation_step
29 |
30 |
31 |
32 |
33 | ..
34 |
35 |
36 | .. rubric:: Attributes
37 |
38 | .. autosummary::
39 |
40 | ~BaseCV.example_input_array
41 | ~BaseCV.n_cvs
42 | ~BaseCV.optimizer_name
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.Committor.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.Committor
2 | ======================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: Committor
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~Committor.__init__
20 | ~Committor.training_step
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~Committor.BLOCKS
33 | ~Committor.CHECKPOINT_HYPER_PARAMS_KEY
34 | ~Committor.CHECKPOINT_HYPER_PARAMS_NAME
35 | ~Committor.CHECKPOINT_HYPER_PARAMS_TYPE
36 | ~Committor.T_destination
37 | ~Committor.automatic_optimization
38 | ~Committor.call_super_init
39 | ~Committor.current_epoch
40 | ~Committor.device
41 | ~Committor.dtype
42 | ~Committor.dump_patches
43 | ~Committor.example_input_array
44 | ~Committor.fabric
45 | ~Committor.global_rank
46 | ~Committor.global_step
47 | ~Committor.hparams
48 | ~Committor.hparams_initial
49 | ~Committor.local_rank
50 | ~Committor.logger
51 | ~Committor.loggers
52 | ~Committor.n_cvs
53 | ~Committor.on_gpu
54 | ~Committor.optimizer_name
55 | ~Committor.trainer
56 | ~Committor.training
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.DeepLDA.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.DeepLDA
2 | ====================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: DeepLDA
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~DeepLDA.__init__
20 | ~DeepLDA.forward_nn
21 | ~DeepLDA.regularization_lorentzian
22 | ~DeepLDA.set_regularization
23 | ~DeepLDA.training_step
24 |
25 |
26 |
27 |
28 | ..
29 |
30 |
31 | .. rubric:: Attributes
32 |
33 | .. autosummary::
34 |
35 | ~DeepLDA.BLOCKS
36 | ~DeepLDA.CHECKPOINT_HYPER_PARAMS_KEY
37 | ~DeepLDA.CHECKPOINT_HYPER_PARAMS_NAME
38 | ~DeepLDA.CHECKPOINT_HYPER_PARAMS_TYPE
39 | ~DeepLDA.T_destination
40 | ~DeepLDA.automatic_optimization
41 | ~DeepLDA.call_super_init
42 | ~DeepLDA.current_epoch
43 | ~DeepLDA.device
44 | ~DeepLDA.dtype
45 | ~DeepLDA.dump_patches
46 | ~DeepLDA.example_input_array
47 | ~DeepLDA.fabric
48 | ~DeepLDA.global_rank
49 | ~DeepLDA.global_step
50 | ~DeepLDA.hparams
51 | ~DeepLDA.hparams_initial
52 | ~DeepLDA.local_rank
53 | ~DeepLDA.logger
54 | ~DeepLDA.loggers
55 | ~DeepLDA.n_cvs
56 | ~DeepLDA.on_gpu
57 | ~DeepLDA.optimizer_name
58 | ~DeepLDA.trainer
59 | ~DeepLDA.training
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.DeepTDA.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.DeepTDA
2 | ====================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: DeepTDA
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~DeepTDA.__init__
20 | ~DeepTDA.training_step
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~DeepTDA.BLOCKS
33 | ~DeepTDA.CHECKPOINT_HYPER_PARAMS_KEY
34 | ~DeepTDA.CHECKPOINT_HYPER_PARAMS_NAME
35 | ~DeepTDA.CHECKPOINT_HYPER_PARAMS_TYPE
36 | ~DeepTDA.T_destination
37 | ~DeepTDA.automatic_optimization
38 | ~DeepTDA.call_super_init
39 | ~DeepTDA.current_epoch
40 | ~DeepTDA.device
41 | ~DeepTDA.dtype
42 | ~DeepTDA.dump_patches
43 | ~DeepTDA.example_input_array
44 | ~DeepTDA.fabric
45 | ~DeepTDA.global_rank
46 | ~DeepTDA.global_step
47 | ~DeepTDA.hparams
48 | ~DeepTDA.hparams_initial
49 | ~DeepTDA.local_rank
50 | ~DeepTDA.logger
51 | ~DeepTDA.loggers
52 | ~DeepTDA.n_cvs
53 | ~DeepTDA.on_gpu
54 | ~DeepTDA.optimizer_name
55 | ~DeepTDA.trainer
56 | ~DeepTDA.training
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.DeepTICA.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.DeepTICA
2 | =====================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: DeepTICA
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~DeepTICA.__init__
20 | ~DeepTICA.forward_nn
21 | ~DeepTICA.set_regularization
22 | ~DeepTICA.training_step
23 |
24 |
25 |
26 |
27 | ..
28 |
29 |
30 | .. rubric:: Attributes
31 |
32 | .. autosummary::
33 |
34 | ~DeepTICA.BLOCKS
35 | ~DeepTICA.CHECKPOINT_HYPER_PARAMS_KEY
36 | ~DeepTICA.CHECKPOINT_HYPER_PARAMS_NAME
37 | ~DeepTICA.CHECKPOINT_HYPER_PARAMS_TYPE
38 | ~DeepTICA.T_destination
39 | ~DeepTICA.automatic_optimization
40 | ~DeepTICA.call_super_init
41 | ~DeepTICA.current_epoch
42 | ~DeepTICA.device
43 | ~DeepTICA.dtype
44 | ~DeepTICA.dump_patches
45 | ~DeepTICA.example_input_array
46 | ~DeepTICA.fabric
47 | ~DeepTICA.global_rank
48 | ~DeepTICA.global_step
49 | ~DeepTICA.hparams
50 | ~DeepTICA.hparams_initial
51 | ~DeepTICA.local_rank
52 | ~DeepTICA.logger
53 | ~DeepTICA.loggers
54 | ~DeepTICA.n_cvs
55 | ~DeepTICA.on_gpu
56 | ~DeepTICA.optimizer_name
57 | ~DeepTICA.trainer
58 | ~DeepTICA.training
59 |
60 |
61 |
62 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.MultiTaskCV.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.MultiTaskCV
2 | ========================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: MultiTaskCV
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~MultiTaskCV.__init__
20 | ~MultiTaskCV.training_step
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.RegressionCV.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.RegressionCV
2 | =========================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: RegressionCV
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~RegressionCV.__init__
20 | ~RegressionCV.training_step
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~RegressionCV.BLOCKS
33 | ~RegressionCV.CHECKPOINT_HYPER_PARAMS_KEY
34 | ~RegressionCV.CHECKPOINT_HYPER_PARAMS_NAME
35 | ~RegressionCV.CHECKPOINT_HYPER_PARAMS_TYPE
36 | ~RegressionCV.T_destination
37 | ~RegressionCV.automatic_optimization
38 | ~RegressionCV.call_super_init
39 | ~RegressionCV.current_epoch
40 | ~RegressionCV.device
41 | ~RegressionCV.dtype
42 | ~RegressionCV.dump_patches
43 | ~RegressionCV.example_input_array
44 | ~RegressionCV.fabric
45 | ~RegressionCV.global_rank
46 | ~RegressionCV.global_step
47 | ~RegressionCV.hparams
48 | ~RegressionCV.hparams_initial
49 | ~RegressionCV.local_rank
50 | ~RegressionCV.logger
51 | ~RegressionCV.loggers
52 | ~RegressionCV.n_cvs
53 | ~RegressionCV.on_gpu
54 | ~RegressionCV.optimizer_name
55 | ~RegressionCV.trainer
56 | ~RegressionCV.training
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.cvs.VariationalAutoEncoderCV.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.cvs.VariationalAutoEncoderCV
2 | =====================================
3 |
4 | .. currentmodule:: mlcolvar.cvs
5 |
6 | .. autoclass:: VariationalAutoEncoderCV
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~VariationalAutoEncoderCV.__init__
20 | ~VariationalAutoEncoderCV.encode_decode
21 | ~VariationalAutoEncoderCV.forward_cv
22 | ~VariationalAutoEncoderCV.get_decoder
23 | ~VariationalAutoEncoderCV.training_step
24 |
25 |
26 |
27 |
28 | ..
29 |
30 |
31 | .. rubric:: Attributes
32 |
33 | .. autosummary::
34 |
35 | ~VariationalAutoEncoderCV.BLOCKS
36 | ~VariationalAutoEncoderCV.CHECKPOINT_HYPER_PARAMS_KEY
37 | ~VariationalAutoEncoderCV.CHECKPOINT_HYPER_PARAMS_NAME
38 | ~VariationalAutoEncoderCV.CHECKPOINT_HYPER_PARAMS_TYPE
39 | ~VariationalAutoEncoderCV.T_destination
40 | ~VariationalAutoEncoderCV.automatic_optimization
41 | ~VariationalAutoEncoderCV.call_super_init
42 | ~VariationalAutoEncoderCV.current_epoch
43 | ~VariationalAutoEncoderCV.device
44 | ~VariationalAutoEncoderCV.dtype
45 | ~VariationalAutoEncoderCV.dump_patches
46 | ~VariationalAutoEncoderCV.example_input_array
47 | ~VariationalAutoEncoderCV.fabric
48 | ~VariationalAutoEncoderCV.global_rank
49 | ~VariationalAutoEncoderCV.global_step
50 | ~VariationalAutoEncoderCV.hparams
51 | ~VariationalAutoEncoderCV.hparams_initial
52 | ~VariationalAutoEncoderCV.local_rank
53 | ~VariationalAutoEncoderCV.logger
54 | ~VariationalAutoEncoderCV.loggers
55 | ~VariationalAutoEncoderCV.n_cvs
56 | ~VariationalAutoEncoderCV.on_gpu
57 | ~VariationalAutoEncoderCV.optimizer_name
58 | ~VariationalAutoEncoderCV.trainer
59 | ~VariationalAutoEncoderCV.training
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.data.DictDataset.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.data.DictDataset
2 | =========================
3 |
4 | .. currentmodule:: mlcolvar.data
5 |
6 | .. autoclass:: DictDataset
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~DictDataset.__init__
20 | ~DictDataset.get_stats
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~DictDataset.feature_names
33 | ~DictDataset.keys
34 |
35 |
36 |
37 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.data.DictLoader.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.data.DictLoader
2 | ========================
3 |
4 | .. currentmodule:: mlcolvar.data
5 |
6 | .. autoclass:: DictLoader
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~DictLoader.__init__
20 | ~DictLoader.get_stats
21 | ~DictLoader.set_dataset_and_batch_size
22 |
23 |
24 |
25 |
26 | ..
27 |
28 |
29 | .. rubric:: Attributes
30 |
31 | .. autosummary::
32 |
33 | ~DictLoader.batch_size
34 | ~DictLoader.dataset
35 | ~DictLoader.dataset_len
36 | ~DictLoader.has_multiple_datasets
37 | ~DictLoader.keys
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.data.DictModule.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.data.DictModule
2 | ========================
3 |
4 | .. currentmodule:: mlcolvar.data
5 |
6 | .. autoclass:: DictModule
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~DictModule.__init__
20 | ~DictModule.predict_dataloader
21 | ~DictModule.setup
22 | ~DictModule.teardown
23 | ~DictModule.test_dataloader
24 | ~DictModule.train_dataloader
25 | ~DictModule.val_dataloader
26 |
27 |
28 |
29 |
30 | ..
31 |
32 |
33 | .. rubric:: Attributes
34 |
35 | .. autosummary::
36 |
37 | ~DictModule.CHECKPOINT_HYPER_PARAMS_KEY
38 | ~DictModule.CHECKPOINT_HYPER_PARAMS_NAME
39 | ~DictModule.CHECKPOINT_HYPER_PARAMS_TYPE
40 | ~DictModule.hparams
41 | ~DictModule.hparams_initial
42 | ~DictModule.name
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.explain.lasso.lasso_classification.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.explain.lasso.lasso\_classification
2 | ============================================
3 |
4 | .. currentmodule:: mlcolvar.explain.lasso
5 |
6 | .. autoclass:: lasso_classification
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.explain.lasso.lasso_regression.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.explain.lasso.lasso\_regression
2 | ========================================
3 |
4 | .. currentmodule:: mlcolvar.explain.lasso
5 |
6 | .. autoclass:: lasso_regression
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.explain.lasso.plot_lasso_classification.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.explain.lasso.plot\_lasso\_classification
2 | ==================================================
3 |
4 | .. currentmodule:: mlcolvar.explain.lasso
5 |
6 | .. autoclass:: plot_lasso_classification
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.explain.lasso.plot_lasso_regression.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.explain.lasso.plot\_lasso\_regression
2 | ==============================================
3 |
4 | .. currentmodule:: mlcolvar.explain.lasso
5 |
6 | .. autoclass:: plot_lasso_regression
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.explain.sensitivity.plot_sensitivity.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.explain.sensitivity.plot\_sensitivity
2 | ==============================================
3 |
4 | .. currentmodule:: mlcolvar.explain.sensitivity
5 |
6 | .. autoclass:: plot_sensitivity
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.explain.sensitivity.sensitivity_analysis.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.explain.sensitivity.sensitivity\_analysis
2 | ==================================================
3 |
4 | .. currentmodule:: mlcolvar.explain.sensitivity
5 |
6 | .. autoclass:: sensitivity_analysis
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.utils.fes.compute_fes.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.utils.fes.compute\_fes
2 | ===============================
3 |
4 | .. currentmodule:: mlcolvar.utils.fes
5 |
6 | .. autoclass:: compute_fes
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.utils.io.create_dataset_from_files.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.utils.io.create\_dataset\_from\_files
2 | ==============================================
3 |
4 | .. currentmodule:: mlcolvar.utils.io
5 |
6 | .. autoclass:: create_dataset_from_files
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.utils.io.load_dataframe.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.utils.io.load\_dataframe
2 | =================================
3 |
4 | .. currentmodule:: mlcolvar.utils.io
5 |
6 | .. autoclass:: load_dataframe
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.utils.plot.plot_features_distribution.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.utils.plot.plot\_features\_distribution
2 | ================================================
3 |
4 | .. currentmodule:: mlcolvar.utils.plot
5 |
6 | .. autoclass:: plot_features_distribution
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.utils.plot.plot_metrics.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.utils.plot.plot\_metrics
2 | =================================
3 |
4 | .. currentmodule:: mlcolvar.utils.plot
5 |
6 | .. autoclass:: plot_metrics
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.utils.timelagged.create_timelagged_dataset.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.utils.timelagged.create\_timelagged\_dataset
2 | =====================================================
3 |
4 | .. currentmodule:: mlcolvar.utils.timelagged
5 |
6 | .. autoclass:: create_timelagged_dataset
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 |
16 |
17 |
18 | ..
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/docs/autosummary/mlcolvar.utils.trainer.MetricsCallback.rst:
--------------------------------------------------------------------------------
1 | mlcolvar.utils.trainer.MetricsCallback
2 | ======================================
3 |
4 | .. currentmodule:: mlcolvar.utils.trainer
5 |
6 | .. autoclass:: MetricsCallback
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: Module,LightningModule
10 |
11 |
12 | .. automethod:: __init__
13 |
14 |
15 | .. rubric:: Methods
16 |
17 | .. autosummary::
18 |
19 | ~MetricsCallback.__init__
20 | ~MetricsCallback.on_train_epoch_end
21 |
22 |
23 |
24 |
25 | ..
26 |
27 |
28 | .. rubric:: Attributes
29 |
30 | .. autosummary::
31 |
32 | ~MetricsCallback.state_key
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/docs/examples.rst:
--------------------------------------------------------------------------------
1 | Examples
2 | ========
3 |
4 | .. rubric:: JCP paper experiments
5 |
6 | Experiments on the modified Muller-Brown potential (3 states) reported in the mlcolvar JCP paper (https://doi.org/10.1063/5.0156343), describing different learning scenarios.
7 |
8 | .. toctree::
9 | :maxdepth: 2
10 |
11 | examples_experiments
12 |
13 | .. rubric:: Literature examples
14 |
15 | Examples of usage of data-driven CVs or other tools from the mlcolvar library in the literature.
16 |
17 | .. toctree::
18 | :maxdepth: 1
19 |
20 | notebooks/examples/ex_DeepLDA.ipynb
21 | notebooks/examples/ex_DeepTICA.ipynb
22 | notebooks/examples/ex_TPI-DeepTDA.ipynb
23 | notebooks/examples/ex_stateinterpreter.ipynb
24 | notebooks/examples/ex_committor.ipynb
--------------------------------------------------------------------------------
/docs/examples_experiments.rst:
--------------------------------------------------------------------------------
1 | Modified Muller-Brown
2 | =====================
3 |
4 | Experiments on the modified Muller-Brown potential (3 states) reported in the mlcolvar JCP paper (https://doi.org/10.1063/5.0156343), describing different learning scenarios.
5 |
6 | .. toctree::
7 | :maxdepth: 1
8 |
9 | notebooks/paper_experiments/paper_1_unsupervised.ipynb
10 | notebooks/paper_experiments/paper_2_supervised.ipynb
11 | notebooks/paper_experiments/paper_3_timelagged.ipynb
12 | notebooks/paper_experiments/paper_4_multitask.ipynb
--------------------------------------------------------------------------------
/docs/images/logo_name_black_big.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/images/logo_name_black_big.png
--------------------------------------------------------------------------------
/docs/images/logo_name_black_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/images/logo_name_black_small.png
--------------------------------------------------------------------------------
/docs/images/logo_name_white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/images/logo_name_white.png
--------------------------------------------------------------------------------
/docs/images/logo_plain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/images/logo_plain.png
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. mlcolvar documentation master file, created by
2 | sphinx-quickstart on Thu Mar 15 13:55:56 2018.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | mlcolvar: Machine Learning Collective Variables
7 | ===============================================
8 |
9 | .. image:: https://img.shields.io/badge/Github-mlcolvar-brightgreen
10 | :target: https://github.com/luigibonati/mlcolvar
11 |
12 | .. image:: https://img.shields.io/badge/doi-10.1063/5.0156343-blue
13 | :target: https://doi.org/10.1063/5.0156343
14 |
15 | .. image:: https://img.shields.io/badge/arXiv:2305.19980-red
16 | :target: https://arxiv.org/abs/2305.19980
17 |
18 | ``mlcolvar`` is a Python library aimed to help design data-driven collective-variables (CVs) for enhanced sampling simulations. The key features are:
19 |
20 | 1. A unified framework to help test and use (some of) the CVs proposed in the literature.
21 | 2. A modular interface to simplify the development of new approaches and the contamination between them.
22 | 3. A streamlined distribution of CVs in the context of advanced sampling.
23 |
24 | The library is built upon the `PyTorch <https://pytorch.org/>`_ ML library as well as the `Lightning <https://lightning.ai/>`_ high-level framework.
25 |
26 | Some of the **CVs** which are implemented, organized by learning setting:
27 |
28 | * Unsupervised: PCA, (Variational) AutoEncoders [`1 `_, `2 `_ ]
29 | * Supervised: LDA [`3 <http://dx.doi.org/10.1021/acs.jpclett.8b00733>`_], DeepLDA [`4 <http://dx.doi.org/10.1021/acs.jpclett.0c00535>`_], DeepTDA [`5 <http://dx.doi.org/10.1021/acs.jpclett.1c02317>`_]
30 | * Time-informed: TICA [`6 <http://dx.doi.org/10.1063/1.4811489>`_], DeepTICA/SRVs [`7 <http://dx.doi.org/10.1073/pnas.2113533118>`_, `8 <http://dx.doi.org/10.1063/1.5092521>`_], VDE [`9 <http://dx.doi.org/10.1103/PhysRevE.97.062412>`_]
31 | * Committor-based [`10 <https://dx.doi.org/10.48550/arXiv.2410.17029>`_]
32 |
33 | And many others can be implemented based on the building blocks or with simple modifications. Check out the documentation and the examples section!
34 |
35 | The **workflow** for training and deploying CVs is illustrated in the figure:
36 |
37 | .. image:: notebooks/tutorials/images/graphical_overview_mlcvs.png
38 | :width: 800
39 | :alt: Example workflow
40 |
41 | The resulting CVs can then be deployed for enhancing sampling with the `PLUMED <https://www.plumed.org>`_ package via the PyTorch interface, available since version 2.9.
42 |
43 | Table of contents
44 | -----------------
45 |
46 | .. toctree::
47 | :maxdepth: 2
48 | :caption: Contents:
49 |
50 | installation
51 | api
52 | tutorials
53 | examples
54 | plumed
55 | contributing
56 |
57 | Indices and tables
58 | ------------------
59 |
60 | * :ref:`genindex`
61 | * :ref:`modindex`
62 | * :ref:`search`
63 |
--------------------------------------------------------------------------------
/docs/installation.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
4 | The recommended way to install the package is using ``pip`` in a dedicated `virtual environment <https://docs.python.org/3/tutorial/venv.html>`_.
5 |
6 | .. code-block:: bash
7 |
8 | # Activate here your Python virtual environment (e.g., with venv or conda).
9 | pip install mlcolvar
10 |
11 |
12 | Download & Install from source
13 | ------------------------------
14 |
15 | You can download the source code by cloning the repository locally using ``git``
16 |
17 | .. code-block:: bash
18 |
19 | git clone https://github.com/luigibonati/mlcolvar.git
20 |
21 | Alternatively, you can download a ``tar.gz`` or ``zip`` of the `latest release <https://github.com/luigibonati/mlcolvar/releases/latest>`_
22 | or a specific release from the `releases page <https://github.com/luigibonati/mlcolvar/releases>`_.
23 |
24 | To install `mlcolvar` from source, you will need an `environment `_
25 | with the following **requirements**:
26 |
27 | * ``python >= 3.8``
28 | * ``numpy``
29 | * ``pytorch >= 1.11``
30 | * ``lightning > 1.18``
31 |
32 | The following packages are optional requirements, but they are recommended as they allow to use all of the helper functions
33 | contained in the utils module.
34 |
35 | * ``pandas`` (i/o)
36 | * ``matplotlib`` (plot)
37 | * ``KDEpy`` or ``scikit-learn`` (compute free energy profiles via KDE)
38 | * ``tqdm`` (monitor training progress)
39 |
40 | Finally, you can install the package by entering the downloaded (and unzipped) directory and executing
41 |
42 | .. code-block:: bash
43 |
44 | # Activate here your Python virtual environment (e.g., with venv or conda).
45 | cd mlcolvar
46 | pip install .
47 |
48 | If you are planning to `modify the code `_, we recommend you install in editable mode to have your
49 | modifications automatically installed
50 |
51 | .. code-block:: bash
52 |
53 | pip install -e .
54 |
55 | Furthermore, if you want to check that the library is working properly, you can perform the regtests by installing the optional dependencies and then executing `pytest` in the mlcolvar folder.
56 |
57 | .. code-block:: bash
58 |
59 | pip install mlcolvar[test]
60 | pytest
61 |
62 |
63 | Create a virtual environment
64 | ----------------------------
65 |
66 | To create a virtual environment, you can use either ``venv`` (which ships with Python 3) or, if you prefer, ``conda``.
67 |
68 | With ``venv``, you can create a new virtual environment with
69 |
70 | .. code-block:: bash
71 |
72 | python -m venv path/to/created/environment/folder
73 |
74 | Then you can activate the environment to install packages in it.
75 |
76 | .. code-block:: bash
77 |
78 | source path/to/created/environment/folder/bin/activate
79 |
80 | Alternatively, if you are using ``conda`` you can create and activate the environment using
81 |
82 | .. code-block:: bash
83 |
84 | conda create --name myenvname
85 | conda activate myenvname
86 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | set SPHINXPROJ=mlcolvar
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/docs/notebooks/.gitignore:
--------------------------------------------------------------------------------
1 | test*
2 |
--------------------------------------------------------------------------------
/docs/notebooks/examples/.gitignore:
--------------------------------------------------------------------------------
1 | *.pt
2 |
--------------------------------------------------------------------------------
/docs/notebooks/examples/images/ala2-deeplda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/examples/images/ala2-deeplda.png
--------------------------------------------------------------------------------
/docs/notebooks/examples/images/alanine.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/examples/images/alanine.png
--------------------------------------------------------------------------------
/docs/notebooks/examples/images/aldol-P.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/examples/images/aldol-P.jpg
--------------------------------------------------------------------------------
/docs/notebooks/examples/images/aldol-R.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/examples/images/aldol-R.jpg
--------------------------------------------------------------------------------
/docs/notebooks/examples/images/aldol-deeplda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/examples/images/aldol-deeplda.png
--------------------------------------------------------------------------------
/docs/notebooks/examples/images/chignolin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/examples/images/chignolin.png
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/README.md:
--------------------------------------------------------------------------------
1 | # mlcolvar paper experiments
2 |
3 | Experiments files from 'A unified framework for machine learning collective variables for enhanced sampling simulations: mlcolvar'.
4 |
5 | Luigi Bonati, Enrico Trizio, Andrea Rizzi and Michele Parrinello
6 |
7 | [arXiv preprint](https://arxiv.org/abs/2305.19980)
8 |
9 | Here you can find all the files needed to reproduce the experiments reported in the paper on a three-state toy model in two dimensions.
10 | To run the simulations you need PLUMED with the pytorch and ves modules active.
11 |
12 | #### Contents:
13 | - Input files for the training of the models (input_data folder)
14 | - Input files for reproducing the simulations in the paper (results folder)
15 | - Jupyter notebooks for the training of the models and analysis (main folder)
16 | - Trained models used in the paper (results folder)
17 |
18 | #### Colab links for Jupyter notebooks
19 | - [Notebook unsupervised](https://colab.research.google.com/github/luigibonati/mlcolvar/blob/main/docs/notebooks/paper_experiments/paper_1_unsupervised.ipynb)
20 | - [Notebook supervised](https://colab.research.google.com/github/luigibonati/mlcolvar/blob/main/docs/notebooks/paper_experiments/paper_2_supervised.ipynb)
21 | - [Notebook timelagged](https://colab.research.google.com/github/luigibonati/mlcolvar/blob/main/docs/notebooks/paper_experiments/paper_3_timelagged.ipynb)
22 | - [Notebook multitask](https://colab.research.google.com/github/luigibonati/mlcolvar/blob/main/docs/notebooks/paper_experiments/paper_4_multitask.ipynb)
23 |
24 | #### mlcolvar library
25 | - [Documentation](https://mlcolvar.readthedocs.io)
26 | - [GitHub](https://github.com/luigibonati/mlcolvar)
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-0/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.25,1.75
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-0/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-0/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-1/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position 0.,0.5
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-1/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-1/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-2/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position 1.,0.
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-2/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/supervised/state-2/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/timelagged/opes-y/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 4000000
3 | tstep 0.005
4 | temperature 1.0
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.75,1.5
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/timelagged/opes-y/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/timelagged/opes-y/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | opes: OPES_METAD ARG=p.y BARRIER=15 PACE=200
8 |
9 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
10 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
11 |
12 | PRINT STRIDE=200 ARG=* FILE=COLVAR
13 |
14 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/unsupervised/unbiased/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.25,1.75
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/unsupervised/unbiased/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/input_data/unsupervised/unbiased/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/paper_experiments.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/paper_experiments.zip
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 1000000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.7,1.4
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_multitask.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16 STATE_WSTRIDE=10000 STATE_WFILE=State.data
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_model.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_model.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_0.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_1.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_1.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_2.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_2.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_3.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_3.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_4.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_4.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_5.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_5.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_6.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_6.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_7.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_7.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_8.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/decoder_model_8.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_0.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_1.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_1.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_2.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_2.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_3.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_3.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_4.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_4.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_5.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_5.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_6.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_6.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_7.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_7.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_8.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/decoder_stats/model_multitask_8.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/multitask/model_multitask.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/multitask/model_multitask.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/supervised/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/supervised/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 1000000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.7,1.4
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/supervised/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_deepTDA.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16 STATE_WSTRIDE=10000 STATE_WFILE=State.data
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/supervised/model_deepTDA.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/supervised/model_deepTDA.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/timelagged/deepTICA/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/timelagged/deepTICA/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 4000000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.7,1.4
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/timelagged/deepTICA/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | tica: PYTORCH_MODEL FILE=../model_deepTICA.pt ARG=p.x,p.y
15 |
16 | # apply static bias from previous sim
17 | static: OPES_METAD ARG=p.y ...
18 | RESTART=YES
19 | STATE_RFILE=../../../../input_data/timelagged/opes-y/State.data
20 | BARRIER=16
21 | PACE=10000000
22 | ...
23 |
24 | lwall_x: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
25 | uwall_x: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
26 | lwall_y: LOWER_WALLS ARG=p.y KAPPA=1000 AT=-0.4
27 | uwall_y: UPPER_WALLS ARG=p.y KAPPA=1000 AT=+2.0
28 |
29 | # apply bias
30 | opes: OPES_METAD ARG=tica.node-0 PACE=200 BARRIER=16
31 |
32 | PRINT FMT=%g STRIDE=200 FILE=COLVAR ARG=p.x,p.y,tica.*,opes.*,static.*
33 |
34 | ENDPLUMED
35 |
36 |
37 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/timelagged/deepTICA/model_deepTICA.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/timelagged/deepTICA/model_deepTICA.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_0/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_0/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.7,1.4
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_0/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_0.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_0/model_autoencoder_0.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_0/model_autoencoder_0.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_1/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_1/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.642997,1.38177
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_1/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_1.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_1/model_autoencoder_1.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_1/model_autoencoder_1.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_10/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_10/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position 0.213335,0.543154
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_10/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_10.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_10/model_autoencoder_10.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_10/model_autoencoder_10.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_11/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_11/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.511004,1.52488
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_11/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_11.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_11/model_autoencoder_11.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_11/model_autoencoder_11.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_12/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_12/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position 0.29939,0.532049
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_12/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_12.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_12/model_autoencoder_12.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_12/model_autoencoder_12.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_13/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_13/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position 0.998183,0.0598882
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_13/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_13.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_13/model_autoencoder_13.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_13/model_autoencoder_13.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_14/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_14/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.535068,1.48956
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_14/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_14.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_14/model_autoencoder_14.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_14/model_autoencoder_14.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_15/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_15/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.530235,1.49656
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_15/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_15.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_15/model_autoencoder_15.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_15/model_autoencoder_15.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_2/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_2/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.598636,1.42363
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_2/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_2.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_2/model_autoencoder_2.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_2/model_autoencoder_2.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_3/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_3/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position 0.127404,0.539671
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_3/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_3.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_3/model_autoencoder_3.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_3/model_autoencoder_3.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_4/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_4/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.536925,1.48531
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_4/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_4.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_4/model_autoencoder_4.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_4/model_autoencoder_4.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_5/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_5/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.59128,1.43247
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_5/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_5.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_5/model_autoencoder_5.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_5/model_autoencoder_5.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_6/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_6/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position 0.15442,0.538291
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_6/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_6.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_6/model_autoencoder_6.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_6/model_autoencoder_6.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_7/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_7/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.549627,1.47595
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_7/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_7.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_7/model_autoencoder_7.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_7/model_autoencoder_7.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_8/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_8/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position 0.223085,0.55137
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_8/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_8.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_8/model_autoencoder_8.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_8/model_autoencoder_8.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_9/data/input_md-potential.dat:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 1
5 | #! SET shape_dim1 2
6 | #! SET shape_dim2 2
7 | 0 0 1.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_9/data/input_md.dat:
--------------------------------------------------------------------------------
1 | nstep 100000
2 | tstep 0.005
3 | temperature 1.0
4 | friction 10.0
5 | random_seed 42
6 | plumed_input plumed.dat
7 | dimension 2
8 | replicas 1
9 | basis_functions_1 BF_POWERS ORDER=1 MINIMUM=-4.0 MAXIMUM=+3.0
10 | basis_functions_2 BF_POWERS ORDER=1 MINIMUM=-1.0 MAXIMUM=+2.5
11 | input_coeffs input_md-potential.dat
12 | initial_position -0.586332,1.43607
13 | output_coeffs /dev/null
14 | output_potential /dev/null
15 | output_potential_grid 10
16 | output_histogram /dev/null
17 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_9/data/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 |
4 | p: POSITION ATOM=1
5 |
6 | # define modified Muller Brown potential
7 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO ...
8 | FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
9 | ...
10 |
11 | pot: BIASVALUE ARG=ene
12 |
13 | # load deep cv pytorch model
14 | cv: PYTORCH_MODEL FILE=../model_autoencoder_9.pt ARG=p.x,p.y
15 |
16 | # apply bias
17 | opes: OPES_METAD ARG=cv.node-0 PACE=500 BARRIER=16
18 |
19 | PRINT FMT=%g STRIDE=100 FILE=COLVAR ARG=p.x,p.y,cv.*,opes.*
20 |
21 | ENDPLUMED
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/iter_9/model_autoencoder_9.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/paper_experiments/results/unsupervised/iter_9/model_autoencoder_9.pt
--------------------------------------------------------------------------------
/docs/notebooks/paper_experiments/results/unsupervised/unbiased:
--------------------------------------------------------------------------------
1 | ../../../tutorials/data/muller-brown-3states/unbiased/state-0/
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/README.md:
--------------------------------------------------------------------------------
 1 | This repository contains simulation data for 2D toy models which are used in the tutorials.
2 |
3 | - `muller-brown` --> Muller-Brown potential
4 | - `muller-brown-3states` --> Modified Muller-Brown potential to have 3 metastable states
5 |
6 | Each directory contains a notebook `run-md-plumed.ipynb` which has been used to run Langevin dynamics using PLUMED.
7 | Both unbiased and biased simulations have been performed to provide a comprehensive set of simulation data.
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/biased/opes-y/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 4000000
3 | tstep 0.005
4 | temperature 1.0
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.75,1.5
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/biased/opes-y/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/biased/opes-y/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | opes: OPES_METAD ARG=p.y BARRIER=15 PACE=200
8 |
9 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
10 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
11 |
12 | PRINT STRIDE=200 ARG=* FILE=COLVAR
13 |
14 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/high-temp/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 2000000
3 | tstep 0.005
4 | temperature 2.5
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position 0.5,0.0
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/high-temp/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/high-temp/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
9 |
10 | PRINT STRIDE=400 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-0/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.25,1.75
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-0/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-0/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-1/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position 0.,0.5
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-1/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-1/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-2/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position 1.,0.
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-2/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown-3states/unbiased/state-2/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-280*exp(-15*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-170*exp(-1*(x-0.2)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.2
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/explore-x/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 4000000
3 | tstep 0.005
4 | temperature 1.0
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.75,1.5
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/explore-x/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/explore-x/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-200*exp(-1*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-100*exp(-1*(x-0)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | opes: OPES_METAD_EXPLORE ARG=p.x BARRIER=20 PACE=200
8 |
9 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
10 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.0
11 |
12 | PRINT STRIDE=400 ARG=* FILE=COLVAR
13 |
14 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/opes-x/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 40000000
3 | tstep 0.005
4 | temperature 1.0
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.75,1.5
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/opes-x/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/opes-x/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-200*exp(-1*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-100*exp(-1*(x-0)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | opes: OPES_METAD ARG=p.x BARRIER=20 PACE=200
8 |
9 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
10 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.0
11 |
12 | PRINT STRIDE=4000 ARG=* FILE=COLVAR
13 |
14 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/opes-y/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 2000000
3 | tstep 0.005
4 | temperature 1.0
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.75,1.5
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/opes-y/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/biased/opes-y/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-200*exp(-1*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-100*exp(-1*(x-0)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | opes: OPES_METAD ARG=p.y BARRIER=10 PACE=200
8 |
9 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
10 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.0
11 |
12 | PRINT STRIDE=200 ARG=* FILE=COLVAR
13 |
14 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/high-temp/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 1000000
3 | tstep 0.005
4 | temperature 2.5
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position 0.5,0.0
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/high-temp/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/high-temp/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-200*exp(-1*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-100*exp(-1*(x-0)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.0
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/state-0/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position -0.25,1.75
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/state-0/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/state-0/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-200*exp(-1*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-100*exp(-1*(x-0)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.0
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/state-1/md_input:
--------------------------------------------------------------------------------
1 |
2 | nstep 400000
3 | tstep 0.005
4 | temperature 1.
5 | friction 10.0
6 | random_seed 1
7 | plumed_input plumed.dat
8 | dimension 2
9 | replicas 1
10 | basis_functions_1 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
11 | basis_functions_2 BF_POWERS ORDER=2 MINIMUM=-4.0 MAXIMUM=+4.0
12 | input_coeffs md_potential
13 | initial_position 0.5,0.0
14 | output_potential out_potential.data
15 | output_potential_grid 100
16 | output_histogram histogram.data
17 |
18 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/state-1/md_potential:
--------------------------------------------------------------------------------
1 | #! FIELDS idx_dim1 idx_dim2 pot.coeffs index description
2 | #! SET type LinearBasisSet
3 | #! SET ndimensions 2
4 | #! SET ncoeffs_total 9
5 | #! SET shape_dim1 3
6 | #! SET shape_dim2 3
7 | 0 0 0.0000000000000000e+00 0 1*1
8 | #!-------------------
9 |
10 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/data/muller-brown/unbiased/state-1/plumed.dat:
--------------------------------------------------------------------------------
1 | # vim:ft=plumed
2 | UNITS NATURAL
3 | p: POSITION ATOM=1
4 | ene: CUSTOM ARG=p.x,p.y PERIODIC=NO FUNC=0.15*(146.7-200*exp(-1*(x-1)^2+0*(x-1)*(y-0)-10*(y-0)^2)-100*exp(-1*(x-0)^2+0*(x-0)*(y-0.5)-10*(y-0.5)^2)-170*exp(-6.5*(x+0.5)^2+11*(x+0.5)*(y-1.5)-6.5*(y-1.5)^2)+15*exp(0.7*(x+1)^2+0.6*(x+1)*(y-1)+0.7*(y-1)^2))
5 | pot: BIASVALUE ARG=ene
6 |
7 | lwall: LOWER_WALLS ARG=p.x KAPPA=1000 AT=-1.3
8 | uwall: UPPER_WALLS ARG=p.x KAPPA=1000 AT=+1.0
9 |
10 | PRINT STRIDE=200 ARG=* FILE=COLVAR
11 |
12 |
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/images/OPES_VK.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/tutorials/images/OPES_VK.png
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/images/TPI_deepTDA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/tutorials/images/TPI_deepTDA.png
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/images/committor_cv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/tutorials/images/committor_cv.png
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/images/deepTDAscheme.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/tutorials/images/deepTDAscheme.png
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/images/deeplda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/tutorials/images/deeplda.png
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/images/deeptica.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/tutorials/images/deeptica.png
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/images/graphical_overview_mlcvs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/tutorials/images/graphical_overview_mlcvs.png
--------------------------------------------------------------------------------
/docs/notebooks/tutorials/images/lda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luigibonati/mlcolvar/4909d2f78a5847a713b0c6078ad98ad87d003ea1/docs/notebooks/tutorials/images/lda.png
--------------------------------------------------------------------------------
/docs/plumed.rst:
--------------------------------------------------------------------------------
1 | PLUMED module
2 | =============
3 |
4 | **Deploying CVs in PLUMED2**
5 |
6 | In order to use the ML CVs to enhance the sampling we can export them into the PLUMED2 open-source plug-in for molecular simulations.
7 | To do so, we will compile our model using the just-in-time compiler (``torch.jit``). This creates a file which can be executed outside Python, e.g. in standalone C++ programs.
8 |
9 | In this way we can load the CVs in PLUMED by using PyTorch C++ APIs (LibTorch). We have developed an interface (`PYTORCH_MODEL <https://www.plumed.org/doc-master/user-doc/html/_p_y_t_o_r_c_h__m_o_d_e_l.html>`_) which is now part of the official PLUMED2 software as an additional module, starting from version 2.9. To configure PLUMED with LibTorch please have a look at the PLUMED `documentation <https://www.plumed.org/doc-master/user-doc/html/_installation.html>`_.
10 |
--------------------------------------------------------------------------------
/docs/requirements.yaml:
--------------------------------------------------------------------------------
1 | name: docs
2 | channels:
3 |
4 | - conda-forge
5 | - pytorch
6 | - defaults
7 | - pyg
8 |
9 | dependencies:
10 |
11 | # Base depends
12 | - python >=3.8,<=3.10
13 | - pip
14 |
15 | # Core dependencies
16 | - numpy<2
17 | - pandas
18 | - pytorch
19 | - pydantic<2 # workaround to avoid clashes with lightning
20 | - lightning
21 |
22 | # utils
23 | - matplotlib
24 | - nbsphinx
25 | - ipython
26 | - ipykernel
27 | - scikit-learn
28 | - scipy
29 |
30 | # Pip-only installs
31 | - pip:
32 | - sphinx-copybutton
33 | - furo
34 | - KDEpy
35 |
--------------------------------------------------------------------------------
/docs/tutorials.rst:
--------------------------------------------------------------------------------
1 | Tutorials
2 | =========
3 |
4 | .. rubric:: Getting Started
5 |
6 | These tutorials describe the basics of `mlcolvar` through an example workflow.
7 | In addition, instructions on how to create datasets and customize the training are also given.
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 |
12 | tutorials_overview
13 |
14 | .. rubric:: Collective variables
15 |
16 | The following tutorials cover (some of) the CVs implemented in the library.
17 |
18 | .. toctree::
19 | :maxdepth: 2
20 |
21 | tutorials_cvs
22 |
23 | .. rubric:: Customizing CVs
24 |
25 | How to customize the CVs, either using the multitask framework or by creating new ones based on the existing ones. Moreover, we also discuss how to add pre or post processing layers.
26 |
27 | .. toctree::
28 | :maxdepth: 2
29 |
30 | tutorials_advanced
31 |
32 | .. rubric:: Interpreting CVs
33 |
34 | Some examples of how to interpret the CVs, using sensitivity analysis or sparse linear models.
35 |
36 | .. toctree::
37 | :maxdepth: 2
38 |
39 | tutorials_explain
40 |
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/docs/tutorials_advanced.rst:
--------------------------------------------------------------------------------
1 | Customizing CVs
2 | ===============
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | notebooks/tutorials/adv_multitask.ipynb
8 | notebooks/tutorials/adv_newcv_scratch.ipynb
9 | notebooks/tutorials/adv_newcv_subclass.ipynb
10 | notebooks/tutorials/adv_preprocessing.ipynb
11 | notebooks/tutorials/adv_transforms.ipynb
12 |
--------------------------------------------------------------------------------
/docs/tutorials_cvs.rst:
--------------------------------------------------------------------------------
1 | Methods for CVs optimization
2 | ============================
3 |
4 | .. toctree::
5 | :caption: Unsupervised setting
6 | :maxdepth: 1
7 |
8 | notebooks/tutorials/cvs_Autoencoder.ipynb
9 |
10 | .. toctree::
11 | :caption: Supervised setting
12 | :maxdepth: 1
13 |
14 | notebooks/tutorials/cvs_DeepLDA.ipynb
15 | notebooks/tutorials/cvs_DeepTDA.ipynb
16 |
17 | .. toctree::
18 | :caption: Time-informed setting
19 | :maxdepth: 1
20 |
21 | notebooks/tutorials/cvs_DeepTICA.ipynb
22 |
23 | .. toctree::
24 | :caption: Committor-based setting
25 | :maxdepth: 1
26 |
27 | notebooks/tutorials/cvs_committor.ipynb
--------------------------------------------------------------------------------
/docs/tutorials_explain.rst:
--------------------------------------------------------------------------------
1 | Interpreting CVs
2 | ================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | notebooks/tutorials/expl_features_relevances.ipynb
8 | notebooks/tutorials/expl_lasso.ipynb
--------------------------------------------------------------------------------
/docs/tutorials_overview.rst:
--------------------------------------------------------------------------------
1 | Getting Started
2 | ===============
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | notebooks/tutorials/intro_1_training.ipynb
8 | notebooks/tutorials/intro_2_data.ipynb
9 | notebooks/tutorials/intro_3_loss_optim.ipynb
10 |
--------------------------------------------------------------------------------
/mlcolvar/.gitignore:
--------------------------------------------------------------------------------
1 | *ipynb
2 | logs/
--------------------------------------------------------------------------------
/mlcolvar/__init__.py:
--------------------------------------------------------------------------------
1 | """Machine learning collective variables"""
2 |
3 | __all__ = ["loss", "nn", "transform", "stats"]
4 |
5 | # Add imports here
6 | from .core import *
7 | from .data import *
8 | from .cvs import *
9 |
10 | from ._version import __version__
11 |
--------------------------------------------------------------------------------
/mlcolvar/core/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["loss", "nn", "transform", "stats"]
2 |
3 | from .loss import *
4 | from .nn import *
5 | from .transform import *
6 | from .stats import *
7 |
--------------------------------------------------------------------------------
/mlcolvar/core/loss/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "MSELoss",
3 | "mse_loss",
4 | "TDALoss",
5 | "tda_loss",
6 | "ELBOGaussiansLoss",
7 | "elbo_gaussians_loss",
8 | "ReduceEigenvaluesLoss",
9 | "reduce_eigenvalues_loss",
10 | "AutocorrelationLoss",
11 | "autocorrelation_loss",
12 | "FisherDiscriminantLoss",
13 | "fisher_discriminant_loss",
14 | "CommittorLoss",
15 | "committor_loss",
16 | "SmartDerivatives",
17 | "compute_descriptors_derivatives"
18 | ]
19 |
20 | from .mse import MSELoss, mse_loss
21 | from .tda_loss import TDALoss, tda_loss
22 | from .eigvals import ReduceEigenvaluesLoss, reduce_eigenvalues_loss
23 | from .elbo import ELBOGaussiansLoss, elbo_gaussians_loss
24 | from .autocorrelation import AutocorrelationLoss, autocorrelation_loss
25 | from .fisher import FisherDiscriminantLoss, fisher_discriminant_loss
26 | from .committor_loss import CommittorLoss, committor_loss, SmartDerivatives, compute_descriptors_derivatives
27 |
--------------------------------------------------------------------------------
/mlcolvar/core/loss/mse.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # =============================================================================
4 | # MODULE DOCSTRING
5 | # =============================================================================
6 |
7 | """
8 | (Weighted) Mean Squared Error (MSE) loss function.
9 | """
10 |
11 | __all__ = ["MSELoss", "mse_loss"]
12 |
13 |
14 | # =============================================================================
15 | # GLOBAL IMPORTS
16 | # =============================================================================
17 |
18 | from typing import Optional
19 |
20 | import torch
21 |
22 |
23 | # =============================================================================
24 | # LOSS FUNCTIONS
25 | # =============================================================================
26 |
27 |
class MSELoss(torch.nn.Module):
    """(Weighted) Mean Square Error.

    Thin ``torch.nn.Module`` wrapper around the functional :func:`mse_loss`,
    so the loss can be used interchangeably with other ``nn.Module`` losses.
    """

    def forward(
        self,
        input: torch.Tensor,
        target: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Compute the value of the loss function.

        Parameters
        ----------
        input : torch.Tensor
            prediction
        target : torch.Tensor
            reference
        weights : torch.Tensor, optional
            sample weights, by default None
        """
        return mse_loss(input, target, weights)
39 |
40 |
def mse_loss(
    input: torch.Tensor, target: torch.Tensor, weights: Optional[torch.Tensor] = None
) -> torch.Tensor:
    """(Weighted) Mean Square Error

    Parameters
    ----------
    input : torch.Tensor
        prediction
    target : torch.Tensor
        reference
    weights : torch.Tensor, optional
        sample weights, by default None

    Returns
    -------
    loss: torch.Tensor
        loss function
    """
    # promote 1D tensors to the (batch, size) convention
    if input.ndim == 1:
        input = input.unsqueeze(1)
    if target.ndim == 1:
        target = target.unsqueeze(1)

    residual = input - target

    # unweighted case: plain mean of squared residuals
    if weights is None:
        return residual.square().mean()

    if weights.ndim == 1:
        weights = weights.unsqueeze(1)
    # NOTE(review): weights multiply the residual *before* squaring, so each
    # sample weight enters the loss quadratically — presumably intentional;
    # confirm with callers before changing.
    return (residual * weights).square().mean()
75 |
--------------------------------------------------------------------------------
/mlcolvar/core/nn/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["FeedForward"]
2 |
3 | from .feedforward import *
4 |
--------------------------------------------------------------------------------
/mlcolvar/core/nn/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | import math
4 |
5 |
class Shifted_Softplus(torch.nn.Softplus):
    """Element-wise softplus function shifted so as to pass through the origin.

    Computes ``softplus(x) - softplus(0)``, which guarantees f(0) = 0.
    """

    def __init__(self, beta=1, threshold=20):
        """
        Parameters
        ----------
        beta : float, optional
            beta value of the softplus, by default 1
        threshold : float, optional
            values above this revert to a linear function, by default 20
        """
        super(Shifted_Softplus, self).__init__(beta, threshold)
        # PERF: softplus(0) depends only on beta/threshold, which are fixed at
        # construction time — compute the shift once here instead of on every
        # forward pass. Stored as a plain float so the state_dict is unchanged.
        self.sp0 = F.softplus(torch.zeros(1), self.beta, self.threshold).item()

    def forward(self, input):
        return F.softplus(input, self.beta, self.threshold) - self.sp0
15 |
class Custom_Sigmoid(torch.nn.Module):
    """Sigmoid with a tunable steepness ``p``: 1 / (1 + exp(-p * x))."""

    def __init__(self, p=3):
        super(Custom_Sigmoid, self).__init__()
        # steepness of the switching region
        self.p = p

    def forward(self, input):
        exponent = -self.p * input
        return 1 / (1 + torch.exp(exponent))
23 |
24 |
def get_activation(activation: str):
    """Return activation module given string.

    Parameters
    ----------
    activation : str or None
        One of 'relu', 'elu', 'tanh', 'softplus', 'shifted_softplus',
        'custom_sigmoid', 'linear' (no activation, prints a warning) or None.

    Returns
    -------
    torch.nn.Module or None
        The activation module, or None for 'linear' / None.

    Raises
    ------
    ValueError
        If the string does not match any known activation.
    """
    activ = None
    if activation == "relu":
        activ = torch.nn.ReLU(True)
    elif activation == "elu":
        activ = torch.nn.ELU(True)
    elif activation == "tanh":
        activ = torch.nn.Tanh()
    elif activation == "softplus":
        activ = torch.nn.Softplus()
    elif activation == "shifted_softplus":
        activ = Shifted_Softplus()
    elif activation == "custom_sigmoid":
        activ = Custom_Sigmoid()
    elif activation == "linear":
        print("WARNING: no activation selected")
    elif activation is None:
        pass
    else:
        # BUGFIX: 'custom_sigmoid' was missing from the list of valid options
        # reported in the error message, although it is accepted above.
        raise ValueError(
            f"Unknown activation: {activation}. options: 'relu','elu','tanh','softplus','shifted_softplus','custom_sigmoid','linear'. "
        )
    return activ
49 |
50 |
def parse_nn_options(options: str, n_layers: int, last_layer_activation: bool):
    """Parse args per layer of the NN.

    If a single value is given, repeat options to all layers but for the output one,
    unless ``last_layer_activation is True``, in which case the option is repeated
    also for the output layer.

    Parameters
    ----------
    options : str or iterable
        per-layer option(s); a non-string iterable must have one entry per layer
    n_layers : int
        number of layers of the NN
    last_layer_activation : bool
        whether a single option is applied to the output layer as well

    Returns
    -------
    list
        option for each layer

    Raises
    ------
    ValueError
        if an iterable is given whose length does not match ``n_layers``
    """
    # If an iterable is given, check that its length matches the number of NN layers
    if hasattr(options, "__iter__") and not isinstance(options, str):
        if len(options) != n_layers:
            # BUGFIX: closing parenthesis was misplaced in the message,
            # producing "({len} should be ... ({n_layers}))."
            raise ValueError(
                f"Length of options: {options} ({len(options)}) should be equal to number of layers ({n_layers})."
            )
        options_list = options
    # if a single value is given, repeat options to all layers but for the output one
    else:
        if last_layer_activation:
            options_list = [options for _ in range(n_layers)]
        else:
            options_list = [options for _ in range(n_layers - 1)]
            options_list.append(None)

    return options_list
74 |
--------------------------------------------------------------------------------
/mlcolvar/core/stats/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["Stats", "PCA", "LDA", "TICA"]
2 |
3 | from .stats import *
4 | from .pca import *
5 | from .lda import *
6 | from .tica import *
7 |
--------------------------------------------------------------------------------
/mlcolvar/core/stats/stats.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | __all__ = ["Stats"]
4 |
5 |
class Stats(torch.nn.Module):
    """
    Base stats class.
    To implement a new stats override the compute and forward methods.
    The parameters of the stats should be set either in the initialization or via the setup_from_datamodule function.
    """

    def compute(self, X: torch.Tensor):
        """
        Compute the parameters of the estimator from the data X.

        Must be overridden by subclasses.
        """
        raise NotImplementedError

    def forward(self, X: torch.Tensor):
        """
        Apply estimator to X.

        Must be overridden by subclasses.
        """
        raise NotImplementedError

    def teardown(self):
        # Hook called after training; subclasses may override to release
        # resources. Default is a no-op.
        pass
27 |
--------------------------------------------------------------------------------
/mlcolvar/core/transform/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["Transform","Normalization","Statistics","SwitchingFunctions","MultipleDescriptors","PairwiseDistances","EigsAdjMat","ContinuousHistogram","Inverse","TorsionalAngle","SequentialTransform"]
2 |
3 | from .transform import *
4 | from .utils import *
5 | from .tools import *
6 | from .descriptors import *
7 |
--------------------------------------------------------------------------------
/mlcolvar/core/transform/descriptors/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["MultipleDescriptors", "CoordinationNumbers", "EigsAdjMat", "PairwiseDistances", "TorsionalAngle"]
2 |
3 | from .coordination_numbers import *
4 | from .eigs_adjacency_matrix import *
5 | from .pairwise_distances import *
6 | from .torsional_angle import *
7 | from .multiple_descriptors import *
--------------------------------------------------------------------------------
/mlcolvar/core/transform/tools/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["ContinuousHistogram", "Normalization", "SwitchingFunctions"]
2 |
3 | from .continuous_hist import *
4 | from .normalization import *
5 | from .switching_functions import *
--------------------------------------------------------------------------------
/mlcolvar/core/transform/tools/continuous_hist.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from mlcolvar.core.transform import Transform
4 | from mlcolvar.core.transform.tools.utils import easy_KDE
5 |
6 | __all__ = ["ContinuousHistogram"]
7 |
class ContinuousHistogram(Transform):
    """
    Compute continuous histogram using Gaussian kernels
    """

    def __init__(self,
                 in_features: int,
                 min: float,
                 max: float,
                 bins: int,
                 sigma_to_center: float = 1.0):
        """Computes the continuous histogram of a quantity using Gaussian kernels.

        The histogram values (one per bin) are produced by ``forward``.

        Parameters
        ----------
        in_features : int
            Number of inputs
        min : float
            Minimum value of the histogram
        max : float
            Maximum value of the histogram
        bins : int
            Number of bins of the histogram
        sigma_to_center : float, optional
            Sigma value in bin_size units, by default 1.0
        """
        # BUGFIX: __init__ was annotated `-> torch.Tensor`; a constructor
        # returns None. The misleading annotation and the "Returns" section
        # (which described forward's output) have been removed.
        super().__init__(in_features=in_features, out_features=bins)

        self.min = min
        self.max = max
        self.bins = bins
        self.sigma_to_center = sigma_to_center

    def compute_hist(self, x):
        """Evaluate the KDE histogram of x (one value per bin)."""
        hist = easy_KDE(x=x,
                        n_input=self.in_features,
                        min_max=[self.min, self.max],
                        n=self.bins,
                        sigma_to_center=self.sigma_to_center)
        return hist

    def forward(self, x: torch.Tensor):
        x = self.compute_hist(x)
        return x
59 |
def test_continuous_histogram():
    """Smoke test: the histogram is differentiable w.r.t. its input."""
    n_feat, n_bins = 100, 10
    data = torch.randn((5, n_feat))
    data.requires_grad = True
    hist = ContinuousHistogram(in_features=n_feat, min=-1, max=1, bins=n_bins, sigma_to_center=1)
    result = hist(data)
    result.sum().backward()

if __name__ == "__main__":
    test_continuous_histogram()
--------------------------------------------------------------------------------
/mlcolvar/core/transform/tools/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | from typing import Union, List
5 |
def batch_reshape(t: torch.Tensor, size: torch.Size) -> torch.Tensor:
    """Return value reshaped according to size.
    In case of batch unsqueeze and expand along the first dimension.
    For single inputs just pass.

    Parameters
    ----------
    t : torch.Tensor
        1D tensor of per-feature values (e.g. mean and range)
    size : torch.Size
        target size, either (n_features) or (n_batch, n_features)

    Returns
    -------
    torch.Tensor
        t, broadcast to (n_batch, n_features) when size has a batch dimension

    Raises
    ------
    ValueError
        if size has more than two dimensions
    """
    if len(size) == 1:
        return t
    if len(size) == 2:
        batch_size = size[0]
        x_size = size[1]
        # expand() broadcasts over the batch dimension without copying data
        t = t.unsqueeze(0).expand(batch_size, x_size)
    else:
        # BUGFIX: error message read "must of shape"
        raise ValueError(
            f"Input tensor must be of shape (n_features) or (n_batch,n_features), not {size} (len={len(size)})."
        )
    return t
27 |
28 |
29 | def _gaussian_expansion(x : torch.Tensor,
30 | centers : torch.Tensor,
31 | sigma : torch.Tensor):
32 | """Computes the values in x of a set of Gaussian kernels centered on centers and with width sigma
33 |
34 | Parameters
35 | ----------
36 | x : torch.Tensor
37 | Input value(s)
38 | centers : torch.Tensor
39 | Centers of the Gaussian kernels
40 | sigma : torch.Tensor
41 | Width of the Gaussian kernels
42 | """
43 | return torch.exp(- torch.div(torch.pow(x-centers, 2), 2*torch.pow(sigma,2) ))
44 |
def easy_KDE(x : torch.Tensor,
             n_input : int,
             min_max : Union[List[float], np.ndarray],
             n : int,
             sigma_to_center : float = 1.0,
             normalize : bool = False,
             return_bins : bool = False) -> torch.Tensor:
    """Compute histogram using KDE with Gaussian kernels

    Parameters
    ----------
    x : torch.Tensor
        Input
    n_input : int
        Number of inputs per batch
    min_max : Union[list[float], np.ndarray]
        Minimum and maximum values for the histogram
    n : int
        Number of Gaussian kernels
    sigma_to_center : float, optional
        Sigma value in bin_size units, by default 1.0
    normalize : bool, optional
        Switch for normalization of the histogram to sum to n_input, by default False
    return_bins : bool, optional
        Switch to return the bins of the histogram alongside the values, by default False

    Returns
    -------
    torch.Tensor
        Values of the histogram for each bin. The bins can be optionally returned enabling `return_bins`.
    """
    # Coerce the input to shape (batch, n_input, 1) so it broadcasts against
    # the (n_input, n) matrix of kernel centers built below.
    if len(x.shape) == 1:
        x = torch.reshape(x, (1, n_input, 1))
    if x.shape[-1] != 1:
        x = x.unsqueeze(-1)
    if x.shape[0] == n_input:
        # NOTE(review): this heuristic assumes an unbatched (n_input, 1) input;
        # a genuine batch of exactly n_input samples would also hit this branch
        # and gain a spurious leading dimension — confirm with callers.
        x = x.unsqueeze(0)

    centers = torch.linspace(min_max[0], min_max[1], n, device=x.device)
    bins = torch.clone(centers)  # kept aside; returned when return_bins=True
    # kernel width expressed in units of the bin spacing
    sigma = (centers[1] - centers[0]) * sigma_to_center
    centers = torch.tile(centers, dims=(n_input,1))
    # sum the kernel contributions of all inputs for each bin
    out = torch.sum(_gaussian_expansion(x, centers, sigma), dim=1)
    if normalize:
        # rescale so that each histogram sums to n_input
        out = torch.div(out, torch.sum(out, -1, keepdim=True)) * n_input
    if return_bins:
        return out, bins
    else:
        return out
/mlcolvar/core/transform/transform.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | __all__ = ["Transform"]
4 |
5 |
class Transform(torch.nn.Module):
    """
    Base transform class.
    To implement a new transform override the forward method.
    The parameters of the transform should be set either in the initialization or via the setup_from_datamodule function.
    """

    def __init__(self, in_features: int, out_features: int):
        """Transform class options.

        Parameters
        ----------
        in_features : int
            Number of inputs of the transform
        out_features : int
            Number of outputs of the transform
        """
        super().__init__()
        # input/output sizes; used by wrappers to chain transforms together
        self.in_features = in_features
        self.out_features = out_features

    def setup_from_datamodule(self, datamodule):
        """
        Initialize parameters based on pytorch lightning datamodule.

        Default is a no-op; subclasses may override.
        """
        pass

    def forward(self, X: torch.Tensor):
        # Subclasses must implement the actual transformation.
        raise NotImplementedError()

    def teardown(self):
        # Hook for cleanup after training; default is a no-op.
        pass
38 |
--------------------------------------------------------------------------------
/mlcolvar/cvs/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "BaseCV",
3 | "DeepLDA",
4 | "DeepTICA",
5 | "DeepTDA",
6 | "AutoEncoderCV",
7 | "RegressionCV",
8 | "MultiTaskCV",
9 | 'Committor',
10 | ]
11 |
12 | from .cv import BaseCV
13 | from .unsupervised import *
14 | from .supervised import *
15 | from .timelagged import *
16 | from .multitask import *
17 | from .committor import *
18 |
--------------------------------------------------------------------------------
/mlcolvar/cvs/committor/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["Committor", "KolmogorovBias", "compute_committor_weights", "initialize_committor_masses"]
2 |
3 | from .committor import *
4 | from .utils import *
--------------------------------------------------------------------------------
/mlcolvar/cvs/multitask/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["MultiTaskCV"]
2 |
3 | from mlcolvar.cvs.multitask.multitask import MultiTaskCV
4 |
--------------------------------------------------------------------------------
/mlcolvar/cvs/supervised/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["DeepLDA", "DeepTDA", "RegressionCV"]
2 |
3 | from .deeplda import *
4 | from .deeptda import *
5 | from .regression import *
6 |
--------------------------------------------------------------------------------
/mlcolvar/cvs/timelagged/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["DeepTICA"]
2 |
3 | from .deeptica import *
4 |
--------------------------------------------------------------------------------
/mlcolvar/cvs/unsupervised/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["AutoEncoderCV", "VariationalAutoEncoderCV"]
2 |
3 | from .autoencoder import *
4 | from .vae import *
5 |
--------------------------------------------------------------------------------
/mlcolvar/data/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["DictDataset", "DictModule", "DictLoader"]
2 |
3 | from .dataset import *
4 | from .dataloader import *
5 | from .datamodule import *
6 |
--------------------------------------------------------------------------------
/mlcolvar/explain/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "sensitivity_analysis",
3 | "plot_sensitivity",
4 | ]
5 |
6 | from .sensitivity import *
7 | # from .lasso import * # lasso requires additional dependencies
8 |
--------------------------------------------------------------------------------
/mlcolvar/py.typed:
--------------------------------------------------------------------------------
1 | # PEP 561 marker file. See https://peps.python.org/pep-0561/
2 |
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_stats_lda.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from mlcolvar.core.stats.lda import test_lda
4 |
5 | if __name__ == "__main__":
6 | test_lda()
7 |
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_stats_pca.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from mlcolvar.core.stats.pca import test_pca
4 |
5 | if __name__ == "__main__":
6 | test_pca()
7 |
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_stats_tica.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from mlcolvar.core.stats.tica import test_tica # ,test_reduced_rank_tica
4 |
5 | if __name__ == "__main__":
6 | test_tica()
7 | # test_reduced_rank_tica()
8 |
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_adjacencymatrix.py:
--------------------------------------------------------------------------------
1 | from mlcolvar.core.transform.descriptors.eigs_adjacency_matrix import test_eigs_of_adj_matrix
2 |
3 | if __name__ == "__main__":
4 | test_eigs_of_adj_matrix()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_continuoushistogram.py:
--------------------------------------------------------------------------------
1 | from mlcolvar.core.transform.tools.continuous_hist import test_continuous_histogram
2 |
3 | if __name__ == "__main__":
4 | test_continuous_histogram()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_coordinationnumbers.py:
--------------------------------------------------------------------------------
1 | from mlcolvar.core.transform.descriptors.coordination_numbers import test_coordination_number
2 |
3 | if __name__ == "__main__":
4 | test_coordination_number()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_descriptors_utils.py:
--------------------------------------------------------------------------------
1 | from mlcolvar.core.transform.descriptors.utils import test_adjacency_matrix,test_applycutoff
2 |
3 | if __name__ == "__main__":
4 | test_applycutoff()
5 | test_adjacency_matrix()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_multipledescriptors.py:
--------------------------------------------------------------------------------
1 | from mlcolvar.core.transform.descriptors.multiple_descriptors import test_multipledescriptors
2 |
3 | if __name__ == "__main__":
4 | test_multipledescriptors()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_normalization.py:
--------------------------------------------------------------------------------
1 | from mlcolvar.core.transform.tools.normalization import test_normalization
2 |
3 | if __name__ == "__main__":
4 | test_normalization()
5 |
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_pairwisedistances.py:
--------------------------------------------------------------------------------
"""Runner for the pairwise-distances test; importing it lets pytest collect it."""
from mlcolvar.core.transform.descriptors.pairwise_distances import test_pairwise_distances

if __name__ == "__main__":
    test_pairwise_distances()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_switchingfunctions.py:
--------------------------------------------------------------------------------
"""Runner for the switching-functions test; importing it lets pytest collect it."""
from mlcolvar.core.transform.tools.switching_functions import test_switchingfunctions

if __name__ == "__main__":
    test_switchingfunctions()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_torsionalangle.py:
--------------------------------------------------------------------------------
"""Runner for the torsional-angle test; importing it lets pytest collect it."""
from mlcolvar.core.transform.descriptors.torsional_angle import test_torsional_angle

if __name__ == "__main__":
    test_torsional_angle()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_core_transform_utils.py:
--------------------------------------------------------------------------------
"""Runner for the transform-utils tests; importing them lets pytest collect them."""
from mlcolvar.core.transform.utils import test_inverse, test_statistics, test_sequential_transform

if __name__ == "__main__":
    test_inverse()
    test_statistics()
    test_sequential_transform()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_cvs_committor.py:
--------------------------------------------------------------------------------
"""Runner for the committor CV and committor-loss tests; pytest collects both imports."""
from mlcolvar.cvs.committor.committor import test_committor
from mlcolvar.core.loss.committor_loss import test_smart_derivatives

if __name__ == "__main__":
    test_committor()
    test_smart_derivatives()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_cvs_slowmodes_deeptica.py:
--------------------------------------------------------------------------------
"""Runner for the DeepTICA CV test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.cvs.timelagged.deeptica import test_deep_tica

if __name__ == "__main__":
    test_deep_tica()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_cvs_supervised_deeplda.py:
--------------------------------------------------------------------------------
"""Runner for the DeepLDA CV test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.cvs.supervised.deeplda import test_deeplda

if __name__ == "__main__":
    # Exercise both the two-state and three-state variants.
    test_deeplda(n_states=2)
    test_deeplda(n_states=3)
--------------------------------------------------------------------------------
/mlcolvar/tests/test_cvs_supervised_regression.py:
--------------------------------------------------------------------------------
"""Runner for the regression CV test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.cvs.supervised.regression import test_regression_cv

if __name__ == "__main__":
    test_regression_cv()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_cvs_supervised_tda.py:
--------------------------------------------------------------------------------
"""Runner for the DeepTDA CV test; importing it lets pytest collect it."""
from mlcolvar.cvs.supervised.deeptda import test_deeptda_cv

if __name__ == "__main__":
    test_deeptda_cv()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_cvs_unsupervised_autoencoder.py:
--------------------------------------------------------------------------------
"""Runner for the autoencoder CV test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.cvs.unsupervised.autoencoder import test_autoencodercv

if __name__ == "__main__":
    test_autoencodercv()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_cvs_unsupervised_vae.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 |
4 | # =============================================================================
5 | # MODULE DOCSTRING
6 | # =============================================================================
7 |
8 | """
 9 | Test objects and functions in mlcolvar.cvs.unsupervised.vae.
10 | """
11 |
12 |
13 | # =============================================================================
14 | # GLOBAL IMPORTS
15 | # =============================================================================
16 |
17 | import os
18 | import tempfile
19 |
20 | import pytest
21 | import lightning
22 | import torch
23 |
24 | from mlcolvar.cvs.unsupervised.vae import VariationalAutoEncoderCV
25 | from mlcolvar.data import DictDataset, DictModule
26 |
27 |
28 | # =============================================================================
29 | # TESTS
30 | # =============================================================================
31 |
32 |
@pytest.mark.parametrize("weights", [False, True])
def test_vae_cv_training(weights):
    """Run a full training of a VariationalAutoEncoderCV.

    Trains for one epoch on random data (optionally with per-sample
    weights), checks the shape of the CV output, and verifies that a
    torchscript-exported model reproduces the eager-mode output.

    Parameters
    ----------
    weights : bool
        If True, random per-sample weights are added to the dataset.
    """
    # Create VAE CV.
    n_cvs = 2
    in_features = 8
    model = VariationalAutoEncoderCV(
        n_cvs=n_cvs,
        encoder_layers=[in_features, 6, 4],
        options={
            "norm_in": None,
            "encoder": {"activation": "relu"},
        },
    )

    # Create input data.
    batch_size = 100
    x = torch.randn(batch_size, in_features)
    data = {"data": x}

    # Create weights.
    if weights:
        data["weights"] = torch.rand(batch_size)

    # Train.
    datamodule = DictModule(DictDataset(data))
    trainer = lightning.Trainer(
        max_epochs=1, log_every_n_steps=2, logger=None, enable_checkpointing=False
    )
    trainer.fit(model, datamodule)

    # Eval.
    model.eval()
    x_hat = model(x)
    assert x_hat.shape == (batch_size, n_cvs)

    # Test export to torchscript.
    # The temp file is created *before* the try block so that `tmp_file` is
    # always bound when the finally clause runs; previously a failure inside
    # NamedTemporaryFile would have raised a NameError in `finally`, masking
    # the real error. delete=False (plus manual unlink) is a workaround for
    # windows not allowing opening temp files twice.
    tmp_file = tempfile.NamedTemporaryFile("wb", suffix=".ptc", delete=False)
    tmp_file.close()
    try:
        model.to_torchscript(file_path=tmp_file.name, method="trace")
        model_loaded = torch.jit.load(tmp_file.name)
    finally:
        os.unlink(tmp_file.name)
    x_hat2 = model_loaded(x)
    assert torch.allclose(x_hat, x_hat2)
80 |
--------------------------------------------------------------------------------
/mlcolvar/tests/test_explain_lasso.py:
--------------------------------------------------------------------------------
"""Runner for the lasso explainability tests; pytest collects both imports."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.explain.lasso import test_lasso_classification, test_lasso_regression

if __name__ == "__main__":
    test_lasso_classification()
    test_lasso_regression()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_explain_sensitivity.py:
--------------------------------------------------------------------------------
"""Runner for the sensitivity-analysis test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.explain.sensitivity import test_sensitivity_analysis

if __name__ == "__main__":
    test_sensitivity_analysis()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_utils_data_dataset.py:
--------------------------------------------------------------------------------
"""Runner for the DictDataset test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.data.dataset import test_DictDataset

if __name__ == "__main__":
    test_DictDataset()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_utils_data_timelagged.py:
--------------------------------------------------------------------------------
"""Runner for the time-lagged dataset test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.utils.timelagged import test_create_timelagged_dataset

if __name__ == "__main__":
    test_create_timelagged_dataset()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_utils_fes.py:
--------------------------------------------------------------------------------
"""Runner for the free-energy-surface test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.utils.fes import test_compute_fes

if __name__ == "__main__":
    test_compute_fes()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_utils_io.py:
--------------------------------------------------------------------------------
import pytest
import urllib.error
import urllib.request

from mlcolvar.utils.io import load_dataframe
from mlcolvar.utils.io import test_datasetFromFile
5 |
# Example inputs for load_dataframe: a single path, a list of paths, and a URL.
example_files = {
    "str": "mlcolvar/tests/data/state_A.dat",
    "list": ["mlcolvar/tests/data/state_A.dat", "mlcolvar/tests/data/state_B.dat"],
    "url": "https://raw.githubusercontent.com/luigibonati/mlcolvar/main/mlcolvar/tests/data/2d_model/COLVAR_stateA",
}


@pytest.mark.parametrize("file_type", ["str", "list", "url"])
def test_loadDataframe(file_type):
    """Load a COLVAR file from a path, a list of paths, or a URL.

    The URL case is skipped (not failed) when no internet connection
    is available.
    """
    filename = example_files[file_type]
    if file_type == "url":
        # Skip the test if the connection is not available. The context
        # manager closes the probe response instead of leaking the socket.
        try:
            with urllib.request.urlopen(filename):
                pass
        except urllib.error.URLError:
            pytest.skip("internet not available")

    df = load_dataframe(filename, start=0, stop=10, stride=1)
    # Previously the result was discarded, so the test asserted nothing.
    # Make the smoke test meaningful: something non-empty must come back.
    assert df is not None and len(df) > 0


if __name__ == "__main__":
    # test_loadDataframe()
    test_datasetFromFile()
29 |
--------------------------------------------------------------------------------
/mlcolvar/tests/test_utils_plot.py:
--------------------------------------------------------------------------------
"""Runner for the plotting-utilities test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.utils.plot import test_utils_plot

if __name__ == "__main__":
    test_utils_plot()
--------------------------------------------------------------------------------
/mlcolvar/tests/test_utils_trainer.py:
--------------------------------------------------------------------------------
"""Runner for the metrics-callback test; importing it lets pytest collect it."""
import pytest  # NOTE(review): not used in this runner -- confirm before removing

from mlcolvar.utils.trainer import test_metrics_callbacks

if __name__ == "__main__":
    test_metrics_callbacks()
--------------------------------------------------------------------------------
/mlcolvar/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # __all__ = ["io","fes","plot"]
2 |
3 | # not imported by default as they depend on optional libraries (pandas, scikit-learn or KDEpy)
4 | # from .io import *
5 | # from .fes import *
6 | # from .plot import *
7 |
--------------------------------------------------------------------------------
/mlcolvar/utils/trainer.py:
--------------------------------------------------------------------------------
1 | from lightning import Callback
2 | import copy
3 |
4 |
class SimpleMetricsCallback(Callback):
    """Lightning callback that collects logged metrics into a list.

    At the end of every validation epoch a deep copy of
    ``trainer.callback_metrics`` is appended to ``self.metrics``;
    sanity-check runs are ignored.
    """

    def __init__(self):
        super().__init__()
        self.metrics = []

    def on_validation_end(self, trainer, pl_module):
        # Skip the sanity-check pass: its metrics are not real training output.
        if trainer.sanity_checking:
            return
        self.metrics.append(copy.deepcopy(trainer.callback_metrics))
18 |
19 |
class MetricsCallback(Callback):
    """Lightning callback that records logged metrics in a dictionary.

    At the end of each training epoch the current epoch number is appended
    to ``self.metrics["epoch"]`` and each entry of
    ``trainer.callback_metrics`` is appended to a list keyed by its name.
    Sanity-check runs are ignored.
    """

    def __init__(self):
        super().__init__()
        self.metrics = {"epoch": []}

    def on_train_epoch_end(self, trainer, pl_module):
        # Metrics from the sanity-check pass are discarded.
        if trainer.sanity_checking:
            return
        self.metrics["epoch"].append(trainer.current_epoch)
        for name, value in trainer.callback_metrics.items():
            # .item() assumes scalar-tensor values, as in the original code.
            self.metrics.setdefault(name, []).append(value.item())
39 |
40 |
def test_metrics_callbacks():
    """Smoke-test both metrics callbacks on a one-epoch training run.

    Trains a fresh AutoEncoderCV once with SimpleMetricsCallback and once
    with MetricsCallback attached, verifying that both run without errors.
    """
    import torch
    import lightning
    from mlcolvar.cvs import AutoEncoderCV
    from mlcolvar.data import DictDataset, DictModule

    def _run_training(callback, datamodule):
        # Fit a fresh one-CV autoencoder for a single epoch with the given
        # metrics callback attached (shared setup for both callback flavours).
        model = AutoEncoderCV([2, 2, 1])
        trainer = lightning.Trainer(
            max_epochs=1,
            log_every_n_steps=2,
            logger=None,
            enable_checkpointing=False,
            callbacks=callback,
        )
        trainer.fit(model, datamodule)

    X = torch.rand((100, 2))
    datamodule = DictModule(DictDataset({"data": X}))

    _run_training(SimpleMetricsCallback(), datamodule)
    _run_training(MetricsCallback(), datamodule)
72 |
--------------------------------------------------------------------------------
/readthedocs.yml:
--------------------------------------------------------------------------------
1 | # readthedocs.yml
2 |
3 | version: 2
4 |
5 | build:
6 | os: ubuntu-20.04
7 | tools:
8 | python: "mambaforge-4.10"
9 |
10 | python:
11 | install:
12 | - method: pip
13 | path: .
14 |
15 | sphinx:
16 | configuration: docs/conf.py
17 |
18 | conda:
19 | environment: docs/requirements.yaml
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | lightning
2 | torch
3 | numpy<2
4 | pandas
5 | matplotlib
6 | kdepy
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | # Helper file to handle all configs
2 |
3 | [coverage:run]
4 | # .coveragerc to control coverage.py and pytest-cov
5 | omit =
6 | # Omit the tests
7 | */tests/*
8 | # Omit generated versioneer
9 | mlcolvar/_version.py
10 |
11 | [yapf]
12 | # YAPF, in .style.yapf files this shows up as "[style]" header
13 | COLUMN_LIMIT = 119
14 | INDENT_WIDTH = 4
15 | USE_TABS = False
16 |
17 | [flake8]
18 | # Flake8, PyFlakes, etc
19 | max-line-length = 119
20 |
21 | [aliases]
22 | test = pytest
23 |
--------------------------------------------------------------------------------
|