├── .gitignore ├── LICENSE ├── README.md ├── dynalearn ├── __init__.py ├── config │ ├── __init__.py │ ├── config.py │ ├── datasets.py │ ├── dynamics.py │ ├── experiments.py │ ├── metrics.py │ ├── networks.py │ ├── trainable.py │ └── util │ │ ├── __init__.py │ │ ├── callback.py │ │ ├── metrics │ │ ├── __init__.py │ │ ├── attention.py │ │ ├── forecast.py │ │ ├── ltp.py │ │ ├── prediction.py │ │ ├── stationary.py │ │ └── statistics.py │ │ ├── networks │ │ ├── __init__.py │ │ ├── transforms.py │ │ └── weights.py │ │ ├── optimizer.py │ │ └── training.py ├── datasets │ ├── __init__.py │ ├── continuous_dataset.py │ ├── data │ │ ├── __init__.py │ │ ├── data.py │ │ ├── network_data.py │ │ └── state_data.py │ ├── dataset.py │ ├── discrete_dataset.py │ ├── getter.py │ ├── sampler.py │ ├── transforms │ │ ├── __init__.py │ │ ├── getter.py │ │ ├── random_flip.py │ │ ├── random_rewire.py │ │ ├── remap.py │ │ ├── threshold.py │ │ └── transform.py │ └── weights │ │ ├── __init__.py │ │ ├── continuous.py │ │ ├── discrete.py │ │ ├── kde.py │ │ ├── structure.py │ │ └── weight.py ├── dynamics │ ├── __init__.py │ ├── activation.py │ ├── deterministic_epidemics │ │ ├── __init__.py │ │ ├── base.py │ │ ├── incidence.py │ │ └── simple.py │ ├── dynamics.py │ ├── getter.py │ ├── incidence │ │ ├── __init__.py │ │ ├── base.py │ │ └── simple.py │ ├── stochastic_epidemics │ │ ├── __init__.py │ │ ├── base.py │ │ ├── complex.py │ │ ├── interacting.py │ │ └── simple.py │ └── trainable │ │ ├── __init__.py │ │ ├── deterministic_epidemics.py │ │ ├── incidence.py │ │ ├── kapoor.py │ │ ├── stochastic_epidemics.py │ │ └── var.py ├── experiments │ ├── __init__.py │ ├── experiment.py │ └── metrics │ │ ├── __init__.py │ │ ├── attention.py │ │ ├── forecast.py │ │ ├── getter.py │ │ ├── ltp.py │ │ ├── metrics.py │ │ ├── prediction.py │ │ ├── stationary.py │ │ ├── statistics.py │ │ └── util │ │ ├── __init__.py │ │ ├── initializer.py │ │ ├── model_sampler.py │ │ ├── mutual_info.py │ │ └── statistics.py ├── networks │ ├── __init__.py │ ├── generator.py │ ├── getter.py │ ├── network.py │ ├── random.py │ ├── transform.py │ └── weight.py ├── nn │ ├── __init__.py │ ├── activation.py │ ├── callbacks │ │ ├── __init__.py │ │ ├── best_model_restore.py │ │ ├── callbacks.py │ │ ├── checkpoint.py │ │ ├── getter.py │ │ ├── lr_scheduler.py │ │ ├── periodic.py │ │ └── util.py │ ├── history.py │ ├── loss.py │ ├── metrics.py │ ├── models │ │ ├── __init__.py │ │ ├── deterministic_epidemics.py │ │ ├── dgat.py │ │ ├── getter.py │ │ ├── gnn.py │ │ ├── incidence.py │ │ ├── kapoor.py │ │ ├── model.py │ │ ├── propagator.py │ │ ├── reaction_diffusion.py │ │ ├── stochastic_epidemics.py │ │ └── util.py │ ├── optimizers │ │ ├── __init__.py │ │ ├── optimizer.py │ │ └── radam.py │ └── transformers │ │ ├── __init__.py │ │ ├── batch.py │ │ ├── normalizer.py │ │ └── transformer.py └── util │ ├── __init__.py │ ├── display.py │ ├── distribution.py │ ├── loggers │ ├── __init__.py │ ├── logger.py │ ├── memory.py │ └── time.py │ ├── util.py │ └── verbose.py ├── notebooks ├── example-sis-ba.ipynb └── figures │ ├── manuscript-figure1.ipynb │ ├── manuscript-figure2.ipynb │ ├── manuscript-figure3.ipynb │ ├── manuscript-figure4.ipynb │ └── manuscript-figure5.ipynb ├── requirements.txt ├── scripts ├── figure-6 │ └── run-covid.py ├── figures-234 │ ├── run-synthetic-continuous.py │ └── run-synthetic-discrete.py ├── run-test.py ├── si-figures │ ├── run-bias.py │ ├── run-datasize.py │ ├── run-gnnlayers.py │ ├── run-netsize.py │ └── run-resamp.py └── sources │ ├── run-covid.py │ ├── 
run.py │ ├── script.py │ └── specs.json ├── setup.py └── tests ├── __init__.py ├── all.py ├── config ├── __init__.py ├── all.py ├── templates.py ├── test_callback.py ├── test_config.py ├── test_dataset.py ├── test_dynamics.py ├── test_experiment.py ├── test_metrics.py ├── test_networks.py ├── test_optimizer.py ├── test_trainable.py ├── test_training.py └── test_weights.py ├── datasets ├── __init__.py ├── all.py ├── templates.py ├── test_continuousdataset.py ├── test_data.py ├── test_discretedataset.py ├── test_kde.py ├── test_remap.py ├── test_sampler.py ├── test_threshold.py └── test_weightdata.py ├── dynamics ├── __init__.py ├── all.py ├── templates.py ├── test_deterministic.py ├── test_incidence.py ├── test_stochasticepidemics.py ├── test_trainable.py └── test_var.py ├── experiments ├── __init__.py ├── all.py ├── templates.py ├── test_attention.py ├── test_experiment.py ├── test_forecast.py ├── test_ltp.py ├── test_pred.py ├── test_stationary.py └── test_statistics.py ├── networks ├── __init__.py ├── all.py ├── test_ba.py ├── test_gnp.py ├── test_multiplex.py └── test_network.py ├── nn ├── __init__.py ├── all.py ├── test_gat.py ├── test_gnn.py ├── test_history.py ├── test_normalizer.py └── test_propagator.py └── util ├── __init__.py ├── all.py ├── test_logger.py ├── test_timelogger.py ├── test_util.py └── test_verbose.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /dynalearn/__init__.py: -------------------------------------------------------------------------------- 1 | import dynalearn.config 2 | import dynalearn.datasets 3 | import dynalearn.dynamics 4 | import dynalearn.networks 5 | import dynalearn.nn 6 | import dynalearn.util 7 | import dynalearn.experiments 8 | -------------------------------------------------------------------------------- /dynalearn/config/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import * 2 | from .datasets import * 3 | from .dynamics import * 4 | from .trainable import * 5 | from .networks import * 6 | from .metrics import * 7 | from .experiments import * 8 | -------------------------------------------------------------------------------- /dynalearn/config/config.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from itertools import product 3 | 4 | 5 | class Config: 6 | def __init__(self, **kwargs): 7 | self.__dict__ = kwargs 8 | 9 | def __str__(self): 10 | return self.to_string() 11 | 12 | def __setitem__(self, key, val): 13 | key = key.split("/") 14 | if len(key) == 1: 15 | setattr(self, key[0], val) 16 | else: 17 | config = getattr(self, key[0]) 18 | key = "/".join(key[1:]) 19 | config[key] = val 20 | 21 | def __getitem__(self, key): 22 | key = key.split("/") 23 | if len(key) == 1: 24 | return getattr(self, key[0]) 25 | else: 26 | config = getattr(self, key[0]) 27 | key = "/".join(key[1:]) 28 | return config[key] 29 | 30 | def get(self, key, default=None): 31 | if key in self.__dict__: 32 | return self.__dict__[key] 33 | else: 34 | return default 35 | 36 | def to_string(self, prefix=""): 37 | string = "" 38 | for k, v in self.__dict__.items(): 39 | if issubclass(v.__class__, Config): 40 | string += prefix + f"{k}:\n" 41 | string += "{0}\n".format(v.to_string(prefix=prefix + "\t")) 42 | else: 43 | string += prefix + f"{k}: {v.__str__()}\n" 44 | return string 45 | 46 | def get_state_dict(self): 47 | state_dict = {} 48 | for k, v in self.__dict__.items(): 49 | if k != "_state_dict": 50 | if issubclass(v.__class__, Config): 51 | v_dict = v.state_dict 52 | for kk, vv in v_dict.items(): 53 | state_dict[k + "/" + kk] = vv 54 | else: 55 | state_dict[k] = v 56 | return state_dict 57 | 58 | @property 59 | def state_dict(self): 60 | return self.get_state_dict() 61 | 62 | def has_list(self): 63 | for k, v in self.state_dict.items(): 64 | if isinstance(v, list): 65 | return True 66 | return False 67 | 68 | def merge(self, config): 69 | for k, v in config.__dict__.items(): 70 | self.__dict__[k] = v 71 | 72 | def copy(self): 73 | config_copy = self.__class__() 74 | for k, v in self.__dict__.items(): 75 | if issubclass(v.__class__, Config) or isinstance(v, (np.ndarray, list)): 76 | setattr(config_copy, k, v.copy()) 77 | else: 78 | 
setattr(config_copy, k, v) 79 | return config_copy 80 | 81 | def keys(self): 82 | return self.__dict__.keys() 83 | 84 | def values(self): 85 | return self.__dict__.values() 86 | 87 | def items(self): 88 | return self.__dict__.items() 89 | -------------------------------------------------------------------------------- /dynalearn/config/datasets.py: -------------------------------------------------------------------------------- 1 | from .config import Config 2 | 3 | 4 | class DiscreteDatasetConfig(Config): 5 | @classmethod 6 | def get_config(cls, weight_type="state", **kwargs): 7 | return getattr(cls, weight_type)(**kwargs) 8 | 9 | @classmethod 10 | def plain(cls, **kwargs): 11 | cls = cls() 12 | cls.name = "DiscreteDataset" 13 | cls.modes = ["main"] 14 | cls.bias = 0 15 | cls.replace = True 16 | cls.use_groundtruth = False 17 | return cls 18 | 19 | @classmethod 20 | def structure(cls, use_strength=True, **kwargs): 21 | cls = cls() 22 | cls.name = "DiscreteStructureWeightDataset" 23 | cls.modes = ["main"] 24 | cls.bias = 0 25 | cls.replace = True 26 | cls.use_groundtruth = False 27 | cls.use_strength = use_strength 28 | return cls 29 | 30 | @classmethod 31 | def state(cls, use_strength=True, compounded=True, **kwargs): 32 | cls = cls() 33 | cls.name = "DiscreteStateWeightDataset" 34 | cls.modes = ["main"] 35 | cls.bias = 0 36 | cls.replace = True 37 | cls.use_groundtruth = False 38 | cls.use_strength = use_strength 39 | cls.compounded = compounded 40 | return cls 41 | 42 | 43 | class ContinuousDatasetConfig(Config): 44 | @classmethod 45 | def get_config(cls, weight_type="state", **kwargs): 46 | return getattr(cls, weight_type)(**kwargs) 47 | 48 | @classmethod 49 | def plain(cls, **kwargs): 50 | cls = cls() 51 | cls.name = "ContinuousDataset" 52 | cls.modes = ["main"] 53 | cls.bias = 0 54 | cls.replace = True 55 | cls.use_groundtruth = False 56 | return cls 57 | 58 | @classmethod 59 | def structure(cls, use_strength=True, **kwargs): 60 | cls = cls() 61 | cls.name = "ContinuousStructureWeightDataset" 62 | cls.modes = ["main"] 63 | cls.bias = 0 64 | cls.replace = True 65 | cls.use_groundtruth = False 66 | cls.use_strength = use_strength 67 | return cls 68 | 69 | @classmethod 70 | def state( 71 | cls, use_strength=True, compounded=False, reduce=False, total=True, **kwargs 72 | ): 73 | cls = cls() 74 | cls.name = "ContinuousStateWeightDataset" 75 | cls.modes = ["main"] 76 | cls.bias = 0 77 | cls.replace = True 78 | cls.use_groundtruth = False 79 | cls.use_strength = use_strength 80 | cls.compounded = compounded 81 | cls.total = total 82 | cls.reduce = reduce 83 | cls.max_num_points = -1 84 | return cls 85 | 86 | 87 | class TransformConfig(Config): 88 | @classmethod 89 | def kapoor2020(cls): 90 | cls = cls() 91 | cls.names = ["ThresholdNetworkTransform"] 92 | cls.threshold = 32 93 | cls.collapse = True 94 | return cls 95 | -------------------------------------------------------------------------------- /dynalearn/config/dynamics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .config import Config 4 | 5 | 6 | class DynamicsConfig(Config): 7 | @classmethod 8 | def sis(cls): 9 | cls = cls() 10 | cls.name = "SIS" 11 | cls.infection = 0.04 12 | cls.recovery = 0.08 13 | cls.init_param = None 14 | return cls 15 | 16 | @classmethod 17 | def sir(cls): 18 | cls = cls() 19 | cls.name = "SIR" 20 | cls.infection = 0.04 21 | cls.recovery = 0.08 22 | cls.init_param = None 23 | return cls 24 | 25 | @classmethod 26 | def plancksis(cls): 27 
| cls = cls() 28 | cls.name = "PlanckSIS" 29 | cls.temperature = 6.0 30 | cls.recovery = 0.08 31 | cls.init_param = None 32 | 33 | return cls 34 | 35 | @classmethod 36 | def sissis(cls): 37 | cls = cls() 38 | cls.name = "AsymmetricSISSIS" 39 | cls.infection1 = 0.01 40 | cls.infection2 = 0.012 41 | cls.recovery1 = 0.19 42 | cls.recovery2 = 0.22 43 | cls.coupling = 50.0 44 | cls.boost = "source" 45 | cls.init_param = None 46 | 47 | return cls 48 | 49 | @classmethod 50 | def dsir(cls): 51 | cls = cls() 52 | cls.name = "DSIR" 53 | cls.infection_prob = 2.5 / 2.3 54 | cls.recovery_prob = 1.0 / 7.5 55 | cls.infection_type = 2 56 | cls.density = 10000 57 | epsilon = 1e-5 58 | cls.init_param = np.array([1 - epsilon, epsilon, 0]) 59 | return cls 60 | 61 | @classmethod 62 | def incsir(cls): 63 | cls = cls() 64 | cls.name = "IncSIR" 65 | cls.infection_prob = 2.5 / 2.3 66 | cls.recovery_prob = 1.0 / 7.5 67 | cls.infection_type = 2 68 | cls.density = 10000 69 | epsilon = 1e-5 70 | cls.init_param = epsilon 71 | return cls 72 | -------------------------------------------------------------------------------- /dynalearn/config/metrics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from itertools import product 3 | from .config import Config 4 | from .util import ( 5 | AttentionConfig, 6 | ForecastConfig, 7 | LTPConfig, 8 | PredictionConfig, 9 | StationaryConfig, 10 | StatisticsConfig, 11 | ) 12 | 13 | 14 | class MetricsConfig(Config): 15 | @classmethod 16 | def test(cls): 17 | cls = cls() 18 | cls.names = [] 19 | cls.ltp = LTPConfig.default() 20 | cls.prediction = PredictionConfig.default() 21 | cls.statistics = StatisticsConfig.default() 22 | cls.stationary = StationaryConfig.test() 23 | cls.forecast = ForecastConfig.default() 24 | cls.attention = AttentionConfig.default() 25 | 26 | return cls 27 | 28 | @classmethod 29 | def sis(cls): 30 | cls = cls() 31 | cls.names = [] 32 | cls.ltp = LTPConfig.default() 33 | cls.prediction = PredictionConfig.default() 34 | cls.statistics = StatisticsConfig.default() 35 | cls.stationary = StationaryConfig.sis() 36 | cls.attention = AttentionConfig.default() 37 | 38 | return cls 39 | 40 | @classmethod 41 | def plancksis(cls): 42 | cls = cls() 43 | cls.names = [] 44 | cls.ltp = LTPConfig.default() 45 | cls.prediction = PredictionConfig.default() 46 | cls.statistics = StatisticsConfig.default() 47 | cls.stationary = StationaryConfig.plancksis() 48 | cls.attention = AttentionConfig.default() 49 | 50 | return cls 51 | 52 | @classmethod 53 | def sissis(cls): 54 | cls = cls() 55 | cls.names = [] 56 | cls.ltp = LTPConfig.default() 57 | cls.prediction = PredictionConfig.default() 58 | cls.statistics = StatisticsConfig.default() 59 | cls.stationary = StationaryConfig.sissis() 60 | cls.attention = AttentionConfig.default() 61 | 62 | return cls 63 | 64 | @classmethod 65 | def dsir(cls): 66 | cls = cls() 67 | cls.names = [] 68 | cls.prediction = PredictionConfig.default() 69 | cls.forecast = ForecastConfig.default() 70 | cls.stationary = StationaryConfig.dsir() 71 | cls.attention = AttentionConfig.default() 72 | 73 | return cls 74 | 75 | @classmethod 76 | def covid(cls): 77 | cls = cls() 78 | cls.names = [] 79 | cls.prediction = PredictionConfig.default() 80 | cls.forecast = ForecastConfig.default() 81 | cls.attention = AttentionConfig.default() 82 | 83 | return cls 84 | -------------------------------------------------------------------------------- /dynalearn/config/networks.py: 
-------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | 4 | from .config import Config 5 | from .util import TransformConfig, WeightConfig 6 | 7 | 8 | class NetworkConfig(Config): 9 | @classmethod 10 | def gnp(cls, num_nodes=1000, p=0.004, weights=None, transforms=None, layers=None): 11 | cls = cls() 12 | cls.name = "GNPNetworkGenerator" 13 | cls.num_nodes = num_nodes 14 | cls.p = p 15 | if weights is not None: 16 | cls.weights = weights 17 | if transforms is not None: 18 | cls.transforms = transforms 19 | 20 | if isinstance(layers, int): 21 | cls.layers = [f"layer{i}" for i in range(layers)] 22 | elif isinstance(layers, list): 23 | cls.layers = layers 24 | 25 | return cls 26 | 27 | @classmethod 28 | def gnm(cls, num_nodes=1000, m=2000, weights=None, transforms=None, layers=None): 29 | cls = cls() 30 | cls.name = "GNMNetworkGenerator" 31 | cls.num_nodes = num_nodes 32 | cls.m = m 33 | if weights is not None: 34 | cls.weights = weights 35 | if transforms is not None: 36 | cls.transforms = transforms 37 | 38 | if isinstance(layers, int): 39 | cls.layers = [f"layer{i}" for i in range(layers)] 40 | elif isinstance(layers, list): 41 | cls.layers = layers 42 | 43 | return cls 44 | 45 | @classmethod 46 | def ba(cls, num_nodes=1000, m=2, weights=None, transforms=None, layers=None): 47 | cls = cls() 48 | cls.name = "BANetworkGenerator" 49 | cls.num_nodes = num_nodes 50 | cls.m = m 51 | if weights is not None: 52 | cls.weights = weights 53 | if transforms is not None: 54 | cls.transforms = transforms 55 | if isinstance(layers, int): 56 | cls.layers = [f"layer{i}" for i in range(layers)] 57 | elif isinstance(layers, list): 58 | cls.layers = layers 59 | 60 | return cls 61 | 62 | @classmethod 63 | def w_gnp(cls, num_nodes=1000, p=0.004): 64 | w = WeightConfig.uniform() 65 | t = TransformConfig.sparcifier() 66 | cls = cls.gnp(num_nodes=num_nodes, p=p, weights=w, transforms=t) 67 | return cls 68 | 69 | @classmethod 70 | def w_ba(cls, num_nodes=1000, m=2): 71 | w = WeightConfig.uniform() 72 | t = TransformConfig.sparcifier() 73 | cls = cls.ba(num_nodes=num_nodes, m=m, weights=w, transforms=t) 74 | return cls 75 | 76 | @classmethod 77 | def mw_ba(cls, num_nodes=1000, m=2, layers=1): 78 | w = WeightConfig.uniform() 79 | t = TransformConfig.sparcifier() 80 | cls = cls.ba(num_nodes=num_nodes, m=m, weights=w, transforms=t, layers=layers) 81 | return cls 82 | 83 | @property 84 | def is_weighted(self): 85 | return "weights" in self.__dict__ 86 | 87 | @property 88 | def is_multiplex(self): 89 | return "layers" in self.__dict__ 90 | -------------------------------------------------------------------------------- /dynalearn/config/util/__init__.py: -------------------------------------------------------------------------------- 1 | from .networks import * 2 | from .metrics import * 3 | from .callback import * 4 | from .optimizer import * 5 | from .training import * 6 | -------------------------------------------------------------------------------- /dynalearn/config/util/callback.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class CallbackConfig(Config): 5 | @classmethod 6 | def default(cls, path_to_best="./"): 7 | cls = cls() 8 | cls.names = ["ModelCheckpoint", "StepLR"] 9 | cls.step_size = 20 10 | cls.gamma = 0.5 11 | cls.path_to_best = path_to_best 12 | return cls 13 | 14 | @classmethod 15 | def empty(cls): 16 | cls = cls() 17 | cls.names = [] 18 
| return cls 19 | -------------------------------------------------------------------------------- /dynalearn/config/util/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .attention import * 2 | from .forecast import * 3 | from .ltp import * 4 | from .prediction import * 5 | from .stationary import * 6 | from .statistics import * 7 | -------------------------------------------------------------------------------- /dynalearn/config/util/metrics/attention.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class AttentionConfig(Config): 5 | @classmethod 6 | def default(cls): 7 | cls = cls() 8 | cls.max_num_points = 100 9 | return cls 10 | -------------------------------------------------------------------------------- /dynalearn/config/util/metrics/forecast.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class ForecastConfig(Config): 5 | @classmethod 6 | def default(cls): 7 | cls = cls() 8 | cls.num_steps = [1] 9 | return cls 10 | -------------------------------------------------------------------------------- /dynalearn/config/util/metrics/ltp.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class LTPConfig(Config): 5 | @classmethod 6 | def default(cls): 7 | cls = cls() 8 | cls.max_num_sample = 1000 9 | cls.max_num_points = -1 10 | return cls 11 | -------------------------------------------------------------------------------- /dynalearn/config/util/metrics/prediction.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class PredictionConfig(Config): 5 | @classmethod 6 | def default(cls): 7 | cls = cls() 8 | cls.max_num_points = 1e4 9 | return cls 10 | -------------------------------------------------------------------------------- /dynalearn/config/util/metrics/statistics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from dynalearn.config import Config 4 | 5 | 6 | class StatisticsConfig(Config): 7 | @classmethod 8 | def default(cls): 9 | cls = cls() 10 | cls.max_num_points = 10000 11 | cls.maxlag = 1 12 | return cls 13 | -------------------------------------------------------------------------------- /dynalearn/config/util/networks/__init__.py: -------------------------------------------------------------------------------- 1 | from .transforms import * 2 | from .weights import * 3 | -------------------------------------------------------------------------------- /dynalearn/config/util/networks/transforms.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class TransformConfig(Config): 5 | @classmethod 6 | def sparcifier(cls): 7 | cls = cls() 8 | cls.names = ["SparcifierTransform"] 9 | cls.maxiter = 100 10 | cls.p = -1 11 | return cls 12 | -------------------------------------------------------------------------------- /dynalearn/config/util/networks/weights.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class WeightConfig(Config): 5 | @classmethod 6 | def uniform(cls): 7 | cls = cls() 8 | cls.name = "UniformWeightGenerator" 9 | cls.low = 0 10 | 
cls.high = 100 11 | return cls 12 | 13 | @classmethod 14 | def loguniform(cls): 15 | cls = cls() 16 | cls.name = "LogUniformWeightGenerator" 17 | cls.low = 1e-5 18 | cls.high = 100 19 | return cls 20 | 21 | @classmethod 22 | def normal(cls): 23 | cls = cls() 24 | cls.name = "NormalWeightGenerator" 25 | cls.mean = 100 26 | cls.std = 5 27 | return cls 28 | 29 | @classmethod 30 | def lognormal(cls): 31 | cls = cls() 32 | cls.name = "LogNormalWeightGenerator" 33 | cls.mean = 100 34 | cls.std = 5 35 | return cls 36 | 37 | @classmethod 38 | def degree(cls): 39 | cls = cls() 40 | cls.name = "DegreeWeightGenerator" 41 | cls.mean = 100 42 | cls.std = 5 43 | cls.normalized = True 44 | return cls 45 | 46 | @classmethod 47 | def betweenness(cls): 48 | cls = cls() 49 | cls.name = "BetweennessWeightGenerator" 50 | cls.mean = 100 51 | cls.std = 5 52 | cls.normalized = True 53 | return cls 54 | -------------------------------------------------------------------------------- /dynalearn/config/util/optimizer.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class OptimizerConfig(Config): 5 | @classmethod 6 | def default(cls): 7 | cls = cls() 8 | 9 | cls.name = "RAdam" 10 | cls.lr = 1.0e-3 11 | cls.weight_decay = 1.0e-4 12 | cls.betas = (0.9, 0.999) 13 | cls.eps = 1.0e-8 14 | cls.amsgrad = False 15 | 16 | return cls 17 | -------------------------------------------------------------------------------- /dynalearn/config/util/training.py: -------------------------------------------------------------------------------- 1 | from dynalearn.config import Config 2 | 3 | 4 | class TrainingConfig(Config): 5 | @classmethod 6 | def default( 7 | cls, 8 | ): 9 | cls = cls() 10 | 11 | cls.val_fraction = 0.01 12 | cls.val_bias = 0.8 13 | cls.epochs = 30 14 | cls.batch_size = 32 15 | cls.num_nodes = 1000 16 | cls.num_networks = 1 17 | cls.num_samples = 10000 18 | cls.resampling = 2 19 | cls.maxlag = 1 20 | cls.resample_when_dead = True 21 | 22 | return cls 23 | 24 | @classmethod 25 | def discrete( 26 | cls, 27 | ): 28 | cls = cls() 29 | 30 | cls.val_fraction = 0.01 31 | cls.val_bias = 0.8 32 | cls.epochs = 30 33 | cls.batch_size = 1 34 | cls.num_networks = 1 35 | cls.num_samples = 10000 36 | cls.resampling = 2 37 | cls.maxlag = 1 38 | cls.resample_when_dead = True 39 | 40 | return cls 41 | 42 | @classmethod 43 | def continuous( 44 | cls, 45 | ): 46 | cls = cls() 47 | 48 | cls.val_fraction = 0.1 49 | cls.val_bias = 0.5 50 | cls.epochs = 30 51 | cls.batch_size = 1 52 | cls.num_networks = 1 53 | cls.num_samples = 10000 54 | cls.resampling = 100 55 | cls.maxlag = 1 56 | cls.resample_when_dead = False 57 | 58 | return cls 59 | 60 | @classmethod 61 | def test( 62 | cls, 63 | ): 64 | cls = cls() 65 | 66 | cls.val_fraction = 0.01 67 | cls.val_bias = 0.8 68 | cls.epochs = 5 69 | cls.batch_size = 10 70 | cls.num_networks = 1 71 | cls.num_samples = 10 72 | cls.resampling = 2 73 | cls.maxlag = 1 74 | cls.resample_when_dead = True 75 | 76 | return cls 77 | -------------------------------------------------------------------------------- /dynalearn/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .transforms import * 2 | from .data import * 3 | from .weights import * 4 | from .dataset import * 5 | from .sampler import * 6 | from .discrete_dataset import * 7 | from .continuous_dataset import * 8 | from .getter import * 9 | 
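[Aside, not part of the repo dump: a minimal sketch of how the Config objects defined above compose. The class names and default values come from the files shown (config.py, optimizer.py, training.py); the composition itself is illustrative. Nested configs are reachable through the "/"-separated key syntax implemented by Config.__getitem__ and Config.__setitem__.]

from dynalearn.config import Config
from dynalearn.config.util import OptimizerConfig, TrainingConfig

# Compose a root Config from the factory classmethods defined above.
config = Config(optimizer=OptimizerConfig.default(), training=TrainingConfig.default())

# "/"-separated keys walk into nested Config objects (see Config.__getitem__).
assert config["optimizer/lr"] == 1.0e-3
config["optimizer/lr"] = 5.0e-4  # __setitem__ follows the same path

# to_string() renders the whole tree, one indentation level per nesting.
print(config.to_string())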
-------------------------------------------------------------------------------- /dynalearn/datasets/continuous_dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import networkx as nx 3 | import torch 4 | import tqdm 5 | 6 | from scipy.stats import gaussian_kde 7 | from dynalearn.datasets import Dataset, StructureWeightDataset 8 | from dynalearn.datasets.weights import ( 9 | ContinuousStateWeight, 10 | ContinuousGlobalStateWeight, 11 | ContinuousCompoundStateWeight, 12 | StrengthContinuousGlobalStateWeight, 13 | StrengthContinuousStateWeight, 14 | StrengthContinuousCompoundStateWeight, 15 | ) 16 | from dynalearn.config import Config 17 | from dynalearn.util import from_nary 18 | from dynalearn.util import to_edge_index, onehot, get_node_attr 19 | 20 | 21 | class ContinuousDataset(Dataset): 22 | def __getitem__(self, index): 23 | i, j = self.indices[index] 24 | g = self.networks[i].get() 25 | x = torch.FloatTensor(self.inputs[i].get(j)) 26 | y = torch.FloatTensor(self.targets[i].get(j)) 27 | w = torch.FloatTensor(self.weights[i].get(j)) 28 | w /= w.sum() 29 | return (x, g), y, w 30 | 31 | 32 | class ContinuousStructureWeightDataset(ContinuousDataset, StructureWeightDataset): 33 | def __init__(self, config=None, **kwargs): 34 | config = config or Config(**kwargs) 35 | ContinuousDataset.__init__(self, config) 36 | StructureWeightDataset.__init__(self, config) 37 | 38 | 39 | class ContinuousStateWeightDataset(ContinuousDataset): 40 | def __init__(self, config=None, **kwargs): 41 | config = config or Config(**kwargs) 42 | ContinuousDataset.__init__(self, config) 43 | self.max_num_points = config.max_num_points 44 | self.reduce = config.reduce 45 | self.compounded = config.compounded 46 | self.total = config.total 47 | if not self.total and not self.compounded: 48 | raise ValueError("At least one of [total] and [compounded] must be True.") 49 | 50 | def _get_weights_(self): 51 | if self.total: 52 | if self.m_networks.is_weighted: 53 | weights = StrengthContinuousGlobalStateWeight( 54 | reduce=self.reduce, bias=self.bias 55 | ) 56 | else: 57 | weights = ContinuousGlobalStateWeight( 58 | reduce=self.reduce, bias=self.bias 59 | ) 60 | else: 61 | if self.m_networks.is_weighted and self.compounded: 62 | weights = StrengthContinuousCompoundStateWeight( 63 | reduce=self.reduce, bias=self.bias 64 | ) 65 | elif self.m_networks.is_weighted and not self.compounded: 66 | weights = StrengthContinuousStateWeight( 67 | reduce=self.reduce, bias=self.bias 68 | ) 69 | elif not self.m_networks.is_weighted and self.compounded: 70 | weights = ContinuousCompoundStateWeight( 71 | reduce=self.reduce, bias=self.bias 72 | ) 73 | else: 74 | weights = ContinuousStateWeight(bias=self.bias) 75 | weights.compute(self, verbose=self.verbose) 76 | return weights 77 | -------------------------------------------------------------------------------- /dynalearn/datasets/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .data import * 2 | from .network_data import * 3 | from .state_data import * 4 | -------------------------------------------------------------------------------- /dynalearn/datasets/data/data.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import numpy as np 3 | 4 | 5 | class Data: 6 | def __init__(self, name="data"): 7 | self.name = name 8 | self._data = None 9 | 10 | def __eq__(self, other): 11 | if isinstance(other, Data): 12 | return 
self.data == other.data 13 | return False 14 | 15 | def copy(self): 16 | data_copy = self.__class__() 17 | data_copy.__dict__ = self.__dict__.copy() 18 | data_copy.data = self._data.copy() 19 | return data_copy 20 | 21 | def get(self): 22 | return self.data 23 | 24 | def save(self, h5file): 25 | if self.name in h5file: 26 | del h5file[self.name] 27 | assert isinstance(self.data, np.ndarray) 28 | h5file.create_dataset(self.name, data=self.data) 29 | 30 | def load(self, h5file): 31 | if isinstance(h5file, h5py.Dataset): 32 | self.data = h5file[...] 33 | elif isinstance(h5file, h5py.Group): 34 | if self.name in h5file: 35 | self.data = h5file[self.name][...] 36 | else: 37 | print( 38 | f"{self.name} not in h5file with name {h5file}. Available keys are {h5file.keys()}" 39 | ) 40 | 41 | @property 42 | def data(self): 43 | return self._data 44 | 45 | @data.setter 46 | def data(self, data): 47 | self._data = data 48 | 49 | 50 | class DataCollection: 51 | def __init__(self, name="data_collection", data_list=[], template=None): 52 | self.name = name 53 | self.data_list = [] 54 | if template is None: 55 | self.template = lambda d: Data(data=d, shape=d.shape[1:]) 56 | else: 57 | self.template = lambda d: template(data=d) 58 | for data in data_list: 59 | self.add(data) 60 | 61 | def __getitem__(self, index): 62 | return self.data_list[index] 63 | 64 | def __len__(self): 65 | return len(self.data_list) 66 | 67 | def __eq__(self, other): 68 | if isinstance(other, DataCollection): 69 | for d1, d2 in zip(self.data_list, other.data_list): 70 | if d1 != d2: 71 | return False 72 | else: 73 | return False 74 | return True 75 | 76 | def add(self, x): 77 | assert issubclass(type(x), Data) 78 | x.name = "d" + str(len(self)) 79 | self.data_list.append(x) 80 | 81 | def copy(self): 82 | data_copy = self.__class__() 83 | data_copy.__dict__ = self.__dict__.copy() 84 | data_copy.data_list = [data.copy() for data in self.data_list] 85 | return data_copy 86 | 87 | def save(self, h5file): 88 | assert isinstance(h5file, h5py.Group) 89 | group = h5file.create_group(self.name) 90 | for i, d in enumerate(self.data_list): 91 | d.save(group) 92 | 93 | def load(self, h5file): 94 | assert isinstance(h5file, h5py.Group) 95 | if self.name in h5file: 96 | group = h5file[self.name] 97 | for k, v in group.items(): 98 | d = self.template(v) 99 | self.add(d) 100 | 101 | @property 102 | def size(self): 103 | return len(self) 104 | -------------------------------------------------------------------------------- /dynalearn/datasets/data/state_data.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import numpy as np 3 | 4 | from dynalearn.datasets.data.data import Data 5 | 6 | 7 | class StateData(Data): 8 | def __init__(self, name="state_data", data=None): 9 | Data.__init__(self, name=name) 10 | if data is not None: 11 | if isinstance(data, h5py.Dataset): 12 | data = data[...] 
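# (h5py datasets are materialized to in-memory numpy arrays above, so the type check below holds)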
13 | assert isinstance(data, np.ndarray) 14 | self.data = data 15 | 16 | def __eq__(self, other): 17 | if isinstance(other, StateData): 18 | return np.all(self.data == other.data) 19 | return False 20 | 21 | def get(self, index): 22 | return self._data[index] 23 | 24 | @property 25 | def size(self): 26 | if len(self._data.shape) > 1: 27 | return self._data.shape[0] 28 | else: 29 | return 1 30 | 31 | @property 32 | def shape(self): 33 | if len(self._data.shape) > 1: 34 | return self._data.shape[1:] 35 | else: 36 | return (0,) 37 | -------------------------------------------------------------------------------- /dynalearn/datasets/discrete_dataset.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | 5 | from dynalearn.datasets import Dataset, StructureWeightDataset 6 | from dynalearn.config import Config 7 | from dynalearn.util import from_nary 8 | from dynalearn.util import to_edge_index, onehot 9 | from dynalearn.datasets.weights import DiscreteStateWeight, DiscreteCompoundStateWeight 10 | 11 | 12 | class DiscreteDataset(Dataset): 13 | def __getitem__(self, index): 14 | i, j = self.indices[index] 15 | g = self.networks[i].get() 16 | x = torch.FloatTensor(self.inputs[i].get(j)) 17 | if len(self.targets[i].get(j).shape) == 1: 18 | y = onehot(self.targets[i].get(j), num_class=self.num_states) 19 | else: 20 | y = self.targets[i].get(j) 21 | y = torch.FloatTensor(y) 22 | w = torch.FloatTensor(self.weights[i].get(j)) 23 | w /= w.sum() 24 | return (x, g), y, w 25 | 26 | 27 | class DiscreteStructureWeightDataset(DiscreteDataset, StructureWeightDataset): 28 | def __init__(self, config=None, **kwargs): 29 | config = config or Config(**kwargs) 30 | DiscreteDataset.__init__(self, config) 31 | StructureWeightDataset.__init__(self, config) 32 | 33 | 34 | class DiscreteStateWeightDataset(DiscreteDataset): 35 | def _get_weights_(self): 36 | if self.config.compounded: 37 | weights = DiscreteCompoundStateWeight(bias=self.bias) 38 | else: 39 | weights = DiscreteStateWeight(bias=self.bias) 40 | weights.compute(self, verbose=self.verbose) 41 | return weights 42 | -------------------------------------------------------------------------------- /dynalearn/datasets/getter.py: -------------------------------------------------------------------------------- 1 | from dynalearn.datasets import ( 2 | DiscreteDataset, 3 | DiscreteStructureWeightDataset, 4 | DiscreteStateWeightDataset, 5 | ContinuousDataset, 6 | ContinuousStructureWeightDataset, 7 | ContinuousStateWeightDataset, 8 | ) 9 | 10 | 11 | __datasets__ = { 12 | "DiscreteDataset": DiscreteDataset, 13 | "DiscreteStructureWeightDataset": DiscreteStructureWeightDataset, 14 | "DiscreteStateWeightDataset": DiscreteStateWeightDataset, 15 | "ContinuousDataset": ContinuousDataset, 16 | "ContinuousStructureWeightDataset": ContinuousStructureWeightDataset, 17 | "ContinuousStateWeightDataset": ContinuousStateWeightDataset, 18 | } 19 | 20 | 21 | def get(config): 22 | name = config.name 23 | if name in __datasets__: 24 | return __datasets__[name](config) 25 | else: 26 | raise ValueError( 27 | f"{name} is invalid, possible entries are {list(__datasets__.keys())}" 28 | ) 29 | -------------------------------------------------------------------------------- /dynalearn/datasets/sampler.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | 4 | 5 | class Sampler: 6 | def __init__(self, dataset): 7 | 
self.dataset = dataset 8 | self.config = dataset.config 9 | self.bias = self.config.bias 10 | self.replace = self.config.replace 11 | self.counter = 0 12 | self.avail_networks = list() 13 | self.avail_states = dict() 14 | 15 | def __call__(self): 16 | if len(self.avail_networks) > 0 and self.counter <= len(self.dataset): 17 | g_index = self._get_network_() 18 | s_index = self._get_state_(g_index) 19 | self.update(g_index, s_index) 20 | return (g_index, s_index) 21 | else: 22 | self.reset() 23 | raise StopIteration 24 | 25 | def update(self, g_index, s_index): 26 | self.counter += 1 27 | if not self.replace: 28 | self.avail_states[g_index].remove(s_index) 29 | if len(self.avail_states[g_index]) == 0: 30 | self.avail_networks.remove(g_index) 31 | 32 | def reset(self): 33 | self.counter = 0 34 | self.avail_networks = list(range(self.dataset.network_weights.size)) 35 | self.avail_states = { 36 | i: list(range(int(self.dataset.state_weights[i].data.shape[0]))) 37 | for i in self.avail_networks 38 | } 39 | 40 | def _get_network_(self): 41 | indices = self.avail_networks 42 | p = self.dataset.network_weights.data[indices] 43 | p /= p.sum() 44 | index = np.random.choice(self.avail_networks, p=p) 45 | return index 46 | 47 | def _get_state_(self, g_index): 48 | indices = self.avail_states[g_index] 49 | p = self.dataset.state_weights[g_index].data[indices] 50 | p /= p.sum() 51 | index = np.random.choice(indices, p=p) 52 | return index 53 | -------------------------------------------------------------------------------- /dynalearn/datasets/transforms/__init__.py: -------------------------------------------------------------------------------- 1 | from .transform import * 2 | from .random_flip import * 3 | from .random_rewire import * 4 | from .threshold import * 5 | from .remap import * 6 | from .getter import * 7 | -------------------------------------------------------------------------------- /dynalearn/datasets/transforms/getter.py: -------------------------------------------------------------------------------- 1 | from .transform import TransformList 2 | from .random_flip import RandomFlipStateTransform 3 | from .remap import RemapStateTransform, PartiallyRemapStateTransform 4 | from .threshold import ThresholdNetworkTransform 5 | 6 | __transforms__ = { 7 | "RandomFlipStateTransform": RandomFlipStateTransform, 8 | "RemapStateTransform": RemapStateTransform, 9 | "PartiallyRemapStateTransform": PartiallyRemapStateTransform, 10 | "ThresholdNetworkTransform": ThresholdNetworkTransform, 11 | } 12 | 13 | 14 | def get(config): 15 | names = config.names 16 | transforms = [] 17 | for n in names: 18 | if n in __transforms__: 19 | transforms.append(__transforms__[n](config)) 20 | else: 21 | raise ValueError( 22 | f"{n} is invalid, possible entries are {list(__transforms__.keys())}" 23 | ) 24 | return TransformList(transforms) 25 | -------------------------------------------------------------------------------- /dynalearn/datasets/transforms/random_flip.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from dynalearn.config import Config 3 | from dynalearn.datasets.transforms import StateTransform 4 | 5 | 6 | class RandomFlipStateTransform(StateTransform): 7 | def __init__(self, config=None, **kwargs): 8 | if config is None: 9 | config = Config() 10 | config.__dict__ = kwargs 11 | self.flip = config.flip 12 | StateTransform.__init__(self, config, **kwargs) 13 | 14 | def setup(self, experiment): 15 | self.num_states = experiment.dynamics.num_states 16 | 17 | def _transform_state_(self, x): 18 | _x = x.copy() 19 | num_nodes = x.shape[0] 20 | n = np.random.binomial(x.shape[0], self.flip) 21 | index = np.random.choice(range(num_nodes), size=n, replace=False) 22 | _x[index] = np.random.randint(self.num_states, size=n) 23 | return _x 24 | -------------------------------------------------------------------------------- /dynalearn/datasets/transforms/random_rewire.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | from dynalearn.datasets.transforms import NetworkTransform 4 | from dynalearn.config import Config 5 | 6 | class RandomRewireNetworkTransform(NetworkTransform): 7 | def __init__(self, config=None, **kwargs): 8 | if config is None: 9 | config = Config() 10 | config.__dict__ = kwargs 11 | self.rewire = config.rewire 12 | NetworkTransform.__init__(self, config, **kwargs) 13 | 14 | def _transform_network_(self, g): 15 | g = g.data 16 | num_edges = g.number_of_edges() 17 | n = np.random.binomial(num_edges, self.rewire) 18 | return nx.double_edge_swap(g.copy(), nswap=n, seed=np.random.randint(2 ** 31)) 19 | -------------------------------------------------------------------------------- /dynalearn/datasets/transforms/remap.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from dynalearn.datasets.transforms import StateTransform 4 | 5 | 6 | class RemapStateTransform(StateTransform): 7 | def setup(self, experiment): 8 | self.state_map = experiment.dynamics.state_map 9 | 10 | def _transform_state_(self, x): 11 | _x = np.vectorize(self.state_map.get)(x.copy()) 12 | return _x 13 | 14 | 15 | class PartiallyRemapStateTransform(StateTransform): 16 | def setup(self, experiment): 17 | self.state_map = experiment.dynamics.state_map 18 | self.hide_prob = experiment.dynamics.hide_prob 19 | 20 | def _transform_state_(self, x): 21 | if x.ndim == 1: 22 | window_size = 0 23 | num_nodes = x.shape[0] 24 | elif x.ndim == 2: 25 | window_size = x.shape[0] 26 | num_nodes = x.shape[1] 27 | _x = np.vectorize(self.state_map.get)(x.copy()) 28 | y = x.copy() 29 | if window_size > 0: 30 | for i in range(window_size): 31 | n_remap = np.random.binomial(num_nodes, self.hide_prob) 32 | index = np.random.choice(range(num_nodes), size=n_remap, replace=False) 33 | y[i, index] = _x[i, index] 34 | else: 35 | n_remap = np.random.binomial(num_nodes, self.hide_prob) 36 | index = np.random.choice(range(num_nodes), size=n_remap, replace=False) 37 | y[index] = _x[index] 38 | return y 39 | -------------------------------------------------------------------------------- /dynalearn/datasets/transforms/threshold.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | 4 | from .transform import NetworkTransform 5 | from dynalearn.networks import Network, MultiplexNetwork 6 | from dynalearn.util import ( 7 | get_node_attr, 8 | get_edge_attr, 9 | set_node_attr, 10 | set_edge_attr, 11 | ) 12 | 13 | 14 | class ThresholdNetworkTransform(NetworkTransform): 15 | def __init__(self, config=None, **kwargs): 16 | NetworkTransform.__init__(self, config=config, **kwargs) 17 | self.threshold = self.config.threshold 18 | self.collapse = self.config.collapse 19 | 20 | def _transform_network_(self, g): 21 | if isinstance(g, MultiplexNetwork) and self.collapse: 22 | g = g.collapse() 23 | g = Network(self._threshold_network(g.data)) 24 | elif isinstance(g, MultiplexNetwork) and not self.collapse: 25 | data = {} 26 | for k, v in g.data.items(): 27 | data[k] = 
self._threshold_network(v) 28 | g = MultiplexNetwork(data) 29 | else: 30 | g = Network(self._threshold_network(g.data)) 31 | return g 32 | 33 | def _threshold_network(self, g): 34 | edges = np.array(list(g.to_directed().edges())) 35 | N = g.number_of_nodes() 36 | W = np.zeros((N, N)) 37 | A = np.zeros((N, N)) 38 | node_attr = get_node_attr(g) 39 | edge_attr = get_edge_attr(g) 40 | if "weight" not in edge_attr: 41 | return g 42 | weights = edge_attr["weight"] 43 | W[edges[:, 0], edges[:, 1]] = weights 44 | for i, w in enumerate(W.T): 45 | index = np.argsort(w)[::-1] 46 | index = index[: self.threshold] 47 | A[index, i] = (w[index] > 0).astype("int") 48 | gg = nx.DiGraph() 49 | gg.add_nodes_from(np.arange(N)) 50 | gg.add_edges_from(np.array(np.where(A != 0)).T) 51 | gg = set_node_attr(gg, node_attr) 52 | assert max(dict(gg.in_degree()).values()) <= self.threshold, "Wrong in-degree." 53 | return gg 54 | -------------------------------------------------------------------------------- /dynalearn/datasets/transforms/transform.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | 4 | from dynalearn.config import Config 5 | from dynalearn.datasets.data import Data, StateData, NetworkData 6 | 7 | 8 | class Transform: 9 | def __init__(self, config=None, **kwargs): 10 | if config is None: 11 | config = Config() 12 | config.__dict__ = kwargs 13 | self.config = config 14 | 15 | def setup(self, experiment): 16 | return 17 | 18 | def __call__(self, x): 19 | raise NotImplementedError() 20 | 21 | 22 | class TransformList(Transform): 23 | def __init__(self, transforms=[]): 24 | self.transforms = transforms 25 | 26 | def __call__(self, x): 27 | for t in self.transforms: 28 | x = t(x) 29 | return x 30 | 31 | def __len__(self): 32 | return len(self.transforms) 33 | 34 | def setup(self, experiment): 35 | for t in self.transforms: 36 | t.setup(experiment) 37 | 38 | 39 | class StateTransform(Transform): 40 | def _transform_state_(self, x): 41 | raise NotImplementedError() 42 | 43 | def __call__(self, x): 44 | if not issubclass(type(x), StateData): 45 | return x 46 | data = x.data 47 | assert isinstance(data, np.ndarray) 48 | x.data = self._transform_state_(data) 49 | return x 50 | 51 | 52 | class NetworkTransform(Transform): 53 | def _transform_network_(self, g): 54 | raise NotImplementedError() 55 | 56 | def __call__(self, x): 57 | if not issubclass(type(x), NetworkData): 58 | return x 59 | g = x.data 60 | x.data = self._transform_network_(g) 61 | return x 62 | -------------------------------------------------------------------------------- /dynalearn/datasets/weights/__init__.py: -------------------------------------------------------------------------------- 1 | from .weight import * 2 | from .kde import * 3 | from .discrete import * 4 | from .continuous import * 5 | from .structure import * 6 | -------------------------------------------------------------------------------- /dynalearn/datasets/weights/discrete.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | 4 | from .weight import Weight 5 | from dynalearn.util import from_nary 6 | 7 | 8 | class DiscreteStateWeight(Weight): 9 | def __init__(self, name="weights", maxlag=1, bias=1.0): 10 | self.maxlag = maxlag 11 | Weight.__init__(self, name=name, max_num_samples=maxlag, bias=bias) 12 | 13 | def setUp(self, dataset): 14 | self.num_states = dataset.num_states 15 | if dataset.lag > 
self.maxlag: 16 | self.lag = self.maxlag 17 | else: 18 | self.lag = dataset.lag 19 | self.num_updates = 2 * np.sum( 20 | [dataset.inputs[i].data.shape[0] for i in range(dataset.networks.size)] 21 | ) 22 | 23 | def _get_compound_states_(self, adj, state): 24 | eff_num_states = self.num_states ** self.lag 25 | s = np.array([from_nary(ss[-self.lag :], base=self.num_states) for ss in state]) 26 | ns = np.zeros((state.shape[0], eff_num_states)) 27 | for j in range(eff_num_states): 28 | ns[:, j] = adj @ (s == j) 29 | return s, ns 30 | 31 | def _get_features_(self, network, states, pb=None): 32 | adj = network.to_array() 33 | degree = network.degree() 34 | for i, x in enumerate(states): 35 | s, ns = self._get_compound_states_(adj, x) 36 | for j in range(s.shape[0]): 37 | key = (s[j], degree[j]) 38 | self._add_features_(("state", key)) 39 | if pb is not None: 40 | pb.update() 41 | 42 | def _get_weights_(self, network, states, pb=None): 43 | weights = np.zeros((states.shape[0], states.shape[1])) 44 | z = sum(self.features.values()) 45 | adj = network.to_array() 46 | degree = network.degree() 47 | for i, x in enumerate(states): 48 | s, ns = self._get_compound_states_(adj, x) 49 | for j in range(s.shape[0]): 50 | key = (s[j], degree[j]) 51 | weights[i, j] = self.features[("state", key)] / z 52 | if pb is not None: 53 | pb.update() 54 | return weights 55 | 56 | 57 | class DiscreteCompoundStateWeight(DiscreteStateWeight): 58 | def _get_features_(self, network, states, pb=None): 59 | adj = network.to_array() 60 | for i, x in enumerate(states): 61 | s, ns = self._get_compound_states_(adj, x) 62 | for j in range(s.shape[0]): 63 | key = (s[j], *ns[j]) 64 | self._add_features_(("state", key)) 65 | if pb is not None: 66 | pb.update() 67 | 68 | def _get_weights_(self, network, states, pb=None): 69 | weights = np.zeros((states.shape[0], states.shape[1])) 70 | z = sum(self.features.values()) 71 | adj = network.to_array() 72 | for i, x in enumerate(states): 73 | s, ns = self._get_compound_states_(adj, x) 74 | for j in range(s.shape[0]): 75 | key = (s[j], *ns[j]) 76 | weights[i, j] = self.features[("state", key)] / z 77 | if pb is not None: 78 | pb.update() 79 | return weights 80 | -------------------------------------------------------------------------------- /dynalearn/datasets/weights/kde.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.stats import gaussian_kde 3 | from sklearn.neighbors import KernelDensity 4 | 5 | 6 | class KernelDensityEstimator: 7 | def __init__(self, samples=None, max_num_samples=-1): 8 | self.max_num_samples = max_num_samples 9 | 10 | self.dim = None 11 | self.kde = None 12 | self._mean = None 13 | self._std = None 14 | self._norm = None 15 | self._index = None 16 | if samples is not None and len(samples) > 0: 17 | assert isinstance(samples, list) 18 | 19 | self.samples = samples 20 | if isinstance(samples[0], np.ndarray): 21 | self.dim = np.prod(samples[0].shape) 22 | elif isinstance(samples[0], (int, float)): 23 | self.dim = 1 24 | for s in samples: 25 | if isinstance(s, (int, float)): 26 | s = np.array([s]) 27 | s = s.reshape(-1) 28 | assert s.shape[0] == self.dim 29 | 30 | self.get_kde() 31 | 32 | def pdf(self, x): 33 | if isinstance(x, list): 34 | if len(x) == 0: 35 | return np.array([1.0]) 36 | x = np.array(x) 37 | x = x.reshape(x.shape[0], self.dim).T 38 | 39 | x = x.reshape(self.dim, -1) 40 | 41 | assert x.shape[0] == self.dim or self.dim is None 42 | 43 | if self.kde is None: 44 | return 
np.ones(x.shape[-1]) / self._norm 45 | else: 46 | y = (x[self._index] - self._mean[self._index]) / self._std[self._index] 47 | # p = self.kde.pdf(y) / self._norm 48 | p = np.exp(self.kde.score_samples(y.T)) / self._norm 49 | assert np.all(np.logical_and(p >= 0, p <= 1)), "Encountered invalid value." 50 | return p 51 | 52 | def get_kde(self): 53 | if len(self.samples) <= 1: 54 | self._norm = 1 55 | return 56 | x = np.array(self.samples) 57 | x = x.reshape(x.shape[0], self.dim).T 58 | mean = np.expand_dims(x.mean(axis=-1), -1) 59 | std = np.expand_dims(x.std(axis=-1), -1) 60 | condition = np.logical_or(std < 1e-8, np.isnan(std)) 61 | if np.all(np.logical_or(std < 1e-8, np.isnan(std))): 62 | self._norm = len(self.samples) 63 | return 64 | self._index = np.where(~condition)[0] 65 | y = (x[self._index] - mean[self._index]) / std[self._index] 66 | # self.kde = gaussian_kde(y, bw_method="silverman") 67 | self.kde = KernelDensity(kernel="gaussian").fit(y.T) 68 | self._mean = mean 69 | self._std = std 70 | # p = self.kde.pdf(y) 71 | p = np.exp(self.kde.score_samples(y.T)) 72 | self._norm = p.sum() 73 | assert np.all(p > 0), f"Encountered an invalid value" 74 | self.samples = [] 75 | -------------------------------------------------------------------------------- /dynalearn/datasets/weights/structure.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from scipy.stats import gmean 4 | from .weight import Weight 5 | from .kde import KernelDensityEstimator 6 | 7 | 8 | class DegreeWeight(Weight): 9 | def setUp(self, dataset): 10 | self.num_updates = 2 * np.sum( 11 | [ 12 | dataset.networks[i].data.number_of_nodes() 13 | for i in range(dataset.networks.size) 14 | ] 15 | ) 16 | 17 | def _get_features_(self, network, states, pb=None): 18 | for k in network.degree(): 19 | self._add_features_(k) 20 | if pb is not None: 21 | pb.update() 22 | 23 | def _get_weights_(self, network, states, pb=None): 24 | weights = np.zeros((states.shape[0], states.shape[1])) 25 | 26 | z = sum(self.features.values()) 27 | for i, k in enumerate(network.degree()): 28 | weights[:, i] = self.features[k] / z 29 | if pb is not None: 30 | pb.update() 31 | return weights 32 | 33 | 34 | class StrengthWeight(Weight): 35 | def setUp(self, dataset): 36 | self.num_updates = 2 * np.sum( 37 | [ 38 | dataset.networks[i].data.number_of_nodes() 39 | for i in range(dataset.networks.size) 40 | ] 41 | ) 42 | 43 | def _get_features_(self, network, states, pb=None): 44 | for i, k in enumerate(network.degree()): 45 | self._add_features_(("degree", k)) 46 | for j in network.neighbors(i): 47 | if "weight" in network.data.edges[i, j]: 48 | ew = network.data.edges[i, j]["weight"] 49 | else: 50 | ew = 1 51 | self._add_features_(("weight", k), ew) 52 | if pb is not None: 53 | pb.update() 54 | 55 | def _get_weights_(self, network, states, pb=None): 56 | weights = np.zeros((states.shape[0], states.shape[1])) 57 | 58 | z = 0 59 | kde = {} 60 | mean = {} 61 | std = {} 62 | for k, v in self.features.items(): 63 | if k[0] == "degree": 64 | z += v 65 | elif k[0] == "weight": 66 | kde[k[1]] = KernelDensityEstimator(samples=v) 67 | for i, k in enumerate(network.degree()): 68 | ew = [] 69 | for j in network.neighbors(i): 70 | if "weight" in network.data.edges[i, j]: 71 | ew.append(network.data.edges[i, j]["weight"]) 72 | else: 73 | ew.append(1) 74 | if k > 0: 75 | p = gmean(kde[k].pdf(ew)) 76 | else: 77 | p = 1.0 78 | weights[:, i] = self.features[("degree", k)] / z * p 79 | 80 | if pb is not None: 81 | 
pb.update() 82 | return weights 83 | -------------------------------------------------------------------------------- /dynalearn/dynamics/__init__.py: -------------------------------------------------------------------------------- 1 | from .stochastic_epidemics import * 2 | from .deterministic_epidemics import * 3 | from .trainable import * 4 | from .getter import * 5 | -------------------------------------------------------------------------------- /dynalearn/dynamics/activation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.special import lambertw 3 | 4 | 5 | def sigmoid(x): 6 | return 1.0 / (np.exp(-x) + 1) 7 | 8 | 9 | def constant(l, p): 10 | return np.ones(l.shape) * p 11 | 12 | 13 | def independent(l, p): 14 | return 1 - (1 - p) ** l 15 | 16 | 17 | def threshold(l, k, beta, mu): 18 | # l: infected-neighbor count, k: node degree; both are passed in as arrays, 19 | # so the activation is the sigmoid of the normalized excess l / k - mu. 20 | p = sigmoid(beta * (l / k - mu)) 21 | p[k == 0] = 0 22 | return p 23 | 24 | 25 | def nonlinear(l, tau, alpha): 26 | p = (1 - (1 - tau) ** l) ** alpha 27 | return p 28 | 29 | 30 | def sine(l, tau, epsilon, period): 31 | p = (1 - (1 - tau) ** l) * (1 - epsilon * (np.sin(np.pi * l / period)) ** 2) 32 | return p 33 | 34 | 35 | def planck(l, temperature): 36 | gamma = (lambertw(-3 * np.exp(-3)) + 3).real 37 | Z = gamma ** 3 * temperature ** 3 / (np.exp(gamma) - 1) 38 | p = np.zeros(l.shape) 39 | p[l > 0] = l[l > 0] ** 3 / (np.exp(l[l > 0] / temperature) - 1) / Z 40 | p[l == 0] = 0 41 | return p 42 | -------------------------------------------------------------------------------- /dynalearn/dynamics/deterministic_epidemics/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import * 2 | from .simple import * 3 | from .incidence import * 4 | -------------------------------------------------------------------------------- /dynalearn/dynamics/deterministic_epidemics/incidence.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .simple import ( 4 | SimpleDSIR, 5 | WeightedDSIR, 6 | MultiplexDSIR, 7 | WeightedMultiplexDSIR, 8 | ) 9 | from dynalearn.config import Config 10 | 11 | EPSILON = 0.0 12 | 13 | 14 | class SimpleIncSIR(SimpleDSIR): 15 | def __init__(self, config=None, **kwargs): 16 | super().__init__(config=config, **kwargs) 17 | self.latent_state = None 18 | self._num_states = 1 19 | 20 | def initial_state(self, init_param=None, density=None, squeeze=True): 21 | if init_param is None: 22 | init_param = self.init_param 23 | if not isinstance(init_param, (np.ndarray, list)): 24 | init_param = np.random.rand() 25 | 26 | x = np.zeros([self.num_nodes, self.num_states]) 27 | self.population = self.init_population(density=density) 28 | for i, n in enumerate(self.population): 29 | x[i] = np.random.binomial(n, init_param) / n 30 | 31 | self.latent_state = np.zeros((x.shape[0], 3)) 32 | self.latent_state[:, 0] = 1 - x.squeeze() 33 | self.latent_state[:, 1] = x.squeeze() 34 | x = x.reshape(*x.shape, 1).repeat(self.lag, -1) 35 | if squeeze: 36 | return x.squeeze() 37 | else: 38 | return x 39 | 40 | def predict(self, x): 41 | if x.ndim == 3: 42 | x = x[:, :, -1] 43 | x = x.squeeze() 44 | if self.latent_state is None: 45 | self.latent_state = np.zeros((x.shape[0], 3)) 46 | self.latent_state[:, 0] = 1 - x / self.population 47 | self.latent_state[:, 1] = x / self.population 48 | self.latent_state[:, 0] -= x / self.population 49 | 
self.latent_state[:, 1] += x / self.population 50 | current_i = self.latent_state[:, 1] * self.population  # infected count before stepping 51 | p = super().predict(self.latent_state) 52 | self.latent_state = p * 1 53 | future_i = p[:, 1] * self.population 54 | y = (future_i - current_i).reshape(-1, 1) 55 | return y 56 | 57 | 58 | class WeightedIncSIR(SimpleIncSIR, WeightedDSIR): 59 | def __init__(self, config=None, **kwargs): 60 | WeightedDSIR.__init__(self, config=config, **kwargs) 61 | SimpleIncSIR.__init__(self, config=config, **kwargs) 62 | 63 | 64 | class MultiplexIncSIR(SimpleIncSIR, MultiplexDSIR): 65 | def __init__(self, config=None, **kwargs): 66 | MultiplexDSIR.__init__(self, config=config, **kwargs) 67 | SimpleIncSIR.__init__(self, config=config, **kwargs) 68 | 69 | 70 | class WeightedMultiplexIncSIR(SimpleIncSIR, WeightedMultiplexDSIR): 71 | def __init__(self, config=None, **kwargs): 72 | WeightedMultiplexDSIR.__init__(self, config=config, **kwargs) 73 | SimpleIncSIR.__init__(self, config=config, **kwargs) 74 | 75 | 76 | def IncSIR(config=None, **kwargs): 77 | if "is_weighted" in config.__dict__: 78 | is_weighted = config.is_weighted 79 | else: 80 | is_weighted = False 81 | 82 | if "is_multiplex" in config.__dict__: 83 | is_multiplex = config.is_multiplex 84 | else: 85 | is_multiplex = False 86 | 87 | if is_weighted and is_multiplex: 88 | return WeightedMultiplexIncSIR(config=config, **kwargs) 89 | elif is_weighted and not is_multiplex: 90 | return WeightedIncSIR(config=config, **kwargs) 91 | elif not is_weighted and is_multiplex: 92 | return MultiplexIncSIR(config=config, **kwargs) 93 | else: 94 | return SimpleIncSIR(config=config, **kwargs) 95 | -------------------------------------------------------------------------------- /dynalearn/dynamics/getter.py: -------------------------------------------------------------------------------- 1 | from .stochastic_epidemics import ( 2 | SIS, 3 | SIR, 4 | PlanckSIS, 5 | AsymmetricSISSIS, 6 | ) 7 | from .deterministic_epidemics import DSIR, IncSIR 8 | from .trainable import ( 9 | GNNSEDynamics, 10 | GNNDEDynamics, 11 | GNNIncidenceDynamics, 12 | VARDynamics, 13 | KapoorDynamics, 14 | ) 15 | 16 | __dynamics__ = { 17 | "SIS": SIS, 18 | "SIR": SIR, 19 | "DSIR": DSIR, 20 | "IncSIR": IncSIR, 21 | "PlanckSIS": PlanckSIS, 22 | "AsymmetricSISSIS": AsymmetricSISSIS, 23 | "GNNSEDynamics": GNNSEDynamics, 24 | "TrainableStochasticEpidemics": GNNSEDynamics, 25 | "GNNDEDynamics": GNNDEDynamics, 26 | "GNNIncidenceDynamics": GNNIncidenceDynamics, 27 | "VARDynamics": VARDynamics, 28 | "KapoorDynamics": KapoorDynamics, 29 | } 30 | 31 | 32 | def get(config): 33 | name = config.name 34 | if name in __dynamics__: 35 | return __dynamics__[name](config) 36 | else: 37 | raise ValueError( 38 | f"{name} is invalid, possible entries are {list(__dynamics__.keys())}" 39 | ) 40 | -------------------------------------------------------------------------------- /dynalearn/dynamics/incidence/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import * 2 | from .simple import * 3 | -------------------------------------------------------------------------------- /dynalearn/dynamics/stochastic_epidemics/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import * 2 | from .simple import * 3 | from .complex import * 4 | from .interacting import * 5 | -------------------------------------------------------------------------------- /dynalearn/dynamics/stochastic_epidemics/base.py:
-------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | 5 | from dynalearn.dynamics.dynamics import Dynamics 6 | from dynalearn.nn.models import Propagator 7 | from dynalearn.util import from_binary, onehot 8 | 9 | 10 | class StochasticEpidemics(Dynamics): 11 | def __init__(self, config, num_states): 12 | Dynamics.__init__(self, config, num_states) 13 | if "init_param" in config.__dict__: 14 | self.init_param = config.init_param 15 | else: 16 | self.init_param = None 17 | self.propagator = Propagator(num_states) 18 | self.state_map = {i: i for i in range(num_states)} 19 | 20 | def sample(self, x): 21 | p = self.predict(x) 22 | dist = torch.distributions.Categorical(torch.tensor(p)) 23 | y = np.array(dist.sample()) 24 | return y 25 | 26 | def loglikelihood(self, x, y=None, g=None): 27 | if g is not None: 28 | self.network = g 29 | if y is None: 30 | y = np.roll(x, -1, axis=0)[:-1] 31 | x = x[:-1] 32 | 33 | if x.shape == (self.lag, self.num_nodes) or x.shape == (self.num_nodes,): 34 | x = x.reshape(1, self.lag, self.num_nodes) 35 | y = y.reshape(1, self.num_nodes) 36 | 37 | loglikelihood = 0 38 | for i in range(x.shape[0]): 39 | p = self.predict(x[i]) 40 | onehot_y = onehot(y[i], num_class=self.num_states) 41 | p = (onehot_y * p).sum(-1) 42 | p[p <= 1e-15] = 1e-15 43 | logp = np.log(p) 44 | loglikelihood += logp.sum() 45 | return loglikelihood 46 | 47 | def neighbors_state(self, x): 48 | if len(x.shape) > 1: 49 | raise ValueError( 50 | f"Invalid shape, expected shape of size 1 and got {x.shape}" 51 | ) 52 | 53 | l = self.propagator.forward(x, self.edge_index) 54 | l = l.cpu().numpy() 55 | return l 56 | 57 | def initial_state(self, init_param=None, squeeze=True): 58 | if init_param is None: 59 | init_param = self.init_param 60 | if init_param is None: 61 | init_param = np.random.rand(self.num_states) 62 | init_param /= init_param.sum() 63 | elif isinstance(init_param, list): 64 | init_param = np.array(init_param) 65 | 66 | assert isinstance(init_param, np.ndarray) 67 | assert init_param.shape == (self.num_states,) 68 | x = np.random.multinomial(1, init_param, size=self.num_nodes) 69 | x = np.where(x == 1.0)[1] 70 | x = x.reshape(*x.shape, 1).repeat(self.lag, -1) 71 | if squeeze: 72 | return x.squeeze() 73 | else: 74 | return x 75 | 76 | def is_dead(self, x): 77 | if x.ndim == 2: 78 | x = x[:, -1] 79 | if self.number_of_infected(x) == 0: 80 | return True 81 | else: 82 | return False 83 | 84 | def nearly_dead_state(self, num_infected=None): 85 | raise NotImplementedError() 86 | 87 | def number_of_infected(self, x): 88 | raise NotImplementedError() 89 | -------------------------------------------------------------------------------- /dynalearn/dynamics/stochastic_epidemics/simple.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .base import StochasticEpidemics 4 | from dynalearn.dynamics.activation import independent 5 | from dynalearn.config import Config 6 | 7 | 8 | class SIS(StochasticEpidemics): 9 | def __init__(self, config=None, **kwargs): 10 | config = config or Config(**kwargs) 11 | StochasticEpidemics.__init__(self, config, 2) 12 | self.infection = config.infection 13 | self.recovery = config.recovery 14 | 15 | def predict(self, x): 16 | if len(x.shape) > 1: 17 | x = x[:, -1].squeeze() 18 | ltp = np.zeros((x.shape[0], self.num_states)) 19 | p = independent(self.neighbors_state(x)[1], self.infection) 20 | q = self.recovery
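# Local transition probabilities: row s of `ltp` is the distribution over the
# next state of a node currently in state s (0 = S, 1 = I). A susceptible node
# becomes infected with probability p = 1 - (1 - infection)^l given l infected
# neighbors (see `independent` above), and an infected node recovers with
# probability q.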
21 | ltp[x == 0, 0] = 1 - p[x == 0] 22 | ltp[x == 0, 1] = p[x == 0] 23 | ltp[x == 1, 0] = q 24 | ltp[x == 1, 1] = 1 - q 25 | return ltp 26 | 27 | def number_of_infected(self, x): 28 | return np.sum(x == 1) 29 | 30 | def nearly_dead_state(self, num_infected=None): 31 | num_infected = num_infected or 1 32 | x = np.zeros(self.num_nodes) 33 | i = np.random.choice(range(self.num_nodes), size=num_infected) 34 | x[i] = 1 35 | return x 36 | 37 | 38 | class SIR(StochasticEpidemics): 39 | def __init__(self, config=None, **kwargs): 40 | config = config or Config(**kwargs) 41 | StochasticEpidemics.__init__(self, config, 3) 42 | self.infection = config.infection 43 | self.recovery = config.recovery 44 | 45 | def predict(self, x): 46 | if len(x.shape) > 1: 47 | x = x[:, -1].squeeze() 48 | ltp = np.zeros((x.shape[0], self.num_states)) 49 | p = independent(self.neighbors_state(x)[1], self.infection) 50 | q = self.recovery 51 | ltp[x == 0, 0] = 1 - p[x == 0] 52 | ltp[x == 0, 1] = p[x == 0] 53 | ltp[x == 0, 2] = 0 54 | ltp[x == 1, 0] = 0 55 | ltp[x == 1, 1] = 1 - q 56 | ltp[x == 1, 2] = q 57 | ltp[x == 2, 0] = 0 58 | ltp[x == 2, 1] = 0 59 | ltp[x == 2, 2] = 1 60 | return ltp 61 | 62 | def number_of_infected(self, x): 63 | return np.sum(x == 1) 64 | 65 | def nearly_dead_state(self, num_infected=None): 66 | num_infected = num_infected or 1 67 | x = np.zeros(self.num_nodes) 68 | i = np.random.choice(range(self.num_nodes), size=num_infected) 69 | x[i] = 1 70 | return x 71 | -------------------------------------------------------------------------------- /dynalearn/dynamics/trainable/__init__.py: -------------------------------------------------------------------------------- 1 | from .stochastic_epidemics import * 2 | from .deterministic_epidemics import * 3 | from .incidence import * 4 | from .var import * 5 | from .kapoor import * 6 | -------------------------------------------------------------------------------- /dynalearn/dynamics/trainable/kapoor.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import time 4 | import torch 5 | 6 | from dynalearn.dynamics.incidence import ( 7 | IncidenceEpidemics, 8 | WeightedIncidenceEpidemics, 9 | MultiplexIncidenceEpidemics, 10 | WeightedMultiplexIncidenceEpidemics, 11 | ) 12 | from dynalearn.nn.models import Kapoor2020GNN 13 | from dynalearn.nn.optimizers import get as get_optimizer 14 | from dynalearn.util import to_edge_index 15 | from dynalearn.config import Config 16 | 17 | 18 | class KapoorDynamics(IncidenceEpidemics): 19 | def __init__(self, config=None, **kwargs): 20 | self.config = config or Config(**kwargs) 21 | IncidenceEpidemics.__init__(self, self.config) 22 | self.nn = Kapoor2020GNN(self.config) 23 | self.lag = self.nn.lag 24 | if torch.cuda.is_available(): 25 | self.nn = self.nn.cuda() 26 | 27 | def is_dead(self, x=None): 28 | return False 29 | 30 | def update(self, x): 31 | raise ValueError("This method is invalid for Trainable models.") 32 | 33 | def infection_rate(self, x): 34 | raise ValueError("This method is invalid for Trainable models.") 35 | 36 | def predict(self, x): 37 | if isinstance(x, np.ndarray): 38 | x = torch.Tensor(x) 39 | assert x.ndim == 3 40 | assert x.shape[1] == self.num_states 41 | assert x.shape[2] == self.lag 42 | x = self.nn.transformers["t_inputs"].forward(x) 43 | g = self.nn.transformers["t_networks"].forward(self.network) 44 | y = self.nn.transformers["t_targets"].backward(self.nn.forward(x, g)) 45 | return y.cpu().detach().numpy() 46 |
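# A minimal usage sketch (not from the repository): it assumes a `config`
# exposing the fields read above and a `Network` object `g` built with
# dynalearn.networks; both names are hypothetical placeholders.
#
#     model = KapoorDynamics(config)
#     model.network = g  # network setter inherited from Dynamics
#     x = np.zeros((g.data.number_of_nodes(), model.num_states, model.lag))
#     y = model.predict(x)  # (num_nodes, 1) array of predicted incidence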
-------------------------------------------------------------------------------- /dynalearn/dynamics/trainable/stochastic_epidemics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import time 3 | import torch 4 | 5 | from dynalearn.dynamics.stochastic_epidemics import StochasticEpidemics 6 | from dynalearn.nn.models import StochasticEpidemicsGNN 7 | from dynalearn.config import Config 8 | 9 | 10 | class GNNSEDynamics(StochasticEpidemics): 11 | def __init__(self, config=None, **kwargs): 12 | self.config = config or Config(**kwargs) 13 | StochasticEpidemics.__init__(self, self.config, self.config.num_states) 14 | self.nn = StochasticEpidemicsGNN(self.config) 15 | if torch.cuda.is_available(): 16 | self.nn = self.nn.cuda() 17 | 18 | def predict(self, x): 19 | if isinstance(x, np.ndarray): 20 | x = torch.Tensor(x) 21 | assert x.ndim == 2 22 | assert x.shape[-1] == self.lag 23 | x = self.nn.transformers["t_inputs"].forward(x) 24 | g = self.nn.transformers["t_networks"].forward(self.network) 25 | y = self.nn.transformers["t_targets"].backward(self.nn.forward(x, g)) 26 | return y.cpu().detach().numpy() 27 | 28 | def number_of_infected(self, x): 29 | return np.inf 30 | 31 | def nearly_dead_state(self, **kwargs): 32 | return self.initial_state() 33 | -------------------------------------------------------------------------------- /dynalearn/experiments/__init__.py: -------------------------------------------------------------------------------- 1 | from .experiment import * 2 | from dynalearn.experiments.metrics import * 3 | -------------------------------------------------------------------------------- /dynalearn/experiments/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .metrics import * 2 | from .forecast import * 3 | from .ltp import * 4 | from .prediction import * 5 | from .statistics import * 6 | from .stationary import * 7 | from .getter import * 8 | -------------------------------------------------------------------------------- /dynalearn/experiments/metrics/getter.py: -------------------------------------------------------------------------------- 1 | from .metrics import CustomMetrics 2 | from .ltp import * 3 | from .prediction import * 4 | from .statistics import * 5 | from .stationary import * 6 | from .forecast import * 7 | from .attention import * 8 | 9 | __metrics__ = { 10 | "TrueLTPMetrics": TrueLTPMetrics, 11 | "GNNLTPMetrics": GNNLTPMetrics, 12 | "MLELTPMetrics": MLELTPMetrics, 13 | "PredictionMetrics": PredictionMetrics, 14 | "StatisticsMetrics": StatisticsMetrics, 15 | "TruePSSMetrics": TruePSSMetrics, 16 | "GNNPSSMetrics": GNNPSSMetrics, 17 | "TrueERSSMetrics": TrueERSSMetrics, 18 | "GNNERSSMetrics": GNNERSSMetrics, 19 | "TrueForecastMetrics": TrueForecastMetrics, 20 | "GNNForecastMetrics": GNNForecastMetrics, 21 | "VARForecastMetrics": VARForecastMetrics, 22 | "AttentionMetrics": AttentionMetrics, 23 | "AttentionStatesNMIMetrics": AttentionStatesNMIMetrics, 24 | "AttentionNodeAttrNMIMetrics": AttentionNodeAttrNMIMetrics, 25 | "AttentionEdgeAttrNMIMetrics": AttentionEdgeAttrNMIMetrics, 26 | } 27 | 28 | 29 | def get(config): 30 | names = config.names 31 | metrics = {} 32 | for n in names: 33 | if n in __metrics__: 34 | metrics[n] = __metrics__[n](config) 35 | else: 36 | metrics[n] = CustomMetrics(config) 37 | return metrics 38 | -------------------------------------------------------------------------------- /dynalearn/experiments/metrics/metrics.py:
-------------------------------------------------------------------------------- 1 | import h5py 2 | import numpy as np 3 | import tqdm 4 | 5 | from dynalearn.util import Verbose 6 | 7 | 8 | class Metrics: 9 | def __init__(self, config): 10 | self.config = config 11 | self.data = {} 12 | self.names = [] 13 | self.get_data = {} 14 | self.num_updates = 0 15 | 16 | def initialize(self, experiment): 17 | raise NotImplementedError() 18 | 19 | def exit(self, experiment): 20 | return 21 | 22 | def compute(self, experiment, verbose=Verbose()): 23 | self.verbose = verbose 24 | self.initialize(experiment) 25 | 26 | pb = self.verbose.progress_bar(self.__class__.__name__, self.num_updates) 27 | for k in self.names: 28 | d = self.get_data[k](pb=pb) 29 | if isinstance(d, dict): 30 | for kk, vv in d.items(): 31 | self.data[k + "/" + kk] = vv 32 | 33 | elif isinstance(d, (float, int, np.ndarray)): 34 | self.data[k] = d 35 | 36 | if pb is not None: 37 | pb.close() 38 | 39 | self.exit(experiment) 40 | 41 | def update(self, data): 42 | self.data.update(data) 43 | 44 | def save(self, h5file, name=None): 45 | if not isinstance(h5file, (h5py.File, h5py.Group)): 46 | raise ValueError("Dataset file format must be HDF5.") 47 | 48 | name = name or self.__class__.__name__ 49 | 50 | for k, v in self.data.items(): 51 | path = name + "/" + str(k) 52 | if path in h5file: 53 | del h5file[path] 54 | h5file.create_dataset(path, data=v) 55 | 56 | def load(self, h5file, name=None): 57 | if not isinstance(h5file, (h5py.File, h5py.Group)): 58 | raise ValueError("Dataset file format must be HDF5.") 59 | 60 | name = name or self.__class__.__name__ 61 | 62 | if name in h5file: 63 | self.data = self.read_h5_recursively(h5file[name]) 64 | 65 | def read_h5_recursively(self, h5file, prefix=""): 66 | ans_dict = {} 67 | for key in h5file: 68 | item = h5file[key] 69 | if prefix == "": 70 | path = f"{key}" 71 | else: 72 | path = f"{prefix}/{key}" 73 | 74 | if isinstance(item, h5py.Dataset): 75 | ans_dict[path] = item[...] 
76 | elif isinstance(item, h5py.Group): 77 | d = self.read_h5_recursively(item, path) 78 | ans_dict.update(d) 79 | else: 80 | raise ValueError() 81 | return ans_dict 82 | 83 | 84 | class CustomMetrics(Metrics): 85 | def initialize(self, experiment): 86 | return 87 | -------------------------------------------------------------------------------- /dynalearn/experiments/metrics/util/__init__.py: -------------------------------------------------------------------------------- 1 | from .initializer import * 2 | from .model_sampler import * 3 | from .statistics import * 4 | from .mutual_info import * 5 | -------------------------------------------------------------------------------- /dynalearn/experiments/metrics/util/initializer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class Initializer: 5 | def __init__(self, config): 6 | self.config = config 7 | self._init_param = config.init_param 8 | self.current_param = self._init_param.copy() 9 | self.init_epsilon = config.init_epsilon 10 | self.adaptive = config.adaptive 11 | self.all_modes = list(self._init_param.keys()) 12 | self.num_modes = len(self._init_param.keys()) 13 | self._mode = self.all_modes[0] 14 | self.mode_index = 0 15 | 16 | def __call__(self): 17 | _x0 = self.dynamics.initial_state(init_param=self.current_param[self.mode]) 18 | x0 = np.zeros((*_x0.shape, self.lag * self.lagstep)) 19 | x0.T[0] = _x0.T 20 | for i in range(1, self.lag * self.lagstep): 21 | x0.T[i] = self.dynamics.sample(x0[..., i - 1]).T 22 | return x0 23 | 24 | def setUp(self, metrics): 25 | self.dynamics = metrics.dynamics 26 | self.num_states = metrics.model.num_states 27 | self.lag = metrics.model.lag 28 | self.lagstep = metrics.model.lagstep 29 | 30 | def update(self, x): 31 | assert x.shape == (self.num_states,) 32 | if self.adaptive: 33 | x[x < self.init_epsilon] = self.init_epsilon 34 | self.current_param[self.mode] = x 35 | self.current_param[self.mode] /= self.current_param[self.mode].sum() 36 | 37 | def next_mode(self): 38 | self.mode_index += 1 39 | if self.mode_index == len(self.all_modes): 40 | self.mode_index = 0 41 | self.mode = self.all_modes[self.mode_index] 42 | return self.mode 43 | 44 | @property 45 | def mode(self): 46 | return self._mode 47 | 48 | @mode.setter 49 | def mode(self, mode): 50 | if mode in self.all_modes: 51 | self._mode = mode 52 | else: 53 | raise ValueError( 54 | f"Mode `{mode}` is invalid, available modes are `{self.all_modes}`" 55 | ) 56 | -------------------------------------------------------------------------------- /dynalearn/experiments/metrics/util/mutual_info.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from scipy.special import digamma 4 | from sklearn.neighbors import NearestNeighbors, KDTree 5 | 6 | 7 | def mutual_info(x, y, n_neighbors=3, metric="euclidean"): 8 | n_samples = x.shape[0] 9 | if n_samples != y.shape[0]: 10 | raise ValueError( 11 | f"Invalid shapes: got {x.shape} for `x` and {y.shape} for `y`." 12 | ) 13 | xy = np.hstack((x, y)) 14 | 15 | # Here we rely on NearestNeighbors to select the fastest algorithm.
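# This is the Kraskov-Stogbauer-Grassberger (KSG) k-NN estimator:
# MI ~ psi(N) + psi(k) - <psi(n_x + 1) + psi(n_y + 1)>, where n_x and n_y
# count the neighbors of each sample that fall within its k-NN radius in the
# joint (x, y) space, and psi is the digamma function.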
16 | nn = NearestNeighbors(metric=metric, n_neighbors=n_neighbors) 17 | 18 | nn.fit(xy) 19 | radius = nn.kneighbors()[0] 20 | radius = np.nextafter(radius[:, -1], 0) 21 | 22 | # KDTree is explicitly fit to allow for the querying of number of 23 | # neighbors within a specified radius 24 | kd = KDTree(x, metric=metric) 25 | nx = kd.query_radius(x, radius, count_only=True, return_distance=False) 26 | nx = np.array(nx) - 1.0 27 | 28 | kd = KDTree(y, metric=metric) 29 | ny = kd.query_radius(y, radius, count_only=True, return_distance=False) 30 | ny = np.array(ny) - 1.0 31 | 32 | mi = ( 33 | digamma(n_samples) 34 | + digamma(n_neighbors) 35 | - np.mean(digamma(nx + 1)) 36 | - np.mean(digamma(ny + 1)) 37 | ) 38 | return max(0, mi) 39 | -------------------------------------------------------------------------------- /dynalearn/experiments/metrics/util/statistics.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class Statistics: 5 | def __call__(self, x): 6 | if isinstance(x, list): 7 | x = np.array(x) 8 | return x 9 | 10 | @classmethod 11 | def getter(cls, config): 12 | __all_statistics__ = { 13 | "Statistics": Statistics, 14 | "MeanVarStatistics": MeanVarStatistics, 15 | } 16 | 17 | if config.statistics in __all_statistics__: 18 | return __all_statistics__[config.statistics]() 19 | else: 20 | raise ValueError( 21 | f"`{config.statistics}` is invalid, valid entries are `{__all_statistics__.keys()}`" 22 | ) 23 | 24 | def avg(self, s): 25 | y = np.mean(s, axis=(0, 1)) 26 | assert y.shape == (s.shape[-1],) 27 | return y 28 | 29 | 30 | class MeanVarStatistics(Statistics): 31 | def __call__(self, x): 32 | if isinstance(x, list): 33 | x = np.array(x) 34 | s = np.zeros((2, x.shape[-1])) 35 | s[0] = np.mean(x, axis=0) 36 | s[1] = np.var(x, axis=0) 37 | return s 38 | 39 | def avg(self, s): 40 | y = s[0] 41 | assert y.shape == (s.shape[-1],) 42 | return y 43 | -------------------------------------------------------------------------------- /dynalearn/networks/__init__.py: -------------------------------------------------------------------------------- 1 | from .network import * 2 | from .transform import * 3 | from .weight import * 4 | from .generator import * 5 | from .random import * 6 | from .getter import * 7 | -------------------------------------------------------------------------------- /dynalearn/networks/generator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import networkx as nx 3 | 4 | from dynalearn.config import Config 5 | 6 | 7 | class NetworkGenerator: 8 | def __init__(self, config=None): 9 | config = config or Config() 10 | self._config = config 11 | self.is_weighted = False 12 | self.is_multiplex = False 13 | if "num_nodes" in config.__dict__: 14 | self.num_nodes = config.num_nodes 15 | else: 16 | self.num_nodes = None 17 | if "layers" in config.__dict__: 18 | self.layers = config.layers 19 | self.is_multiplex = True 20 | else: 21 | self.layers = None 22 | 23 | def generate(self, seed): 24 | raise NotImplementedError() 25 | 26 | @property 27 | def config(self): 28 | return self._config 29 | -------------------------------------------------------------------------------- /dynalearn/networks/getter.py: -------------------------------------------------------------------------------- 1 | from .random import * 2 | from .weight import * 3 | from .transform import * 4 | 5 | 6 | __networks__ = { 7 | "GNPNetworkGenerator": GNPNetworkGenerator, 8 |
"GNMNetworkGenerator": GNMNetworkGenerator, 9 | "BANetworkGenerator": BANetworkGenerator, 10 | "ConfigurationNetworkGenerator": ConfigurationNetworkGenerator, 11 | } 12 | 13 | __weights__ = { 14 | "EmptyWeightGenerator": EmptyWeightGenerator, 15 | "UniformWeightGenerator": UniformWeightGenerator, 16 | "LogUniformWeightGenerator": LogUniformWeightGenerator, 17 | "NormalWeightGenerator": NormalWeightGenerator, 18 | "LogNormalWeightGenerator": LogNormalWeightGenerator, 19 | "DegreeWeightGenerator": DegreeWeightGenerator, 20 | "BetweennessWeightGenerator": BetweennessWeightGenerator, 21 | } 22 | 23 | __transforms__ = {"SparcifierTransform": SparcifierTransform} 24 | 25 | 26 | def get(config): 27 | name = config.name 28 | weight_gen = None 29 | transforms = [] 30 | 31 | if "weights" in config.__dict__: 32 | if config.weights.name in __weights__: 33 | weight_gen = __weights__[config.weights.name](config.weights) 34 | else: 35 | raise ValueError( 36 | f"{config.weights.name} is invalid, possible entries are {list(__weights__.keys())}" 37 | ) 38 | 39 | if "transforms" in config.__dict__: 40 | assert isinstance(config.transforms.names, list) 41 | for n in config.transforms.names: 42 | if n in __transforms__: 43 | transforms.append(__transforms__[n](config.transforms)) 44 | else: 45 | raise ValueError( 46 | f"{n} is invalid, possible entries are {list(__transforms__.keys())}" 47 | ) 48 | 49 | if name in __networks__: 50 | return __networks__[name](config, weight_gen, transforms) 51 | else: 52 | raise ValueError( 53 | f"{name} is invalid, possible entries are {list(__networks__.keys())}" 54 | ) 55 | -------------------------------------------------------------------------------- /dynalearn/networks/random.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | 4 | from dynalearn.config import Config 5 | from .network import Network, MultiplexNetwork 6 | from .generator import NetworkGenerator 7 | from .transform import NetworkTransformList 8 | from .weight import EmptyWeightGenerator 9 | 10 | 11 | class RandomNetworkGenerator(NetworkGenerator): 12 | def __init__(self, config=None, weights=None, transforms=[], **kwargs): 13 | config = config or Config(**kwargs) 14 | NetworkGenerator.__init__(self, config) 15 | self.weights = weights or EmptyWeightGenerator() 16 | if isinstance(self.weights, EmptyWeightGenerator): 17 | self.is_weighted = False 18 | else: 19 | self.is_weighted = True 20 | self.transforms = NetworkTransformList(transforms) 21 | 22 | def generate(self, seed=None): 23 | if seed is None: 24 | seed = np.random.randint(2 ** 31) 25 | if self.layers is not None: 26 | g = {} 27 | for l in self.layers: 28 | g[l] = self.network(seed) 29 | if self.weights is not None: 30 | g[l] = self.weights(g[l]) 31 | g[l] = self.transforms(g[l]) 32 | return MultiplexNetwork(data=g) 33 | 34 | else: 35 | g = self.network(seed) 36 | if self.weights is not None: 37 | g = self.weights(g) 38 | g = self.transforms(g) 39 | return Network(data=g) 40 | 41 | 42 | class GNPNetworkGenerator(RandomNetworkGenerator): 43 | def network(self, seed=None): 44 | return nx.gnp_random_graph(self.num_nodes, self.config.p, seed=seed) 45 | 46 | 47 | class GNMNetworkGenerator(RandomNetworkGenerator): 48 | def network(self, seed=None): 49 | return nx.gnm_random_graph(self.num_nodes, self.config.m, seed=seed) 50 | 51 | 52 | class BANetworkGenerator(RandomNetworkGenerator): 53 | def network(self, seed=None): 54 | return nx.barabasi_albert_graph(self.num_nodes, 
self.config.m, seed) 55 | 56 | 57 | class ConfigurationNetworkGenerator(RandomNetworkGenerator): 58 | def __init__(self, config=None, weights=None, transforms=[], **kwargs): 59 | config = config or Config(**kwargs) 60 | RandomNetworkGenerator.__init__(self, config, weights=weights, transforms=transforms, **kwargs) 61 | self.p_k = config.p_k 62 | if "maxiter" in config.__dict__: 63 | self.maxiter = config.maxiter 64 | else: 65 | self.maxiter = 100 66 | 67 | def network(self, seed=None): 68 | # `self.maxiter` is already resolved in __init__ (default 100). 69 | it = 0 70 | while it < self.maxiter: 71 | seq = self.p_k.sample(self.num_nodes) 72 | if np.sum(seq) % 2 == 0: 73 | g = nx.expected_degree_graph(seq, seed=seed) 74 | return g 75 | it += 1 76 | raise ValueError("Invalid degree sequence.") 77 | -------------------------------------------------------------------------------- /dynalearn/networks/transform.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from random import sample 4 | 5 | 6 | class NetworkTransform: 7 | def __init__(self, config): 8 | self.config = config 9 | 10 | def __call__(self, g): 11 | return g 12 | 13 | 14 | class NetworkTransformList: 15 | def __init__(self, transforms=[]): 16 | self.transforms = transforms 17 | 18 | def __call__(self, g): 19 | for t in self.transforms: 20 | g = t(g) 21 | return g 22 | 23 | 24 | class SparcifierTransform(NetworkTransform): 25 | def __call__(self, g): 26 | _g = g.copy() 27 | for i in range(self.config.maxiter): 28 | if self.config.p == -1: 29 | p = np.random.rand() 30 | p = 1 - np.log((1 - p) + np.exp(1) * p) 31 | else: 32 | p = self.config.p 33 | num_edges = np.random.binomial(_g.number_of_edges(), p) 34 | removed_edges = sample(_g.edges, num_edges) 35 | _g.remove_edges_from(removed_edges) 36 | if _g.number_of_edges() == 0: 37 | _g = g.copy() 38 | else: 39 | break 40 | return _g 41 | -------------------------------------------------------------------------------- /dynalearn/nn/__init__.py: -------------------------------------------------------------------------------- 1 | import dynalearn.nn.callbacks 2 | import dynalearn.nn.optimizers 3 | import dynalearn.nn.models 4 | from .history import * 5 | from .loss import * 6 | from .metrics import * 7 | from .transformers import * 8 | -------------------------------------------------------------------------------- /dynalearn/nn/activation.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | __activations__ = { 4 | "sigmoid": nn.Sigmoid(), 5 | "softmax": nn.Softmax(dim=-1), 6 | "relu": nn.ReLU(), 7 | "tanh": nn.Tanh(), 8 | "elu": nn.ELU(), 9 | "identity": nn.Identity(), 10 | } 11 | 12 | 13 | def get(name): 14 | if name in __activations__: 15 | return __activations__[name] 16 | else: 17 | raise ValueError( 18 | f"{name} is invalid, possible entries are {list(__activations__.keys())}" 19 | ) 20 | -------------------------------------------------------------------------------- /dynalearn/nn/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | from .callbacks import * 2 | from .best_model_restore import * 3 | from .lr_scheduler import * 4 | from .getter import * 5 | -------------------------------------------------------------------------------- /dynalearn/nn/callbacks/best_model_restore.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from .callbacks
import Callback 4 | from dynalearn.util import Verbose 5 | 6 | 7 | class BestModelRestore(Callback): 8 | def __init__(self, *, monitor="val_loss", mode="min", verbose=Verbose()): 9 | super().__init__() 10 | self.monitor = monitor 11 | 12 | if mode not in ["min", "max"]: 13 | raise ValueError("Invalid mode '%s'" % mode) 14 | if mode == "min": 15 | self.monitor_op = lambda x, y: x < y 16 | self.current_best = float("Inf") 17 | elif mode == "max": 18 | self.monitor_op = lambda x, y: x > y 19 | self.current_best = -float("Inf") 20 | self.best_weights = None 21 | self.verbose = verbose 22 | 23 | def on_epoch_end(self, epoch_number, logs): 24 | if self.monitor_op(logs[self.monitor], self.current_best): 25 | old_best = self.current_best 26 | self.current_best = logs[self.monitor] 27 | 28 | self.verbose( 29 | "Epoch %d: %s improved from %0.5f to %0.5f" 30 | % (epoch_number, self.monitor, old_best, self.current_best) 31 | ) 32 | self.best_weights = self.get_weight_copies() 33 | 34 | def on_train_end(self, logs): 35 | if self.best_weights is not None: 36 | self.verbose("Restoring best model") 37 | self.model.set_weights(self.best_weights) 38 | else: 39 | warnings.warn("No weights to restore!") 40 | 41 | def get_weight_copies(self): 42 | weights = self.model.get_weights() 43 | for k in weights: 44 | weights[k] = weights[k].cpu().clone() 45 | return weights 46 | -------------------------------------------------------------------------------- /dynalearn/nn/callbacks/callbacks.py: -------------------------------------------------------------------------------- 1 | class CallbackList: 2 | def __init__(self, callbacks=None): 3 | callbacks = callbacks or [] 4 | self.callbacks = list(callbacks) 5 | 6 | def append(self, callback): 7 | self.callbacks.append(callback) 8 | 9 | def set_params(self, params): 10 | for callback in self.callbacks: 11 | callback.set_params(params) 12 | 13 | def set_model(self, model): 14 | for callback in self.callbacks: 15 | callback.set_model(model) 16 | 17 | def on_epoch_begin(self, epoch_number, logs=None): 18 | logs = logs or {} 19 | for callback in self.callbacks: 20 | callback.on_epoch_begin(epoch_number, logs) 21 | 22 | def on_epoch_end(self, epoch_number, logs=None): 23 | logs = logs or {} 24 | for callback in self.callbacks: 25 | callback.on_epoch_end(epoch_number, logs) 26 | 27 | def on_batch_begin(self, batch_number, logs=None): 28 | logs = logs or {} 29 | for callback in self.callbacks: 30 | callback.on_batch_begin(batch_number, logs) 31 | 32 | def on_batch_end(self, batch_number, logs=None): 33 | logs = logs or {} 34 | for callback in self.callbacks: 35 | callback.on_batch_end(batch_number, logs) 36 | 37 | def on_backward_end(self, batch_number): 38 | for callback in self.callbacks: 39 | callback.on_backward_end(batch_number) 40 | 41 | def on_train_begin(self, logs=None): 42 | logs = logs or {} 43 | for callback in self.callbacks: 44 | callback.on_train_begin(logs) 45 | 46 | def on_train_end(self, logs=None): 47 | logs = logs or {} 48 | for callback in self.callbacks: 49 | callback.on_train_end(logs) 50 | 51 | def __iter__(self): 52 | return iter(self.callbacks) 53 | 54 | 55 | class Callback: 56 | def __init__(self): 57 | self.model = None 58 | 59 | def set_params(self, params): 60 | self.params = params 61 | 62 | def set_model(self, model): 63 | self.model = model 64 | 65 | def on_epoch_begin(self, epoch_number, logs): 66 | pass 67 | 68 | def on_epoch_end(self, epoch_number, logs): 69 | pass 70 | 71 | def on_batch_begin(self, batch_number, logs): 72 | pass 73 | 74 | 
def on_batch_end(self, batch_number, logs): 75 | pass 76 | 77 | def on_backward_end(self, batch_number): 78 | pass 79 | 80 | def on_train_begin(self, logs): 81 | pass 82 | 83 | def on_train_end(self, logs): 84 | pass 85 | -------------------------------------------------------------------------------- /dynalearn/nn/callbacks/checkpoint.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from .periodic import PeriodicSaveCallback 4 | from .lr_scheduler import _PyTorchLRSchedulerWrapper, ReduceLROnPlateau 5 | 6 | 7 | class ModelCheckpoint(PeriodicSaveCallback): 8 | """ 9 | Save the model after every epoch. See 10 | :class:`~poutyne.framework.callbacks.PeriodicSaveCallback` for the arguments' descriptions. 11 | Args: 12 | restore_best (bool): If `restore_best` is true, the weights of the network will be reset to 13 | the last best checkpoint done. This option only works when `save_best_only` is also true. 14 | (Default value = False) 15 | See: 16 | :class:`~poutyne.framework.callbacks.PeriodicSaveCallback` 17 | """ 18 | 19 | def __init__(self, *args, restore_best=False, **kwargs): 20 | super().__init__(*args, **kwargs) 21 | 22 | self.restore_best = restore_best 23 | if self.restore_best and not self.save_best_only: 24 | raise ValueError( 25 | "The 'restore_best' argument only works when 'save_best_only' is also true." 26 | ) 27 | 28 | def save_file(self, fd, epoch_number, logs): 29 | self.model.save_weights(fd) 30 | 31 | def on_train_end(self, logs): 32 | if self.restore_best: 33 | if self.best_filename is not None: 34 | self.verbose("Restoring model from %s" % self.best_filename) 35 | self.model.load_weights(self.best_filename) 36 | else: 37 | warnings.warn("No weights to restore!") 38 | 39 | 40 | class OptimizerCheckpoint(PeriodicSaveCallback): 41 | """ 42 | Save the state of the optimizer after every epoch. The optimizer can be reloaded as follows. 43 | .. code-block:: python 44 | model = Model(model, optimizer, loss_function) 45 | model.load_optimizer_state(filename) 46 | See :class:`~poutyne.framework.callbacks.PeriodicSaveCallback` for the arguments' descriptions. 47 | See: 48 | :class:`~poutyne.framework.callbacks.PeriodicSaveCallback` 49 | """ 50 | 51 | def save_file(self, fd, epoch_number, logs): 52 | self.model.save_optimizer_state(fd) 53 | -------------------------------------------------------------------------------- /dynalearn/nn/callbacks/getter.py: -------------------------------------------------------------------------------- 1 | from .callbacks import Callback, CallbackList 2 | from .checkpoint import * 3 | from .lr_scheduler import * 4 | from . 
import lr_scheduler 5 | 6 | from torch.optim.lr_scheduler import _LRScheduler 7 | 8 | 9 | import inspect 10 | 11 | __callbacks__ = { 12 | "Callback": lambda config: Callback(), 13 | "CallbackList": lambda config: CallbackList(), 14 | "BestModelRestore": lambda config: BestModelRestore(), 15 | "ModelCheckpoint": lambda config: ModelCheckpoint( 16 | config.path_to_best, save_best_only=True 17 | ), 18 | "OptimizerCheckpoint": lambda config: OptimizerCheckpoint( 19 | config.path_to_best, save_best_only=True 20 | ), 21 | "LambdaLR": lambda config: LambdaLR(config.lr_lambda), 22 | "MultiplicativeLR": lambda config: MultiplicativeLR(config.lr_lambda), 23 | "StepLR": lambda config: StepLR(config.step_size, gamma=config.gamma), 24 | "MultiStepLR": lambda config: MultiStepLR(config.milestones, gamma=config.gamma), 25 | "ExponentialLR": lambda config: ExponentialLR(config.gamma), 26 | "CosineAnnealingLR": lambda config: CosineAnnealingLR( 27 | config.t_max, eta_min=config.eta_min 28 | ), 29 | } 30 | 31 | 32 | def get(config): 33 | names = config.names 34 | callbacks = [] 35 | for n in names: 36 | if n in __callbacks__: 37 | callbacks.append(__callbacks__[n](config)) 38 | else: 39 | raise ValueError( 40 | f"{n} is invalid, possible entries are {list(__callbacks__.keys())}" 41 | ) 42 | return callbacks 43 | -------------------------------------------------------------------------------- /dynalearn/nn/callbacks/periodic.py: -------------------------------------------------------------------------------- 1 | from .callbacks import Callback 2 | from .util import atomic_lambda_save 3 | from dynalearn.util import Verbose 4 | 5 | 6 | class PeriodicSaveCallback(Callback): 7 | def __init__( 8 | self, 9 | filename, 10 | *, 11 | monitor="val_loss", 12 | mode="min", 13 | save_best_only=False, 14 | period=1, 15 | verbose=Verbose(), 16 | temporary_filename=None, 17 | atomic_write=True, 18 | open_mode="wb" 19 | ): 20 | super().__init__() 21 | self.filename = filename 22 | self.monitor = monitor 23 | self.verbose = verbose 24 | self.save_best_only = save_best_only 25 | self.temporary_filename = temporary_filename 26 | self.atomic_write = atomic_write 27 | self.open_mode = open_mode 28 | self.best_filename = None 29 | 30 | if self.save_best_only: 31 | if mode not in ["min", "max"]: 32 | raise ValueError("Invalid mode '%s'" % mode) 33 | if mode == "min": 34 | self.monitor_op = lambda x, y: x < y 35 | self.current_best = float("Inf") 36 | elif mode == "max": 37 | self.monitor_op = lambda x, y: x > y 38 | self.current_best = -float("Inf") 39 | 40 | self.period = period 41 | 42 | def save_file(self, fd, epoch_number, logs): 43 | raise NotImplementedError 44 | 45 | def _save_file(self, filename, epoch_number, logs): 46 | atomic_lambda_save( 47 | filename, 48 | self.save_file, 49 | (epoch_number, logs), 50 | temporary_filename=self.temporary_filename, 51 | open_mode=self.open_mode, 52 | atomic=self.atomic_write, 53 | ) 54 | 55 | def on_epoch_end(self, epoch_number, logs): 56 | filename = self.filename.format_map(logs) 57 | 58 | if self.save_best_only: 59 | if self.monitor_op(logs[self.monitor], self.current_best): 60 | old_best = self.current_best 61 | self.current_best = logs[self.monitor] 62 | self.best_filename = filename 63 | 64 | self.verbose( 65 | "%s improved from %0.5f to %0.5f, saving file to %s" 66 | % (self.monitor, old_best, self.current_best, self.best_filename) 67 | ) 68 | self._save_file(self.best_filename, epoch_number, logs) 69 | elif epoch_number % self.period == 0: 70 |
self.verbose("Epoch %d: saving file to %s" % (epoch_number, filename)) 71 | self._save_file(filename, epoch_number, logs) 72 | 73 | 74 | class PeriodicSaveLambda(PeriodicSaveCallback): 75 | """ 76 | Call a lambda with a file descriptor after every epoch. See 77 | :class:`~poutyne.framework.callbacks.PeriodicSaveCallback` for the arguments' 78 | descriptions. 79 | 80 | Args: 81 | func (Callable[[fd, int, dict], None]): The lambda that will be called 82 | with a file descriptor, the epoch number and the epoch logs. 83 | 84 | See: 85 | :class:`~poutyne.framework.callbacks.PeriodicSaveCallback` 86 | """ 87 | 88 | def __init__(self, func, *args, **kwargs): 89 | super().__init__(*args, **kwargs) 90 | self.func = func 91 | 92 | def save_file(self, fd, epoch_number, logs): 93 | self.func(fd, epoch_number, logs) 94 | -------------------------------------------------------------------------------- /dynalearn/nn/callbacks/util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import warnings 4 | 5 | 6 | def atomic_lambda_save(filename, save_lambda, args, *, temporary_filename=None, open_mode='w', atomic=True): 7 | if atomic: 8 | fd = None 9 | if temporary_filename is not None: 10 | fd = open(temporary_filename, open_mode) 11 | tmp_filename = temporary_filename 12 | else: 13 | fd = tempfile.NamedTemporaryFile(mode=open_mode, delete=False) 14 | tmp_filename = fd.name 15 | 16 | with fd: 17 | save_lambda(fd, *args) 18 | 19 | try: 20 | os.replace(tmp_filename, filename) 21 | except OSError as e: 22 | # This may happen if the temp filesystem is not the same as the final destination's. 23 | warnings.warn("Impossible to move the file to its final destination: " 24 | "os.replace(%s, %s) -> %s" % (tmp_filename, filename, e)) 25 | os.remove(tmp_filename) 26 | 27 | warnings.warn('Saving %s non-atomically instead.' 
% filename) 28 | with open(filename, open_mode) as fd: 29 | save_lambda(fd, *args) 30 | else: 31 | with open(filename, open_mode) as fd: 32 | save_lambda(fd, *args) 33 | -------------------------------------------------------------------------------- /dynalearn/nn/history.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import numpy as np 3 | 4 | 5 | class History: 6 | def __init__(self): 7 | self.epoch = 0 8 | self.batch = 0 9 | self._epoch_logs = {} 10 | self._batch_logs = {} 11 | 12 | def reset(self): 13 | self.epoch = 0 14 | self.batch = 0 15 | self._epoch_logs = {} 16 | self._batch_logs = {} 17 | 18 | def update_epoch(self, logs): 19 | self._epoch_logs[self.epoch] = logs 20 | self.epoch += 1 21 | 22 | def update_batch(self, logs): 23 | self._batch_logs[self.batch] = logs 24 | self.batch += 1 25 | 26 | def display(self, epoch=None): 27 | if epoch is None: 28 | epoch = self.epoch - 1 29 | log_str = "" 30 | for k, v in self._epoch_logs[epoch].items(): 31 | if k != "epoch": 32 | log_str += f"{k}: {v:.4f}\t" 33 | 34 | return f"\t{log_str}" 35 | 36 | def save(self, file): 37 | data = {"epoch_logs": self._epoch_logs, "batch_logs": self._batch_logs} 38 | pickle.dump(data, file) 39 | 40 | def load(self, file): 41 | data = pickle.load(file) 42 | if "epoch_logs" in data: 43 | self._epoch_logs.update(data["epoch_logs"]) 44 | if "batch_logs" in data: 45 | self._batch_logs.update(data["batch_logs"]) 46 | 47 | self.epoch = max(self._epoch_logs.keys()) + 1 48 | self.batch = max(self._batch_logs.keys()) + 1 49 | 50 | def get_from_epoch_logs(self, key): 51 | epochs = list(self._epoch_logs.keys()) 52 | logs = [] 53 | for e, log in self._epoch_logs.items(): 54 | if key in log: 55 | logs.append(log[key]) 56 | else: 57 | logs.append(np.nan) 58 | 59 | return epochs, logs 60 | 61 | def get_from_batch_logs(self, key): 62 | batches = list(self._batch_logs.keys()) 63 | logs = [] 64 | for b, log in self._batch_logs.items(): 65 | if key in log: 66 | logs.append(log[key]) 67 | else: 68 | logs.append(np.nan) 69 | 70 | return batches, logs 71 | -------------------------------------------------------------------------------- /dynalearn/nn/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | from dynalearn.util import onehot 5 | 6 | 7 | def weighted_cross_entropy(y_true, y_pred, weights=None): 8 | if weights is None: 9 | weights = torch.ones([y_true.size(i) for i in range(y_true.dim() - 1)]) 10 | if torch.cuda.is_available(): 11 | y_pred = y_pred.cuda() 12 | y_true = y_true.cuda() 13 | weights = weights.cuda() 14 | weights /= weights.sum() 15 | y_pred = torch.clamp(y_pred, 1e-15, 1 - 1e-15) 16 | loss = weights * (-y_true * torch.log(y_pred)).sum(-1) 17 | return loss.sum() 18 | 19 | 20 | def weighted_dkl(y_true, y_pred, weights=None): 21 | if weights is None: 22 | weights = torch.ones([y_true.size(i) for i in range(y_true.dim() - 1)]) 23 | if torch.cuda.is_available(): 24 | y_pred = y_pred.cuda() 25 | y_true = y_true.cuda() 26 | weights = weights.cuda() 27 | weights = weights / torch.sum(weights) 28 | y_true = torch.clamp(y_true, 1e-15, 1 - 1e-15) 29 | y_pred = torch.clamp(y_pred, 1e-15, 1 - 1e-15) 30 | loss = weights * torch.sum(y_true * (torch.log(y_true) - torch.log(y_pred)), -1) 31 | return torch.sum(loss) 32 | 33 | 34 | def weighted_jsd(y_true, y_pred, weights=None): 35 | m = 0.5 * (y_true + y_pred) 36 | return weighted_dkl(y_true, m, weights=weights) +
weighted_dkl( 37 | y_pred, m, weights=weights 38 | ) 39 | 40 | 41 | def weighted_mse(y_true, y_pred, weights=None): 42 | if weights is None: 43 | weights = torch.ones([y_true.size(i) for i in range(y_true.dim() - 1)]) 44 | weights /= weights.sum() 45 | if torch.cuda.is_available(): 46 | y_pred = y_pred.cuda() 47 | y_true = y_true.cuda() 48 | weights = weights.cuda() 49 | loss = weights * torch.sum((y_true - y_pred) ** 2, axis=-1) 50 | return loss.sum() 51 | 52 | 53 | __losses__ = { 54 | "weighted_cross_entropy": weighted_cross_entropy, 55 | "weighted_mse": weighted_mse, 56 | "cross_entropy": torch.nn.CrossEntropyLoss(), 57 | "mse": torch.nn.MSELoss(), 58 | } 59 | 60 | 61 | def get(loss): 62 | if loss in __losses__: 63 | return __losses__[loss] 64 | else: 65 | raise ValueError( 66 | f"{loss} is invalid, possible entries are {list(__losses__.keys())}" 67 | ) 68 | -------------------------------------------------------------------------------- /dynalearn/nn/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from sklearn.metrics import r2_score 4 | from dynalearn.util import onehot 5 | from .loss import weighted_cross_entropy 6 | 7 | EPSILON = 1e-8 8 | 9 | 10 | def model_entropy(y_true, y_pred, weights=None): 11 | y_pred = torch.clamp(y_pred, EPSILON, 1 - EPSILON) 12 | if weights is None: 13 | x = -torch.mean((y_pred * torch.log(y_pred)).sum(-1)) 14 | else: 15 | weights /= weights.sum() 16 | x = -torch.sum(weights * (y_pred * torch.log(y_pred)).sum(-1)) 17 | return x 18 | 19 | 20 | def relative_entropy(y_true, y_pred, weights=None): 21 | if y_true.dim() + 1 == y_pred.dim(): 22 | y_true = onehot(y_true, y_pred.size(-1)) 23 | y_true = torch.clamp(y_true, EPSILON, 1 - EPSILON) 24 | y_pred = torch.clamp(y_pred, EPSILON, 1 - EPSILON) 25 | cross_entropy = weighted_cross_entropy(y_true, y_pred, weights=weights) 26 | entropy = weighted_cross_entropy(y_true, y_true, weights=weights) 27 | return cross_entropy - entropy 28 | 29 | 30 | def approx_relative_entropy(y_true, y_pred, weights=None): 31 | if y_true.dim() + 1 == y_pred.dim(): 32 | y_true = onehot(y_true, y_pred.size(-1)) 33 | y_true = torch.clamp(y_true, EPSILON, 1 - EPSILON) 34 | y_pred = torch.clamp(y_pred, EPSILON, 1 - EPSILON) 35 | cross_entropy = weighted_cross_entropy(y_true, y_pred, weights=weights) 36 | entropy = weighted_cross_entropy(y_pred, y_pred, weights=weights) 37 | return entropy - cross_entropy 38 | 39 | 40 | def jensenshannon(y_true, y_pred, weights=None): 41 | if y_true.dim() + 1 == y_pred.dim(): 42 | y_true = onehot(y_true, y_pred.size(-1)) 43 | y_true = torch.clamp(y_true, EPSILON, 1 - EPSILON) 44 | y_pred = torch.clamp(y_pred, EPSILON, 1 - EPSILON) 45 | m = 0.5 * (y_true + y_pred) 46 | return 0.5 * (relative_entropy(y_true, m) + relative_entropy(y_pred, m)) 47 | 48 | 49 | def acc(y_true, y_pred, weights=None): 50 | x = y_true.cpu().detach().numpy() 51 | y = y_pred.cpu().detach().numpy() 52 | a = r2_score(x, y)  # coefficient of determination (R^2) 53 | return torch.tensor(a, dtype=torch.float) 54 | 55 | 56 | __metrics__ = { 57 | "model_entropy": model_entropy, 58 | "relative_entropy": relative_entropy, 59 | "approx_relative_entropy": approx_relative_entropy, 60 | "jensenshannon": jensenshannon, 61 | "acc": acc, 62 | } 63 | 64 | 65 | def get(names): 66 | metrics = {} 67 | for n in names: 68 | if n in __metrics__: 69 | metrics[n] = __metrics__[n] 70 | else: 71 | raise ValueError( 72 | f"{n} is invalid, possible entries are {list(__metrics__.keys())}" 73 | ) 74 | return metrics 75 |
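# A quick sanity check (not from the repository) of the divergences above on
# dummy distributions; only torch and the functions defined in this module
# are required.
#
#     import torch
#     y_true = torch.tensor([[0.9, 0.1], [0.2, 0.8]])
#     y_pred = torch.tensor([[0.8, 0.2], [0.3, 0.7]])
#     jensenshannon(y_true, y_pred)     # symmetric, bounded divergence
#     relative_entropy(y_true, y_pred)  # KL(y_true || y_pred) via cross-entropy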
-------------------------------------------------------------------------------- /dynalearn/nn/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .dgat import * 2 | from .model import * 3 | from .gnn import * 4 | from .kapoor import * 5 | from .stochastic_epidemics import * 6 | from .deterministic_epidemics import * 7 | from .incidence import * 8 | from .reaction_diffusion import * 9 | from .propagator import * 10 | -------------------------------------------------------------------------------- /dynalearn/nn/models/deterministic_epidemics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .gnn import GraphNeuralNetwork 5 | from dynalearn.config import Config 6 | from dynalearn.nn.loss import weighted_cross_entropy 7 | 8 | 9 | class DeterministicEpidemicsGNN(GraphNeuralNetwork): 10 | def __init__(self, config=None, **kwargs): 11 | if config is None: 12 | config = Config() 13 | config.__dict__ = kwargs 14 | if "is_weighted" in config.__dict__ and config.is_weighted: 15 | edgeattr_size = 1 16 | else: 17 | edgeattr_size = 0 18 | self.num_states = config.num_states 19 | GraphNeuralNetwork.__init__( 20 | self, 21 | config.num_states, 22 | config.num_states, 23 | lag=config.lag, 24 | nodeattr_size=1, 25 | edgeattr_size=edgeattr_size, 26 | out_act="softmax", 27 | config=config, 28 | **kwargs 29 | ) 30 | 31 | def loss(self, y_true, y_pred, weights): 32 | return weighted_cross_entropy(y_true, y_pred, weights=weights) 33 | -------------------------------------------------------------------------------- /dynalearn/nn/models/incidence.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .gnn import GraphNeuralNetwork 5 | from dynalearn.config import Config 6 | from dynalearn.nn.loss import weighted_mse 7 | 8 | 9 | class IncidenceEpidemicsGNN(GraphNeuralNetwork): 10 | def __init__(self, config=None, **kwargs): 11 | if config is None: 12 | config = Config() 13 | config.__dict__ = kwargs 14 | if "is_weighted" in config.__dict__ and config.is_weighted: 15 | edgeattr_size = 1 16 | else: 17 | edgeattr_size = 0 18 | self.num_states = config.num_states 19 | GraphNeuralNetwork.__init__( 20 | self, 21 | 1, 22 | 1, 23 | lag=config.lag, 24 | nodeattr_size=1, 25 | edgeattr_size=edgeattr_size, 26 | out_act="identity", 27 | normalize=True, 28 | config=config, 29 | **kwargs 30 | ) 31 | 32 | def loss(self, y_true, y_pred, weights): 33 | return weighted_mse(y_true, y_pred, weights=weights) 34 | -------------------------------------------------------------------------------- /dynalearn/nn/models/kapoor.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from dynalearn.config import Config 5 | from .model import Model 6 | from torch_geometric.nn import GCNConv 7 | from .dgat import DynamicsGATConv 8 | from torch.nn.init import kaiming_normal_ 9 | from dynalearn.nn.activation import get as get_activation 10 | from dynalearn.nn.transformers import BatchNormalizer 11 | from dynalearn.nn.loss import weighted_mse 12 | 13 | 14 | class Kapoor2020GNN(Model): 15 | def __init__(self, config=None, **kwargs): 16 | Model.__init__(self, config=config, **kwargs) 17 | self.lag = 7 18 | self.lagstep = 1 19 | self.nodeattr_size = 1 20 | self.num_states = 1 21 | self.in_layers = nn.Linear(self.num_states * self.lag + 
self.nodeattr_size, 64) 22 | self.gnn1 = GCNConv(64, 32) 23 | self.gnn2 = GCNConv(32, 32) 24 | self.out_layers = nn.Linear(32, 1) 25 | self.activation = nn.ReLU() 26 | self.dropout = nn.Dropout(0.0) 27 | 28 | self.transformers = BatchNormalizer(input_size=1, target_size=1, node_size=1) 29 | 30 | self.reset_parameters() 31 | self.optimizer = self.get_optimizer(self.parameters()) 32 | if torch.cuda.is_available(): 33 | self = self.cuda() 34 | 35 | def forward(self, x, network_attr): 36 | edge_index, edge_attr, node_attr = network_attr 37 | x = x.view(-1, self.num_states * self.lag) 38 | x = torch.cat([x, node_attr], axis=-1) 39 | x = self.dropout(self.activation(self.in_layers(x))) 40 | x = self.dropout(self.activation(self.gnn1(x, edge_index))) 41 | x = self.dropout(self.activation(self.gnn2(x, edge_index))) 42 | x = self.out_layers(x) 43 | return x 44 | 45 | def reset_parameters(self, initialize_inplace=None): 46 | if initialize_inplace is None: 47 | initialize_inplace = kaiming_normal_ 48 | 49 | initialize_inplace(self.in_layers.weight) 50 | if self.in_layers.bias is not None: 51 | self.in_layers.bias.data.fill_(0) 52 | 53 | initialize_inplace(self.out_layers.weight) 54 | if self.out_layers.bias is not None: 55 | self.out_layers.bias.data.fill_(0) 56 | self.gnn1.reset_parameters() 57 | self.gnn2.reset_parameters() 58 | 59 | def loss(self, y_true, y_pred, weights): 60 | return weighted_mse(y_true, y_pred) 61 | -------------------------------------------------------------------------------- /dynalearn/nn/models/propagator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | from torch_geometric.nn.conv import MessagePassing 5 | from dynalearn.util import onehot 6 | 7 | 8 | class Propagator(MessagePassing): 9 | def __init__(self, num_states=None): 10 | MessagePassing.__init__(self, aggr="add") 11 | self.num_states = num_states 12 | 13 | def forward(self, x, edge_index, w=None): 14 | if isinstance(x, np.ndarray): 15 | x = torch.Tensor(x) 16 | if isinstance(edge_index, np.ndarray): 17 | edge_index = torch.LongTensor(edge_index) 18 | if isinstance(w, np.ndarray): 19 | assert w.shape[0] == edge_index.shape[-1] 20 | w = torch.Tensor(w).view(-1, 1) 21 | if isinstance(self.num_states, int): 22 | x = onehot(x, num_class=self.num_states) 23 | else: 24 | x = x.view(-1, 1) 25 | if torch.cuda.is_available(): 26 | x = x.cuda() 27 | edge_index = edge_index.cuda() 28 | if w is not None: 29 | w = w.cuda() 30 | return self.propagate(edge_index, x=x, w=w).T 31 | 32 | def message(self, x_j, w=None): 33 | if w is None: 34 | return x_j 35 | else: 36 | out = w * x_j 37 | return out 38 | 39 | def update(self, x): 40 | return x 41 | -------------------------------------------------------------------------------- /dynalearn/nn/models/reaction_diffusion.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .gnn import GraphNeuralNetwork 5 | from dynalearn.config import Config 6 | from dynalearn.nn.loss import weighted_mse 7 | 8 | 9 | class ReactionDiffusionGNN(GraphNeuralNetwork): 10 | def __init__(self, config=None, **kwargs): 11 | if config is None: 12 | config = Config() 13 | config.__dict__ = kwargs 14 | if "is_weighted" in config.__dict__ and config.is_weighted: 15 | edgeattr_size = 1 16 | else: 17 | edgeattr_size = 0 18 | self.num_states = config.num_states 19 | self.alpha = config.alpha 20 | GraphNeuralNetwork.__init__( 21 | self, 22 | 
config.num_states, 23 | config.num_states, 24 | edgeattr_size=edgeattr_size, 25 | lag=config.lag, 26 | normalize=True, 27 | config=config, 28 | **kwargs 29 | ) 30 | 31 | def loss(self, y_true, y_pred, weights): 32 | l1 = weighted_mse(y_true, y_pred, weights=weights) 33 | sizes_true = torch.sum(y_true, axis=-1) 34 | sizes_pred = torch.sum(y_pred, axis=-1) 35 | l2 = torch.sum(weights * torch.abs(sizes_true - sizes_pred)) 36 | return self.alpha[0] * l1 + self.alpha[1] * l2  # alpha weighs the MSE and size-constraint terms separately 37 | -------------------------------------------------------------------------------- /dynalearn/nn/models/stochastic_epidemics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .gnn import GraphNeuralNetwork 5 | from dynalearn.config import Config 6 | from dynalearn.nn.loss import weighted_cross_entropy 7 | 8 | 9 | class StochasticEpidemicsGNN(GraphNeuralNetwork): 10 | def __init__(self, config=None, **kwargs): 11 | config = config or Config(**kwargs) 12 | self.num_states = config.num_states 13 | if "is_weighted" in config.__dict__ and config.is_weighted: 14 | edgeattr_size = 1 15 | else: 16 | edgeattr_size = 0 17 | GraphNeuralNetwork.__init__( 18 | self, 19 | 1, 20 | config.num_states, 21 | edgeattr_size=edgeattr_size, 22 | lag=config.lag, 23 | out_act="softmax", 24 | config=config, 25 | **kwargs 26 | ) 27 | 28 | def loss(self, y_true, y_pred, weights): 29 | return weighted_cross_entropy(y_true, y_pred, weights=weights) 30 | -------------------------------------------------------------------------------- /dynalearn/nn/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .optimizer import * 2 | from .radam import * 3 | -------------------------------------------------------------------------------- /dynalearn/nn/optimizers/optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .radam import * 3 | 4 | 5 | __optimizers__ = { 6 | "Adam": lambda config: lambda p: torch.optim.Adam( 7 | p, 8 | lr=config.lr, 9 | betas=config.betas, 10 | eps=config.eps, 11 | weight_decay=config.weight_decay, 12 | amsgrad=config.amsgrad, 13 | ), 14 | "RAdam": lambda config: lambda p: RAdam( 15 | p, 16 | lr=config.lr, 17 | betas=config.betas, 18 | eps=config.eps, 19 | weight_decay=config.weight_decay, 20 | ), 21 | } 22 | 23 | 24 | def get(config): 25 | name = config.name 26 | 27 | if name in __optimizers__: 28 | return __optimizers__[name](config) 29 | else: 30 | raise ValueError( 31 | f"{name} is invalid, possible entries are {list(__optimizers__.keys())}" 32 | ) 33 | -------------------------------------------------------------------------------- /dynalearn/nn/transformers/__init__.py: -------------------------------------------------------------------------------- 1 | from .transformer import * 2 | from .normalizer import * 3 | from .batch import * 4 | -------------------------------------------------------------------------------- /dynalearn/nn/transformers/batch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .transformer import TransformerDict, CUDATransformer 4 | from .normalizer import InputNormalizer, TargetNormalizer, NetworkNormalizer 5 | from dynalearn.util import get_node_attr 6 | 7 | 8 | class BatchNormalizer(TransformerDict): 9 | def __init__( 10 | self, 11 | input_size=0, 12 | target_size=0, 13 | node_size=0, 14 | edge_size=0, 15 | layers=None, 16 |
auto_cuda=True, 17 | ): 18 | transformer_dict = {"t_cuda": CUDATransformer()} 19 | if input_size is not None: 20 | transformer_dict["t_inputs"] = InputNormalizer( 21 | input_size, auto_cuda=auto_cuda 22 | ) 23 | else: 24 | transformer_dict["t_inputs"] = CUDATransformer() 25 | 26 | if target_size is not None: 27 | transformer_dict["t_targets"] = TargetNormalizer(target_size) 28 | else: 29 | transformer_dict["t_targets"] = CUDATransformer() 30 | 31 | transformer_dict["t_networks"] = NetworkNormalizer( 32 | node_size, edge_size, layers=layers, auto_cuda=auto_cuda 33 | ) 34 | 35 | TransformerDict.__init__(self, transformer_dict) 36 | 37 | def forward(self, data): 38 | (x, g), y, w = data 39 | x = self["t_inputs"].forward(x) 40 | g = self["t_networks"].forward(g) 41 | y = self["t_targets"].forward(y) 42 | w = self["t_cuda"].forward(w) 43 | return (x, g), y, w 44 | 45 | def backward(self, data): 46 | (x, g), y, w = data 47 | x = self["t_inputs"].backward(x) 48 | g = self["t_networks"].backward(g) 49 | y = self["t_targets"].backward(y) 50 | w = self["t_cuda"].backward(w) 51 | return (x, g), y, w 52 | -------------------------------------------------------------------------------- /dynalearn/nn/transformers/transformer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from dynalearn.util import get_edge_attr, get_node_attr 4 | 5 | 6 | class Transformer(torch.nn.Module): 7 | def __init__(self, name): 8 | torch.nn.Module.__init__(self) 9 | self.name = name 10 | self.is_empty = False 11 | if torch.cuda.is_available(): 12 | self = self.cuda() 13 | 14 | def forward(self, x): 15 | raise NotImplementedError() 16 | 17 | def backward(self, x): 18 | raise NotImplementedError() 19 | 20 | def setUp(self, dataset): 21 | for method in dir(self): 22 | if method[: len("_setUp_")] == "_setUp_": 23 | label = method[len("_setUp_") :] 24 | m = getattr(self, method)(dataset) 25 | if isinstance(m, torch.Tensor) and torch.cuda.is_available(): 26 | m = m.cuda() 27 | setattr(self, f"{self.name}_{label}", m) 28 | 29 | 30 | class IdentityTransformer(Transformer): 31 | def __init__(self): 32 | Transformer.__init__(self, "identity") 33 | 34 | def forward(self, x): 35 | return x 36 | 37 | def backward(self, x): 38 | return x 39 | 40 | 41 | class CUDATransformer(Transformer): 42 | def __init__(self): 43 | Transformer.__init__(self, "cuda") 44 | 45 | def forward(self, x): 46 | if torch.cuda.is_available(): 47 | if isinstance(x, dict): 48 | for k, v in x.items(): 49 | assert isinstance(v, torch.Tensor) 50 | x[k] = v.cuda() 51 | else: 52 | assert isinstance(x, torch.Tensor) 53 | x = x.cuda() 54 | return x 55 | 56 | def backward(self, x): 57 | if isinstance(x, dict): 58 | for k, v in x.items(): 59 | assert isinstance(v, torch.Tensor) 60 | x[k] = v.cpu() 61 | else: 62 | assert isinstance(x, torch.Tensor) 63 | x = x.cpu() 64 | return x 65 | 66 | 67 | class TransformerDict(torch.nn.ModuleDict): 68 | def __init__(self, transformers={}): 69 | torch.nn.Module.__init__(self) 70 | assert isinstance(transformers, dict) 71 | for t in transformers.values(): 72 | assert issubclass( 73 | t.__class__, Transformer 74 | ), f"{t.__class__.__name__} is not a subclass of Transformer."
75 | torch.nn.ModuleDict.__init__(self, modules=transformers) 76 | 77 | def forward(self, x, key): 78 | if key in self.keys(): 79 | return self[key].forward(x) 80 | else: 81 | return x 82 | 83 | def backward(self, x, key): 84 | if key in self.keys(): 85 | return self[key].backward(x) 86 | else: 87 | return x 88 | 89 | def setUp(self, dataset): 90 | for t in self.values(): 91 | t.setUp(dataset) 92 | -------------------------------------------------------------------------------- /dynalearn/util/__init__.py: -------------------------------------------------------------------------------- 1 | from .distribution import * 2 | from .loggers import * 3 | from .util import * 4 | from .verbose import * 5 | from .display import * 6 | -------------------------------------------------------------------------------- /dynalearn/util/display.py: -------------------------------------------------------------------------------- 1 | import dynalearn 2 | import h5py 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | import os 6 | 7 | from matplotlib.lines import Line2D 8 | from matplotlib.legend_handler import HandlerTuple 9 | 10 | 11 | 12 | locations = { 13 | "center center": (0.5, 0.5, "center", "center"), 14 | "upper right": (0.95, 0.95, "top", "right"), 15 | "lower right": (0.95, 0.05, "bottom", "right"), 16 | "upper left": (0.05, 0.95, "top", "left"), 17 | "lower left": (0.05, 0.05, "bottom", "left"), 18 | } 19 | 20 | color_dark = { 21 | "blue": "#1f77b4", 22 | "orange": "#f19143", 23 | "purple": "#9A80B9", 24 | "red": "#d73027", 25 | "grey": "#525252", 26 | "green": "#33b050", 27 | } 28 | 29 | color_pale = { 30 | "blue": "#7bafd3", 31 | "orange": "#f7be90", 32 | "purple": "#c3b4d6", 33 | "red": "#e78580", 34 | "grey": "#999999", 35 | "green": "#9fdaac", 36 | } 37 | 38 | colormap = "bone" 39 | 40 | m_list = ["o", "s", "v", "^"] 41 | l_list = ["solid", "dashed", "dotted", "dashdot"] 42 | cd_list = [ 43 | color_dark["blue"], 44 | color_dark["orange"], 45 | color_dark["purple"], 46 | color_dark["red"], 47 | ] 48 | cp_list = [ 49 | color_pale["blue"], 50 | color_pale["orange"], 51 | color_pale["purple"], 52 | color_pale["red"], 53 | ] 54 | 55 | large_fontsize = 18 56 | small_fontsize = 14 57 | 58 | plt.rc("text", usetex=True) 59 | plt.rc("font", family="serif") 60 | plt.rcParams.update({"text.latex.preamble": r"\usepackage{amsmath}"}) 61 | 62 | def label_plot(ax, label, loc="center center", fontsize=large_fontsize): 63 | if isinstance(loc, tuple): 64 | h, v, va, ha = loc 65 | elif isinstance(loc, str): 66 | h, v, va, ha = locations[loc] 67 | ax.text(h, v, label, color="k", transform=ax.transAxes, 68 | verticalalignment=va, horizontalalignment=ha, fontsize=fontsize, 69 | ) -------------------------------------------------------------------------------- /dynalearn/util/distribution.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from scipy.stats import poisson 4 | from scipy.optimize import fsolve 5 | 6 | 7 | class DiscreteDistribution(object): 8 | def __init__(self, values): 9 | super(DiscreteDistribution, self).__init__() 10 | self.val_dict = {k: v for k, v in zip(*values)} 11 | self.values = values[0] # Size K 12 | self.weights = values[1] # Size K 13 | 14 | def expect(self, func): 15 | x = func(self.values) # Size K x D 16 | return self.weights @ x 17 | 18 | def mean(self): 19 | f = lambda k: k 20 | return self.expect(f) 21 | 22 | def var(self): 23 | f = lambda k: (k - self.mean()) ** 2 24 | return self.expect(f) 25 | 26 | def 
std(self): 27 | return np.sqrt(self.var()) 28 | 29 | def sample(self, num_samples=1): 30 | return np.random.choice(self.values, size=num_samples, p=self.weights) 31 | 32 | 33 | def kronecker_distribution(k): 34 | k = np.array([k]) 35 | p_k = np.array([1]) 36 | return DiscreteDistribution((k, p_k)) 37 | 38 | 39 | def poisson_distribution(avgk, num_k): 40 | mid_k = np.ceil(avgk) 41 | if mid_k < num_k: 42 | down = 0 43 | up = 2 * num_k + 1 44 | else: 45 | down = mid_k - num_k + 1 46 | up = mid_k + num_k + 2 47 | 48 | k = np.arange(down, up).astype("int") 49 | p_k = lambda mu: poisson(mu).pmf(k) / np.sum(poisson(mu).pmf(k)) 50 | f_to_solve = lambda mu: np.sum(k * p_k(mu)) - avgk 51 | l = fsolve(f_to_solve, x0=avgk)[0] 52 | 53 | return DiscreteDistribution((k, p_k(l))) 54 | -------------------------------------------------------------------------------- /dynalearn/util/loggers/__init__.py: -------------------------------------------------------------------------------- 1 | from .logger import * 2 | from .memory import * 3 | from .time import * 4 | -------------------------------------------------------------------------------- /dynalearn/util/loggers/logger.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | class Logger: 5 | def __init__(self): 6 | self.log = {} 7 | 8 | def on_task_begin(self): 9 | return 10 | 11 | def on_task_end(self): 12 | return 13 | 14 | def on_task_update(self, stepname=None): 15 | return 16 | 17 | def save(self, f): 18 | json.dump(self.log, f, indent=4) 19 | 20 | def load(self, f): 21 | self.log = json.load(f) 22 | 23 | 24 | class LoggerDict: 25 | def __init__(self, loggers=None): 26 | self.loggers = loggers or {} 27 | assert isinstance(self.loggers, dict) 28 | 29 | def __getitem__(self, key): 30 | return self.loggers[key] 31 | 32 | def keys(self): 33 | return self.loggers.keys() 34 | 35 | def values(self): 36 | return self.loggers.values() 37 | 38 | def items(self): 39 | return self.loggers.items() 40 | 41 | def on_task_begin(self): 42 | for l in self.values(): 43 | l.on_task_begin() 44 | 45 | def on_task_end(self): 46 | for l in self.values(): 47 | l.on_task_end() 48 | 49 | def on_task_update(self, stepname=None): 50 | for l in self.values(): 51 | l.on_task_update(stepname) 52 | 53 | def save(self, f): 54 | log_dict = {} 55 | for k, l in self.items(): 56 | log_dict[k] = l.log 57 | json.dump(log_dict, f, indent=4) 58 | 59 | def load(self, f): 60 | log_dict = json.load(f) 61 | for k, v in log_dict.items(): 62 | for _k, _v in self.items(): 63 | if _k == k: 64 | _v.log = v 65 | break 66 | -------------------------------------------------------------------------------- /dynalearn/util/loggers/memory.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | 3 | from numpy import mean, round 4 | from .logger import Logger 5 | 6 | 7 | class MemoryLogger(Logger): 8 | def __init__(self, unit="gb"): 9 | if unit == "b": 10 | self.factor = 1 11 | elif unit == "kb": 12 | self.factor = 1024 13 | elif unit == "mb": 14 | self.factor = 1024 ** 2 15 | elif unit == "gb": 16 | self.factor = 1024 ** 3 17 | else: 18 | raise ValueError( 19 | f"`{unit}` is an invalid unit, valid units are `[b, kb, mb, gb]`" 20 | ) 21 | 22 | Logger.__init__(self) 23 | 24 | def on_task_begin(self): 25 | memory_usage = round(psutil.virtual_memory().used / self.factor, 4) 26 | self.log["memory-begin"] = memory_usage 27 | 28 | def on_task_end(self): 29 | self.log["min"] = min(self.all) 30 | self.log["max"] 
= max(self.all) 31 | self.log["mean"] = mean(self.all) 32 | 33 | def on_task_update(self, stepname=None): 34 | memory_usage = round(psutil.virtual_memory().used / self.factor, 4) 35 | if f"memory-{stepname}" in self.log: 36 | self.log[f"memory-{stepname}"].append(memory_usage) 37 | else: 38 | self.log[f"memory-{stepname}"] = [memory_usage] 39 | 40 | @property 41 | def all(self): 42 | _all = [] 43 | for k, v in self.log.items(): 44 | if k[:6] == "memory": 45 | v = [v] if not isinstance(v, list) else v 46 | _all.extend(v) 47 | return _all 48 | -------------------------------------------------------------------------------- /dynalearn/util/loggers/time.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from .logger import Logger 4 | 5 | 6 | class TimeLogger(Logger): 7 | def __init__(self): 8 | Logger.__init__(self) 9 | self.begin = None 10 | self.update = None 11 | self.end = None 12 | 13 | def on_task_begin(self): 14 | 15 | self.begin = datetime.now() 16 | self.update = datetime.now() 17 | self.log["begin"] = self.begin.strftime("%Y-%m-%d %H:%M:%S") 18 | 19 | def on_task_end(self): 20 | self.end = datetime.now() 21 | self.log["end"] = self.end.strftime("%Y-%m-%d %H:%M:%S") 22 | days, hours, mins, secs = self.format_diff(self.begin, self.end) 23 | self.log["time"] = f"{days:0=2d}-{hours:0=2d}:{mins:0=2d}:{secs:0=2d}" 24 | self.log["total"] = self.format_diff(self.begin, self.end, to_sec=True) 25 | 26 | def on_task_update(self, stepname=None): 27 | stepname = stepname or "update" 28 | now = datetime.now() 29 | dt = self.format_diff(self.update, now, to_sec=True) 30 | if f"time-{stepname}" in self.log: 31 | self.log[f"time-{stepname}"].append(dt) 32 | else: 33 | self.log[f"time-{stepname}"] = [dt] 34 | self.update = now 35 | 36 | def format_diff(self, t0, t1, to_sec=False): 37 | dt = t1 - t0 38 | days = dt.days 39 | hours, r = divmod(dt.seconds, 60 * 60) 40 | mins, r = divmod(r, 60) 41 | secs = r 42 | if to_sec: 43 | return ((days * 24 + hours) * 60 + mins) * 60 + secs 44 | else: 45 | return days, hours, mins, secs 46 | -------------------------------------------------------------------------------- /dynalearn/util/verbose.py: -------------------------------------------------------------------------------- 1 | import tqdm 2 | 3 | from datetime import datetime 4 | 5 | 6 | class Verbose: 7 | def __init__(self, filename=None, vtype=0, pbar=None): 8 | self.filename = filename 9 | self.to_file = filename is not None 10 | self.vtype = vtype 11 | if pbar == "notebook": 12 | self.pbar = tqdm.tqdm_notebook 13 | elif pbar is not None: 14 | self.pbar = pbar 15 | else: 16 | self.pbar = tqdm.tqdm 17 | if self.to_file: 18 | _file = open(self.filename, "w") 19 | _file.close() 20 | 21 | def __call__(self, msg): 22 | self.save_msg(msg) 23 | self.print_msg(msg) 24 | 25 | def save_msg(self, msg): 26 | if self.filename is None: 27 | return 28 | _file = open(self.filename, "a") 29 | _file.write(f"{msg}\n") 30 | _file.close() 31 | 32 | def print_msg(self, msg): 33 | if self.vtype != 0: 34 | print(msg) 35 | 36 | def progress_bar(self, name, num_update): 37 | if self.vtype == 1: 38 | self.save_msg(name) 39 | return self.pbar(range(num_update), name) 40 | else: 41 | self(name) 42 | return None 43 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.1.0 2 | scipy>=1.6.2 3 | torch>=1.6.0 4 |
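# torch_geometric wheels are compiled against a specific torch release;
# these minimums are presumably a known-compatible pair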
torch_geometric>=1.6.3 5 | networkx>=2.5.0 6 | h5py>=2.10.0 7 | scikit-learn>=0.24.1 8 | psutil>=5.8.0 9 | -------------------------------------------------------------------------------- /scripts/figure-6/run-covid.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import sys 4 | sys.path.append("../sources") 5 | 6 | from script import launch_scan 7 | 8 | sys.path.append("../figure-6") 9 | 10 | name = "exp" 11 | specs = json.load(open("./specs.json", "r"))["default"] 12 | config = { 13 | "name": name, 14 | "path_to_covid": specs["path_to_data"], 15 | "epochs": 200, 16 | "type": ["rnn"], 17 | "model": [ 18 | "DynamicsGATConv", 19 | "FullyConnectedGNN", 20 | "IndependentGNN", 21 | "KapoorConv", 22 | ], 23 | "lag": [5], 24 | "bias": [0.0, 0.25, 0.5, 0.75, 1.0], 25 | "val_fraction": 0.1, 26 | } 27 | launch_scan( 28 | name, 29 | os.path.join(specs["path_to_data"], "covid"), 30 | "../sources/run-covid.py", 31 | command=specs["command"], 32 | time="15:00:00", 33 | memory="8G", 34 | account=specs["account"], 35 | modules_to_load=specs["modules_to_load"], 36 | source_path=specs["source_path"], 37 | config=config, 38 | device=specs["device"], 39 | verbose=2, 40 | ) 41 | -------------------------------------------------------------------------------- /scripts/figures-234/run-synthetic-continuous.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import sys 5 | 6 | sys.path.append("../sources") 7 | 8 | from script import launch_scan 9 | 10 | sys.path.append("../figures-234") 11 | 12 | 13 | specs = json.load(open("../sources/specs.json", "r"))["default"] 14 | 15 | 16 | def launching(config): 17 | launch_scan( 18 | name, 19 | os.path.join(specs["path_to_data"], "case-study"), 20 | "../sources/run.py", 21 | command=specs["command"], 22 | time="12:00:00", 23 | memory="8G", 24 | account=specs["account"], 25 | modules_to_load=specs["modules_to_load"], 26 | source_path=specs["source_path"], 27 | config=config, 28 | device=specs["device"], 29 | verbose=2, 30 | ) 31 | 32 | 33 | name = "exp" 34 | config = { 35 | "dynamics": ["dsir"], 36 | "tasks": ( 37 | "generate_data", 38 | "partition_val_dataset", 39 | "train_model", 40 | "compute_metrics", 41 | ), 42 | "to_zip": ("config.pickle", "metrics.h5", "history.pickle", "model.pt", "optim.pt"), 43 | "train_details/num_samples": 100, 44 | "train_details/num_networks": 50, 45 | "train_details/use_groundtruth": 1, 46 | "train_details/resampling": 100, 47 | "train_details/val_bias": 0.5, 48 | "train_details/val_fraction": 0.01, 49 | "train_details/train_bias": 0.5, 50 | "train_details/epochs": 60, 51 | "networks/num_nodes": 1000, 52 | "weight_type": "state", 53 | } 54 | 55 | config["network"] = ["w_ba"] 56 | config["metrics"] = ("pred", "stationary") 57 | launching(config) 58 | 59 | config["network"] = ["w_gnp"] 60 | config["metrics"] = "pred" 61 | launching(config) 62 | -------------------------------------------------------------------------------- /scripts/figures-234/run-synthetic-discrete.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import sys 5 | 6 | sys.path.append("../sources") 7 | 8 | from script import launch_scan 9 | 10 | sys.path.append("../") 11 | 12 | specs = json.load(open("../sources/specs.json", "r"))["default"] 13 | 14 | 15 | def launching(config): 16 | launch_scan( 17 | name, 18 |
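# positional arguments are presumably (job name, output directory,
# path to the script to run); the keyword arguments mirror the cluster
# specs loaded from specs.json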
os.path.join(specs["path_to_data"], "case-study"), 19 | "../sources/run.py", 20 | command=specs["command"], 21 | time="12:00:00", 22 | memory="8G", 23 | account=specs["account"], 24 | modules_to_load=specs["modules_to_load"], 25 | source_path=specs["source_path"], 26 | config=config, 27 | device=specs["device"], 28 | verbose=2, 29 | ) 30 | 31 | 32 | name = "exp" 33 | config = { 34 | "dynamics": ["sis", "plancksis", "sissis"], 35 | "tasks": ( 36 | "generate_data", 37 | "partition_val_dataset", 38 | "train_model", 39 | "compute_metrics", 40 | ), 41 | "to_zip": ("config.pickle", "metrics.h5", "history.pickle", "model.pt", "optim.pt"), 42 | "train_details/num_samples": 10000, 43 | "train_details/num_networks": 1, 44 | "train_details/use_groundtruth": [0, 1], 45 | "train_details/resampling": 2, 46 | "train_details/val_bias": 0.5, 47 | "train_details/val_fraction": 0.01, 48 | "train_details/train_bias": 0.5, 49 | "train_details/epochs": 60, 50 | "networks/num_nodes": 1000, 51 | "weight_type": "state", 52 | } 53 | 54 | config["network"] = ["ba"] 55 | config["metrics"] = ("ltp", "stationary", "attention") 56 | launching(config) 57 | 58 | config["network"] = ["gnp"] 59 | config["metrics"] = "ltp" 60 | launching(config) 61 | -------------------------------------------------------------------------------- /scripts/run-test.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import sys 5 | import shutil 6 | 7 | sys.path.append("sources") 8 | from script import launch_scan 9 | 10 | sys.path.append("../") 11 | 12 | specs = json.load(open("./sources/specs.json", "r"))["default"] 13 | 14 | 15 | def launching(config): 16 | launch_scan( 17 | name, 18 | "test", 19 | "./sources/run.py", 20 | command=specs["command"], 21 | time="12:00:00", 22 | memory="8G", 23 | account=specs["account"], 24 | modules_to_load=specs["modules_to_load"], 25 | source_path=specs["source_path"], 26 | config=config, 27 | device=specs["device"], 28 | verbose=2, 29 | ) 30 | 31 | 32 | name = "exp" 33 | config = { 34 | "dynamics": "sis", 35 | "network": "gnp", 36 | "tasks": ( 37 | "generate_data", 38 | "partition_val_dataset", 39 | "train_model", 40 | "compute_metrics", 41 | ), 42 | "metrics": ("ltp"), 43 | "to_zip": ("config.pickle", "metrics.h5", "history.pickle", "model.pt", "optim.pt"), 44 | "train_details/num_samples": 10, 45 | "train_details/num_networks": 1, 46 | "train_details/use_groundtruth": 1, 47 | "train_details/resampling": 2, 48 | "train_details/val_bias": 0.5, 49 | "train_details/val_fraction": 0.01, 50 | "train_details/train_bias": 0.5, 51 | "train_details/epochs": 5, 52 | "model/gnn_name": "KapoorConv", 53 | "num_nodes": 100, 54 | "weight_type": "state", 55 | } 56 | 57 | launching(config) 58 | shutil.rmtree("test") 59 | -------------------------------------------------------------------------------- /scripts/si-figures/run-bias.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import sys 5 | 6 | sys.path.append("../sources") 7 | from script import launch_scan 8 | 9 | sys.path.append("../si-figures") 10 | 11 | specs = json.load(open("../sources/specs.json", "r"))["default"] 12 | 13 | 14 | def launching(config): 15 | launch_scan( 16 | name, 17 | os.path.join(specs["path_to_data"], "bias"), 18 | "../sources/run.py", 19 | command=specs["command"], 20 | time="12:00:00", 21 | memory="8G", 22 | account=specs["account"], 23 | 
modules_to_load=specs["modules_to_load"], 24 | source_path=specs["source_path"], 25 | config=config, 26 | device=specs["device"], 27 | verbose=2, 28 | ) 29 | 30 | 31 | name = "exp" 32 | config = { 33 | "dynamics": ["sis", "plancksis", "sissis"], 34 | "network": ["gnp", "ba"], 35 | "tasks": ( 36 | "generate_data", 37 | "partition_val_dataset", 38 | "train_model", 39 | "compute_metrics", 40 | ), 41 | "metrics": ("ltp"), 42 | "to_zip": ("config.pickle", "metrics.h5"), 43 | "train_details/num_samples": 10000, 44 | "train_details/use_groundtruth": [0, 1], 45 | "train_details/resampling": 2, 46 | "train_details/val_bias": 0.5, 47 | "train_details/val_fraction": 0.01, 48 | "train_details/train_bias": [0.0, 0.25, 0.5, 0.75, 1.0], 49 | "train_details/epochs": 60, 50 | "networks/num_nodes": 1000, 51 | "weight_type": "state", 52 | "seed": 0, 53 | } 54 | launching(config) 55 | -------------------------------------------------------------------------------- /scripts/si-figures/run-datasize.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import sys 5 | 6 | sys.path.append("../sources") 7 | from script import launch_scan 8 | 9 | sys.path.append("../si-figures") 10 | 11 | specs = json.load(open("../sources/specs.json", "r"))["default"] 12 | 13 | 14 | def launching(config): 15 | launch_scan( 16 | name, 17 | os.path.join(specs["path_to_data"], "datasize"), 18 | "../sources/run.py", 19 | command=specs["command"], 20 | time="12:00:00", 21 | memory="8G", 22 | account=specs["account"], 23 | modules_to_load=specs["modules_to_load"], 24 | source_path=specs["source_path"], 25 | config=config, 26 | device=specs["device"], 27 | verbose=2, 28 | ) 29 | 30 | 31 | name = "exp" 32 | config = { 33 | "dynamics": ["sis", "plancksis", "sissis"], 34 | "network": ["gnp", "ba"], 35 | "tasks": ( 36 | "generate_data", 37 | "partition_val_dataset", 38 | "train_model", 39 | "compute_metrics", 40 | ), 41 | "metrics": ("ltp"), 42 | "to_zip": ("config.pickle", "metrics.h5"), 43 | "train_details/num_samples": [100, 500, 1000, 5000, 10000], 44 | "train_details/use_groundtruth": 0, 45 | "train_details/resampling": 2, 46 | "train_details/val_bias": 0.5, 47 | "train_details/val_fraction": 0.01, 48 | "train_details/train_bias": 0.5, 49 | "train_details/epochs": 60, 50 | "networks/num_nodes": 1000, 51 | "weight_type": "state", 52 | "seed": 0, 53 | } 54 | launching(config) 55 | -------------------------------------------------------------------------------- /scripts/si-figures/run-gnnlayers.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import sys 5 | 6 | sys.path.append("../sources") 7 | from script import launch_scan 8 | 9 | sys.path.append("../si-figures") 10 | 11 | specs = json.load(open("../sources/specs.json", "r"))["default"] 12 | 13 | 14 | def launching(config): 15 | launch_scan( 16 | name, 17 | os.path.join(specs["path_to_data"], "gnn-layers"), 18 | "../sources/run.py", 19 | command=specs["command"], 20 | time="12:00:00", 21 | memory="8G", 22 | account=specs["account"], 23 | modules_to_load=specs["modules_to_load"], 24 | source_path=specs["source_path"], 25 | config=config, 26 | device=specs["device"], 27 | verbose=2, 28 | ) 29 | 30 | 31 | name = "exp" 32 | config = { 33 | "dynamics": ["sis", "plancksis", "sissis"], 34 | "network": ["gnp", "ba"], 35 | "tasks": ( 36 | "generate_data", 37 | "partition_val_dataset", 38 | "train_model", 39 |
"compute_metrics", 40 | ), 41 | "metrics": ("ltp"), 42 | "to_zip": ("config.pickle", "metrics.h5"), 43 | "train_details/num_samples": 10000, 44 | "train_details/use_groundtruth": 0, 45 | "train_details/resampling": 2, 46 | "train_details/val_bias": 0.5, 47 | "train_details/val_fraction": 0.01, 48 | "train_details/train_bias": 0.5, 49 | "train_details/epochs": 60, 50 | "networks/num_nodes": 1000, 51 | "model/gnn_name": [ 52 | "GATConv", 53 | "SAGEConv", 54 | "GCNConv", 55 | "MeanGraphConv", 56 | "MaxGraphConv", 57 | "AddGraphConv", 58 | "KapoorConv", 59 | "DynamicsGATConv", 60 | ], 61 | "weight_type": "state", 62 | "seed": 0, 63 | } 64 | launching(config) 65 | -------------------------------------------------------------------------------- /scripts/si-figures/run-netsize.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import sys 5 | 6 | sys.path.append("../sources") 7 | from script import launch_scan 8 | 9 | sys.path.append("../si-figures") 10 | 11 | specs = json.load(open("../sources/specs.json", "r"))["default"] 12 | 13 | 14 | def launching(config): 15 | launch_scan( 16 | name, 17 | os.path.join(specs["path_to_data"], "netsize"), 18 | "../sources/run.py", 19 | command=specs["command"], 20 | time="12:00:00", 21 | memory="8G", 22 | account=specs["account"], 23 | modules_to_load=specs["modules_to_load"], 24 | source_path=specs["source_path"], 25 | config=config, 26 | device=specs["device"], 27 | verbose=2, 28 | ) 29 | 30 | 31 | name = "exp" 32 | config = { 33 | "dynamics": ["sis", "plancksis", "sissis"], 34 | "network": ["gnp", "ba"], 35 | "tasks": ( 36 | "generate_data", 37 | "partition_val_dataset", 38 | "train_model", 39 | "compute_metrics", 40 | ), 41 | "metrics": ("ltp"), 42 | "to_zip": ("config.pickle", "metrics.h5"), 43 | "train_details/num_samples": 10000, 44 | "train_details/use_groundtruth": 0, 45 | "train_details/resampling": 2, 46 | "train_details/val_bias": 0.5, 47 | "train_details/val_fraction": 0.01, 48 | "train_details/train_bias": 0.5, 49 | "train_details/epochs": 60, 50 | "networks/num_nodes": [100, 250, 500, 1000, 5000, 10000], 51 | "weight_type": "state", 52 | "seed": 0, 53 | } 54 | launching(config) 55 | -------------------------------------------------------------------------------- /scripts/si-figures/run-resamp.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import numpy as np 4 | import sys 5 | 6 | sys.path.append("../sources") 7 | from script import launch_scan 8 | 9 | sys.path.append("../si-figures") 10 | 11 | specs = json.load(open("../sources/specs.json", "r"))["default"] 12 | 13 | 14 | def launching(config): 15 | launch_scan( 16 | name, 17 | os.path.join(specs["path_to_data"], "resamp"), 18 | "../sources/run.py", 19 | command=specs["command"], 20 | time="12:00:00", 21 | memory="8G", 22 | account=specs["account"], 23 | modules_to_load=specs["modules_to_load"], 24 | source_path=specs["source_path"], 25 | config=config, 26 | device=specs["device"], 27 | verbose=2, 28 | ) 29 | 30 | 31 | name = "exp" 32 | config = { 33 | "dynamics": ["sis", "plancksis", "sissis"], 34 | "network": ["gnp", "ba"], 35 | "tasks": ( 36 | "generate_data", 37 | "partition_val_dataset", 38 | "train_model", 39 | "compute_metrics", 40 | ), 41 | "metrics": ("ltp"), 42 | "to_zip": ("config.pickle", "metrics.h5"), 43 | "train_details/num_samples": 10000, 44 | "train_details/use_groundtruth": 0, 45 | "train_details/resampling": 
np.logspace(0, 4, 5).astype("int").tolist(), 46 | "train_details/val_bias": 0.5, 47 | "train_details/val_fraction": 0.01, 48 | "train_details/train_bias": 0.5, 49 | "train_details/epochs": 60, 50 | "networks/num_nodes": 1000, 51 | "weight_type": "state", 52 | "seed": 0, 53 | } 54 | launching(config) 55 | -------------------------------------------------------------------------------- /scripts/sources/specs.json: -------------------------------------------------------------------------------- 1 | { 2 | "default":{ 3 | "path_to_data": "../data/", 4 | "path_to_script": "./sources", 5 | "modules_to_load": [], 6 | "source_path": "./", 7 | "account": "", 8 | "command": "bash", 9 | "device": "cpu" 10 | }, 11 | "hector":{ 12 | "path_to_data": "/home/charles_murphy/Documents/ulaval/doctorat/projects/data/dynalearn-data/", 13 | "path_to_script": "/home/charles_murphy/Documents/ulaval/doctorat/projects/codes/dynalearn/dynalearn/scripts", 14 | "modules_to_load": [], 15 | "source_path": "./", 16 | "account": "", 17 | "command": "bash", 18 | "device": "cpu" 19 | }, 20 | "bernard":{ 21 | "path_to_data": "/home/charles/Documents/ulaval/doctorat/projects/data/dynalearn-data/", 22 | "path_to_script": "/home/charles/Documents/ulaval/doctorat/projects/codes/dynalearn/dynalearn/scripts", 23 | "modules_to_load": [], 24 | "source_path": "./", 25 | "account": "", 26 | "command": "bash", 27 | "device": "cpu" 28 | }, 29 | "beluga":{ 30 | "path_to_data": "/home/murphy9/projects/def-aallard/murphy9/data/dynalearn-data/", 31 | "path_to_script": "/home/murphy9/codes/dynalearn/dynalearn/scripts", 32 | "modules_to_load": ["StdEnv/2020", "gcc/9", "python/3.8", "graph-tool"], 33 | "source_path": "/home/murphy9/.dynalearn-env/bin/activate", 34 | "account": "def-aallard", 35 | "command": "sbatch", 36 | "device": "cpu" 37 | }, 38 | "graham":{ 39 | "path_to_data": "/home/murphy9/projects/def-aallard/murphy9/data/dynalearn-data/", 40 | "path_to_script": "/home/murphy9/codes/dynalearn/dynalearn/scripts", 41 | "modules_to_load": ["StdEnv/2020", "gcc/9", "python/3.8", "graph-tool"], 42 | "source_path": "/home/murphy9/.dynalearn-env/bin/activate", 43 | "account": "def-aallard", 44 | "command": "sbatch", 45 | "device": "cpu" 46 | }, 47 | "cedar":{ 48 | "path_to_data": "/home/murphy9/projects/def-aallard/murphy9/data/dynalearn-data/", 49 | "path_to_script": "/home/murphy9/scratch/codes/dynalearn/dynalearn/scripts", 50 | "modules_to_load": ["StdEnv/2020", "gcc/9", "python/3.8", "graph-tool", "scipy-stack"], 51 | "source_path": "/home/murphy9/.dynalearn-env/bin/activate", 52 | "account": "def-aallard", 53 | "command": "sbatch", 54 | "device": "cpu" 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | from setuptools.extension import Extension 3 | import numpy as np 4 | 5 | extensions = [] 6 | setup( 7 | name="dynalearn", 8 | version=1.0, 9 | packages=find_packages(), 10 | include_package_data=True, 11 | ext_modules=extensions, 12 | setup_requires=[], 13 | ) 14 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import tests.config 2 | import tests.datasets 3 | import tests.dynamics 4 | import tests.experiments 5 | import tests.networks 6 | import tests.nn 7 | import tests.util 8 | 
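The tests/all.py module shown next star-imports every test case of the suite, so the whole battery can be run through the standard unittest runner; a minimal sketch, assuming dynalearn has been installed first (e.g. with `pip install -e .`):

import unittest

from tests.all import *  # collects every TestCase into this namespace

if __name__ == "__main__":
    unittest.main()  # equivalent to: python -m unittest tests.all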
-------------------------------------------------------------------------------- /tests/all.py: -------------------------------------------------------------------------------- 1 | from .config.all import * 2 | from .datasets.all import * 3 | from .dynamics.all import * 4 | from .experiments.all import * 5 | from .networks.all import * 6 | from .nn.all import * 7 | from .util.all import * 8 | -------------------------------------------------------------------------------- /tests/config/__init__.py: -------------------------------------------------------------------------------- 1 | import tests.config.test_callback 2 | import tests.config.test_config 3 | import tests.config.test_dataset 4 | import tests.config.test_dynamics 5 | import tests.config.test_experiment 6 | import tests.config.test_metrics 7 | import tests.config.test_networks 8 | import tests.config.test_optimizer 9 | import tests.config.test_trainable 10 | import tests.config.test_training 11 | import tests.config.test_weights 12 | -------------------------------------------------------------------------------- /tests/config/all.py: -------------------------------------------------------------------------------- 1 | from .test_callback import * 2 | from .test_config import * 3 | from .test_dataset import * 4 | from .test_dynamics import * 5 | from .test_experiment import * 6 | from .test_metrics import * 7 | from .test_networks import * 8 | from .test_optimizer import * 9 | from .test_trainable import * 10 | from .test_training import * 11 | from .test_weights import * 12 | -------------------------------------------------------------------------------- /tests/config/templates.py: -------------------------------------------------------------------------------- 1 | class TemplateConfigTest: 2 | def test_attributes(self): 3 | for k in self.attributes: 4 | v = self.config.get(k) 5 | if v is None: 6 | print(f"Config with key `{k}` is `None`.") 7 | self.assertTrue(v is not None) 8 | 9 | def test_name(self): 10 | if "name" in self.__dict__: 11 | self.assertEqual(self.config.name, self.name) 12 | -------------------------------------------------------------------------------- /tests/config/test_callback.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | 4 | sys.path.append("../") 5 | 6 | from dynalearn.config.util import CallbackConfig 7 | from .templates import TemplateConfigTest 8 | 9 | 10 | class CallbackConfigTest(TemplateConfigTest, unittest.TestCase): 11 | def setUp(self): 12 | self.config = CallbackConfig.default() 13 | self.attributes = ["step_size", "gamma", "path_to_best"] 14 | -------------------------------------------------------------------------------- /tests/config/test_config.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from dynalearn.config import Config, DynamicsConfig 4 | 5 | 6 | class ConfigTest(unittest.TestCase): 7 | def setUp(self): 8 | self.dynamics = DynamicsConfig.sis() 9 | self.config = Config() 10 | self.config.dynamics = self.dynamics 11 | self.config.seed = 0 12 | 13 | def test_init(self): 14 | self.config 15 | 16 | def test_getitem(self): 17 | self.assertEqual( 18 | self.config["dynamics/infection"], self.config.dynamics.infection 19 | ) 20 | self.assertEqual(self.config["seed"], 0) 21 | 22 | def test_haslist(self): 23 | config_with_list = self.config.copy() 24 | config_with_list.dynamics.alpha = [0.0, 0.5, 1.0] 25 | self.assertTrue(config_with_list.has_list()) 26 
| self.assertFalse(self.config.has_list()) 27 | 28 | def test_statedict(self): 29 | self.assertEqual(self.config.state_dict["dynamics/name"], "SIS") 30 | self.assertEqual(self.config.state_dict["dynamics/infection"], 0.04) 31 | self.assertEqual(self.config.state_dict["dynamics/recovery"], 0.08) 32 | self.assertEqual(self.config.state_dict["dynamics/init_param"], None) 33 | self.assertEqual(self.config.state_dict["seed"], 0) 34 | 35 | 36 | if __name__ == "__main__": 37 | unittest.main() 38 | -------------------------------------------------------------------------------- /tests/config/test_dataset.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from dynalearn.config import DiscreteDatasetConfig, ContinuousDatasetConfig 4 | from .templates import TemplateConfigTest 5 | 6 | 7 | class DiscretePlainDatasetConfigTest(TemplateConfigTest, unittest.TestCase): 8 | def setUp(self): 9 | self.config = DiscreteDatasetConfig.plain() 10 | self.attributes = ["modes", "bias", "replace", "use_groundtruth"] 11 | self.name = "DiscreteDataset" 12 | 13 | 14 | class DiscreteStructureDatasetConfigTest(TemplateConfigTest, unittest.TestCase): 15 | def setUp(self): 16 | self.config = DiscreteDatasetConfig.structure() 17 | self.attributes = [ 18 | "modes", 19 | "bias", 20 | "replace", 21 | "use_groundtruth", 22 | "use_strength", 23 | ] 24 | self.name = "DiscreteStructureWeightDataset" 25 | 26 | 27 | class DiscreteStateDatasetConfigTest(TemplateConfigTest, unittest.TestCase): 28 | def setUp(self): 29 | self.config = DiscreteDatasetConfig.state() 30 | self.attributes = [ 31 | "modes", 32 | "bias", 33 | "replace", 34 | "use_groundtruth", 35 | "use_strength", 36 | "compounded", 37 | ] 38 | self.name = "DiscreteStateWeightDataset" 39 | 40 | 41 | class ContinuousPlainDatasetConfigTest(TemplateConfigTest, unittest.TestCase): 42 | def setUp(self): 43 | self.config = ContinuousDatasetConfig.plain() 44 | self.attributes = ["modes", "bias", "replace", "use_groundtruth"] 45 | self.name = "ContinuousDataset" 46 | 47 | 48 | class ContinuousStructureDatasetConfigTest(TemplateConfigTest, unittest.TestCase): 49 | def setUp(self): 50 | self.config = ContinuousDatasetConfig.structure() 51 | self.attributes = [ 52 | "modes", 53 | "bias", 54 | "replace", 55 | "use_groundtruth", 56 | "use_strength", 57 | ] 58 | self.name = "ContinuousStructureWeightDataset" 59 | 60 | 61 | class ContinuousStateDatasetConfigTest(TemplateConfigTest, unittest.TestCase): 62 | def setUp(self): 63 | self.config = ContinuousDatasetConfig.state() 64 | self.attributes = [ 65 | "modes", 66 | "bias", 67 | "replace", 68 | "use_groundtruth", 69 | "use_strength", 70 | "compounded", 71 | "total", 72 | "reduce", 73 | "max_num_points", 74 | ] 75 | self.name = "ContinuousStateWeightDataset" 76 | -------------------------------------------------------------------------------- /tests/config/test_dynamics.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from dynalearn.config import DynamicsConfig 4 | from .templates import TemplateConfigTest 5 | 6 | 7 | class SISConfigTest(TemplateConfigTest, unittest.TestCase): 8 | def setUp(self): 9 | self.config = DynamicsConfig.sis() 10 | self.name = "SIS" 11 | self.attributes = [ 12 | "infection", 13 | "recovery", 14 | ] 15 | 16 | 17 | class PlanckSISConfigTest(TemplateConfigTest, unittest.TestCase): 18 | def setUp(self): 19 | self.config = DynamicsConfig.plancksis() 20 | self.name = "PlanckSIS" 21 | 
self.attributes = [ 22 | "temperature", 23 | "recovery", 24 | ] 25 | 26 | 27 | class SISSISConfigTest(TemplateConfigTest, unittest.TestCase): 28 | def setUp(self): 29 | self.config = DynamicsConfig.sissis() 30 | self.name = "AsymmetricSISSIS" 31 | self.attributes = [ 32 | "infection1", 33 | "infection2", 34 | "recovery1", 35 | "recovery2", 36 | "coupling", 37 | "boost", 38 | ] 39 | 40 | 41 | class DSIRConfigTest(TemplateConfigTest, unittest.TestCase): 42 | def setUp(self): 43 | self.config = DynamicsConfig.dsir() 44 | self.name = "DSIR" 45 | self.attributes = [ 46 | "infection_prob", 47 | "recovery_prob", 48 | "infection_type", 49 | "density", 50 | ] 51 | 52 | 53 | class IncSIRConfigTest(TemplateConfigTest, unittest.TestCase): 54 | def setUp(self): 55 | self.config = DynamicsConfig.incsir() 56 | self.name = "IncSIR" 57 | self.attributes = [ 58 | "infection_prob", 59 | "recovery_prob", 60 | "infection_type", 61 | "density", 62 | ] 63 | 64 | 65 | if __name__ == "__main__": 66 | unittest.main() 67 | -------------------------------------------------------------------------------- /tests/config/test_experiment.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | 4 | from dynalearn.config import ExperimentConfig 5 | from .templates import TemplateConfigTest 6 | 7 | EXP_ATTRIBUTES = [ 8 | "path_to_data", 9 | "path_to_best", 10 | "path_to_summary", 11 | "dynamics", 12 | "networks", 13 | "model", 14 | "metrics", 15 | "train_metrics", 16 | "callbacks", 17 | "seed", 18 | ] 19 | 20 | 21 | class ExperimentConfigTest(TemplateConfigTest, unittest.TestCase): 22 | def setUp(self): 23 | self.config = ExperimentConfig.default("test-exp", "sis", "ba") 24 | self.attributes = EXP_ATTRIBUTES 25 | self.name = "test-exp" 26 | 27 | def tearDown(self): 28 | os.removedirs(f"./{self.name}") 29 | 30 | 31 | class COVIDExperimentConfigTest(TemplateConfigTest, unittest.TestCase): 32 | def setUp(self): 33 | self.config = ExperimentConfig.covid("test-covid-exp") 34 | self.attributes = EXP_ATTRIBUTES 35 | self.name = "test-covid-exp" 36 | 37 | def tearDown(self): 38 | os.removedirs(f"./{self.name}") 39 | -------------------------------------------------------------------------------- /tests/config/test_metrics.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from .templates import TemplateConfigTest 4 | from dynalearn.config.util import ( 5 | AttentionConfig, 6 | ForecastConfig, 7 | LTPConfig, 8 | PredictionConfig, 9 | StationaryConfig, 10 | StatisticsConfig, 11 | ) 12 | 13 | 14 | class AttentionConfigTest(TemplateConfigTest, unittest.TestCase): 15 | def setUp(self): 16 | self.config = AttentionConfig.default() 17 | self.attributes = ["max_num_points"] 18 | 19 | 20 | class ForecastConfigTest(TemplateConfigTest, unittest.TestCase): 21 | def setUp(self): 22 | self.config = ForecastConfig.default() 23 | self.attributes = ["num_steps"] 24 | 25 | 26 | class LTPConfigTest(TemplateConfigTest, unittest.TestCase): 27 | def setUp(self): 28 | self.config = LTPConfig.default() 29 | self.attributes = ["max_num_sample", "max_num_points"] 30 | 31 | 32 | class PredictionConfigTest(TemplateConfigTest, unittest.TestCase): 33 | def setUp(self): 34 | self.config = PredictionConfig.default() 35 | self.attributes = ["max_num_points"] 36 | 37 | 38 | class StoContStationaryConfigTest(TemplateConfigTest, unittest.TestCase): 39 | def setUp(self): 40 | self.config = StationaryConfig.sis() 41 | self.attributes = [ 42 |
"adaptive", 43 | "num_nodes", 44 | "init_param", 45 | "sampler", 46 | "burn", 47 | "T", 48 | "tol", 49 | "num_samples", 50 | "statistics", 51 | "parameters", 52 | ] 53 | 54 | 55 | class MetapopStationaryConfigTest(TemplateConfigTest, unittest.TestCase): 56 | def setUp(self): 57 | self.config = StationaryConfig.dsir() 58 | self.attributes = [ 59 | "adaptive", 60 | "num_nodes", 61 | "init_param", 62 | "sampler", 63 | "initial_burn", 64 | "init_epsilon", 65 | "mid_burn", 66 | "tol", 67 | "maxiter", 68 | "num_samples", 69 | "statistics", 70 | "parameters", 71 | ] 72 | 73 | 74 | class StatisticsConfigTest(TemplateConfigTest, unittest.TestCase): 75 | def setUp(self): 76 | self.config = StatisticsConfig.default() 77 | self.attributes = ["maxlag", "max_num_points"] 78 | -------------------------------------------------------------------------------- /tests/config/test_networks.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from dynalearn.config import NetworkConfig 4 | from .templates import TemplateConfigTest 5 | 6 | 7 | class GNPConfigTest(TemplateConfigTest, unittest.TestCase): 8 | def setUp(self): 9 | self.config = NetworkConfig.gnp(num_nodes=1000, p=0.004) 10 | self.attributes = ["num_nodes", "p"] 11 | self.name = "GNPNetworkGenerator" 12 | 13 | 14 | class WGNPConfigTest(TemplateConfigTest, unittest.TestCase): 15 | def setUp(self): 16 | self.config = NetworkConfig.w_gnp(num_nodes=1000, p=0.004) 17 | self.attributes = ["num_nodes", "p", "weights", "transforms"] 18 | self.name = "GNPNetworkGenerator" 19 | 20 | 21 | class BAConfigTest(TemplateConfigTest, unittest.TestCase): 22 | def setUp(self): 23 | self.config = NetworkConfig.ba(num_nodes=1000, m=2) 24 | self.attributes = ["num_nodes", "m"] 25 | self.name = "BANetworkGenerator" 26 | 27 | 28 | class WBAConfigTest(TemplateConfigTest, unittest.TestCase): 29 | def setUp(self): 30 | self.config = NetworkConfig.w_ba(num_nodes=1000, m=2) 31 | self.attributes = ["num_nodes", "m", "weights", "transforms"] 32 | self.name = "BANetworkGenerator" 33 | 34 | 35 | class MWBAConfigTest(TemplateConfigTest, unittest.TestCase): 36 | def setUp(self): 37 | self.config = NetworkConfig.mw_ba(num_nodes=1000, m=2) 38 | self.attributes = ["num_nodes", "m", "weights", "transforms", "layers"] 39 | self.name = "BANetworkGenerator" 40 | -------------------------------------------------------------------------------- /tests/config/test_optimizer.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from dynalearn.config.util import OptimizerConfig 4 | from .templates import TemplateConfigTest 5 | 6 | 7 | class OptimizerConfigTest(TemplateConfigTest, unittest.TestCase): 8 | def setUp(self): 9 | self.config = OptimizerConfig.default() 10 | self.attributes = ["lr", "weight_decay", "betas", "eps", "amsgrad"] 11 | self.name = "RAdam" 12 | -------------------------------------------------------------------------------- /tests/config/test_trainable.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from dynalearn.config import TrainableConfig 4 | from .templates import TemplateConfigTest 5 | 6 | GNN_ATTRIBUTES = [ 7 | "gnn_name", 8 | "type", 9 | "num_states", 10 | "lag", 11 | "lagstep", 12 | "optimizer", 13 | "in_activation", 14 | "gnn_activation", 15 | "out_activation", 16 | "in_channels", 17 | "gnn_channels", 18 | "out_channels", 19 | "heads", 20 | "concat", 21 | "bias", 22 | "self_attention", 23 
| ] 24 | 25 | WGNN_ATTRIBUTES = GNN_ATTRIBUTES + [ 26 | "weighted", 27 | "node_activation", 28 | "edge_activation", 29 | "node_channels", 30 | "edge_channels", 31 | "edge_gnn_channels", 32 | ] 33 | 34 | 35 | class TrainableSISConfigTest(TemplateConfigTest, unittest.TestCase): 36 | def setUp(self): 37 | self.config = TrainableConfig.sis() 38 | self.name = "GNNSEDynamics" 39 | self.attributes = GNN_ATTRIBUTES 40 | 41 | 42 | class TrainablePlanckSISConfigTest(TemplateConfigTest, unittest.TestCase): 43 | def setUp(self): 44 | self.config = TrainableConfig.plancksis() 45 | self.name = "GNNSEDynamics" 46 | self.attributes = GNN_ATTRIBUTES 47 | 48 | 49 | class TrainableSISSISConfigTest(TemplateConfigTest, unittest.TestCase): 50 | def setUp(self): 51 | self.config = TrainableConfig.sissis() 52 | self.name = "GNNSEDynamics" 53 | self.attributes = GNN_ATTRIBUTES 54 | 55 | 56 | class TrainableDSIRConfigTest(TemplateConfigTest, unittest.TestCase): 57 | def setUp(self): 58 | self.config = TrainableConfig.dsir() 59 | self.name = "GNNDEDynamics" 60 | self.attributes = WGNN_ATTRIBUTES 61 | 62 | 63 | class TrainableIncSIRConfigTest(TemplateConfigTest, unittest.TestCase): 64 | def setUp(self): 65 | self.config = TrainableConfig.incsir() 66 | self.name = "GNNIncidenceDynamics" 67 | self.attributes = WGNN_ATTRIBUTES 68 | 69 | 70 | class TrainableKapoorConfigTest(TemplateConfigTest, unittest.TestCase): 71 | def setUp(self): 72 | self.config = TrainableConfig.kapoor() 73 | self.name = "KapoorDynamics" 74 | self.attributes = ["lag", "lagstep", "num_states", "optimizer"] 75 | -------------------------------------------------------------------------------- /tests/config/test_training.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from dynalearn.config.util import TrainingConfig 4 | from .templates import TemplateConfigTest 5 | 6 | TRAIN_ATTRIBUTES = [ 7 | "val_fraction", 8 | "val_bias", 9 | "epochs", 10 | "batch_size", 11 | "num_networks", 12 | "num_samples", 13 | "resampling", 14 | "maxlag", 15 | "resample_when_dead", 16 | ] 17 | 18 | 19 | class DiscreteTrainingConfigTest(TemplateConfigTest, unittest.TestCase): 20 | def setUp(self): 21 | self.config = TrainingConfig.discrete() 22 | self.attributes = TRAIN_ATTRIBUTES 23 | 24 | 25 | class ContinuousTrainingConfigTest(TemplateConfigTest, unittest.TestCase): 26 | def setUp(self): 27 | self.config = TrainingConfig.continuous() 28 | self.attributes = TRAIN_ATTRIBUTES 29 | -------------------------------------------------------------------------------- /tests/config/test_weights.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from dynalearn.config.util import WeightConfig 4 | from .templates import TemplateConfigTest 5 | 6 | 7 | class UniformWeightConfigTest(TemplateConfigTest, unittest.TestCase): 8 | def setUp(self): 9 | self.config = WeightConfig.uniform() 10 | self.attributes = ["low", "high"] 11 | self.name = "UniformWeightGenerator" 12 | -------------------------------------------------------------------------------- /tests/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | import tests.datasets.test_continuousdataset 2 | import tests.datasets.test_data 3 | import tests.datasets.test_discretedataset 4 | import tests.datasets.test_kde 5 | import tests.datasets.test_remap 6 | import tests.datasets.test_sampler 7 | import tests.datasets.test_threshold 8 | import 
tests.datasets.test_weightdata 9 | -------------------------------------------------------------------------------- /tests/datasets/all.py: -------------------------------------------------------------------------------- 1 | from .test_continuousdataset import * 2 | from .test_data import * 3 | from .test_discretedataset import * 4 | from .test_kde import * 5 | from .test_remap import * 6 | from .test_sampler import * 7 | from .test_threshold import * 8 | from .test_weightdata import * 9 | -------------------------------------------------------------------------------- /tests/datasets/test_continuousdataset.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import os 4 | import torch 5 | import unittest 6 | 7 | from .templates import TemplateDatasetTest 8 | from dynalearn.config import ExperimentConfig 9 | 10 | 11 | class ContinuousDatasetTest(TemplateDatasetTest, unittest.TestCase): 12 | def get_config(self): 13 | return ExperimentConfig.test(config="continuous") 14 | 15 | def get_input_shape(self): 16 | return self.num_nodes, self.num_states, self.lag 17 | 18 | 19 | if __name__ == "__main__": 20 | unittest.main() 21 | -------------------------------------------------------------------------------- /tests/datasets/test_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import unittest 3 | from unittest import TestCase 4 | from dynalearn.datasets import DataCollection, StateData 5 | 6 | 7 | class TestData(TestCase): 8 | def setUp(self): 9 | self.name = "data" 10 | self.size = 10 11 | self.shape = (5, 6) 12 | self.num_points = 14 13 | self.data = DataCollection( 14 | name=self.name, 15 | data_list=[ 16 | StateData(name=self.name, data=np.random.rand(self.size, *self.shape)) 17 | for i in range(self.num_points) 18 | ], 19 | ) 20 | 21 | def test_get(self): 22 | index = np.random.randint(self.num_points) 23 | self.assertEqual(self.data[index].shape, self.shape) 24 | 25 | def test_size(self): 26 | index = np.random.randint(self.num_points) 27 | self.assertEqual(self.data[index].size, self.size) 28 | 29 | def test_num_points(self): 30 | self.assertEqual(self.data.size, self.num_points) 31 | 32 | 33 | if __name__ == "__main__": 34 | unittest.main() 35 | -------------------------------------------------------------------------------- /tests/datasets/test_discretedataset.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import unittest 5 | 6 | from .templates import TemplateDatasetTest 7 | from dynalearn.config import ExperimentConfig 8 | from dynalearn.experiments import Experiment 9 | 10 | 11 | class DiscreteDatasetTest(TemplateDatasetTest, unittest.TestCase): 12 | def get_config(self): 13 | return ExperimentConfig.test(config="discrete") 14 | 15 | def get_input_shape(self): 16 | return self.num_nodes, self.lag 17 | 18 | 19 | if __name__ == "__main__": 20 | unittest.main() 21 | -------------------------------------------------------------------------------- /tests/datasets/test_kde.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | from scipy.stats import gaussian_kde 5 | from sklearn.neighbors import KernelDensity 6 | from dynalearn.datasets.weights import KernelDensityEstimator 7 | 8 | 9 | class KernelDensityEstimatorTest(unittest.TestCase): 10 | def setUp(self): 11 |
self.num_points = 10 12 | self.shape = 3 13 | self.dataset1 = [np.random.randn(self.shape) for i in range(self.num_points)] 14 | self.dataset2 = [self.dataset1[0] for i in range(self.num_points)] 15 | self.dataset3 = [np.random.randn(self.shape)] 16 | 17 | def test_init(self): 18 | kde = KernelDensityEstimator(self.dataset1) 19 | kde = KernelDensityEstimator(self.dataset2) 20 | kde = KernelDensityEstimator(self.dataset3) 21 | 22 | def test_mean(self): 23 | kde = KernelDensityEstimator(self.dataset1) 24 | mean = np.expand_dims(np.array(self.dataset1).mean(0), axis=-1) 25 | np.testing.assert_array_almost_equal(kde._mean, mean) 26 | 27 | kde = KernelDensityEstimator(self.dataset2) 28 | self.assertEqual(kde._mean, None) 29 | 30 | kde = KernelDensityEstimator(self.dataset3) 31 | self.assertEqual(kde._mean, None) 32 | 33 | def test_std(self): 34 | kde = KernelDensityEstimator(self.dataset1) 35 | std = np.expand_dims(np.array(self.dataset1).std(0), axis=-1) 36 | np.testing.assert_array_almost_equal(kde._std, std) 37 | 38 | kde = KernelDensityEstimator(self.dataset2) 39 | self.assertEqual(kde._std, None) 40 | 41 | kde = KernelDensityEstimator(self.dataset3) 42 | self.assertEqual(kde._std, None) 43 | 44 | def test_pdf(self): 45 | index = np.random.randint(len(self.dataset1)) 46 | x = np.array(self.dataset1).reshape(self.num_points, self.shape).T 47 | y = (x - np.expand_dims(x.mean(-1), axis=-1)) / np.expand_dims( 48 | x.std(-1), axis=-1 49 | ) 50 | my_kde = KernelDensityEstimator(self.dataset1) 51 | kde = KernelDensity(kernel="gaussian").fit(y.T) 52 | p = np.exp(kde.score_samples(y.T)) 53 | p /= p.sum() 54 | np.testing.assert_array_almost_equal(my_kde.pdf(self.dataset1), p) 55 | 56 | my_kde = KernelDensityEstimator(self.dataset2) 57 | np.testing.assert_array_almost_equal( 58 | my_kde.pdf(self.dataset2), np.ones(len(self.dataset2)) / len(self.dataset2) 59 | ) 60 | 61 | my_kde = KernelDensityEstimator(self.dataset3) 62 | np.testing.assert_array_almost_equal( 63 | my_kde.pdf(self.dataset3), np.ones(len(self.dataset3)) / len(self.dataset3) 64 | ) 65 | 66 | 67 | if __name__ == "__main__": 68 | unittest.main() 69 | -------------------------------------------------------------------------------- /tests/datasets/test_remap.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import unittest 5 | from dynalearn.datasets import RemapStateTransform, StateData 6 | from unittest import TestCase 7 | 8 | 9 | class RemapStateTransformTest(TestCase): 10 | def setUp(self): 11 | self.transform = RemapStateTransform() 12 | self.transform.state_map = {0: 0, 1: 1, 2: 0, 3: 1} 13 | return 14 | 15 | def test_call(self): 16 | x = StateData(data=np.random.randint(4, size=1000)) 17 | y = self.transform(x) 18 | x_ref = x.data * 1 19 | x_ref[x.data == 2] = 0 20 | x_ref[x.data == 3] = 1 21 | np.testing.assert_array_equal(x_ref, y.data) 22 | 23 | 24 | if __name__ == "__main__": 25 | unittest.main() 26 | -------------------------------------------------------------------------------- /tests/datasets/test_sampler.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from shutil import rmtree 4 | from .templates import * 5 | from dynalearn.config import ExperimentConfig 6 | from dynalearn.experiments import Experiment 7 | 8 | NUM_SAMPLES = 10 9 | 10 | 11 | class SamplerTest(unittest.TestCase): 12 | def setUp(self): 13 | self.config = ExperimentConfig.test(config="discrete") 14 | self.exp = 
Experiment(self.config, verbose=0) 15 | self.dataset = self.exp.dataset 16 | self.dataset.setup(self.exp) 17 | self.exp.generate_data() 18 | self.sampler = self.dataset.sampler 19 | 20 | def tearDown(self): 21 | if os.path.exists("./test"): 22 | rmtree("./test") 23 | 24 | def test_call(self): 25 | for i in range(NUM_SAMPLES): 26 | out = self.sampler() 27 | self.assertTrue(isinstance(out, tuple)) 28 | self.assertEqual(len(out), 2) 29 | self.assertTrue(out[0] < self.exp.train_details.num_networks) 30 | self.assertTrue(out[1] < self.exp.train_details.num_samples) 31 | 32 | def template_update(self, replace=True): 33 | self.sampler.replace = replace 34 | networks = self.sampler.avail_networks.copy() 35 | states = {k: v.copy() for k, v in self.sampler.avail_states.items()} 36 | i = 0 37 | for g in networks: 38 | for s in states[g]: 39 | self.assertEqual(i, self.sampler.counter) 40 | self.sampler.update(g, s) 41 | if replace: 42 | self.assertTrue(s in self.sampler.avail_states[g]) 43 | else: 44 | self.assertFalse(s in self.sampler.avail_states[g]) 45 | i += 1 46 | if replace: 47 | self.assertTrue(g in self.sampler.avail_networks) 48 | else: 49 | self.assertFalse(g in self.sampler.avail_networks) 50 | 51 | def test_update_with_replace(self): 52 | self.template_update(replace=True) 53 | 54 | def test_update_without_replace(self): 55 | self.template_update(replace=False) 56 | 57 | def test_reset(self): 58 | self.sampler.replace = False 59 | networks = self.sampler.avail_networks.copy() 60 | states = {k: v.copy() for k, v in self.sampler.avail_states.items()} 61 | for i in range(NUM_SAMPLES): 62 | self.sampler() 63 | self.assertNotEqual(networks, self.sampler.avail_networks) 64 | self.assertNotEqual(states, self.sampler.avail_states) 65 | self.assertEqual(self.sampler.counter, NUM_SAMPLES) 66 | self.sampler.reset() 67 | self.assertEqual(self.sampler.counter, 0) 68 | self.assertEqual(networks, self.sampler.avail_networks) 69 | self.assertEqual(states, self.sampler.avail_states) 70 | 71 | 72 | if __name__ == "__main__": 73 | unittest.main() 74 | -------------------------------------------------------------------------------- /tests/datasets/test_threshold.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import unittest 5 | from dynalearn.config import NetworkConfig 6 | from dynalearn.datasets import ThresholdNetworkTransform, NetworkData 7 | from dynalearn.networks.getter import get as get_network 8 | from dynalearn.networks import Network, MultiplexNetwork 9 | from unittest import TestCase 10 | 11 | 12 | class ThresholdNetworkTransformTest(TestCase): 13 | def setUp(self): 14 | self.threshold = 10 15 | self.collapse = True 16 | 17 | self.network = get_network(NetworkConfig.w_ba()) 18 | self.multiplex_network = get_network(NetworkConfig.mw_ba(layers=3)) 19 | 20 | self.transform_coll = ThresholdNetworkTransform( 21 | threshold=self.threshold, collapse=True 22 | ) 23 | self.transform_ncoll = ThresholdNetworkTransform( 24 | threshold=self.threshold, collapse=False 25 | ) 26 | return 27 | 28 | def test_call(self): 29 | g = self.network.generate() 30 | x = NetworkData(data=g) 31 | x = self.transform_coll(x) 32 | self.assertTrue(isinstance(x.data, Network)) 33 | 34 | g = self.multiplex_network.generate() 35 | x = NetworkData(data=g) 36 | x = self.transform_coll(x) 37 | self.assertTrue(isinstance(x.data, Network)) 38 | 39 | g = self.multiplex_network.generate() 40 | x = NetworkData(data=g) 41 | x = 
self.transform_ncoll(x) 42 | self.assertTrue(isinstance(x.data, MultiplexNetwork)) 43 | 44 | 45 | if __name__ == "__main__": 46 | unittest.main() 47 | -------------------------------------------------------------------------------- /tests/datasets/test_weightdata.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from .templates import DiscreteWeightTest, ContinuousWeightTest 4 | from dynalearn.datasets.weights import ( 5 | Weight, 6 | DegreeWeight, 7 | StrengthWeight, 8 | DiscreteStateWeight, 9 | DiscreteCompoundStateWeight, 10 | ContinuousStateWeight, 11 | ContinuousCompoundStateWeight, 12 | StrengthContinuousStateWeight, 13 | StrengthContinuousCompoundStateWeight, 14 | ) 15 | 16 | 17 | class DWeightTest(DiscreteWeightTest, unittest.TestCase): 18 | def get_weight(self): 19 | return Weight() 20 | 21 | 22 | class DDegreeWeightTest(DiscreteWeightTest, unittest.TestCase): 23 | def get_weight(self): 24 | return DegreeWeight() 25 | 26 | 27 | class DStrengthWeightTest(DiscreteWeightTest, unittest.TestCase): 28 | def get_weight(self): 29 | return StrengthWeight() 30 | 31 | 32 | class DiscreteStateWeightTest(DiscreteWeightTest, unittest.TestCase): 33 | def get_weight(self): 34 | return DiscreteStateWeight() 35 | 36 | 37 | class DiscreteCompoundStateWeightTest(DiscreteWeightTest, unittest.TestCase): 38 | def get_weight(self): 39 | return DiscreteCompoundStateWeight() 40 | 41 | 42 | class CWeightTest(ContinuousWeightTest, unittest.TestCase): 43 | def get_weight(self): 44 | return Weight() 45 | 46 | 47 | class CDegreeWeightTest(ContinuousWeightTest, unittest.TestCase): 48 | def get_weight(self): 49 | return DegreeWeight() 50 | 51 | 52 | class CStrengthWeightTest(ContinuousWeightTest, unittest.TestCase): 53 | def get_weight(self): 54 | return StrengthWeight() 55 | 56 | 57 | class ContinuousStateWeightTest(ContinuousWeightTest, unittest.TestCase): 58 | def get_weight(self): 59 | return ContinuousStateWeight() 60 | 61 | 62 | class ContinuousCompoundStateWeightTest(ContinuousWeightTest, unittest.TestCase): 63 | def get_weight(self): 64 | return ContinuousCompoundStateWeight() 65 | 66 | 67 | class StrengthContinuousStateWeightTest(ContinuousWeightTest, unittest.TestCase): 68 | def get_weight(self): 69 | return StrengthContinuousStateWeight() 70 | 71 | 72 | class StrengthContinuousCompoundStateWeightTest( 73 | ContinuousWeightTest, unittest.TestCase 74 | ): 75 | def get_weight(self): 76 | return StrengthContinuousCompoundStateWeight() 77 | 78 | 79 | if __name__ == "__main__": 80 | unittest.main() 81 | -------------------------------------------------------------------------------- /tests/dynamics/__init__.py: -------------------------------------------------------------------------------- 1 | import tests.dynamics.test_deterministic 2 | import tests.dynamics.test_incidence 3 | import tests.dynamics.test_stochasticepidemics 4 | import tests.dynamics.test_trainable 5 | import tests.dynamics.test_var 6 | -------------------------------------------------------------------------------- /tests/dynamics/all.py: -------------------------------------------------------------------------------- 1 | from .test_deterministic import * 2 | from .test_incidence import * 3 | from .test_stochasticepidemics import * 4 | from .test_trainable import * 5 | from .test_var import * 6 | -------------------------------------------------------------------------------- /tests/dynamics/templates.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import time 3 | 4 | from dynalearn.config import NetworkConfig 5 | from dynalearn.networks.getter import get as get_network 6 | 7 | NUM_SAMPLES = 10 8 | 9 | 10 | class StoContTemplateTest: 11 | def get_model(self): 12 | raise NotImplementedError() 13 | 14 | @property 15 | def num_states(self): 16 | if self._num_states is not None: 17 | return self._num_states 18 | else: 19 | raise ValueError("`num_states` is not defined.") 20 | 21 | def setUp(self): 22 | self.num_nodes = 100 23 | self.model = self.get_model() 24 | self.network = get_network(NetworkConfig.ba(self.num_nodes, 2)) 25 | self.model.network = self.network.generate(int(time.time())) 26 | 27 | def test_change_network(self): 28 | self.assertEqual(self.model.num_nodes, self.num_nodes) 29 | self.model.network = self.network.generate(int(time.time())) 30 | self.assertEqual(self.model.num_nodes, self.num_nodes) 31 | 32 | def test_predict(self): 33 | x = np.random.randint(self.num_states, size=self.num_nodes) 34 | y = self.model.predict(x) 35 | self.assertFalse(np.any(np.isnan(y))) 36 | self.assertEqual(y.shape, (self.num_nodes, self.num_states)) 37 | np.testing.assert_array_almost_equal(y.sum(-1), np.ones(self.num_nodes)) 38 | 39 | def test_sample(self): 40 | success = 0 41 | x = np.zeros(self.num_nodes) 42 | self.assertTrue(np.all(x == self.model.sample(x))) 43 | for i in range(NUM_SAMPLES): 44 | x = np.random.randint(self.num_states, size=self.num_nodes) 45 | y = self.model.sample(x) 46 | success += int(np.any(x != y)) 47 | self.assertGreater(success, 0) 48 | 49 | def test_initialstate(self): 50 | x = self.model.initial_state() 51 | self.assertEqual(x.shape, (self.num_nodes,)) 52 | -------------------------------------------------------------------------------- /tests/dynamics/test_deterministic.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import time 5 | import unittest 6 | 7 | from .templates import * 8 | from dynalearn.dynamics import DSIS, DSIR 9 | from dynalearn.config import DynamicsConfig, NetworkConfig 10 | from dynalearn.networks.getter import get as get_network 11 | 12 | 13 | class DSIRTest(unittest.TestCase): 14 | def get_model(self): 15 | self._num_states = 3 16 | return DSIR(DynamicsConfig.dsir()) 17 | 18 | @property 19 | def num_states(self): 20 | if self._num_states is not None: 21 | return self._num_states 22 | else: 23 | raise ValueError("`num_states` is not defined.") 24 | 25 | def setUp(self): 26 | self.num_nodes = 100 27 | self.model = self.get_model() 28 | self.network = get_network(NetworkConfig.ba(self.num_nodes, 2)) 29 | self.model.network = self.network.generate(int(time.time())) 30 | 31 | def test_change_network(self): 32 | self.assertEqual(self.model.num_nodes, self.num_nodes) 33 | self.model.network = self.network.generate(int(time.time())) 34 | self.assertEqual(self.model.num_nodes, self.num_nodes) 35 | 36 | def test_predict(self): 37 | x = self.model.initial_state() 38 | T = 10 39 | for t in range(T): 40 | x = self.model.predict(x) 41 | self.assertFalse(np.any(np.isnan(x))) 42 | self.assertEqual(x.shape, (self.num_nodes, self.num_states)) 43 | np.testing.assert_array_almost_equal(x.sum(-1), np.ones(self.num_nodes)) 44 | 45 | def test_sample(self): 46 | x = self.model.initial_state() 47 | y = self.model.sample(x) 48 | self.assertTrue(np.any(x != y)) 49 | 50 | def test_initialstate(self): 51 | x =
self.model.initial_state() 52 | self.assertEqual(x.shape, (self.num_nodes, self.num_states)) 53 | -------------------------------------------------------------------------------- /tests/dynamics/test_incidence.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import time 5 | import unittest 6 | 7 | from .templates import * 8 | from dynalearn.dynamics import IncSIR 9 | from dynalearn.config import DynamicsConfig, NetworkConfig 10 | from dynalearn.networks.getter import get as get_network 11 | 12 | 13 | class IncidenceSIRTest(unittest.TestCase): 14 | def setUp(self): 15 | self.num_nodes = 10 16 | self.model = IncSIR(DynamicsConfig.incsir()) 17 | self.num_states = self.model.num_states 18 | self.lag = self.model.lag 19 | self.network = get_network(NetworkConfig.ba(self.num_nodes, 2)) 20 | self.model.network = self.network.generate(int(time.time())) 21 | 22 | def test_change_network(self): 23 | self.assertEqual(self.model.num_nodes, self.num_nodes) 24 | self.model.network = self.network.generate(int(time.time())) 25 | self.assertEqual(self.model.num_nodes, self.num_nodes) 26 | 27 | def test_predict(self): 28 | x = np.random.poisson(5, size=(self.num_nodes, 1)) 29 | T = 10 30 | for t in range(T): 31 | x = self.model.predict(x) 32 | self.assertFalse(np.any(np.isnan(x))) 33 | self.assertEqual(x.shape, (self.num_nodes, self.num_states)) 34 | np.testing.assert_array_almost_equal( 35 | self.model.latent_state.sum(-1), np.ones(self.num_nodes) 36 | ) 37 | self.assertTrue(np.all(self.model.latent_state >= 0)) 38 | self.assertTrue(np.all(self.model.latent_state <= 1)) 39 | 40 | def test_sample(self): 41 | x = self.model.initial_state() 42 | y = self.model.sample(x) 43 | self.assertTrue(np.any(x != y)) 44 | 45 | def test_initialstate(self): 46 | x = self.model.initial_state(squeeze=False) 47 | self.assertEqual(x.shape, (self.num_nodes, self.num_states, self.lag)) 48 | -------------------------------------------------------------------------------- /tests/dynamics/test_stochasticepidemics.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import networkx as nx 3 | import numpy as np 4 | import time 5 | 6 | from .templates import * 7 | from dynalearn.dynamics import SIS, SIR, SISSIS, PlanckSIS 8 | from dynalearn.config import DynamicsConfig, NetworkConfig 9 | from dynalearn.networks.getter import get as get_network 10 | 11 | 12 | class SISTest(StoContTemplateTest, unittest.TestCase): 13 | def get_model(self): 14 | self._num_states = 2 15 | return SIS(DynamicsConfig.sis()) 16 | 17 | 18 | class SIRTest(StoContTemplateTest, unittest.TestCase): 19 | def get_model(self): 20 | self._num_states = 3 21 | return SIR(DynamicsConfig.sir()) 22 | 23 | 24 | class SISSISTest(StoContTemplateTest, unittest.TestCase): 25 | def get_model(self): 26 | self._num_states = 4 27 | return SISSIS(DynamicsConfig.sissis()) 28 | 29 | 30 | class PlanckSISTest(StoContTemplateTest, unittest.TestCase): 31 | def get_model(self): 32 | self._num_states = 2 33 | return PlanckSIS(DynamicsConfig.plancksis()) 34 | 35 | 36 | if __name__ == "__main__": 37 | unittest.main() 38 | -------------------------------------------------------------------------------- /tests/dynamics/test_var.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | from .templates import * 5 | from dynalearn.dynamics import VARDynamics 6
| from statsmodels.tsa.vector_ar.var_model import VAR 7 | 8 | 9 | class VARDynamicsTest(unittest.TestCase): 10 | def setUp(self): 11 | self.num_states = 1 12 | self.num_nodes = 5 13 | self.T = 1000 14 | self.coefs = np.random.rand(self.num_nodes) 15 | self.coefs = np.column_stack((self.coefs, 1 - self.coefs)) 16 | self.eps = 0.5 17 | self.lag = 3 18 | self.A = 10 19 | 20 | dataset1 = self.get_data(with_y=False) 21 | dataset2 = self.get_data(with_y=True) 22 | 23 | self.model1 = VARDynamics(self.num_states, lag=self.lag) 24 | self.model2 = VARDynamics(self.num_states, lag=self.lag) 25 | self.model1.fit(dataset1) 26 | self.model2.fit(*dataset2) 27 | self.ref = VAR(dataset1).fit(maxlags=self.lag) 28 | 29 | def get_data(self, with_y=False): 30 | X = np.linspace(0, 8 * np.pi, self.T) 31 | dataset = ( 32 | self.A 33 | * np.array( 34 | [ 35 | c0 * np.sin(X) 36 | + c1 * np.exp(-0.1 * X) 37 | + self.eps * np.random.randn(self.T) 38 | for c0, c1 in self.coefs 39 | ] 40 | ).T 41 | ) 42 | if with_y: 43 | x = np.array( 44 | [dataset[t - self.lag : t][::-1] for t in range(self.lag, len(X))] 45 | ) 46 | self.T = x.shape[0] 47 | y = dataset[self.lag :].reshape(self.T, -1) 48 | x = np.transpose(x, (0, 2, 1)).reshape( 49 | self.T, self.num_nodes, self.num_states, self.lag 50 | ) 51 | y = y.reshape(self.T, self.num_nodes, self.num_states) 52 | return (x, y) 53 | else: 54 | self.T = dataset.shape[0] 55 | return dataset.reshape((self.T, self.num_nodes, self.num_states)) 56 | 57 | def test_initialstate(self): 58 | x0 = self.model1.initial_state() 59 | self.assertEqual(x0.shape, (self.num_nodes, self.num_states, self.lag)) 60 | 61 | x0 = self.model2.initial_state() 62 | self.assertEqual(x0.shape, (self.num_nodes, self.num_states, self.lag)) 63 | 64 | def test_predict(self): 65 | x0 = self.model1.initial_state() 66 | yp = self.model1.predict(x0) 67 | self.assertEqual(yp.shape, (self.num_nodes, self.num_states)) 68 | x0 = np.transpose(x0, (1, 2, 0)) 69 | yt = self.ref.forecast(x0.reshape(self.lag, -1), steps=1).reshape( 70 | self.num_nodes, self.num_states 71 | ) 72 | np.testing.assert_array_almost_equal(yt, yp, decimal=4) 73 | 74 | x0 = self.model2.initial_state() 75 | yp = self.model2.predict(x0) 76 | self.assertEqual(yp.shape, (self.num_nodes, self.num_states)) 77 | 78 | def test_sample(self): 79 | x0 = self.model1.initial_state() 80 | y = self.model1.sample(x0) 81 | self.assertEqual(y.shape, (self.num_nodes, self.num_states)) 82 | 83 | def test_loglikelihood(self): 84 | ts = self.model1.data 85 | logp = self.model1.loglikelihood(ts) 86 | self.assertTrue(isinstance(logp, float)) 87 | self.assertTrue(logp <= 0) 88 | -------------------------------------------------------------------------------- /tests/experiments/__init__.py: -------------------------------------------------------------------------------- 1 | import tests.experiments.test_attention 2 | import tests.experiments.test_experiment 3 | import tests.experiments.test_forecast 4 | import tests.experiments.test_ltp 5 | import tests.experiments.test_pred 6 | import tests.experiments.test_stationary 7 | import tests.experiments.test_statistics 8 | -------------------------------------------------------------------------------- /tests/experiments/all.py: -------------------------------------------------------------------------------- 1 | from .test_attention import * 2 | from .test_experiment import * 3 | from .test_forecast import * 4 | from .test_ltp import * 5 | from .test_pred import * 6 | from .test_stationary import * 7 | from .test_statistics import * 
8 | -------------------------------------------------------------------------------- /tests/experiments/templates.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import torch 4 | 5 | from shutil import rmtree 6 | from dynalearn.config import ExperimentConfig 7 | from dynalearn.experiments import Experiment 8 | 9 | 10 | class MetricsTest: 11 | @property 12 | def name(self): 13 | raise NotImplementedError() 14 | 15 | def setUp(self): 16 | self.config = ExperimentConfig.test() 17 | self.config.networks.num_nodes = 10 18 | self.config.train_details.num_samples = 15 19 | self.config.train_details.num_networks = 1 20 | self.additional_configs() 21 | self.config.metrics.names = [self.name] 22 | self.exp = Experiment(self.config, verbose=0) 23 | self.exp.generate_data() 24 | self.exp.partition_test_dataset() 25 | self.exp.partition_val_dataset() 26 | 27 | def tearDown(self): 28 | if os.path.exists("./test"): 29 | rmtree("./test") 30 | 31 | def check_data(self, data): 32 | pass 33 | 34 | def additional_configs(self): 35 | pass 36 | 37 | def test_compute(self): 38 | self.exp.metrics[self.name].compute(self.exp) 39 | self.check_data(self.exp.metrics[self.name].data) 40 | -------------------------------------------------------------------------------- /tests/experiments/test_attention.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from .templates import * 3 | 4 | 5 | class AttentionMetricsTest(MetricsTest, unittest.TestCase): 6 | @property 7 | def name(self): 8 | return "AttentionMetrics" 9 | 10 | def additional_configs(self): 11 | self.config.model.heads = 1 12 | 13 | 14 | class AttentionStatesNMIMetricsTest(AttentionMetricsTest): 15 | @property 16 | def name(self): 17 | return "AttentionStatesNMIMetrics" 18 | 19 | 20 | class AttentionNodeAttrNMIMetricsTest(AttentionMetricsTest): 21 | @property 22 | def name(self): 23 | return "AttentionNodeAttrNMIMetrics" 24 | 25 | 26 | class AttentionEdgeAttrNMIMetricsTest(AttentionMetricsTest): 27 | @property 28 | def name(self): 29 | return "AttentionEdgeAttrNMIMetrics" 30 | -------------------------------------------------------------------------------- /tests/experiments/test_experiment.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import unittest 4 | 5 | from dynalearn.config import ExperimentConfig 6 | from dynalearn.experiments import Experiment 7 | 8 | 9 | class ExperimentTest(unittest.TestCase): 10 | def setUp(self): 11 | self.config = ExperimentConfig.test() 12 | self.experiment = Experiment(self.config, verbose=0) 13 | self.experiment.begin() 14 | 15 | def tearDown(self): 16 | self.experiment.end() 17 | if os.path.exists("test.pt"): 18 | os.remove("test.pt") 19 | 20 | def test_generate(self): 21 | self.experiment.generate_data(save=False) 22 | expected = ( 23 | self.config.train_details.num_samples 24 | * self.config.train_details.num_networks 25 | ) 26 | actual = len(self.experiment.dataset) 27 | self.assertEqual(expected, actual) 28 | 29 | def test_train(self): 30 | self.experiment.generate_data(save=False) 31 | self.experiment.partition_val_dataset() 32 | self.experiment.train_model(save=False) 33 | logs = self.experiment.model.nn.history._epoch_logs 34 | for epoch, log in logs.items(): 35 | self.assertFalse(np.isnan(log["loss"])) 36 | 37 | def test_compute_metrics(self): 38 | self.experiment.generate_data(save=False) 39 |
self.experiment.compute_metrics(save=False) 40 | -------------------------------------------------------------------------------- /tests/experiments/test_forecast.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from .templates import * 3 | 4 | 5 | class TrueForecastMetricsTest(MetricsTest, unittest.TestCase): 6 | @property 7 | def name(self): 8 | return "TrueForecastMetrics" 9 | 10 | def additional_configs(self): 11 | self.config = ExperimentConfig.test(config="continuous") 12 | self.config.networks.num_nodes = 10 13 | self.config.train_details.num_samples = 10 14 | self.config.train_details.num_networks = 1 15 | 16 | 17 | class GNNForecastMetricsTest(TrueForecastMetricsTest): 18 | @property 19 | def name(self): 20 | return "GNNForecastMetrics" 21 | 22 | 23 | class VARForecastMetricsTest(TrueForecastMetricsTest): 24 | @property 25 | def name(self): 26 | return "VARForecastMetrics" 27 | -------------------------------------------------------------------------------- /tests/experiments/test_ltp.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from .templates import * 3 | 4 | 5 | class TrueLTPMetricsTest(MetricsTest, unittest.TestCase): 6 | @property 7 | def name(self): 8 | return "TrueLTPMetrics" 9 | 10 | def check_data(self, data): 11 | ltp = data["ltp"] 12 | np.testing.assert_array_almost_equal(ltp.sum(-1), np.ones(ltp.shape[0])) 13 | 14 | 15 | class GNNLTPMetricsTest(TrueLTPMetricsTest): 16 | @property 17 | def name(self): 18 | return "GNNLTPMetrics" 19 | -------------------------------------------------------------------------------- /tests/experiments/test_pred.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from .templates import * 3 | 4 | 5 | class PredictionMetricsTest(MetricsTest, unittest.TestCase): 6 | @property 7 | def name(self): 8 | return "PredictionMetrics" 9 | 10 | def check_data(self, data): 11 | pred = data["pred"] 12 | self.assertTrue(np.all(pred <= 1)) 13 | self.assertTrue(np.all(pred >= 0)) 14 | np.testing.assert_array_almost_equal(pred.sum(-1), np.ones(pred.shape[0])) 15 | -------------------------------------------------------------------------------- /tests/experiments/test_stationary.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from .templates import * 3 | from dynalearn.config.util import StationaryConfig 4 | 5 | 6 | class TrueERSSMetricsTest(MetricsTest, unittest.TestCase): 7 | @property 8 | def name(self): 9 | return "TrueERSSMetrics" 10 | 11 | 12 | class GNNERSSMetricsTest(MetricsTest, unittest.TestCase): 13 | @property 14 | def name(self): 15 | return "GNNERSSMetrics" 16 | -------------------------------------------------------------------------------- /tests/experiments/test_statistics.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from .templates import * 3 | 4 | 5 | class TestStatisticsMetrics(MetricsTest, unittest.TestCase): 6 | @property 7 | def name(self): 8 | return "StatisticsMetrics" 9 | -------------------------------------------------------------------------------- /tests/networks/__init__.py: -------------------------------------------------------------------------------- 1 | import tests.networks.test_ba 2 | import tests.networks.test_gnp 3 | import tests.networks.test_multiplex 4 | import tests.networks.test_network 5 | 
-------------------------------------------------------------------------------- /tests/networks/all.py: -------------------------------------------------------------------------------- 1 | from .test_ba import * 2 | from .test_gnp import * 3 | from .test_multiplex import * 4 | from .test_network import * 5 | -------------------------------------------------------------------------------- /tests/networks/test_ba.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import networkx as nx 3 | import numpy as np 4 | 5 | from dynalearn.networks import BANetworkGenerator 6 | from dynalearn.config import NetworkConfig 7 | 8 | 9 | class BANetworkTest(unittest.TestCase): 10 | def setUp(self): 11 | self.n = 100 12 | self.m = 2 13 | config = NetworkConfig.ba(self.n, self.m) 14 | self.network = BANetworkGenerator(config) 15 | 16 | def test_generate(self): 17 | self.network.generate() 18 | 19 | 20 | if __name__ == "__main__": 21 | unittest.main() 22 | -------------------------------------------------------------------------------- /tests/networks/test_gnp.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import networkx as nx 3 | import numpy as np 4 | 5 | from dynalearn.networks import GNPNetworkGenerator 6 | from dynalearn.config import NetworkConfig 7 | 8 | 9 | class ERNetworkTest(unittest.TestCase): 10 | def setUp(self): 11 | self.p = 0.5 12 | self.n = 100 13 | config = NetworkConfig.gnp(self.n, self.p) 14 | self.network = GNPNetworkGenerator(config) 15 | 16 | def test_generate(self): 17 | self.network.generate() 18 | 19 | 20 | if __name__ == "__main__": 21 | unittest.main() 22 | -------------------------------------------------------------------------------- /tests/networks/test_multiplex.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import networkx as nx 3 | import numpy as np 4 | 5 | from dynalearn.networks import MultiplexNetwork 6 | 7 | 8 | class MultiplexNetworkTest(unittest.TestCase): 9 | def setUp(self): 10 | self.n = 100 11 | self.p = 0.5 12 | self.layers = ["a", "b", "c"] 13 | self.labels = ["a", "b", "c"] 14 | self.network = MultiplexNetwork() 15 | 16 | def _generate_network_(self, layer): 17 | g = nx.gnp_random_graph(self.n, self.p) 18 | g = nx.to_directed(g) 19 | n = g.number_of_nodes() 20 | m = g.number_of_edges() 21 | if self.node_attr is None: 22 | self.node_attr = {k: np.random.randn(n) for k in self.labels} 23 | self.edge_attr[layer] = {k: np.random.randn(m) for k in self.labels} 24 | 25 | for n in g.nodes(): 26 | for k in self.labels: 27 | g.nodes[n][k] = self.node_attr[k][n] 28 | 29 | for i, (u, v) in enumerate(g.edges()): 30 | for k in self.labels: 31 | g.edges[u, v][k] = self.edge_attr[layer][k][i] 32 | 33 | return g 34 | 35 | def _generate_multiplex_(self): 36 | self.node_attr = None 37 | self.edge_attr = {} 38 | return {l: self._generate_network_(l) for l in self.layers} 39 | 40 | def test_set_data(self): 41 | self.network.data = self._generate_multiplex_() 42 | 43 | def test_get_node_data(self): 44 | self.network.data = self._generate_multiplex_() 45 | node_data = self.network.get_node_data() 46 | ref_node_data = np.concatenate( 47 | [self.node_attr[k].reshape(-1, 1) for k in self.labels], axis=-1 48 | ) 49 | np.testing.assert_array_equal(ref_node_data, node_data) 50 | 51 | def test_get_edge_data(self): 52 | self.network.data = self._generate_multiplex_() 53 | edge_data = 
self.network.get_edge_data() 54 | for l in self.layers: 55 | ref_edge_data = np.concatenate( 56 | [self.edge_attr[l][k].reshape(-1, 1) for k in self.labels], 57 | axis=-1, 58 | ) 59 | np.testing.assert_array_equal(ref_edge_data, edge_data[l]) 60 | 61 | 62 | if __name__ == "__main__": 63 | unittest.main() 64 | -------------------------------------------------------------------------------- /tests/networks/test_network.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import networkx as nx 3 | import numpy as np 4 | 5 | from dynalearn.networks import Network 6 | 7 | 8 | class NetworkTest(unittest.TestCase): 9 | def setUp(self): 10 | self.n = 100 11 | self.p = 0.5 12 | self.labels = ["a", "b", "c"] 13 | self.network = Network() 14 | 15 | def _generate_network_(self): 16 | g = nx.gnp_random_graph(self.n, self.p) 17 | g = nx.to_directed(g) 18 | n = g.number_of_nodes() 19 | m = g.number_of_edges() 20 | self.node_attr = {k: np.random.randn(n) for k in self.labels} 21 | self.edge_attr = {k: np.random.randn(m) for k in self.labels} 22 | 23 | for n in g.nodes(): 24 | for k in self.labels: 25 | g.nodes[n][k] = self.node_attr[k][n] 26 | 27 | for i, (u, v) in enumerate(g.edges()): 28 | for k in self.labels: 29 | g.edges[u, v][k] = self.edge_attr[k][i] 30 | 31 | return g 32 | 33 | def test_set_data(self): 34 | self.network.data = self._generate_network_() 35 | 36 | def test_get_node_data(self): 37 | self.network.data = self._generate_network_() 38 | node_data = self.network.get_node_data() 39 | ref_node_data = np.concatenate( 40 | [self.node_attr[k].reshape(-1, 1) for k in self.labels], axis=-1 41 | ) 42 | np.testing.assert_array_equal(ref_node_data, node_data) 43 | 44 | def test_get_edge_data(self): 45 | self.network.data = self._generate_network_() 46 | edge_data = self.network.get_edge_data() 47 | ref_edge_data = np.concatenate( 48 | [self.edge_attr[k].reshape(-1, 1) for k in self.labels], axis=-1 49 | ) 50 | np.testing.assert_array_equal(ref_edge_data, edge_data) 51 | 52 | 53 | if __name__ == "__main__": 54 | unittest.main() 55 | -------------------------------------------------------------------------------- /tests/nn/__init__.py: -------------------------------------------------------------------------------- 1 | import tests.nn.test_gat 2 | import tests.nn.test_gnn 3 | import tests.nn.test_history 4 | import tests.nn.test_normalizer 5 | import tests.nn.test_propagator 6 | -------------------------------------------------------------------------------- /tests/nn/all.py: -------------------------------------------------------------------------------- 1 | from .test_gat import * 2 | from .test_gnn import * 3 | from .test_history import * 4 | from .test_normalizer import * 5 | from .test_propagator import * 6 | -------------------------------------------------------------------------------- /tests/nn/test_gat.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import unittest 5 | 6 | 7 | from dynalearn.nn.models import DynamicsGATConv 8 | from dynalearn.util import to_edge_index 9 | 10 | 11 | class DynamicsGATConvTest(unittest.TestCase): 12 | def setUp(self): 13 | self.num_nodes = 10 14 | self.p = 0.5 15 | self.in_channels = 8 16 | self.out_channels = 4 17 | self.heads = 7 18 | self.concat = True 19 | self.bias = True 20 | self.attn_bias = True 21 | self.edge_in_channels = 0 22 | self.edge_out_channels = 4 23 |
self.self_attention = True 24 | self.gat = DynamicsGATConv( 25 | in_channels=self.in_channels, 26 | out_channels=self.out_channels, 27 | heads=self.heads, 28 | concat=self.concat, 29 | bias=self.bias, 30 | edge_in_channels=self.edge_in_channels, 31 | edge_out_channels=self.edge_out_channels, 32 | self_attention=self.self_attention, 33 | ) 34 | 35 | def test_forward(self): 36 | x = torch.ones((self.num_nodes, self.in_channels)) 37 | g = nx.gnp_random_graph(self.num_nodes, self.p).to_directed() 38 | num_edges = g.number_of_edges() 39 | edge_index = torch.LongTensor(to_edge_index(g)) 40 | if self.edge_in_channels > 0: 41 | edge_attr = torch.rand(edge_index.shape[-1], self.edge_in_channels) 42 | else: 43 | edge_attr = None 44 | out = self.gat.forward(x, edge_index) 45 | if self.concat: 46 | c1 = self.heads * self.out_channels 47 | c2 = self.heads * self.edge_out_channels 48 | else: 49 | c1 = self.out_channels 50 | c2 = self.edge_out_channels 51 | if isinstance(out, tuple): 52 | x, edge_attr = out 53 | self.assertEqual(x.shape, torch.Size([self.num_nodes, c1])) 54 | self.assertEqual(edge_attr.shape, torch.Size([num_edges, c2])) 55 | else: 56 | self.assertEqual(out.shape, torch.Size([self.num_nodes, c1])) 57 | 58 | 59 | if __name__ == "__main__": 60 | unittest.main() 61 | -------------------------------------------------------------------------------- /tests/nn/test_gnn.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import unittest 5 | 6 | from dynalearn.nn.models import GraphNeuralNetwork 7 | from dynalearn.config import TrainableConfig, NetworkConfig 8 | from dynalearn.networks.getter import get as get_network 9 | 10 | 11 | class GraphNeuralNetworkTest(unittest.TestCase): 12 | def setUp(self): 13 | self.in_size = 3 14 | self.out_size = 3 15 | self.lag = 2 16 | self.nodeattr_size = 1 17 | self.edgeattr_size = 1 18 | self.num_nodes = 10 19 | self.model = GraphNeuralNetwork( 20 | self.in_size, 21 | self.out_size, 22 | lag=self.lag, 23 | nodeattr_size=self.nodeattr_size, 24 | edgeattr_size=self.edgeattr_size, 25 | out_act="softmax", 26 | normalize=True, 27 | config=TrainableConfig.sis(), 28 | ) 29 | self.network = get_network(NetworkConfig.w_ba(self.num_nodes, 2)) 30 | 31 | def test_forward(self): 32 | x = torch.randn(self.num_nodes, self.in_size, self.lag) 33 | g = self.network.generate() 34 | g.node_attr = {"na": np.random.randn(self.num_nodes, 1)} 35 | g.edge_attr = {"ea": np.random.randn(2 * g.number_of_edges(), 1)} 36 | x = self.model.transformers["t_inputs"].forward(x) 37 | _g = self.model.transformers["t_networks"].forward(g) 38 | y = self.model.forward(x, _g).cpu().detach().numpy() 39 | self.assertTrue(y.shape == (self.num_nodes, self.out_size)) 40 | np.testing.assert_array_almost_equal(y.sum(-1), np.ones((y.shape[0]))) 41 | 42 | 43 | if __name__ == "__main__": 44 | unittest.main() 45 | -------------------------------------------------------------------------------- /tests/nn/test_history.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import unittest 5 | 6 | from dynalearn.nn import History 7 | 8 | 9 | class HistoryTest(unittest.TestCase): 10 | def setUp(self): 11 | self.history = History() 12 | return 13 | 14 | def test_update_log(self): 15 | logs = {"time": 0, "loss": 1, "metrics": 2} 16 | self.history.update_epoch(logs) 17 | self.assertEqual({0: logs}, 
self.history._epoch_logs) 18 | self.history.update_epoch(logs) 19 | self.assertEqual({0: logs, 1: logs}, self.history._epoch_logs) 20 | 21 | self.history.update_batch(logs) 22 | self.assertEqual({0: logs}, self.history._batch_logs) 23 | self.history.update_batch(logs) 24 | self.assertEqual({0: logs, 1: logs}, self.history._batch_logs) 25 | self.history.reset() 26 | 27 | 28 | if __name__ == "__main__": 29 | unittest.main() 30 | -------------------------------------------------------------------------------- /tests/nn/test_propagator.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | import numpy as np 3 | import torch 4 | import unittest 5 | 6 | from dynalearn.nn.models import Propagator 7 | from dynalearn.util import to_edge_index 8 | 9 | 10 | class PropagatorTest(unittest.TestCase): 11 | def setUp(self): 12 | self.num_nodes = 5 13 | self.num_states = 4 14 | self.g = nx.gnp_random_graph(self.num_nodes, 0.5) 15 | self.edge_index = to_edge_index(self.g.to_directed()) 16 | self.propagator = Propagator(self.num_states) 17 | 18 | def test_forward(self): 19 | x = np.random.randint(self.num_states, size=self.num_nodes) 20 | adj = nx.to_numpy_array(self.g) 21 | ref_l = np.array([adj @ (x == i) for i in range(self.num_states)]) 22 | l = self.propagator.forward(x, self.edge_index) 23 | l = l.cpu().numpy() 24 | torch.testing.assert_allclose(l, ref_l) 25 | 26 | 27 | if __name__ == "__main__": 28 | unittest.main() 29 | -------------------------------------------------------------------------------- /tests/util/__init__.py: -------------------------------------------------------------------------------- 1 | import tests.util.test_logger 2 | import tests.util.test_timelogger 3 | import tests.util.test_util 4 | import tests.util.test_verbose 5 | -------------------------------------------------------------------------------- /tests/util/all.py: -------------------------------------------------------------------------------- 1 | from .test_logger import * 2 | from .test_timelogger import * 3 | from .test_util import * 4 | from .test_verbose import * 5 | -------------------------------------------------------------------------------- /tests/util/test_logger.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import unittest 4 | import json 5 | 6 | from dynalearn.util import Logger, LoggerDict 7 | 8 | 9 | class LoggerTest(unittest.TestCase): 10 | def setUp(self): 11 | self.logger = Logger() 12 | self.logger.log = {"something": 1, "something else": 2} 13 | self.log = {"new things": 2, "further new things": 5} 14 | self.filename = "./logger.json" 15 | 16 | def tearDown(self): 17 | if os.path.exists(self.filename): 18 | os.remove(self.filename) 19 | 20 | def test_on_task_begin(self): 21 | pass 22 | 23 | def test_on_task_end(self): 24 | pass 25 | 26 | def test_on_task_update(self): 27 | pass 28 | 29 | def test_save(self): 30 | with open(self.filename, "w") as f: 31 | self.logger.save(f) 32 | with open(self.filename, "r") as f: 33 | log = json.load(f) 34 | self.assertEqual(log, self.logger.log) 35 | 36 | def test_load(self): 37 | with open(self.filename, "w") as f: 38 | json.dump(self.log, f, indent=4) 39 | with open(self.filename, "r") as f: 40 | self.logger.load(f) 41 | self.assertEqual(self.logger.log, self.log) 42 | -------------------------------------------------------------------------------- /tests/util/test_timelogger.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | import time 3 | 4 | from datetime import datetime 5 | from dynalearn.util import TimeLogger 6 | 7 | 8 | class TimeLoggerTest(unittest.TestCase): 9 | def setUp(self): 10 | self.logger = TimeLogger() 11 | self.num_updates = 10 12 | 13 | def test_on_task_begin(self): 14 | self.logger.on_task_begin() 15 | self.assertTrue(self.logger.begin is not None) 16 | self.assertTrue(isinstance(self.logger.begin, datetime)) 17 | self.assertTrue(self.logger.end is None) 18 | for k in ["begin"]: 19 | self.assertTrue(k in self.logger.log) 20 | 21 | def test_on_task_end(self): 22 | self.logger.on_task_begin() 23 | self.logger.on_task_end() 24 | self.assertTrue(self.logger.begin is not None) 25 | self.assertTrue(isinstance(self.logger.begin, datetime)) 26 | self.assertTrue(self.logger.end is not None) 27 | self.assertTrue(isinstance(self.logger.end, datetime)) 28 | for k in ["begin", "end", "time", "total"]: 29 | self.assertTrue(k in self.logger.log) 30 | 31 | def test_on_task_update(self): 32 | self.logger.on_task_begin() 33 | self.assertTrue(self.logger.begin is not None) 34 | self.assertTrue(isinstance(self.logger.begin, datetime)) 35 | for i in range(self.num_updates): 36 | self.logger.on_task_update() 37 | self.assertTrue(self.logger.update is not None) 38 | self.logger.on_task_end() 39 | self.assertTrue(self.logger.end is not None) 40 | self.assertTrue(isinstance(self.logger.end, datetime)) 41 | for k in ["begin", "end", "time", "total", "time-update"]: 42 | self.assertTrue(k in self.logger.log) 43 | 44 | def test_format_diff(self): 45 | t0 = datetime.now() 46 | time.sleep(1) 47 | t1 = datetime.now() 48 | self.assertEqual(self.logger.format_diff(t0, t1, to_sec=True), 1) 49 | d = self.logger.format_diff(t0, t1, to_sec=False) 50 | self.assertEqual(len(d), 4) 51 | self.assertEqual(d[0], 0) 52 | self.assertEqual(d[1], 0) 53 | self.assertEqual(d[2], 0) 54 | self.assertEqual(d[3], 1) 55 | -------------------------------------------------------------------------------- /tests/util/test_util.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | import torch 4 | from dynalearn.util import * 5 | 6 | 7 | class UtilitiesTest(unittest.TestCase): 8 | def test_from_binary(self): 9 | self.assertEqual(from_binary([1, 0, 1]), 5) 10 | 11 | def test_to_binary(self): 12 | self.assertTrue(np.all(np.array([1, 0, 1]) == to_binary(5))) 13 | 14 | def test_logbase(self): 15 | self.assertEqual(logbase(4, 2), 2) 16 | self.assertEqual(logbase(9, 3), 2) 17 | self.assertEqual(logbase(27, 3), 3) 18 | 19 | def test_from_nary(self): 20 | self.assertEqual(from_nary([1, 0, 2], base=3), 11) 21 | self.assertEqual(from_nary([1, 0, 1, 2], base=4), 70) 22 | 23 | def test_to_nary(self): 24 | self.assertTrue(np.all(to_nary(11, base=3) == np.array([[1, 0, 2]]).T)) 25 | self.assertTrue(np.all(to_nary(70, base=4) == np.array([[1, 0, 1, 2]]).T)) 26 | 27 | def test_onehot_numpy(self): 28 | x = np.array([1, 0, 2]) 29 | x_onehot = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) 30 | self.assertTrue(np.all(x_onehot == onehot_numpy(x))) 31 | 32 | def test_onehot_torch(self): 33 | x = torch.Tensor([1, 0, 2]) 34 | x_onehot = torch.Tensor([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) 35 | self.assertTrue(torch.all(x_onehot == onehot_torch(x).cpu())) 36 | 37 | def test_get_dataset_from_timeseries(self): 38 | T = 10 39 | N = 5 40 | lag = 3 41 | ts = np.arange(T).reshape(-1, 1, 1).repeat(N, 1) 42 
| x, y = get_dataset_from_timeseries(ts, lag=lag) 43 | x_ref = np.arange(lag).reshape(-1, 1, 1).repeat(N, 1).T 44 | self.assertTrue(np.all(x[0] == x_ref)) 45 | self.assertTrue(np.all(y[0] == lag)) 46 | -------------------------------------------------------------------------------- /tests/util/test_verbose.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | from dynalearn.util import Verbose 5 | 6 | 7 | class VerboseTest(unittest.TestCase): 8 | def setUp(self): 9 | self.filename = "test_verbose.txt" 10 | self.vtypes = [0, 1, 2] 11 | 12 | def tearDown(self): 13 | if os.path.exists(self.filename): 14 | os.remove(self.filename) 15 | 16 | def test_init(self): 17 | for vtype in self.vtypes: 18 | verbose = Verbose(self.filename, vtype=vtype) 19 | --------------------------------------------------------------------------------