├── .circleci └── config.yml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── pull_request_template.md ├── .gitignore ├── .gitlab-ci.yml ├── .readthedocs.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── ampligraph ├── __init__.py ├── compat │ ├── __init__.py │ ├── evaluate.py │ └── models.py ├── datasets │ ├── __init__.py │ ├── data_adapter.py │ ├── data_indexer.py │ ├── datasets.py │ ├── graph_data_loader.py │ ├── graph_partitioner.py │ ├── partitioned_data_manager.py │ ├── partitioning_reporter.py │ ├── source_identifier.py │ └── sqlite_adapter.py ├── discovery │ ├── __init__.py │ └── discovery.py ├── evaluation │ ├── __init__.py │ ├── metrics.py │ └── protocol.py ├── latent_features │ ├── __init__.py │ ├── layers │ │ ├── __init__.py │ │ ├── calibration │ │ │ ├── __init__.py │ │ │ └── calibrate.py │ │ ├── corruption_generation │ │ │ ├── CorruptionGenerationLayerTrain.py │ │ │ └── __init__.py │ │ ├── encoding │ │ │ ├── EmbeddingLookupLayer.py │ │ │ └── __init__.py │ │ └── scoring │ │ │ ├── AbstractScoringLayer.py │ │ │ ├── ComplEx.py │ │ │ ├── DistMult.py │ │ │ ├── HolE.py │ │ │ ├── Random.py │ │ │ ├── RotatE.py │ │ │ ├── TransE.py │ │ │ └── __init__.py │ ├── loss_functions.py │ ├── models │ │ ├── ScoringBasedEmbeddingModel.py │ │ └── __init__.py │ ├── optimizers.py │ └── regularizers.py ├── logger.conf ├── pretrained_models │ ├── __init__.py │ └── pretrained_utils.py └── utils │ ├── __init__.py │ ├── file_utils.py │ ├── model_utils.py │ ├── profiling.py │ └── tags.py ├── docs ├── Makefile ├── _static │ ├── _sphinx_javascript_frameworks_compat.js │ ├── ampligraph_logo_transparent_white.png │ ├── basic.css │ ├── css │ │ ├── badge_only.css │ │ ├── fonts │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.svg │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── fontawesome-webfont.woff │ │ │ ├── fontawesome-webfont.woff2 │ │ │ ├── lato-bold-italic.woff │ │ │ ├── lato-bold-italic.woff2 │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-normal-italic.woff │ │ │ ├── lato-normal-italic.woff2 │ │ │ ├── lato-normal.woff │ │ │ └── lato-normal.woff2 │ │ └── theme.css │ ├── doctools.js │ ├── documentation_options.js │ ├── file.png │ ├── jquery-3.6.0.js │ ├── jquery.js │ ├── js │ │ ├── badge_only.js │ │ ├── html5shiv-printshiv.min.js │ │ ├── html5shiv.min.js │ │ └── theme.js │ ├── language_data.js │ ├── minus.png │ ├── plus.png │ ├── pygments.css │ ├── searchtools.js │ ├── underscore-1.13.1.js │ └── underscore.js ├── _templates │ └── autosummary │ │ ├── base.rst │ │ ├── class.rst │ │ ├── function.rst │ │ └── module.rst ├── ampligraph.advanced_topics.rst ├── ampligraph.anatomy_model.rst ├── ampligraph.datasets.rst ├── ampligraph.discovery.rst ├── ampligraph.evaluation.rst ├── ampligraph.latent_features.rst ├── ampligraph.pretrained_models.rst ├── ampligraph.utils.rst ├── api.rst ├── background.rst ├── biblio.rst ├── changelog.md ├── conf.py ├── contacts.md ├── dev.md ├── examples.md ├── experiments.rst ├── generated │ ├── ampligraph.compat.ComplEx.rst │ ├── ampligraph.compat.DistMult.rst │ ├── ampligraph.compat.HolE.rst │ ├── ampligraph.compat.TransE.rst │ ├── ampligraph.compat.evaluate_performance.rst │ ├── ampligraph.datasets.BucketGraphPartitioner.rst │ ├── ampligraph.datasets.GraphDataLoader.rst │ ├── ampligraph.datasets.datasets.load_cn15k.rst │ ├── 
ampligraph.datasets.datasets.load_codex.rst │ ├── ampligraph.datasets.datasets.load_fb13.rst │ ├── ampligraph.datasets.datasets.load_fb15k.rst │ ├── ampligraph.datasets.datasets.load_fb15k_237.rst │ ├── ampligraph.datasets.datasets.load_from_csv.rst │ ├── ampligraph.datasets.datasets.load_from_ntriples.rst │ ├── ampligraph.datasets.datasets.load_from_rdf.rst │ ├── ampligraph.datasets.datasets.load_nl27k.rst │ ├── ampligraph.datasets.datasets.load_onet20k.rst │ ├── ampligraph.datasets.datasets.load_ppi5k.rst │ ├── ampligraph.datasets.datasets.load_wn11.rst │ ├── ampligraph.datasets.datasets.load_wn18.rst │ ├── ampligraph.datasets.datasets.load_wn18rr.rst │ ├── ampligraph.datasets.datasets.load_yago3_10.rst │ ├── ampligraph.datasets.load_cn15k.rst │ ├── ampligraph.datasets.load_codex.rst │ ├── ampligraph.datasets.load_fb13.rst │ ├── ampligraph.datasets.load_fb15k.rst │ ├── ampligraph.datasets.load_fb15k_237.rst │ ├── ampligraph.datasets.load_from_csv.rst │ ├── ampligraph.datasets.load_from_ntriples.rst │ ├── ampligraph.datasets.load_from_rdf.rst │ ├── ampligraph.datasets.load_nl27k.rst │ ├── ampligraph.datasets.load_onet20k.rst │ ├── ampligraph.datasets.load_ppi5k.rst │ ├── ampligraph.datasets.load_wn11.rst │ ├── ampligraph.datasets.load_wn18.rst │ ├── ampligraph.datasets.load_wn18rr.rst │ ├── ampligraph.datasets.load_yago3_10.rst │ ├── ampligraph.discovery.discover_facts.rst │ ├── ampligraph.discovery.find_clusters.rst │ ├── ampligraph.discovery.find_duplicates.rst │ ├── ampligraph.discovery.find_nearest_neighbours.rst │ ├── ampligraph.discovery.query_topn.rst │ ├── ampligraph.evaluation.filter_unseen_entities.rst │ ├── ampligraph.evaluation.hits_at_n_score.rst │ ├── ampligraph.evaluation.mr_score.rst │ ├── ampligraph.evaluation.mrr_score.rst │ ├── ampligraph.evaluation.rank_score.rst │ ├── ampligraph.evaluation.select_best_model_ranking.rst │ ├── ampligraph.evaluation.train_test_split_no_unseen.rst │ ├── ampligraph.latent_features.AbsoluteMarginLoss.rst │ ├── ampligraph.latent_features.AdagradOptimizer.rst │ ├── ampligraph.latent_features.AdamOptimizer.rst │ ├── ampligraph.latent_features.Constant.rst │ ├── ampligraph.latent_features.LP_regularizer.rst │ ├── ampligraph.latent_features.MomentumOptimizer.rst │ ├── ampligraph.latent_features.NLLLoss.rst │ ├── ampligraph.latent_features.NLLMulticlass.rst │ ├── ampligraph.latent_features.PairwiseLoss.rst │ ├── ampligraph.latent_features.SGDOptimizer.rst │ ├── ampligraph.latent_features.ScoringBasedEmbeddingModel.rst │ ├── ampligraph.latent_features.SelfAdversarialLoss.rst │ ├── ampligraph.latent_features.layers.calibration.CalibrationLayer.rst │ ├── ampligraph.latent_features.layers.corruption_generation.CorruptionGenerationLayerTrain.rst │ ├── ampligraph.latent_features.layers.encoding.EmbeddingLookupLayer.rst │ ├── ampligraph.latent_features.layers.scoring.AbstractScoringLayer.AbstractScoringLayer.rst │ ├── ampligraph.latent_features.layers.scoring.ComplEx.rst │ ├── ampligraph.latent_features.layers.scoring.DistMult.rst │ ├── ampligraph.latent_features.layers.scoring.HolE.rst │ ├── ampligraph.latent_features.layers.scoring.TransE.rst │ ├── ampligraph.latent_features.loss_functions.Loss.rst │ ├── ampligraph.utils.create_tensorboard_visualizations.rst │ ├── ampligraph.utils.dataframe_to_triples.rst │ ├── ampligraph.utils.preprocess_focusE_weights.rst │ ├── ampligraph.utils.restore_model.rst │ └── ampligraph.utils.save_model.rst ├── img │ ├── GitHub-Mark-32px.png │ ├── ampligraph_logo.png │ ├── ampligraph_logo_200px.png │ ├── 
ampligraph_logo_transparent.png │ ├── ampligraph_logo_transparent_200px.png │ ├── ampligraph_logo_transparent_300.png │ ├── ampligraph_logo_transparent_white.png │ ├── clustering │ │ ├── cluster_continents.png │ │ ├── cluster_embeddings.png │ │ └── clustered_embeddings_docstring.png │ ├── embeddings_projector.png │ ├── kg_eg.png │ ├── kg_lp.png │ ├── kg_lp_step1.png │ ├── kg_lp_step2.png │ └── slack_logo.png ├── index.rst ├── install.md ├── references.bib ├── requirements_readthedocs.txt ├── tutorials.md └── tutorials │ ├── AmpliGraphBasicsTutorial.ipynb │ ├── AmpliGraphBasicsTutorial.md │ ├── ClusteringAndClassificationWithEmbeddings.ipynb │ ├── ClusteringAndClassificationWithEmbeddings.md │ └── img │ ├── FootballGraph.png │ ├── GoT_tensoboard.png │ ├── got-graphql-schema.jpg │ ├── output_53_0.png │ └── output_55_0.png ├── experiments ├── config.json └── predictive_performance.py ├── notebooks └── AmpliGraph-Tutorials │ ├── Advanced 1 - Custom Loss Function.ipynb │ ├── Advanced 2 - Calibration.ipynb │ ├── Advanced 3 - Partitioned Training for large datasets - advanced approach.ipynb │ ├── Backward Compatibility.ipynb │ ├── Basics 1 - Load Benchmark Dataset, Train and evaluate (s, o, s&o, filters, entities_subset) and Visualization.ipynb │ ├── Basics 2 - Train a Random Model.ipynb │ ├── Basics 3 - Early Stopping and Regular Checkpoints.ipynb │ ├── Basics 4 - Save and Restore Models.ipynb │ ├── Basics 5 - Partitioned Training for large datasets- default approach.ipynb │ ├── Discovery 1 - Discover Facts.ipynb │ ├── Discovery 2 - Find Clusters.ipynb │ ├── Discovery 3 - Find Duplicates.ipynb │ └── Discovery 4 - Query TopN.ipynb ├── requirements.txt ├── setup.cfg ├── setup.py └── tests ├── __init__.py └── ampligraph ├── __init__.py ├── datasets ├── __init__.py ├── test_data_indexer.py ├── test_datasets.py ├── test_graph_data_loader.py ├── test_graph_partitioner.py ├── test_partitioning_reporter.py ├── test_source_identifier.py ├── test_sqlite_adapter.py └── test_triples.nt ├── discovery ├── __init__.py └── test_discovery.py ├── evaluation ├── __init__.py └── test_evaluate.py ├── latent_features ├── __init__.py ├── convkb.tmp ├── layers │ ├── calibrate │ │ └── test_calibrate.py │ ├── corruption_generation │ │ └── test_CorruptionGenerationLayerTrain.py │ ├── encoding │ │ └── test_EmbeddingLookupLayer.py │ ├── scoring │ │ ├── test_AbstractScoringLayer.py │ │ ├── test_ComplEx.py │ │ ├── test_DistMult.py │ │ ├── test_HolE.py │ │ ├── test_RotatE.py │ │ └── test_TransE.py │ └── test_predictions.py ├── test_initializers.py ├── test_loss_functions.py ├── test_optimizers.py └── test_regularizer.py └── utils ├── __init__.py └── test_profiling.py /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | .common-values: 2 | 3 | docker-image: &docker-image circleci/python:3.10 4 | 5 | restore-cache: &restore-cache 6 | keys: 7 | - v1-dependencies-{{ checksum "setup.py" }}-{{ checksum "requirements.txt" }} 8 | 9 | create-venv: &create-venv 10 | name: Create virtualenv 11 | command: /usr/local/bin/python3 -m venv venv 12 | 13 | save-cache: &save-cache 14 | paths: 15 | - ./venv 16 | key: v1-dependencies-{{ checksum "setup.py" }}-{{ checksum "requirements.txt" }} 17 | 18 | install-package: &install-package 19 | name: Install package 20 | command: | 21 | . venv/bin/activate 22 | venv/bin/python3 -m pip install tensorflow==2.10 23 | venv/bin/python3 -m pip install . 
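# Note: the fragments above are YAML anchors (&name); every job below reuses
# them via alias references (*name), so the docker image, cache keys, and
# install steps are defined in a single place.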
24 | 25 | version: 2 26 | jobs: 27 | build: 28 | docker: 29 | - image: *docker-image 30 | resource_class: medium 31 | steps: 32 | - checkout 33 | - restore_cache: *restore-cache 34 | - run: *create-venv 35 | - save_cache: *save-cache 36 | - run: *install-package 37 | 38 | pip-check: 39 | docker: 40 | - image: *docker-image 41 | steps: 42 | - checkout 43 | - restore_cache: *restore-cache 44 | - run: *create-venv 45 | - save_cache: *save-cache 46 | - run: *install-package 47 | - run: 48 | name: Pip check 49 | command: | 50 | . venv/bin/activate 51 | venv/bin/python3 -m pip check 52 | 53 | test: 54 | docker: 55 | - image: *docker-image 56 | resource_class: large 57 | steps: 58 | - checkout 59 | - restore_cache: *restore-cache 60 | - run: *create-venv 61 | - save_cache: *save-cache 62 | - run: *install-package 63 | - run: 64 | name: Unit tests with Pytest 65 | command: | 66 | . venv/bin/activate 67 | venv/bin/python3 setup.py test 68 | lint: 69 | docker: 70 | - image: *docker-image 71 | steps: 72 | - checkout 73 | - restore_cache: *restore-cache 74 | - run: *create-venv 75 | - save_cache: *save-cache 76 | - run: 77 | name: Linting with flake8 78 | command: | 79 | . venv/bin/activate 80 | venv/bin/python3 -m pip install flake8 81 | venv/bin/python3 -m flake8 ampligraph --max-line-length 200 --ignore=W605,W503,E231 82 | 83 | docs: 84 | docker: 85 | - image: *docker-image 86 | steps: 87 | - checkout 88 | - restore_cache: *restore-cache 89 | - run: *create-venv 90 | - save_cache: *save-cache 91 | - run: *install-package 92 | - run: 93 | name: Making docs with Sphinx 94 | command: | 95 | . venv/bin/activate 96 | cd docs 97 | make clean autogen html 98 | 99 | 100 | workflows: 101 | version: 2 102 | checks: 103 | jobs: 104 | - build: 105 | filters: 106 | branches: 107 | only: 108 | - main 109 | - develop 110 | - pip-check: 111 | filters: 112 | branches: 113 | only: 114 | - main 115 | - develop 116 | - lint: 117 | filters: 118 | branches: 119 | only: 120 | - main 121 | - develop 122 | - docs: 123 | filters: 124 | branches: 125 | only: 126 | - main 127 | - develop 128 | - test: 129 | filters: 130 | branches: 131 | only: 132 | - main 133 | - develop 134 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Describe the problem following the template below 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Description 11 | 12 | ### Actual Behavior 13 | 14 | ### Expected Behavior 15 | 16 | ### Steps to Reproduce 17 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest a new feature 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Background and Context** 11 | 12 | **Description** 13 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | #### Related Issue(s) 6 | 9 | 10 | #### Description of Changes 11 | 12 | 13 | #### Any other comments? 
14 | 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | .DS_Store 10 | ._.DS_Store 11 | docs/.DS_Store 12 | docs/._.DS_Store 13 | 14 | 15 | # Distribution / packaging 16 | .Python 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | .hypothesis/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | .static_storage/ 62 | .media/ 63 | local_settings.py 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # pyenv 82 | .python-version 83 | 84 | # celery beat schedule file 85 | celerybeat-schedule 86 | 87 | # SageMath parsed files 88 | *.sage.py 89 | 90 | # Environments 91 | .env 92 | .venv 93 | env/ 94 | venv/ 95 | ENV/ 96 | env.bak/ 97 | venv.bak/ 98 | 99 | # Spyder project settings 100 | .spyderproject 101 | .spyproject 102 | 103 | # Rope project settings 104 | .ropeproject 105 | 106 | # mkdocs documentation 107 | /site 108 | 109 | # mypy 110 | .mypy_cache/ 111 | 112 | # pycharm 113 | .idea/ 114 | 115 | #vim 116 | .swp 117 | 118 | # experiments 119 | experiments/config_test.json 120 | experiments/result.csv 121 | experiments/playground.py 122 | experiments/fb237.txt 123 | 124 | .pytest_cache 125 | 126 | playground 127 | 128 | 129 | .vscode/settings.json -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | # To contribute improvements to CI/CD templates, please follow the Development guide at: 2 | # https://docs.gitlab.com/ee/development/cicd/templates.html 3 | # This specific template is located at: 4 | # https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Python.gitlab-ci.yml 5 | 6 | # Official language image. Look for the different tagged releases at: 7 | # https://hub.docker.com/r/library/python/tags/ 8 | image: python:3.10.6 9 | 10 | # Change pip's cache directory to be inside the project directory since we can 11 | # only cache local items. 12 | variables: 13 | PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip" 14 | 15 | # Pip's cache doesn't store the python packages 16 | # https://pip.pypa.io/en/stable/topics/caching/ 17 | # 18 | # If you want to also cache the installed packages, you have to install 19 | # them in a virtualenv and cache it as well. 
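# A commented sketch of that option (not enabled in this pipeline): cache the
# virtualenv created in before_script next to pip's download cache. The paths
# below are assumptions based on the venv name used further down.
#
# cache:
#   paths:
#     - .cache/pip
#     - venv/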
20 | cache: 21 | paths: 22 | - .cache/pip 23 | 24 | before_script: 25 | - python --version # For debugging 26 | - pip install virtualenv 27 | - virtualenv venv 28 | - source venv/bin/activate 29 | - pip install --upgrade pip 30 | - pip install tensorflow==2.10 31 | - pip install ./ 32 | 33 | codestyle: 34 | script: 35 | - pip install pylint 36 | - pylint --fail-under=3 -v ./ampligraph 37 | 38 | test: 39 | script: 40 | - python setup.py test 41 | 42 | run: 43 | script: 44 | - pip install wheel setuptools 45 | - pip wheel --wheel-dir dist --no-deps . 46 | artifacts: 47 | paths: 48 | - dist/*.whl 49 | 50 | pages: 51 | script: 52 | - cd docs 53 | - make clean autogen html 54 | - mkdir ../public/ 55 | - mv _build/html/ ../public/ 56 | artifacts: 57 | paths: 58 | - public 59 | 60 | deploy: 61 | stage: deploy 62 | script: echo "Define your deployment script!" 63 | environment: production 64 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # requirements_readthedocs.txt is only used by readthedocs and includes tensorflow, which is imported 5 | # in the root __init__.py since d7fbb98 to suppress tf 1.x deprecation warnings. 6 | # This causes readthedocs.io builds to fail. 7 | 8 | version: 2 9 | 10 | build: 11 | os: ubuntu-22.04 12 | tools: 13 | python: "3.8" 14 | 15 | sphinx: 16 | configuration: docs/conf.py 17 | 18 | formats: all 19 | 20 | python: 21 | install: 22 | - requirements: docs/requirements_readthedocs.txt 23 | - method: pip 24 | path: . 25 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | We comply with the principles of the [Python Software Foundation](https://www.python.org/psf/codeofconduct/). -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to Contribute 2 | 3 | Read the [how to contribute guidelines](http://docs.ampligraph.org/en/latest/dev.html). -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include ampligraph/logger.conf 2 | 3 | -------------------------------------------------------------------------------- /ampligraph/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | """AmpliGraph is a library for relational learning on knowledge graphs.""" 9 | import logging.config 10 | 11 | import pkg_resources 12 | import tensorflow as tf 13 | 14 | tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) 15 | 16 | __version__ = '2.1-dev' 17 | __all__ = ['datasets', 'latent_features', 'discovery', 'evaluation', 'utils', 'pretrained_models'] 18 | 19 | logging.config.fileConfig( 20 | pkg_resources.resource_filename(__name__, "logger.conf"), 21 | disable_existing_loggers=False, 22 | ) 23 | -------------------------------------------------------------------------------- /ampligraph/compat/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | """Provides backward compatibility to AmpliGraph 1 APIs.""" 9 | from .evaluate import evaluate_performance 10 | from .models import ComplEx, DistMult, HolE, TransE 11 | 12 | __all__ = ["evaluate_performance", "TransE", "ComplEx", "DistMult", "HolE"] 13 | -------------------------------------------------------------------------------- /ampligraph/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | """Support for loading and managing datasets.""" 9 | from .datasets import ( 10 | _load_xai_fb15k_237_experiment_log, 11 | load_cn15k, 12 | load_codex, 13 | load_fb13, 14 | load_fb15k, 15 | load_fb15k_237, 16 | load_from_csv, 17 | load_from_ntriples, 18 | load_from_rdf, 19 | load_nl27k, 20 | load_onet20k, 21 | load_ppi5k, 22 | load_wn11, 23 | load_wn18, 24 | load_wn18rr, 25 | load_yago3_10, 26 | ) 27 | from .graph_data_loader import DataIndexer, GraphDataLoader, NoBackend 28 | from .graph_partitioner import PARTITION_ALGO_REGISTRY, BucketGraphPartitioner 29 | from .source_identifier import ( 30 | DataSourceIdentifier, 31 | chunks, 32 | load_csv, 33 | load_gz, 34 | load_json, 35 | load_tar, 36 | ) 37 | from .sqlite_adapter import SQLiteAdapter 38 | 39 | __all__ = [ 40 | "load_from_csv", 41 | "load_from_rdf", 42 | "load_wn18", 43 | "load_fb15k", 44 | "load_fb15k_237", 45 | "load_from_ntriples", 46 | "load_yago3_10", 47 | "load_wn18rr", 48 | "load_wn11", 49 | "load_fb13", 50 | "load_onet20k", 51 | "load_ppi5k", 52 | "load_nl27k", 53 | "load_cn15k", 54 | "load_codex", 55 | "chunks", 56 | "load_json", 57 | "load_gz", 58 | "load_tar", 59 | "load_csv", 60 | "DataSourceIdentifier", 61 | "DataIndexer", 62 | "NoBackend", 63 | "_load_xai_fb15k_237_experiment_log", 64 | "SQLiteAdapter", 65 | "GraphDataLoader", 66 | "BucketGraphPartitioner", 67 | "PARTITION_ALGO_REGISTRY", 68 | ] 69 | -------------------------------------------------------------------------------- /ampligraph/discovery/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 
2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | r"""This module includes a number of functions to perform knowledge discovery 9 | in graph embeddings. 10 | 11 | Functions provided include ``discover_facts`` which will generate candidate 12 | statements using one of several defined strategies and return triples that 13 | perform well when evaluated against corruptions, ``find_clusters`` which 14 | will perform link-based cluster analysis on a knowledge graph, 15 | ``find_duplicates`` which will find duplicate entities 16 | in a graph based on their embeddings, and ``query_topn`` which when given 17 | two elements of a triple will return the top_n results of all possible 18 | completions ordered by predicted score. 19 | 20 | """ 21 | 22 | from .discovery import ( 23 | discover_facts, 24 | find_clusters, 25 | find_duplicates, 26 | query_topn, 27 | find_nearest_neighbours 28 | ) 29 | 30 | __all__ = ["discover_facts", "find_clusters", "find_duplicates", "query_topn", "find_nearest_neighbours"] 31 | -------------------------------------------------------------------------------- /ampligraph/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | """The module includes performance metrics for neural graph embeddings models, 9 | along with model selection routines, negatives generation, and an 10 | implementation of the learning-to-rank-based evaluation protocol 11 | used in literature.""" 12 | 13 | from .metrics import hits_at_n_score, mr_score, mrr_score, rank_score 14 | from .protocol import ( 15 | filter_unseen_entities, 16 | select_best_model_ranking, 17 | train_test_split_no_unseen, 18 | ) 19 | 20 | __all__ = [ 21 | "mrr_score", 22 | "mr_score", 23 | "hits_at_n_score", 24 | "rank_score", 25 | "select_best_model_ranking", 26 | "train_test_split_no_unseen", 27 | "filter_unseen_entities", 28 | ] 29 | -------------------------------------------------------------------------------- /ampligraph/latent_features/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | r"""This module includes neural graph embedding models and support functions. 9 | 10 | Knowledge graph embedding models are neural architectures that encode concepts 11 | from a knowledge graph (i.e., entities :math:`\mathcal{E}` and relation types 12 | :math:`\mathcal{R}`) into low-dimensional, continuous vectors :math:`\in 13 | \mathcal{R}^k`. Such *knowledge graph embeddings* have applications in 14 | knowledge graph completion, entity resolution, and link-based clustering, 15 | just to cite a few :cite:`nickel2016review`. 
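A minimal training sketch with the :class:`ScoringBasedEmbeddingModel` exported
here (hyperparameter values and the ``X['train']`` triples array are
illustrative placeholders, not recommended settings):

>>> from ampligraph.latent_features import ScoringBasedEmbeddingModel
>>> model = ScoringBasedEmbeddingModel(k=150, eta=10, scoring_type='ComplEx')
>>> model.compile(optimizer='adam', loss='multiclass_nll')
>>> model.fit(X['train'], batch_size=10000, epochs=20)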
16 | 17 | """ 18 | from .loss_functions import ( 19 | AbsoluteMarginLoss, 20 | NLLLoss, 21 | NLLMulticlass, 22 | PairwiseLoss, 23 | SelfAdversarialLoss, 24 | ) 25 | from .models import ScoringBasedEmbeddingModel 26 | from .regularizers import LP_regularizer 27 | 28 | __all__ = [ 29 | "layers", 30 | "models", 31 | "ScoringBasedEmbeddingModel", 32 | "PairwiseLoss", 33 | "NLLLoss", 34 | "AbsoluteMarginLoss", 35 | "SelfAdversarialLoss", 36 | "NLLMulticlass", 37 | "LP_regularizer", 38 | ] 39 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | __all__ = ["scoring", "encoding", "corruption_generation", "calibration"] 9 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/calibration/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from .calibrate import CalibrationLayer 9 | 10 | __all__ = ["CalibrationLayer"] 11 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/calibration/calibrate.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import tensorflow as tf 9 | 10 | 11 | class CalibrationLayer(tf.keras.layers.Layer): 12 | """Layer to calibrate the model outputs. 13 | 14 | The class implements the heuristics described in :cite:`calibration`, 15 | using Platt scaling :cite:`platt1999probabilistic`. 16 | 17 | See the docs of :meth:`~ampligraph.latent_features.models.ScoringBasedEmbeddingModel.calibrate()` for more details. 18 | """ 19 | 20 | def get_config(self): 21 | config = super(CalibrationLayer, self).get_config() 22 | config.update( 23 | { 24 | "pos_size": self.pos_size, 25 | "neg_size": self.neg_size, 26 | "positive_base_rate": self.positive_base_rate, 27 | } 28 | ) 29 | return config 30 | 31 | def __init__( 32 | self, pos_size=0, neg_size=0, positive_base_rate=None, **kwargs 33 | ): 34 | self.pos_size = pos_size 35 | self.neg_size = pos_size if neg_size == 0 else neg_size 36 | 37 | if positive_base_rate is not None: 38 | if positive_base_rate <= 0 or positive_base_rate >= 1: 39 | raise ValueError( 40 | "Positive_base_rate must be a value between 0 and 1." 41 | ) 42 | else: 43 | assert pos_size > 0 and neg_size > 0, "Positive size must be > 0." 
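            # No explicit base rate was given: fall back to the empirical
            # positive rate implied by the sizes of the calibration sets.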
44 | 45 | positive_base_rate = pos_size / (pos_size + neg_size) 46 | 47 | self.positive_base_rate = positive_base_rate 48 | self.w_init = tf.constant_initializer(kwargs.pop("calib_w", 0.0)) 49 | self.b_init = tf.constant_initializer( 50 | kwargs.pop( 51 | "calib_b", 52 | float(tf.math.log((self.neg_size + 1.0) / (self.pos_size + 1.0))) 53 | ) 54 | ) 55 | super(CalibrationLayer, self).__init__(**kwargs) 56 | 57 | def build(self, input_shape): 58 | """ 59 | Build method. 60 | """ 61 | self.calib_w = self.add_weight( 62 | "calib_w", 63 | shape=(), 64 | initializer=self.w_init, 65 | dtype=tf.float32, 66 | trainable=True, 67 | ) 68 | 69 | self.calib_b = self.add_weight( 70 | "calib_b", 71 | shape=(), 72 | initializer=self.b_init, 73 | dtype=tf.float32, 74 | trainable=True, 75 | ) 76 | self.built = True 77 | 78 | def call( 79 | self, scores_pos, scores_neg=[], training=0 80 | ): 81 | """ 82 | Call method. 83 | """ 84 | if training: 85 | scores_all = tf.concat([scores_pos, scores_neg], axis=0) 86 | else: 87 | scores_all = scores_pos 88 | 89 | logits = -(self.calib_w * scores_all + self.calib_b) 90 | 91 | if training: 92 | labels = tf.concat( 93 | [ 94 | tf.cast( 95 | tf.fill( 96 | scores_pos.shape, 97 | (self.pos_size + 1.0) / (self.pos_size + 2.0), 98 | ), 99 | tf.float32, 100 | ), 101 | tf.cast( 102 | tf.fill(scores_neg.shape, 1 / (self.neg_size + 2.0)), 103 | tf.float32, 104 | ), 105 | ], 106 | axis=0, 107 | ) 108 | weigths_pos = scores_neg.shape[0] / scores_pos.shape[0] 109 | weights_neg = ( 110 | 1.0 - self.positive_base_rate 111 | ) / self.positive_base_rate 112 | weights = tf.concat( 113 | [ 114 | tf.cast( 115 | tf.fill(scores_pos.shape, weigths_pos), tf.float32 116 | ), 117 | tf.cast( 118 | tf.fill(scores_neg.shape, weights_neg), tf.float32 119 | ), 120 | ], 121 | axis=0, 122 | ) 123 | loss = tf.reduce_mean( 124 | weights 125 | * tf.nn.sigmoid_cross_entropy_with_logits(labels, logits) 126 | ) 127 | return loss 128 | else: 129 | return tf.math.sigmoid(logits) 130 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/corruption_generation/CorruptionGenerationLayerTrain.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import tensorflow as tf 9 | 10 | 11 | class CorruptionGenerationLayerTrain(tf.keras.layers.Layer): 12 | """Generates corruptions during training. 13 | 14 | The corruption might involve either subject or object using 15 | entities sampled uniformly at random from the loaded graph. 16 | """ 17 | 18 | def get_config(self): 19 | config = super(CorruptionGenerationLayerTrain, self).get_config() 20 | config.update({"seed": self.seed}) 21 | return config 22 | 23 | def __init__(self, seed=0, **kwargs): 24 | """ 25 | Initializes the corruption generation layer. 26 | 27 | Parameters 28 | ---------- 29 | eta: int 30 | Number of corruptions to generate. 31 | """ 32 | self.seed = seed 33 | super(CorruptionGenerationLayerTrain, self).__init__(**kwargs) 34 | 35 | def call(self, pos, ent_size, eta): 36 | """ 37 | Generates corruption for the positives supplied. 38 | 39 | Parameters 40 | ---------- 41 | pos: array-like, shape (n, 3) 42 | Batch of input triples (positives). 
43 | ent_size: int 44 | Number of unique entities present in the partition. 45 | 46 | Returns 47 | ------- 48 | corruptions: array-like, shape (n * eta, 3) 49 | Corruptions of the triples. 50 | """ 51 | # size and reshape the dataset to sample corruptions 52 | dataset = tf.tile(pos, [eta, 1]) 53 | # generate a mask which will tell which subject needs to be corrupted 54 | # (random uniform sampling) 55 | keep_subj_mask = tf.cast( 56 | tf.random.uniform( 57 | [tf.shape(input=dataset)[0]], 58 | 0, 59 | 2, 60 | dtype=tf.int32, 61 | seed=self.seed, 62 | ), 63 | tf.bool, 64 | ) 65 | # If we are not corrupting the subject then corrupt the object 66 | keep_obj_mask = tf.logical_not(keep_subj_mask) 67 | 68 | # cast it to integer (0/1) 69 | keep_subj_mask = tf.cast(keep_subj_mask, tf.int32) 70 | keep_obj_mask = tf.cast(keep_obj_mask, tf.int32) 71 | # generate the n * eta replacements (uniformly randomly) 72 | replacements = tf.random.uniform( 73 | [tf.shape(dataset)[0]], 0, ent_size, dtype=tf.int32, seed=self.seed 74 | ) 75 | # keep subjects of dataset where keep_subject is 1 and zero it where keep_subject is 0 76 | # now add replacements where keep_subject is 0 (i.e. keep_object is 1) 77 | subjects = tf.math.add( 78 | tf.math.multiply(keep_subj_mask, dataset[:, 0]), 79 | tf.math.multiply(keep_obj_mask, replacements), 80 | ) 81 | # keep relations as it is 82 | relationships = dataset[:, 1] 83 | # keep objects of dataset where keep_object is 1 and zero it where keep_object is 0 84 | # now add replacements where keep_object is 0 (i.e. keep_subject is 1) 85 | objects = tf.math.add( 86 | tf.math.multiply(keep_obj_mask, dataset[:, 2]), 87 | tf.math.multiply(keep_subj_mask, replacements), 88 | ) 89 | # stack the generated subject, reln and object entities and create the 90 | # corruptions 91 | corruptions = tf.transpose( 92 | a=tf.stack([subjects, relationships, objects]) 93 | ) 94 | return corruptions 95 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/corruption_generation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from .CorruptionGenerationLayerTrain import CorruptionGenerationLayerTrain 9 | 10 | __all__ = ["CorruptionGenerationLayerTrain"] 11 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/encoding/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from .EmbeddingLookupLayer import EmbeddingLookupLayer 9 | 10 | __all__ = ["EmbeddingLookupLayer"] 11 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/scoring/DistMult.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import tensorflow as tf 9 | from .AbstractScoringLayer import register_layer, AbstractScoringLayer 10 | 11 | 12 | @register_layer("DistMult") 13 | class DistMult(AbstractScoringLayer): 14 | r"""DistMult scoring layer. 15 | 16 | The model as described in :cite:`yang2014embedding`. 17 | 18 | The bilinear diagonal DistMult model uses the trilinear dot product as scoring function: 19 | 20 | .. math:: 21 | f_{DistMult}=\langle \mathbf{r}_p, \mathbf{e}_s, \mathbf{e}_o \rangle 22 | 23 | where :math:`\mathbf{e}_{s}` is the embedding of the subject, :math:`\mathbf{r}_{p}` the embedding 24 | of the predicate and :math:`\mathbf{e}_{o}` the embedding of the object. 25 | """ 26 | 27 | def get_config(self): 28 | config = super(DistMult, self).get_config() 29 | return config 30 | 31 | def __init__(self, k): 32 | super(DistMult, self).__init__(k) 33 | 34 | def _compute_scores(self, triples): 35 | """Compute scores using the distmult scoring function. 36 | 37 | Parameters 38 | ---------- 39 | triples: array-like, shape (n, 3) 40 | Batch of input triples. 41 | 42 | Returns 43 | ------- 44 | scores: tf.Tensor, shape (n,1) 45 | Tensor of scores of inputs. 46 | """ 47 | # compute scores as sum(s * p * o) 48 | scores = tf.reduce_sum(triples[0] * triples[1] * triples[2], 1) 49 | return scores 50 | 51 | def _get_subject_corruption_scores(self, triples, ent_matrix): 52 | """Compute subject corruption scores. 53 | 54 | Evaluate the inputs against subject corruptions and scores of the corruptions. 55 | 56 | Parameters 57 | ---------- 58 | triples: array-like, shape (n, k) 59 | Batch of input embeddings. 60 | ent_matrix: array-like, shape (m, k) 61 | Slice of embedding matrix (corruptions). 62 | 63 | Returns 64 | ------- 65 | scores: tf.Tensor, shape (n, 1) 66 | Scores of subject corruptions (corruptions defined by `ent_embs` matrix). 67 | """ 68 | rel_emb, obj_emb = triples[1], triples[2] 69 | # compute the score by broadcasting the corruption embeddings(ent_matrix) and using the scoring function 70 | # compute scores as sum(s_corr * p * o) 71 | sub_corr_score = tf.reduce_sum( 72 | ent_matrix * tf.expand_dims(rel_emb * obj_emb, 1), 2 73 | ) 74 | return sub_corr_score 75 | 76 | def _get_object_corruption_scores(self, triples, ent_matrix): 77 | """Compute object corruption scores. 78 | 79 | Evaluate the inputs against object corruptions and scores of the corruptions. 80 | 81 | Parameters 82 | ---------- 83 | triples: array-like, shape (n, k) 84 | Batch of input embeddings. 85 | ent_matrix: array-like, shape (m, k) 86 | Slice of embedding matrix (corruptions). 87 | 88 | Returns 89 | ------- 90 | scores: tf.Tensor, shape (n, 1) 91 | Scores of object corruptions (corruptions defined by `ent_embs` matrix). 92 | """ 93 | sub_emb, rel_emb = triples[0], triples[1] 94 | # compute the score by broadcasting the corruption embeddings(ent_matrix) and using the scoring function 95 | # compute scores as sum(s * p * o_corr) 96 | obj_corr_score = tf.reduce_sum( 97 | tf.expand_dims(sub_emb * rel_emb, 1) * ent_matrix, 2 98 | ) 99 | return obj_corr_score 100 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/scoring/HolE.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from .AbstractScoringLayer import register_layer 9 | from .ComplEx import ComplEx 10 | 11 | 12 | @register_layer("HolE") 13 | class HolE(ComplEx): 14 | r"""Holographic Embeddings (HolE) scoring layer. 15 | 16 | The HolE model :cite:`nickel2016holographic` as re-defined by Hayashi et al. :cite:`HayashiS17`: 17 | 18 | .. math:: 19 | f_{HolE}= \frac{2}{k} \, f_{ComplEx} 20 | 21 | where :math:`k` is the size of the embeddings. 22 | """ 23 | 24 | def get_config(self): 25 | config = super(HolE, self).get_config() 26 | return config 27 | 28 | def __init__(self, k): 29 | super(HolE, self).__init__(k) 30 | 31 | def _compute_scores(self, triples): 32 | """Compute scores using HolE scoring function. 33 | 34 | Parameters 35 | ---------- 36 | triples: array-like, shape (n, 3) 37 | Batch of input triples. 38 | 39 | Returns 40 | ------- 41 | scores: tf.Tensor(n,1) 42 | Tensor of scores of inputs. 43 | """ 44 | # HolE scoring is 2/k * complex_score 45 | return (2 / (self.internal_k / 2)) * (super()._compute_scores(triples)) 46 | 47 | def _get_subject_corruption_scores(self, triples, ent_matrix): 48 | """Compute subject corruption scores. 49 | 50 | Evaluate the inputs against subject corruptions and scores of the corruptions. 51 | 52 | Parameters 53 | ---------- 54 | triples: array-like, shape (n, k) 55 | Batch of input embeddings. 56 | ent_matrix: array-like, shape (m, k) 57 | Slice of embedding matrix (corruptions). 58 | 59 | Returns 60 | ------- 61 | scores: tf.Tensor, shape (n,1) 62 | Scores of subject corruptions (corruptions defined by `ent_embs` matrix). 63 | """ 64 | # HolE scoring is 2/k * complex_score 65 | return (2 / (self.internal_k / 2)) * ( 66 | super()._get_subject_corruption_scores(triples, ent_matrix) 67 | ) 68 | 69 | def _get_object_corruption_scores(self, triples, ent_matrix): 70 | """Compute object corruption scores. 71 | 72 | Evaluate the inputs against object corruptions and scores of the corruptions. 73 | 74 | Parameters 75 | ---------- 76 | triples: array-like, shape (n, k) 77 | Batch of input embeddings. 78 | ent_matrix: array-like, shape (m, k) 79 | Slice of embedding matrix (corruptions). 80 | 81 | Returns 82 | ------- 83 | scores: tf.Tensor, shape (n,1) 84 | Scores of object corruptions (corruptions defined by `ent_embs` matrix). 85 | """ 86 | # HolE scoring is 2/k * complex_score 87 | return (2 / (self.internal_k / 2)) * ( 88 | super()._get_object_corruption_scores(triples, ent_matrix) 89 | ) 90 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/scoring/Random.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import tensorflow as tf 9 | from .AbstractScoringLayer import register_layer, AbstractScoringLayer 10 | 11 | 12 | @register_layer("Random") 13 | class Random(AbstractScoringLayer): 14 | r"""Random scoring layer.""" 15 | 16 | def get_config(self): 17 | config = super(Random, self).get_config() 18 | return config 19 | 20 | def __init__(self, k): 21 | super(Random, self).__init__(k) 22 | 23 | def _compute_scores(self, triples): 24 | """Compute scores using the transE scoring function. 
25 | 26 | Parameters 27 | ---------- 28 | triples: array-like, shape (n, 3) 29 | Batch of input triples. 30 | 31 | Returns 32 | ------- 33 | scores: tf.Tensor, shape (n,1) 34 | Tensor of scores of inputs. 35 | """ 36 | 37 | scores = tf.random.uniform(shape=[tf.shape(triples[0])[0]], seed=0) 38 | return scores 39 | 40 | def _get_subject_corruption_scores(self, triples, ent_matrix): 41 | """Compute subject corruption scores. 42 | 43 | Evaluate the inputs against subject corruptions and scores of the corruptions. 44 | 45 | Parameters 46 | ---------- 47 | triples: array-like, shape (n, k) 48 | Batch of input embeddings. 49 | ent_matrix: array-like, shape (m, k) 50 | Slice of embedding matrix (corruptions). 51 | 52 | Returns 53 | ------- 54 | scores: tf.Tensor, shape (n, 1) 55 | Scores of subject corruptions (corruptions defined by `ent_embs` matrix). 56 | """ 57 | scores = tf.random.uniform( 58 | shape=[tf.shape(triples[0])[0], tf.shape(ent_matrix)[0]], seed=0 59 | ) 60 | return scores 61 | 62 | def _get_object_corruption_scores(self, triples, ent_matrix): 63 | """Compute object corruption scores. 64 | 65 | Evaluate the inputs against object corruptions and scores of the corruptions. 66 | 67 | Parameters 68 | ---------- 69 | triples: array-like, shape (n, k) 70 | Batch of input embeddings. 71 | ent_matrix: array-like, shape (m, k) 72 | Slice of embedding matrix (corruptions). 73 | 74 | Returns 75 | ------- 76 | scores: tf.Tensor, shape (n, 1) 77 | Scores of object corruptions (corruptions defined by `ent_embs` matrix). 78 | """ 79 | scores = tf.random.uniform( 80 | shape=[tf.shape(triples[0])[0], tf.shape(ent_matrix)[0]], seed=0 81 | ) 82 | return scores 83 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/scoring/TransE.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import tensorflow as tf 9 | from .AbstractScoringLayer import register_layer, AbstractScoringLayer 10 | 11 | 12 | @register_layer("TransE") 13 | class TransE(AbstractScoringLayer): 14 | r"""Translating Embeddings (TransE) scoring layer. 15 | 16 | The model as described in :cite:`bordes2013translating`. 17 | 18 | The scoring function of TransE computes a similarity between the embedding of the subject 19 | :math:`\mathbf{e}_{sub}` translated by the embedding of the predicate :math:`\mathbf{e}_{pred}`, 20 | and the embedding of the object :math:`\mathbf{e}_{obj}`, 21 | using the :math:`L_1` or :math:`L_2` norm :math:`||\cdot||` (default: :math:`L_1`): 22 | 23 | .. math:: 24 | f_{TransE}=-||\mathbf{e}_{sub} + \mathbf{e}_{pred} - \mathbf{e}_{obj}|| 25 | 26 | Such scoring function is then used on positive and negative triples :math:`t^+, t^-` in the loss function. 27 | 28 | """ 29 | 30 | def get_config(self): 31 | config = super(TransE, self).get_config() 32 | return config 33 | 34 | def __init__(self, k): 35 | super(TransE, self).__init__(k) 36 | 37 | def _compute_scores(self, triples): 38 | """Compute scores using transE scoring function. 39 | 40 | Parameters 41 | ---------- 42 | triples: array-like, (n, 3) 43 | Batch of input triples. 44 | 45 | Returns 46 | ------- 47 | scores: tf.Tensor, shape (n,1) 48 | Tensor of scores of inputs. 
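        Example
        -------
        A worked one-dimensional sketch with illustrative embeddings
        s = [0.5], p = [0.2], o = [0.6] and the L1 norm:
        f = -|0.5 + 0.2 - 0.6| = -0.1.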
49 | """ 50 | # compute scores as -|| s + p - o|| 51 | scores = tf.negative( 52 | tf.norm(triples[0] + triples[1] - triples[2], axis=1, ord=1) 53 | ) 54 | return scores 55 | 56 | def _get_subject_corruption_scores(self, triples, ent_matrix): 57 | """Compute subject corruption scores. 58 | 59 | Evaluate the inputs against subject corruptions and scores of the corruptions. 60 | 61 | Parameters 62 | ---------- 63 | triples: array-like, shape (n, k) 64 | Batch of input embeddings. 65 | ent_matrix: array-like, shape (m, k) 66 | Slice of embedding matrix (corruptions). 67 | 68 | Returns 69 | ------- 70 | scores: tf.Tensor, shape (n, 1) 71 | Scores of subject corruptions (corruptions defined by `ent_embs` matrix). 72 | """ 73 | # get the subject, predicate and object embeddings of True positives 74 | rel_emb, obj_emb = triples[1], triples[2] 75 | # compute the score by broadcasting the corruption embeddings(ent_matrix) and using the scoring function 76 | # compute scores as -|| s_corr + p - o|| 77 | sub_corr_score = tf.negative( 78 | tf.norm( 79 | ent_matrix + tf.expand_dims(rel_emb - obj_emb, 1), 80 | axis=2, 81 | ord=1, 82 | ) 83 | ) 84 | return sub_corr_score 85 | 86 | def _get_object_corruption_scores(self, triples, ent_matrix): 87 | """Compute object corruption scores. 88 | 89 | Evaluate the inputs against object corruptions and scores of the corruptions. 90 | 91 | Parameters 92 | ---------- 93 | triples: array-like, shape (n, k) 94 | Batch of input embeddings. 95 | ent_matrix: array-like, shape (m, k) 96 | Slice of embedding matrix (corruptions). 97 | 98 | Returns 99 | ------- 100 | scores: tf.Tensor, shape (n, 1) 101 | Scores of object corruptions (corruptions defined by `ent_embs` matrix). 102 | """ 103 | # get the subject, predicate and object embeddings of True positives: 104 | sub_emb, rel_emb = triples[0], triples[1] 105 | # compute the score by broadcasting the corruption embeddings(ent_matrix) and using the scoring function 106 | # compute scores as -|| s + p - o_corr|| 107 | obj_corr_score = tf.negative( 108 | tf.norm( 109 | tf.expand_dims(sub_emb + rel_emb, 1) - ent_matrix, 110 | axis=2, 111 | ord=1, 112 | ) 113 | ) 114 | return obj_corr_score 115 | -------------------------------------------------------------------------------- /ampligraph/latent_features/layers/scoring/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-20213The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from .TransE import TransE 9 | from .DistMult import DistMult 10 | from .HolE import HolE 11 | from .ComplEx import ComplEx 12 | from .RotatE import RotatE 13 | from .Random import Random 14 | 15 | __all__ = ["TransE", "DistMult", "HolE", "ComplEx", "RotatE", "Random"] 16 | -------------------------------------------------------------------------------- /ampligraph/latent_features/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from .ScoringBasedEmbeddingModel import ScoringBasedEmbeddingModel 9 | 10 | __all__ = ["ScoringBasedEmbeddingModel"] 11 | -------------------------------------------------------------------------------- /ampligraph/latent_features/regularizers.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from functools import partial 10 | 11 | import tensorflow as tf 12 | 13 | 14 | def LP_regularizer(trainable_param, regularizer_parameters={}): 15 | """Norm :math:`L^{p}` regularizer. 16 | 17 | It is passed to the model as the ``entity_relation_regularizer`` argument of the 18 | :meth:`~ampligraph.latent_features.models.ScoringBasedEmbeddingModel.compile` method. 19 | 20 | Parameters 21 | ---------- 22 | trainable_param: tf.Variable 23 | Trainable parameters of the model that need to be regularized. 24 | regularizer_parameters: dict 25 | Parameters of the regularizer: 26 | 27 | - **p**: (int) - p for the LP regularizer. For example, when :math:`p=2` (default), it uses the L2 regularizer. 28 | - **lambda** : (float) - Regularizer weight (default: 0.00001). 29 | Returns 30 | ------- 31 | regularizer: tf.keras.regularizer 32 | Regularizer instance from the `tf.keras.regularizer` class. 33 | 34 | """ 35 | return regularizer_parameters.get("lambda", 0.00001) * tf.reduce_sum( 36 | tf.pow(tf.abs(trainable_param), regularizer_parameters.get("p", 2)) 37 | ) 38 | 39 | 40 | def get(identifier, hyperparams={}): 41 | """Get the regularizer specified by the identifier. 42 | 43 | Parameters 44 | ---------- 45 | identifier: str or tf.keras.regularizer or a callable 46 | Name of the regularizer to use (with default parameters) or instance of `tf.keras.regularizer` or a 47 | callable function. 48 | 49 | Returns 50 | ------- 51 | regularizer: tf.keras.regularizer 52 | Regularizer instance of the `tf.keras.regularizer` class. 
53 | 54 | Example 55 | ------- 56 | >>> from ampligraph.latent_features.regularizers import get as get_regularizer 57 | >>> regularizer = get_regularizer('LP', {'p': 2, 'lambda': 1e-2}) 58 | 59 | """ 60 | if isinstance(identifier, str) and identifier == "l3": 61 | hyperparams["p"] = 3 62 | identifier = partial( 63 | LP_regularizer, regularizer_parameters=hyperparams 64 | ) 65 | identifier = tf.keras.regularizers.get(identifier) 66 | identifier.__name__ = "LP" 67 | elif isinstance(identifier, str) and identifier == "LP": 68 | identifier = partial( 69 | LP_regularizer, regularizer_parameters=hyperparams 70 | ) 71 | identifier = tf.keras.regularizers.get(identifier) 72 | identifier.__name__ = "LP" 73 | return identifier 74 | -------------------------------------------------------------------------------- /ampligraph/logger.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root,default 3 | 4 | [handlers] 5 | keys=fileHandler,consoleHandler 6 | 7 | [formatters] 8 | keys=format 9 | 10 | [logger_root] 11 | level=DEBUG 12 | handlers=fileHandler,consoleHandler 13 | 14 | [logger_default] 15 | level=INFO 16 | handlers=fileHandler,consoleHandler 17 | qualname=sampleLogger 18 | propagate=0 19 | 20 | [handler_consoleHandler] 21 | class=StreamHandler 22 | level=WARNING 23 | formatter=format 24 | args=(sys.stdout,) 25 | 26 | [handler_fileHandler] 27 | class=FileHandler 28 | level=INFO 29 | formatter=format 30 | args=('application.log',) 31 | 32 | [formatter_format] 33 | format=%(levelname)s - %(message)s 34 | -------------------------------------------------------------------------------- /ampligraph/pretrained_models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | """Support for loading and managing pretrained models.""" 9 | from .pretrained_utils import ( 10 | load_pretrained_model 11 | ) 12 | 13 | __all__ = [ 14 | "load_pretrained_model" 15 | ] 16 | -------------------------------------------------------------------------------- /ampligraph/pretrained_models/pretrained_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import logging 9 | from collections import namedtuple 10 | from ampligraph.utils.file_utils import _fetch_file 11 | from ampligraph.utils.model_utils import restore_model 12 | 13 | AMPLIGRAPH_ENV_NAME = "AMPLIGRAPH_DATA_HOME" 14 | 15 | ModelMetadata = namedtuple( 16 | "ModelMetadata", 17 | [ 18 | "scoring_type", 19 | "dataset", 20 | "pretrained_model_name", 21 | "url", 22 | "model_checksum" 23 | ], 24 | defaults=(None, None, None, None, None), 25 | ) 26 | 27 | logger = logging.getLogger(__name__) 28 | logger.setLevel(logging.DEBUG) 29 | 30 | 31 | def load_pretrained_model(dataset, scoring_type, data_home=None): 32 | """ 33 | Function to load a pretrained model. 34 | 35 | This function allows downloading and loading one of the AmpliGraph pre-trained 36 | model on benchmark datasets. 
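    The archive is downloaded once and cached locally; the optional ``data_home``
    argument (or the ``AMPLIGRAPH_DATA_HOME`` environment variable) is used to
    decide where the files are kept, and the model itself is loaded back through
    :meth:`~ampligraph.utils.restore_model`.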
37 | 38 | Parameters 39 | ---------- 40 | dataset: str 41 | Specify the dataset on which the pre-trained model was built. The possible 42 | value is one of `["fb15k-237", "wn18rr", "yago310", "fb15k", "wn18"]`. 43 | scoring_type: str 44 | The scoring function used when training the model. The possible value is one of 45 | `["TransE", "DistMult", "ComplEx", "HolE", "RotatE"]`. 46 | 47 | Returns 48 | ------- 49 | model: ScoringBasedEmbeddingModel 50 | The pre-trained :class:`~ampligraph.latent_features.ScoringBasedEmbeddingModel`. 51 | 52 | Example 53 | ------- 54 | >>> from ampligraph.datasets import load_fb15k_237 55 | >>> from ampligraph.pretrained_models import load_pretrained_model 56 | >>> from ampligraph.evaluation.metrics import mrr_score, hits_at_n_score, mr_score 57 | >>> 58 | >>> dataset = load_fb15k_237() 59 | >>> model = load_pretrained_model(dataset="fb15k-237", scoring_type="ComplEx") 60 | >>> ranks = model.evaluate( 61 | >>> dataset['test'], 62 | >>> corrupt_side='s,o', 63 | >>> use_filter={'train': dataset['train'], 64 | >>> 'valid': dataset['valid'], 65 | >>> 'test': dataset['test']} 66 | >>> ) 67 | >>> print(f"mr_score: {mr_score(ranks)}") 68 | >>> print(f"mrr_score: {mrr_score(ranks)}") 69 | >>> print(f"hits@1: {hits_at_n_score(ranks, 1)}") 70 | >>> print(f"hits@10: {hits_at_n_score(ranks, 10)}") 71 | """ 72 | assert dataset in ["fb15k-237", "wn18rr", "yago310", "fb15k", "wn18"], \ 73 | "The dataset you specified is not one of the available ones! Try with one of " \ 74 | "the following: ['fb15k-237', 'wn18rr', 'yago310', 'fb15k', 'wn18']." 75 | assert scoring_type in ["TransE", "DistMult", "ComplEx", "HolE", "RotatE"], \ 76 | "The scoring type you provided is not one of the available ones! Try with one of " \ 77 | "the following: ['TransE', 'DistMult', 'ComplEx', 'HolE', 'RotatE']." 78 | 79 | model_name = scoring_type.upper() 80 | dataset_name = dataset.upper() 81 | pretrained_model_name = dataset_name + "_" + model_name 82 | filename = pretrained_model_name + ".zip" 83 | url = "https://ampligraph.s3.eu-west-1.amazonaws.com/pretrained-models-v2.0/" + filename 84 | 85 | metadata = ModelMetadata( 86 | scoring_type=scoring_type, 87 | dataset=dataset, 88 | pretrained_model_name=pretrained_model_name, 89 | url=url 90 | ) 91 | 92 | # with this command we download the .zip file and unzip it, so that, in the 93 | # desired folder, we'll have the model ready to be loaded. 94 | model_path = _fetch_file(metadata, data_home, file_type='models') 95 | 96 | return restore_model(model_path) 97 | -------------------------------------------------------------------------------- /ampligraph/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | """This module contains utility functions for neural knowledge graph 9 | embedding models.
10 | 11 | """ 12 | 13 | from .model_utils import ( 14 | create_tensorboard_visualizations, 15 | dataframe_to_triples, 16 | preprocess_focusE_weights, 17 | restore_model, 18 | save_model, 19 | write_metadata_tsv, 20 | ) 21 | 22 | __all__ = [ 23 | "save_model", 24 | "restore_model", 25 | "create_tensorboard_visualizations", 26 | "write_metadata_tsv", 27 | "dataframe_to_triples", 28 | "preprocess_focusE_weights", 29 | ] 30 | -------------------------------------------------------------------------------- /ampligraph/utils/profiling.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import tracemalloc 9 | from functools import wraps 10 | from time import time 11 | 12 | 13 | def get_memory_size(): 14 | """Get memory size. 15 | 16 | Returns 17 | ------- 18 | Total: float 19 | Memory size used in total. 20 | """ 21 | snapshot = tracemalloc.take_snapshot() 22 | stats = snapshot.statistics("lineno", cumulative=True) 23 | total = sum(stat.size for stat in stats) 24 | return total 25 | 26 | 27 | def get_human_readable_size(size_in_bytes): 28 | """Convert size from bytes to human readable units. 29 | 30 | Parameters 31 | ---------- 32 | size_in_bytes: int 33 | Original size given in bytes 34 | 35 | Returns 36 | ------- 37 | readable_size: tuple 38 | Tuple of new size and unit, size in units GB/MB/KB/Bytes according 39 | to thresholds. 40 | """ 41 | if size_in_bytes >= 1024 * 1024 * 1024: 42 | return float(size_in_bytes / (1024 * 1024 * 1024)), "GB" 43 | if size_in_bytes >= 1024 * 1024: 44 | return float(size_in_bytes / (1024 * 1024)), "MB" 45 | if size_in_bytes >= 1024: 46 | return float(size_in_bytes / 1024), "KB" # return in KB 47 | return float(size_in_bytes), "Bytes" 48 | 49 | 50 | def timing_and_memory(f): 51 | """Decorator to register time and memory used by a function f. 52 | 53 | Parameters 54 | ---------- 55 | f: function 56 | Function for which the time and memory will be measured. 57 | 58 | It logs the time and the memory in the dictionary passed inside `'log'` 59 | parameter if provided. Time is logged in seconds, memory in bytes. 60 | Example dictionary entry looks like that: 61 | {'SPLIT': {'time': 1.62, 'memory-bytes': 789.097}}, 62 | where keys are names of functions that were called to get 63 | the time measured in uppercase. 
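Example
-------
A minimal, illustrative sketch (the function and variable names below are arbitrary); the
decorated function must accept ``**kwargs`` so that ``log`` and ``log_name`` can reach the wrapper:

>>> results = {}
>>> @timing_and_memory
... def split(x, **kwargs):
...     return x[:len(x) // 2], x[len(x) // 2:]
>>> halves = split(list(range(10)), log=results)
>>> # results now holds {'SPLIT': {'time': ..., 'memory-bytes': ...}}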
64 | 65 | Requires 66 | -------- 67 | passing **kwargs in function parameters 68 | """ 69 | 70 | @wraps(f) 71 | def wrapper(*args, **kwargs): 72 | tracemalloc.start() 73 | mem_before = get_memory_size() 74 | start = time() 75 | result = f(*args, **kwargs) 76 | end = time() 77 | mem_after = get_memory_size() 78 | mem_diff = mem_after - mem_before 79 | print( 80 | "{}: memory before: {:.5}{}, after: {:.5}{},\ 81 | consumed: {:.5}{}; exec time: {:.5}s".format( 82 | f.__name__, 83 | *get_human_readable_size(mem_before), 84 | *get_human_readable_size(mem_after), 85 | *get_human_readable_size(mem_diff), 86 | end - start 87 | ) 88 | ) 89 | 90 | if "log" in kwargs: 91 | name = kwargs.get("log_name", f.__name__.upper()) 92 | kwargs["log"][name] = { 93 | "time": end - start, 94 | "memory-bytes": mem_diff, 95 | } 96 | return result 97 | 98 | return wrapper 99 | -------------------------------------------------------------------------------- /ampligraph/utils/tags.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import warnings 9 | 10 | 11 | class experimentalWarning(Warning): 12 | """Warning that is triggered when the 13 | experimental function is run. 14 | """ 15 | 16 | def __init__(self, message): 17 | self.message = message 18 | 19 | def __str__(self): 20 | return repr(self.message) 21 | 22 | 23 | def experimental(func): 24 | """ 25 | Decorator - a function that accepts another function 26 | and marks it as experimental, meaning it may change in 27 | future releases, or its execution is not guaranteed. 28 | 29 | Example: 30 | 31 | >>>@experimental 32 | >>>def a_function(): 33 | >>> "Demonstration function" 34 | >>> return "demonstration" 35 | 36 | >>>a_function() 37 | experimentalWarning: 'Experimental! Function: a_function is experimental. 38 | Use at your own risk.' 39 | warnings.warn(experimentalWarning(msg)) 40 | demonstration 41 | 42 | To disable experimentalWarning set this in the module: 43 | >>>warnings.filterwarnings("ignore", category=experimentalWarning) 44 | 45 | """ 46 | 47 | def mark_experimental(): 48 | msg = f"Experimental! Function: {func.__name__} is experimental. Use \ 49 | at your own risk." 50 | 51 | warnings.warn(experimentalWarning(msg)) 52 | 53 | return func() 54 | 55 | return mark_experimental 56 | 57 | 58 | def deprecated(*args, **kwargs): 59 | """ 60 | Decorator - a function that accepts another function 61 | and marks it as deprecated, meaning it may be discontinued in 62 | future releases, and is provided only for backward compatibility purposes. 63 | 64 | --------------- 65 | Example: 66 | 67 | >>>@deprecated(instead="module2.another_function") 68 | >>>def a_function(): 69 | >>> "Demonstration function" 70 | >>> return "demonstration" 71 | 72 | >>>a_function() 73 | DeprecationWarning: Deprecated! Function: a_function is deprecated. 74 | Instead use module2.another_function. 75 | warnings.warn(DeprecationWarning(msg)) 76 | demonstration 77 | """ 78 | 79 | def mark_deprecated(func): 80 | msg = f"Deprecated! Function: {func.__name__} is deprecated. \ 81 | Instead use {kwargs['instead']}." 
82 | 83 | warnings.warn(DeprecationWarning(msg)) 84 | 85 | return func 86 | 87 | return mark_deprecated 88 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = python3 -msphinx 7 | SPHINXPROJ = xai_lp 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | 22 | .PHONY: autogen 23 | autogen: 24 | sphinx-autogen -o generated -t _templates/autosummary *.rst -------------------------------------------------------------------------------- /docs/_static/ampligraph_logo_transparent_white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/ampligraph_logo_transparent_white.png -------------------------------------------------------------------------------- /docs/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions 
.rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/fontawesome-webfont.ttf 
-------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /docs/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/css/fonts/lato-normal.woff2 
-------------------------------------------------------------------------------- /docs/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '2.0-dev', 4 | LANGUAGE: 'en', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false, 12 | SHOW_SEARCH_SUMMARY: true, 13 | ENABLE_SEARCH_SHORTCUTS: false, 14 | }; -------------------------------------------------------------------------------- /docs/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/_static/file.png -------------------------------------------------------------------------------- /docs/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /docs/_static/js/html5shiv-printshiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3-pre | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var 
n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /docs/_static/js/html5shiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return 
c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /docs/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | !function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return 
Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t`_. In this case, data is persisted on disk and is later loaded in memory in 19 | chunks, so to avoid overloading the RAM. This is the option to choose for handling massive datasets. 20 | 21 | The instantiation of a backend is not by itself sufficient. Indeed, it is capital to specify how the chunks 22 | we load in memory are defined. This is equivalent to tackle the problem of graph partitioning. 23 | Partitioning a graph amounts to split its nodes into :math:`P` partitions sized to fit in memory. 24 | When loading the data, partitions are created and singularly persisted on disk. Then, during training, single partitions 25 | are loaded in memory and the model is trained on it. Once the model finishes operating on one partition, it unloads it 26 | and loads the next one. 27 | 28 | There are many possible strategies to partition a graph, but in AmpliGraph we recommend to use the default 29 | option, the :class:`BucketGraphPartitioner` strategy, as its runtime performance are much better than 30 | the others baselines. 31 | 32 | For more details about the data pipeline components see the API below: 33 | 34 | .. autosummary:: 35 | :toctree: 36 | :template: class.rst 37 | 38 | GraphDataLoader 39 | BucketGraphPartitioner 40 | -------------------------------------------------------------------------------- /docs/ampligraph.anatomy_model.rst: -------------------------------------------------------------------------------- 1 | Anatomy of a Model 2 | ================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | .. automodule:: ampligraph.latent_features 6 | 7 | .. 
_embedding: 8 | 9 | 10 | -------------------------------------------------------------------------------- /docs/ampligraph.discovery.rst: -------------------------------------------------------------------------------- 1 | Discovery 2 | ========== 3 | .. currentmodule:: ampligraph.discovery 4 | 5 | .. automodule:: ampligraph.discovery 6 | 7 | 8 | .. autosummary:: 9 | :toctree: generated 10 | :template: function.rst 11 | 12 | discover_facts 13 | find_clusters 14 | find_duplicates 15 | query_topn -------------------------------------------------------------------------------- /docs/ampligraph.evaluation.rst: -------------------------------------------------------------------------------- 1 | Evaluation 2 | ========== 3 | .. currentmodule:: ampligraph.evaluation 4 | 5 | .. automodule:: ampligraph.evaluation 6 | 7 | After the training is complete, the model is ready to perform predictions and to be evaluated on unseen data. Given a 8 | triple, the model can score it and quantify its plausibility. Importantly, the entities and relations of new triples 9 | must have been seen during training, otherwise no embedding for them is available. Future extensions of the code base 10 | will introduce inductive methods as well. 11 | 12 | The standard evaluation of a test triple is achieved by comparing the score assigned by the model to that triple with 13 | those assigned to the same triple where we corrupted either the object or the subject. From this comparison we 14 | extract some metrics. By aggregating the metrics obtained for all triples in the test set, we finally obtain an overall 15 | evaluation of the model, whose thoroughness depends on the quality of the test set and of the corruptions. 16 | 17 | Metrics 18 | ^^^^^^^ 19 | 20 | The metrics implemented in AmpliGraph to rank a triple against its corruptions are listed in the table below. 21 | 22 | .. autosummary:: 23 | :toctree: 24 | :template: function.rst 25 | 26 | rank_score 27 | mr_score 28 | mrr_score 29 | hits_at_n_score 30 | 31 | Model Selection 32 | ^^^^^^^^^^^^^^^ 33 | 34 | AmpliGraph implements a model selection routine for KGE models via either a grid search or a random search. 35 | Random search is typically more efficient, while grid search provides a more controlled selection framework. 36 | 37 | .. autosummary:: 38 | :toctree: 39 | :template: function.rst 40 | 41 | select_best_model_ranking 42 | 43 | Helper Functions 44 | ^^^^^^^^^^^^^^^^ 45 | 46 | Utilities and support functions for evaluation procedures. 47 | 48 | .. autosummary:: 49 | :toctree: 50 | :template: function.rst 51 | 52 | train_test_split_no_unseen 53 | filter_unseen_entities 54 | -------------------------------------------------------------------------------- /docs/ampligraph.pretrained_models.rst: -------------------------------------------------------------------------------- 1 | Pre-Trained Models 2 | =================== 3 | 4 | .. currentmodule:: ampligraph.pretrained_models 5 | 6 | .. automodule:: ampligraph.pretrained_models 7 | 8 | This module provides an API to download and use pre-trained 9 | :class:`~ampligraph.latent_features.ScoringBasedEmbeddingModel` instances. 10 | 11 | .. autosummary:: 12 | :toctree: 13 | :template: function.rst 14 | 15 | load_pretrained_model 16 | 17 | Currently, the available models are trained on "FB15K-237", "WN18RR", "YAGO310", "FB15K" and "WN18", and use one of the 18 | following scoring functions: "TransE", "DistMult", "ComplEx", "HolE" and "RotatE".
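For instance, the minimal sketch below loads the ComplEx model pre-trained on FB15K-237 and scores a few held-out
triples. It assumes network access to download the checkpoint on first use, and it relies on the Keras-style
``predict`` method of the restored model to score triples::

    from ampligraph.datasets import load_fb15k_237
    from ampligraph.pretrained_models import load_pretrained_model

    dataset = load_fb15k_237()
    model = load_pretrained_model(dataset="fb15k-237", scoring_type="ComplEx")

    # Plausibility scores for a handful of test triples (higher means more plausible).
    scores = model.predict(dataset["test"][:5])
    print(scores)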
19 | 20 | -------------------------------------------------------------------------------- /docs/ampligraph.utils.rst: -------------------------------------------------------------------------------- 1 | Utils 2 | ===== 3 | .. currentmodule:: ampligraph.utils 4 | .. automodule:: ampligraph.utils 5 | 6 | This module contains utility functions for Knowledge Graph Embedding models. 7 | 8 | Saving/Restoring Models 9 | ----------------------- 10 | 11 | Models can be saved and restored from disk. This is useful to avoid re-training a model. Unlike what happens 12 | with :meth:`~ampligraph.latent_features.models.ScoringBasedEmbeddingModel.save_weights` and 13 | :meth:`~ampligraph.latent_features.models.ScoringBasedEmbeddingModel.load_weights`, the functions below allow restarting 14 | the model training from the point where it was interrupted when the model was first saved. 15 | 16 | 17 | .. autosummary:: 18 | :toctree: generated 19 | :template: function.rst 20 | 21 | save_model 22 | restore_model 23 | 24 | 25 | Visualization 26 | ------------- 27 | 28 | Functions to visualize embeddings. 29 | 30 | .. autosummary:: 31 | :toctree: generated 32 | :template: function.rst 33 | 34 | create_tensorboard_visualizations 35 | 36 | Others 37 | ------------- 38 | 39 | Various utility functions to be used when needed. 40 | 41 | .. autosummary:: 42 | :toctree: generated 43 | :template: function.rst 44 | 45 | dataframe_to_triples 46 | preprocess_focusE_weights -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API 2 | === 3 | 4 | AmpliGraph divides its APIs into the six main submodules listed below: 5 | 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | ampligraph.datasets 11 | ampligraph.latent_features 12 | ampligraph.evaluation 13 | ampligraph.discovery 14 | ampligraph.utils 15 | ampligraph.pretrained_models 16 | 17 | The different submodules provide the user with support through all the operations needed when dealing with Knowledge 18 | Graph Embedding models, from loading benchmark or user-customised datasets, to saving and reloading a model after it has 19 | been trained, validated and tested. Further, the APIs also support important downstream tasks and provide enough 20 | flexibility to allow custom extensions from the most demanding users. -------------------------------------------------------------------------------- /docs/background.rst: -------------------------------------------------------------------------------- 1 | Background 2 | ========== 3 | 4 | For a comprehensive theoretical overview of KGE models and hands-on experience with AmpliGraph, check out our tutorials: 5 | `COLING-22 KGE4NLP Tutorial (Slides + Recording + Colab Notebook)`_ and `ECAI-20 Tutorial (Slides + Recording + Colab Notebook)`_. 6 | 7 | .. _COLING-22 KGE4NLP Tutorial (Slides + Recording + Colab Notebook): https://kge4nlp-coling22.github.io/ 8 | 9 | .. _ECAI-20 Tutorial (Slides + Recording + Colab Notebook): https://kge-tutorial-ecai2020.github.io/ 10 | 11 | Knowledge graphs are graph-based knowledge bases whose facts are modeled as relationships between entities. Knowledge 12 | graph research led to broad-scope graphs such as DBpedia :cite:`auer2007dbpedia`, WordNet :cite:`wordnet`, and YAGO 13 | :cite:`suchanek2007yago`. Countless domain-specific knowledge graphs have also been published on the web, giving birth 14 | to the so-called Web of Data :cite:`bizer2011linked`.
15 | 16 | Formally, a knowledge graph :math:`\mathcal{G}=\{ (sub,pred,obj)\} \subseteq \mathcal{E} \times \mathcal{R} \times \mathcal{E}` 17 | is a set of :math:`(sub,pred,obj)` triples, each including a subject :math:`sub \in \mathcal{E}`, 18 | a predicate :math:`pred \in \mathcal{R}`, and an object :math:`obj \in \mathcal{E}`. 19 | :math:`\mathcal{E}` and :math:`\mathcal{R}` are the sets of all entities and relation types of :math:`\mathcal{G}`. 20 | 21 | 22 | Knowledge graph embedding models are neural architectures that encode concepts from a knowledge graph (i.e. entities 23 | :math:`\mathcal{E}` and relation types :math:`\mathcal{R}`) into low-dimensional, continuous vectors 24 | :math:`\in \mathbb{R}^k`. Such *knowledge graph embeddings* have applications in knowledge graph completion, 25 | entity resolution, and link-based clustering, to name just a few :cite:`nickel2016review`. 26 | Knowledge graph embeddings are learned by training a neural architecture over a graph. 27 | Although such architectures vary, the training phase always consists in minimizing a loss function :math:`\mathcal{L}` 28 | that includes a *scoring function* :math:`f_{m}(t)`, i.e. a model-specific function that assigns a score to a triple 29 | :math:`t=(sub,pred,obj)`. 30 | 31 | The goal of the optimization procedure is learning optimal embeddings, such that the scoring function is able to assign 32 | high scores to positive statements and low scores to statements unlikely to be true. Existing models propose scoring 33 | functions that combine the embeddings :math:`\mathbf{e}_{sub},\mathbf{e}_{pred}, \mathbf{e}_{obj} \in \mathbb{R}^k` of 34 | the subject, predicate, and object of triple :math:`t=(sub,pred,obj)` using different intuitions: 35 | :class:`~ampligraph.latent_features.layers.scoring.TransE` :cite:`bordes2013translating` relies on distances, 36 | :class:`~ampligraph.latent_features.layers.scoring.DistMult` :cite:`yang2014embedding` and 37 | :class:`~ampligraph.latent_features.layers.scoring.ComplEx` :cite:`trouillon2016complex` are bilinear-diagonal models, 38 | :class:`~ampligraph.latent_features.layers.scoring.RotatE` :cite:`sun2018rotate` models relations as rotations in the 39 | complex space, :class:`~ampligraph.latent_features.layers.scoring.HolE` :cite:`nickel2016holographic` uses circular 40 | correlation. While the above models can be interpreted as multilayer perceptrons, others such as ConvE include 41 | convolutional layers :cite:`DettmersMS018`. 42 | 43 | As an example, the scoring function of TransE computes a similarity between the embedding of the subject 44 | :math:`\mathbf{e}_{sub}` translated by the embedding of the predicate :math:`\mathbf{e}_{pred}` and the embedding of 45 | the object :math:`\mathbf{e}_{obj}`, using the :math:`L_1` or :math:`L_2` norm :math:`||\cdot||`: 46 | 47 | .. math:: 48 | 49 | f_{TransE}=-||\mathbf{e}_{sub} + \mathbf{e}_{pred} - \mathbf{e}_{obj}||_n 50 | 51 | 52 | Such a scoring function is then used on positive and negative triples :math:`t^+, t^-` in the loss function. 53 | This can be, for example, a pairwise margin-based loss, as shown in the equation below: 54 | 55 | .. 
math:: 56 | \mathcal{L}(\Theta) = \sum_{t^+ \in \mathcal{G}}\sum_{t^- \in \mathcal{N}}\max(0, \gamma + f_{m}(t^-;\Theta) - f_{m}(t^+;\Theta)) 57 | 58 | where :math:`\Theta` are the embeddings learned by the model, :math:`f_{m}` is the model-specific scoring function, 59 | :math:`\gamma \in \mathbb{R}` is the margin and :math:`\mathcal{N}` is a set of negative triples generated with a 60 | corruption heuristic :cite:`bordes2013translating`. 61 | -------------------------------------------------------------------------------- /docs/biblio.rst: -------------------------------------------------------------------------------- 1 | Bibliography 2 | ============ 3 | 4 | .. bibliography:: references.bib 5 | :all: 6 | -------------------------------------------------------------------------------- /docs/contacts.md: -------------------------------------------------------------------------------- 1 | # About 2 | 3 | AmpliGraph is developed and maintained by [Accenture Labs Dublin](https://www.accenture.com/us-en/accenture-technology-labs-index). 4 | 5 | ## Contact us 6 | 7 | You can contact us by email at [about@ampligraph.org](mailto:about@ampligraph.org). 8 | 9 | [Join the conversation on Slack](https://join.slack.com/t/ampligraph/shared_invite/enQtNTc2NTI0MzUxMTM5LTRkODk0MjI2OWRlZjdjYmExY2Q3M2M3NGY0MGYyMmI4NWYyMWVhYTRjZDhkZjA1YTEyMzBkMGE4N2RmNTRiZDg) 10 | ![](/img/slack_logo.png) 11 | 12 | 13 | 14 | ## How to Cite 15 | 16 | If you like AmpliGraph and you use it in your project, why not star the project on GitHub! 17 | 18 | [![](https://img.shields.io/github/stars/Accenture/AmpliGraph.svg?style=social&label=Star&maxAge=3600)](https://GitHub.com/Accenture/AmpliGraph/stargazers/) 19 | 20 | 21 | If you instead use AmpliGraph in an academic publication, cite as: 22 | 23 | ``` 24 | @misc{ampligraph, 25 | author= {Luca Costabello and 26 | Alberto Bernardi and 27 | Adrianna Janik and 28 | Aldan Creo and 29 | Sumit Pai and 30 | Chan Le Van and 31 | Rory McGrath and 32 | Nicholas McCarthy and 33 | Pedro Tabacof}, 34 | title = {{AmpliGraph: a Library for Representation Learning on Knowledge Graphs}}, 35 | month = mar, 36 | year = 2019, 37 | doi = {10.5281/zenodo.2595043}, 38 | url = {https://doi.org/10.5281/zenodo.2595043} 39 | } 40 | ``` 41 | [![](https://zenodo.org/badge/DOI/10.5281/zenodo.2595043.svg)](https://doi.org/10.5281/zenodo.2595043) 42 | 43 | 44 | ## Contributors 45 | 46 | Active contributors (in alphabetical order) 47 | 48 | + [Alberto Bernardi](http://github.com/albernar) 49 | + [Luca Costabello](http://github.com/lukostaz) 50 | + [Aldan Creo](http://github.com/acmcmc) 51 | + [Adrianna Janik](https://github.com/adrijanik) 52 | 53 | Past contributors 54 | + [Nicholas McCarthy](http://github.com/NicholasMcCarthy) 55 | + [Rory McGrath](http://github.com/rorymcgrath) 56 | + [Chan Le Van](http://github.com/chanlevan) 57 | + [Sumit Pai](http://github.com/sumitpai) 58 | + [Pedro Tabacof](http://github.com/tabacof) 59 | 60 | ## License 61 | 62 | AmpliGraph is licensed under the Apache 2.0 License. 63 | 64 | -------------------------------------------------------------------------------- /docs/dev.md: -------------------------------------------------------------------------------- 1 | # How to Contribute 2 | 3 | ## Git Repo and Issue Tracking 4 | [![](https://img.shields.io/github/stars/Accenture/AmpliGraph.svg?style=social&label=Star&maxAge=3600)](https://GitHub.com/Accenture/AmpliGraph/stargazers/) 5 | 6 | AmpliGraph [repository is available on GitHub](https://github.com/Accenture/AmpliGraph).
7 | 8 | A list of open issues [is available here](https://github.com/Accenture/AmpliGraph/issues). 9 | 10 | [Join the conversation on Slack](https://join.slack.com/t/ampligraph/shared_invite/enQtNTc2NTI0MzUxMTM5LTRkODk0MjI2OWRlZjdjYmExY2Q3M2M3NGY0MGYyMmI4NWYyMWVhYTRjZDhkZjA1YTEyMzBkMGE4N2RmNTRiZDg) 11 | ![](/img/slack_logo.png) 12 | 13 | 14 | ## How to Contribute 15 | We welcome community contributions, whether they are new models, tests, or documentation. 16 | 17 | You can contribute to AmpliGraph in many ways: 18 | - Raise a [bug report](https://github.com/Accenture/AmpliGraph/issues/new?assignees=&labels=&template=bug_report.md&title=) 19 | - File a [feature request](https://github.com/Accenture/AmpliGraph/issues/new?assignees=&labels=&template=feature_request.md&title=) 20 | - Help other users by commenting on the [issue tracking system](https://github.com/Accenture/AmpliGraph/issues) 21 | - Add unit tests 22 | - Improve the documentation 23 | - Add a new graph embedding model (see below) 24 | 25 | 26 | ## Adding Your Own Model 27 | 28 | The landscape of knowledge graph embeddings evolves rapidly. 29 | We welcome new models as a contribution to AmpliGraph, which has been built to provide a shared codebase to guarantee a 30 | fair evaluation and comparison across models. 31 | 32 | You can add your own model by raising a pull request. 33 | 34 | To get started, [read the documentation on how current models have been implemented](ampligraph.latent_features.html#anatomy-of-a-model). 35 | 36 | 37 | ## Clone and Install in editable mode 38 | 39 | Clone the repository and check out the `develop` branch. 40 | Install from source with pip. Use the `-e` flag to enable [editable mode](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs): 41 | 42 | ``` 43 | git clone https://github.com/Accenture/AmpliGraph.git 44 | cd AmpliGraph 45 | git checkout develop 46 | pip install -e . 47 | ``` 48 | 49 | 50 | ## Unit Tests 51 | 52 | To run all the unit tests: 53 | 54 | ``` 55 | $ pytest tests 56 | ``` 57 | 58 | See [pytest documentation](https://docs.pytest.org/en/latest/) for additional arguments. 59 | 60 | 61 | ## Documentation 62 | 63 | The [project documentation](https://docs.ampligraph.org) is based on Sphinx and can be built on your local working 64 | copy as follows: 65 | 66 | ``` 67 | cd docs 68 | make clean autogen html 69 | ``` 70 | 71 | The above generates an HTML version of the documentation under `docs/_build/html`. 72 | 73 | 74 | ## Packaging 75 | 76 | To build an AmpliGraph custom wheel, do the following: 77 | 78 | ``` 79 | pip wheel --wheel-dir dist --no-deps . 80 | ``` -------------------------------------------------------------------------------- /docs/generated/ampligraph.compat.ComplEx.rst: -------------------------------------------------------------------------------- 1 | ComplEx 2 | ========================= 3 | 4 | .. currentmodule:: ampligraph.compat 5 | 6 | .. autoclass:: ComplEx 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | ~ComplEx.name 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | .. rubric:: Methods 24 | 25 | .. autosummary:: 26 | 27 | 28 | ~ComplEx.__init__ 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | .. 
automethod:: ComplEx.__init__ 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.compat.DistMult.rst: -------------------------------------------------------------------------------- 1 | DistMult 2 | ========================== 3 | 4 | .. currentmodule:: ampligraph.compat 5 | 6 | .. autoclass:: DistMult 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | ~DistMult.name 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | .. rubric:: Methods 24 | 25 | .. autosummary:: 26 | 27 | 28 | ~DistMult.__init__ 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | .. automethod:: DistMult.__init__ 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.compat.HolE.rst: -------------------------------------------------------------------------------- 1 | HolE 2 | ====================== 3 | 4 | .. currentmodule:: ampligraph.compat 5 | 6 | .. autoclass:: HolE 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | ~HolE.name 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | .. rubric:: Methods 24 | 25 | .. autosummary:: 26 | 27 | 28 | ~HolE.__init__ 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | .. automethod:: HolE.__init__ 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.compat.TransE.rst: -------------------------------------------------------------------------------- 1 | TransE 2 | ======================== 3 | 4 | .. currentmodule:: ampligraph.compat 5 | 6 | .. autoclass:: TransE 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | ~TransE.name 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | .. rubric:: Methods 24 | 25 | .. autosummary:: 26 | 27 | 28 | ~TransE.__init__ 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | .. automethod:: TransE.__init__ 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.compat.evaluate_performance.rst: -------------------------------------------------------------------------------- 1 | evaluate_performance 2 | ====================================== 3 | 4 | .. currentmodule:: ampligraph.compat 5 | 6 | .. autofunction:: evaluate_performance 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.BucketGraphPartitioner.rst: -------------------------------------------------------------------------------- 1 | BucketGraphPartitioner 2 | ========================================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autoclass:: BucketGraphPartitioner 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | ~BucketGraphPartitioner.manager 16 | 17 | 18 | ~BucketGraphPartitioner.name 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | .. rubric:: Methods 27 | 28 | .. autosummary:: 29 | 30 | 31 | ~BucketGraphPartitioner.__init__ 32 | 33 | 34 | 35 | ~BucketGraphPartitioner.create_single_partition 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | .. automethod:: BucketGraphPartitioner.__init__ 45 | 46 | 47 | .. 
automethod:: BucketGraphPartitioner.create_single_partition 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.GraphDataLoader.rst: -------------------------------------------------------------------------------- 1 | GraphDataLoader 2 | =================================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autoclass:: GraphDataLoader 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | ~GraphDataLoader.max_entities 16 | 17 | 18 | ~GraphDataLoader.max_relations 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | .. rubric:: Methods 27 | 28 | .. autosummary:: 29 | 30 | 31 | ~GraphDataLoader.__init__ 32 | 33 | 34 | ~GraphDataLoader.add_dataset 35 | 36 | 37 | ~GraphDataLoader.clean 38 | 39 | 40 | ~GraphDataLoader.get_batch_generator 41 | 42 | 43 | ~GraphDataLoader.get_complementary_entities 44 | 45 | 46 | ~GraphDataLoader.get_complementary_objects 47 | 48 | 49 | ~GraphDataLoader.get_complementary_subjects 50 | 51 | 52 | ~GraphDataLoader.get_data_size 53 | 54 | 55 | ~GraphDataLoader.get_participating_entities 56 | 57 | 58 | ~GraphDataLoader.get_tf_generator 59 | 60 | 61 | ~GraphDataLoader.get_triples 62 | 63 | 64 | ~GraphDataLoader.intersect 65 | 66 | 67 | ~GraphDataLoader.on_complete 68 | 69 | 70 | ~GraphDataLoader.on_epoch_end 71 | 72 | 73 | ~GraphDataLoader.reload 74 | 75 | 76 | 77 | .. automethod:: GraphDataLoader.__init__ 78 | 79 | .. automethod:: GraphDataLoader.add_dataset 80 | 81 | .. automethod:: GraphDataLoader.clean 82 | 83 | .. automethod:: GraphDataLoader.get_batch_generator 84 | 85 | .. automethod:: GraphDataLoader.get_complementary_entities 86 | 87 | .. automethod:: GraphDataLoader.get_complementary_objects 88 | 89 | .. automethod:: GraphDataLoader.get_complementary_subjects 90 | 91 | .. automethod:: GraphDataLoader.get_data_size 92 | 93 | .. automethod:: GraphDataLoader.get_participating_entities 94 | 95 | .. automethod:: GraphDataLoader.get_tf_generator 96 | 97 | .. automethod:: GraphDataLoader.get_triples 98 | 99 | .. automethod:: GraphDataLoader.intersect 100 | 101 | .. automethod:: GraphDataLoader.on_complete 102 | 103 | .. automethod:: GraphDataLoader.on_epoch_end 104 | 105 | .. automethod:: GraphDataLoader.reload 106 | 107 | 108 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_cn15k.rst: -------------------------------------------------------------------------------- 1 | load_cn15k 2 | ======================================= 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_cn15k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_codex.rst: -------------------------------------------------------------------------------- 1 | load_codex 2 | ======================================= 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_codex 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_fb13.rst: -------------------------------------------------------------------------------- 1 | load_fb13 2 | ====================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. 
autofunction:: load_fb13 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_fb15k.rst: -------------------------------------------------------------------------------- 1 | load_fb15k 2 | ======================================= 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_fb15k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_fb15k_237.rst: -------------------------------------------------------------------------------- 1 | load_fb15k_237 2 | =========================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_fb15k_237 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_from_csv.rst: -------------------------------------------------------------------------------- 1 | load_from_csv 2 | ========================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_from_csv 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_from_ntriples.rst: -------------------------------------------------------------------------------- 1 | load_from_ntriples 2 | =============================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_from_ntriples 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_from_rdf.rst: -------------------------------------------------------------------------------- 1 | load_from_rdf 2 | ========================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_from_rdf 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_nl27k.rst: -------------------------------------------------------------------------------- 1 | load_nl27k 2 | ======================================= 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_nl27k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_onet20k.rst: -------------------------------------------------------------------------------- 1 | load_onet20k 2 | ========================================= 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_onet20k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_ppi5k.rst: -------------------------------------------------------------------------------- 1 | load_ppi5k 2 | ======================================= 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_ppi5k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_wn11.rst: -------------------------------------------------------------------------------- 1 | load_wn11 2 | ====================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. 
autofunction:: load_wn11 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_wn18.rst: -------------------------------------------------------------------------------- 1 | load_wn18 2 | ====================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_wn18 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_wn18rr.rst: -------------------------------------------------------------------------------- 1 | load_wn18rr 2 | ======================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_wn18rr 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.datasets.load_yago3_10.rst: -------------------------------------------------------------------------------- 1 | load_yago3_10 2 | ========================================== 3 | 4 | .. currentmodule:: ampligraph.datasets.datasets 5 | 6 | .. autofunction:: load_yago3_10 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_cn15k.rst: -------------------------------------------------------------------------------- 1 | load_cn15k 2 | ============================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_cn15k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_codex.rst: -------------------------------------------------------------------------------- 1 | load_codex 2 | ============================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_codex 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_fb13.rst: -------------------------------------------------------------------------------- 1 | load_fb13 2 | ============================= 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_fb13 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_fb15k.rst: -------------------------------------------------------------------------------- 1 | load_fb15k 2 | ============================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_fb15k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_fb15k_237.rst: -------------------------------------------------------------------------------- 1 | load_fb15k_237 2 | ================================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_fb15k_237 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_from_csv.rst: -------------------------------------------------------------------------------- 1 | load_from_csv 2 | ================================= 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. 
autofunction:: load_from_csv 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_from_ntriples.rst: -------------------------------------------------------------------------------- 1 | load_from_ntriples 2 | ====================================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_from_ntriples 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_from_rdf.rst: -------------------------------------------------------------------------------- 1 | load_from_rdf 2 | ================================= 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_from_rdf 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_nl27k.rst: -------------------------------------------------------------------------------- 1 | load_nl27k 2 | ============================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_nl27k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_onet20k.rst: -------------------------------------------------------------------------------- 1 | load_onet20k 2 | ================================ 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_onet20k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_ppi5k.rst: -------------------------------------------------------------------------------- 1 | load_ppi5k 2 | ============================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_ppi5k 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_wn11.rst: -------------------------------------------------------------------------------- 1 | load_wn11 2 | ============================= 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_wn11 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_wn18.rst: -------------------------------------------------------------------------------- 1 | load_wn18 2 | ============================= 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_wn18 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_wn18rr.rst: -------------------------------------------------------------------------------- 1 | load_wn18rr 2 | =============================== 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_wn18rr 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.datasets.load_yago3_10.rst: -------------------------------------------------------------------------------- 1 | load_yago3_10 2 | ================================= 3 | 4 | .. currentmodule:: ampligraph.datasets 5 | 6 | .. autofunction:: load_yago3_10 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.discovery.discover_facts.rst: -------------------------------------------------------------------------------- 1 | discover_facts 2 | =================================== 3 | 4 | .. 
currentmodule:: ampligraph.discovery 5 | 6 | .. autofunction:: discover_facts 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.discovery.find_clusters.rst: -------------------------------------------------------------------------------- 1 | find_clusters 2 | ================================== 3 | 4 | .. currentmodule:: ampligraph.discovery 5 | 6 | .. autofunction:: find_clusters 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.discovery.find_duplicates.rst: -------------------------------------------------------------------------------- 1 | find_duplicates 2 | ==================================== 3 | 4 | .. currentmodule:: ampligraph.discovery 5 | 6 | .. autofunction:: find_duplicates 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.discovery.find_nearest_neighbours.rst: -------------------------------------------------------------------------------- 1 | find_nearest_neighbours 2 | ============================================ 3 | 4 | .. currentmodule:: ampligraph.discovery 5 | 6 | .. autofunction:: find_nearest_neighbours 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.discovery.query_topn.rst: -------------------------------------------------------------------------------- 1 | query_topn 2 | =============================== 3 | 4 | .. currentmodule:: ampligraph.discovery 5 | 6 | .. autofunction:: query_topn 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.evaluation.filter_unseen_entities.rst: -------------------------------------------------------------------------------- 1 | filter_unseen_entities 2 | ============================================ 3 | 4 | .. currentmodule:: ampligraph.evaluation 5 | 6 | .. autofunction:: filter_unseen_entities 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.evaluation.hits_at_n_score.rst: -------------------------------------------------------------------------------- 1 | hits_at_n_score 2 | ===================================== 3 | 4 | .. currentmodule:: ampligraph.evaluation 5 | 6 | .. autofunction:: hits_at_n_score 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.evaluation.mr_score.rst: -------------------------------------------------------------------------------- 1 | mr_score 2 | ============================== 3 | 4 | .. currentmodule:: ampligraph.evaluation 5 | 6 | .. autofunction:: mr_score 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.evaluation.mrr_score.rst: -------------------------------------------------------------------------------- 1 | mrr_score 2 | =============================== 3 | 4 | .. currentmodule:: ampligraph.evaluation 5 | 6 | .. autofunction:: mrr_score 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.evaluation.rank_score.rst: -------------------------------------------------------------------------------- 1 | rank_score 2 | ================================ 3 | 4 | .. currentmodule:: ampligraph.evaluation 5 | 6 | .. 
autofunction:: rank_score 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.evaluation.select_best_model_ranking.rst: -------------------------------------------------------------------------------- 1 | select_best_model_ranking 2 | =============================================== 3 | 4 | .. currentmodule:: ampligraph.evaluation 5 | 6 | .. autofunction:: select_best_model_ranking 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.evaluation.train_test_split_no_unseen.rst: -------------------------------------------------------------------------------- 1 | train_test_split_no_unseen 2 | ================================================ 3 | 4 | .. currentmodule:: ampligraph.evaluation 5 | 6 | .. autofunction:: train_test_split_no_unseen 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.AbsoluteMarginLoss.rst: -------------------------------------------------------------------------------- 1 | AbsoluteMarginLoss 2 | ============================================= 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: AbsoluteMarginLoss 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~AbsoluteMarginLoss.external_params 17 | 18 | 19 | 20 | ~AbsoluteMarginLoss.name 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | .. rubric:: Methods 29 | 30 | .. autosummary:: 31 | 32 | 33 | ~AbsoluteMarginLoss.__init__ 34 | 35 | 36 | 37 | .. automethod:: AbsoluteMarginLoss.__init__ 38 | 39 | 40 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.AdagradOptimizer.rst: -------------------------------------------------------------------------------- 1 | AdagradOptimizer 2 | ================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: AdagradOptimizer 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | .. rubric:: Methods 17 | 18 | .. autosummary:: 19 | 20 | ~AdagradOptimizer.__init__ 21 | ~AdagradOptimizer.minimize 22 | ~AdagradOptimizer.update_feed_dict 23 | 24 | .. automethod:: AdagradOptimizer.__init__ 25 | .. automethod:: AdagradOptimizer.minimize 26 | .. automethod:: AdagradOptimizer.update_feed_dict 27 | 28 | 29 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.AdamOptimizer.rst: -------------------------------------------------------------------------------- 1 | AdamOptimizer 2 | ================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: AdamOptimizer 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | .. rubric:: Methods 17 | 18 | .. autosummary:: 19 | 20 | ~AdamOptimizer.__init__ 21 | ~AdamOptimizer.minimize 22 | ~AdamOptimizer.update_feed_dict 23 | 24 | .. automethod:: AdamOptimizer.__init__ 25 | .. automethod:: AdamOptimizer.minimize 26 | .. automethod:: AdamOptimizer.update_feed_dict 27 | 28 | 29 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.Constant.rst: -------------------------------------------------------------------------------- 1 | Constant 2 | ================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: Constant 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | .. 
rubric:: Methods 17 | 18 | .. autosummary:: 19 | 20 | ~Constant.__init__ 21 | 22 | .. automethod:: Constant.__init__ 23 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.LP_regularizer.rst: -------------------------------------------------------------------------------- 1 | LP_regularizer 2 | ========================================= 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: LP_regularizer 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.MomentumOptimizer.rst: -------------------------------------------------------------------------------- 1 | MomentumOptimizer 2 | ================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: MomentumOptimizer 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | .. rubric:: Methods 17 | 18 | .. autosummary:: 19 | 20 | ~MomentumOptimizer.__init__ 21 | ~MomentumOptimizer.minimize 22 | ~MomentumOptimizer.update_feed_dict 23 | 24 | .. automethod:: MomentumOptimizer.__init__ 25 | .. automethod:: MomentumOptimizer.minimize 26 | .. automethod:: MomentumOptimizer.update_feed_dict 27 | 28 | 29 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.NLLLoss.rst: -------------------------------------------------------------------------------- 1 | NLLLoss 2 | ================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: NLLLoss 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~NLLLoss.external_params 17 | 18 | 19 | 20 | ~NLLLoss.name 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | .. rubric:: Methods 29 | 30 | .. autosummary:: 31 | 32 | 33 | ~NLLLoss.__init__ 34 | 35 | 36 | 37 | .. automethod:: NLLLoss.__init__ 38 | 39 | 40 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.NLLMulticlass.rst: -------------------------------------------------------------------------------- 1 | NLLMulticlass 2 | ======================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: NLLMulticlass 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~NLLMulticlass.external_params 17 | 18 | 19 | 20 | ~NLLMulticlass.name 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | .. rubric:: Methods 29 | 30 | .. autosummary:: 31 | 32 | 33 | ~NLLMulticlass.__init__ 34 | 35 | 36 | 37 | .. automethod:: NLLMulticlass.__init__ 38 | 39 | 40 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.PairwiseLoss.rst: -------------------------------------------------------------------------------- 1 | PairwiseLoss 2 | ======================================= 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: PairwiseLoss 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~PairwiseLoss.external_params 17 | 18 | 19 | 20 | ~PairwiseLoss.name 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | .. rubric:: Methods 29 | 30 | .. autosummary:: 31 | 32 | 33 | ~PairwiseLoss.__init__ 34 | 35 | 36 | 37 | .. 
automethod:: PairwiseLoss.__init__ 38 | 39 | 40 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.SGDOptimizer.rst: -------------------------------------------------------------------------------- 1 | SGDOptimizer 2 | ================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: SGDOptimizer 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | .. rubric:: Methods 17 | 18 | .. autosummary:: 19 | 20 | ~SGDOptimizer.__init__ 21 | ~SGDOptimizer.minimize 22 | ~SGDOptimizer.update_feed_dict 23 | 24 | .. automethod:: SGDOptimizer.__init__ 25 | .. automethod:: SGDOptimizer.minimize 26 | .. automethod:: SGDOptimizer.update_feed_dict 27 | 28 | 29 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.SelfAdversarialLoss.rst: -------------------------------------------------------------------------------- 1 | SelfAdversarialLoss 2 | ============================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features 5 | 6 | .. autoclass:: SelfAdversarialLoss 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~SelfAdversarialLoss.external_params 17 | 18 | 19 | 20 | ~SelfAdversarialLoss.name 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | .. rubric:: Methods 29 | 30 | .. autosummary:: 31 | 32 | 33 | ~SelfAdversarialLoss.__init__ 34 | 35 | 36 | 37 | .. automethod:: SelfAdversarialLoss.__init__ 38 | 39 | 40 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.layers.calibration.CalibrationLayer.rst: -------------------------------------------------------------------------------- 1 | CalibrationLayer 2 | ============================================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features.layers.calibration 5 | 6 | .. autoclass:: CalibrationLayer 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | .. rubric:: Methods 51 | 52 | .. autosummary:: 53 | 54 | 55 | ~CalibrationLayer.__init__ 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | ~CalibrationLayer.build 64 | 65 | 66 | ~CalibrationLayer.call 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | ~CalibrationLayer.get_config 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | .. automethod:: CalibrationLayer.__init__ 89 | 90 | 91 | 92 | 93 | 94 | 95 | .. automethod:: CalibrationLayer.build 96 | 97 | .. automethod:: CalibrationLayer.call 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | .. automethod:: CalibrationLayer.get_config 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.layers.corruption_generation.CorruptionGenerationLayerTrain.rst: -------------------------------------------------------------------------------- 1 | CorruptionGenerationLayerTrain 2 | ====================================================================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features.layers.corruption_generation 5 | 6 | .. autoclass:: CorruptionGenerationLayerTrain 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. 
autosummary:: 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | .. rubric:: Methods 51 | 52 | .. autosummary:: 53 | 54 | 55 | ~CorruptionGenerationLayerTrain.__init__ 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | ~CorruptionGenerationLayerTrain.call 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | ~CorruptionGenerationLayerTrain.get_config 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | .. automethod:: CorruptionGenerationLayerTrain.__init__ 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | .. automethod:: CorruptionGenerationLayerTrain.call 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | .. automethod:: CorruptionGenerationLayerTrain.get_config 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.layers.encoding.EmbeddingLookupLayer.rst: -------------------------------------------------------------------------------- 1 | EmbeddingLookupLayer 2 | =============================================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features.layers.encoding 5 | 6 | .. autoclass:: EmbeddingLookupLayer 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | ~EmbeddingLookupLayer.max_ent_size 27 | 28 | 29 | ~EmbeddingLookupLayer.max_rel_size 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | .. rubric:: Methods 57 | 58 | .. autosummary:: 59 | 60 | 61 | ~EmbeddingLookupLayer.__init__ 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | ~EmbeddingLookupLayer.build 70 | 71 | 72 | ~EmbeddingLookupLayer.call 73 | 74 | 75 | 76 | ~EmbeddingLookupLayer.compute_output_shape 77 | 78 | 79 | 80 | 81 | 82 | 83 | ~EmbeddingLookupLayer.get_config 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | ~EmbeddingLookupLayer.partition_change_updates 94 | 95 | 96 | ~EmbeddingLookupLayer.set_ent_rel_initial_value 97 | 98 | 99 | ~EmbeddingLookupLayer.set_initializer 100 | 101 | 102 | ~EmbeddingLookupLayer.set_regularizer 103 | 104 | 105 | 106 | 107 | 108 | .. automethod:: EmbeddingLookupLayer.__init__ 109 | 110 | 111 | 112 | 113 | 114 | 115 | .. automethod:: EmbeddingLookupLayer.build 116 | 117 | .. automethod:: EmbeddingLookupLayer.call 118 | 119 | 120 | .. automethod:: EmbeddingLookupLayer.compute_output_shape 121 | 122 | 123 | 124 | 125 | 126 | .. automethod:: EmbeddingLookupLayer.get_config 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | .. automethod:: EmbeddingLookupLayer.partition_change_updates 136 | 137 | .. automethod:: EmbeddingLookupLayer.set_ent_rel_initial_value 138 | 139 | .. automethod:: EmbeddingLookupLayer.set_initializer 140 | 141 | .. automethod:: EmbeddingLookupLayer.set_regularizer 142 | 143 | 144 | 145 | 146 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.layers.scoring.AbstractScoringLayer.AbstractScoringLayer.rst: -------------------------------------------------------------------------------- 1 | AbstractScoringLayer 2 | =================================================================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features.layers.scoring.AbstractScoringLayer 5 | 6 | .. autoclass:: AbstractScoringLayer 7 | 8 | 9 | 10 | .. 
rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | .. rubric:: Methods 51 | 52 | .. autosummary:: 53 | 54 | 55 | ~AbstractScoringLayer.__init__ 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | ~AbstractScoringLayer.call 65 | 66 | 67 | 68 | ~AbstractScoringLayer.compute_output_shape 69 | 70 | 71 | 72 | 73 | 74 | 75 | ~AbstractScoringLayer.get_config 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | ~AbstractScoringLayer.get_ranks 85 | 86 | 87 | 88 | 89 | 90 | 91 | .. automethod:: AbstractScoringLayer.__init__ 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | .. automethod:: AbstractScoringLayer.call 100 | 101 | 102 | .. automethod:: AbstractScoringLayer.compute_output_shape 103 | 104 | 105 | 106 | 107 | 108 | .. automethod:: AbstractScoringLayer.get_config 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | .. automethod:: AbstractScoringLayer.get_ranks 117 | 118 | 119 | 120 | 121 | 122 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.layers.scoring.ComplEx.rst: -------------------------------------------------------------------------------- 1 | ComplEx 2 | ================================================= 3 | 4 | .. currentmodule:: ampligraph.latent_features.layers.scoring 5 | 6 | .. autoclass:: ComplEx 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~ComplEx.class_params 17 | 18 | 19 | 20 | 21 | 22 | 23 | ~ComplEx.external_params 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | ~ComplEx.name 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | .. rubric:: Methods 59 | 60 | .. autosummary:: 61 | 62 | 63 | ~ComplEx.__init__ 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | ~ComplEx.get_config 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | .. automethod:: ComplEx.__init__ 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | .. automethod:: ComplEx.get_config 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.layers.scoring.DistMult.rst: -------------------------------------------------------------------------------- 1 | DistMult 2 | ================================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features.layers.scoring 5 | 6 | .. autoclass:: DistMult 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~DistMult.class_params 17 | 18 | 19 | 20 | 21 | 22 | 23 | ~DistMult.external_params 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | ~DistMult.name 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | .. rubric:: Methods 59 | 60 | .. autosummary:: 61 | 62 | 63 | ~DistMult.__init__ 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | ~DistMult.get_config 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | .. automethod:: DistMult.__init__ 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | .. 
automethod:: DistMult.get_config 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.layers.scoring.HolE.rst: -------------------------------------------------------------------------------- 1 | HolE 2 | ============================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features.layers.scoring 5 | 6 | .. autoclass:: HolE 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~HolE.class_params 17 | 18 | 19 | 20 | 21 | 22 | 23 | ~HolE.external_params 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | ~HolE.name 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | .. rubric:: Methods 59 | 60 | .. autosummary:: 61 | 62 | 63 | ~HolE.__init__ 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | ~HolE.get_config 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | .. automethod:: HolE.__init__ 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | .. automethod:: HolE.get_config 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.layers.scoring.TransE.rst: -------------------------------------------------------------------------------- 1 | TransE 2 | ================================================ 3 | 4 | .. currentmodule:: ampligraph.latent_features.layers.scoring 5 | 6 | .. autoclass:: TransE 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | 16 | ~TransE.class_params 17 | 18 | 19 | 20 | 21 | 22 | 23 | ~TransE.external_params 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | ~TransE.name 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | .. rubric:: Methods 59 | 60 | .. autosummary:: 61 | 62 | 63 | ~TransE.__init__ 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | ~TransE.get_config 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | .. automethod:: TransE.__init__ 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | .. automethod:: TransE.get_config 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.latent_features.loss_functions.Loss.rst: -------------------------------------------------------------------------------- 1 | Loss 2 | ============================================== 3 | 4 | .. currentmodule:: ampligraph.latent_features.loss_functions 5 | 6 | .. autoclass:: Loss 7 | 8 | 9 | 10 | .. rubric:: Attributes 11 | 12 | .. autosummary:: 13 | 14 | 15 | ~Loss.class_params 16 | 17 | 18 | ~Loss.external_params 19 | 20 | 21 | ~Loss.metrics 22 | 23 | 24 | ~Loss.name 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | .. rubric:: Methods 33 | 34 | .. autosummary:: 35 | 36 | 37 | ~Loss.__init__ 38 | 39 | 40 | 41 | .. 
automethod:: Loss.__init__ 42 | 43 | 44 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.utils.create_tensorboard_visualizations.rst: -------------------------------------------------------------------------------- 1 | create_tensorboard_visualizations 2 | ================================================== 3 | 4 | .. currentmodule:: ampligraph.utils 5 | 6 | .. autofunction:: create_tensorboard_visualizations 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.utils.dataframe_to_triples.rst: -------------------------------------------------------------------------------- 1 | dataframe_to_triples 2 | ===================================== 3 | 4 | .. currentmodule:: ampligraph.utils 5 | 6 | .. autofunction:: dataframe_to_triples 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.utils.preprocess_focusE_weights.rst: -------------------------------------------------------------------------------- 1 | preprocess_focusE_weights 2 | ========================================== 3 | 4 | .. currentmodule:: ampligraph.utils 5 | 6 | .. autofunction:: preprocess_focusE_weights 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.utils.restore_model.rst: -------------------------------------------------------------------------------- 1 | restore_model 2 | ============================== 3 | 4 | .. currentmodule:: ampligraph.utils 5 | 6 | .. autofunction:: restore_model 7 | -------------------------------------------------------------------------------- /docs/generated/ampligraph.utils.save_model.rst: -------------------------------------------------------------------------------- 1 | save_model 2 | =========================== 3 | 4 | .. currentmodule:: ampligraph.utils 5 | 6 | .. 
autofunction:: save_model 7 | -------------------------------------------------------------------------------- /docs/img/GitHub-Mark-32px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/GitHub-Mark-32px.png -------------------------------------------------------------------------------- /docs/img/ampligraph_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/ampligraph_logo.png -------------------------------------------------------------------------------- /docs/img/ampligraph_logo_200px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/ampligraph_logo_200px.png -------------------------------------------------------------------------------- /docs/img/ampligraph_logo_transparent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/ampligraph_logo_transparent.png -------------------------------------------------------------------------------- /docs/img/ampligraph_logo_transparent_200px.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/ampligraph_logo_transparent_200px.png -------------------------------------------------------------------------------- /docs/img/ampligraph_logo_transparent_300.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/ampligraph_logo_transparent_300.png -------------------------------------------------------------------------------- /docs/img/ampligraph_logo_transparent_white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/ampligraph_logo_transparent_white.png -------------------------------------------------------------------------------- /docs/img/clustering/cluster_continents.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/clustering/cluster_continents.png -------------------------------------------------------------------------------- /docs/img/clustering/cluster_embeddings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/clustering/cluster_embeddings.png -------------------------------------------------------------------------------- /docs/img/clustering/clustered_embeddings_docstring.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/clustering/clustered_embeddings_docstring.png -------------------------------------------------------------------------------- 
/docs/img/embeddings_projector.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/embeddings_projector.png -------------------------------------------------------------------------------- /docs/img/kg_eg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/kg_eg.png -------------------------------------------------------------------------------- /docs/img/kg_lp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/kg_lp.png -------------------------------------------------------------------------------- /docs/img/kg_lp_step1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/kg_lp_step1.png -------------------------------------------------------------------------------- /docs/img/kg_lp_step2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/kg_lp_step2.png -------------------------------------------------------------------------------- /docs/img/slack_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/img/slack_logo.png -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | AmpliGraph 2 | ========== 3 | 4 | **Open source Python library that predicts links between concepts in a knowledge graph.** 5 | 6 | 7 | `Go to the GitHub repository `_ |ImageLink|_ 8 | 9 | .. |ImageLink| image:: /img/GitHub-Mark-32px.png 10 | .. _ImageLink: https://github.com/Accenture/AmpliGraph/ 11 | 12 | 13 | `Join the conversation on Slack `_ |ImageLink2|_ 14 | 15 | .. |ImageLink2| image:: /img/slack_logo.png 16 | .. _ImageLink2: https://join.slack.com/t/ampligraph/shared_invite/enQtNTc2NTI0MzUxMTM5LTRkODk0MjI2OWRlZjdjYmExY2Q3M2M3NGY0MGYyMmI4NWYyMWVhYTRjZDhkZjA1YTEyMzBkMGE4N2RmNTRiZDg> 17 | 18 | 19 | AmpliGraph is a suite of neural machine learning models for relational Learning, a branch of machine learning 20 | that deals with supervised learning on knowledge graphs. 21 | 22 | .. image:: img/kg_lp.png 23 | :align: center 24 | 25 | 26 | **Use AmpliGraph if you need to**: 27 | 28 | * Discover new knowledge from an existing knowledge graph. 29 | * Complete large knowledge graphs with missing statements. 30 | * Generate stand-alone knowledge graph embeddings. 31 | * Develop and evaluate a new relational model. 32 | 33 | 34 | 35 | AmpliGraph's machine learning models generate **knowledge graph embeddings**, vector representations of concepts in a metric space: 36 | 37 | .. image:: img/kg_lp_step1.png 38 | 39 | It then combines embeddings with model-specific scoring functions to predict unseen and novel links: 40 | 41 | .. 
image:: img/kg_lp_step2.png 42 | 43 | 44 | Key Features 45 | ------------ 46 | 47 | * **Intuitive APIs**: AmpliGraph APIs are designed to reduce the code amount required to learn models that predict links 48 | in knowledge graphs. The new version AmpliGraph 2 APIs are in Keras style, making the user experience even smoother. 49 | * **GPU-Ready**: AmpliGraph is built on top of TensorFlow 2, and it is designed to run seamlessly on CPU and GPU devices - to speed-up training. 50 | * **Extensible**: Roll your own knowledge graph embeddings model by extending AmpliGraph base estimators. 51 | 52 | .. The library includes Relational Learning models, i.e. supervised learning models designed to predict 53 | .. links in knowledge graphs. 54 | 55 | .. The tool also includes the required evaluation protocol, metrics, knowledge graph preprocessing, 56 | .. and negative statements generator strategies. 57 | 58 | 59 | Modules 60 | ------- 61 | 62 | AmpliGraph includes the following submodules: 63 | 64 | * **Datasets**: helper functions to load datasets (knowledge graphs). 65 | * **Models**: knowledge graph embedding models. AmpliGraph offers **TransE**, **DistMult**, **ComplEx**, **HolE**, **RotatE** (More to come!) 66 | * **Evaluation**: metrics and evaluation protocols to assess the predictive power of the models. 67 | * **Discovery**: High-level convenience APIs for knowledge discovery (discover new facts, cluster entities, predict near duplicates). 68 | * **Compat**: submodule that extends the compatibility of AmpliGraph APIs to those of AmpliGraph 1.x for the user already familiar with them. 69 | 70 | 71 | How to Cite 72 | ----------- 73 | 74 | If you like AmpliGraph and you use it in your project, why not starring the `project on GitHub `_! 75 | 76 | |GitHub stars| 77 | 78 | .. |GitHub stars| image:: https://img.shields.io/github/stars/Accenture/AmpliGraph.svg?style=social&label=Star&maxAge=3600 79 | :target: https://github.com/Accenture/AmpliGraph/stargazers/ 80 | 81 | If you instead use AmpliGraph in an academic publication, cite as: 82 | 83 | .. code-block:: bibtex 84 | 85 | @misc{ampligraph, 86 | author= {Luca Costabello and 87 | Alberto Bernardi and 88 | Adrianna Janik and 89 | Aldan Creo and 90 | Sumit Pai and 91 | Chan Le Van and 92 | Rory McGrath and 93 | Nicholas McCarthy and 94 | Pedro Tabacof}, 95 | title = {{AmpliGraph: a Library for Representation Learning on Knowledge Graphs}}, 96 | month = mar, 97 | year = 2019, 98 | doi = {10.5281/zenodo.2595043}, 99 | url = {https://doi.org/10.5281/zenodo.2595043 } 100 | } 101 | 102 | .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.2595043.svg 103 | :target: https://doi.org/10.5281/zenodo.2595043 104 | 105 | 106 | .. toctree:: 107 | :maxdepth: 1 108 | :caption: Contents: 109 | 110 | install 111 | background 112 | api 113 | dev 114 | examples 115 | tutorials 116 | experiments 117 | biblio 118 | changelog 119 | contacts 120 | 121 | -------------------------------------------------------------------------------- /docs/install.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | ## Prerequisites 4 | 5 | * Linux, macOS, Windows 6 | * Python ≥ 3.8 7 | 8 | ## Provision a Virtual Environment 9 | 10 | To provision a virtual environment for installing AmpliGraph, any option can work; here we will give provide the 11 | instruction for using `venv` and `Conda`. 12 | 13 | ### venv 14 | 15 | The first step is to create and activate the virtual environment. 
16 | 17 | ``` 18 | python3.8 -m venv PATH/TO/NEW/VIRTUAL_ENVIRONMENT 19 | source PATH/TO/NEW/VIRTUAL_ENVIRONMENT/bin/activate 20 | ``` 21 | 22 | Once this is done, we can proceed with the installation of TensorFlow 2: 23 | 24 | ``` 25 | pip install "tensorflow==2.9.0" 26 | ``` 27 | 28 | If you are installing Tensorflow on MacOS, instead of the following please use: 29 | 30 | ``` 31 | pip install "tensorflow-macos==2.9.0" 32 | ``` 33 | 34 | **IMPORTANT**: the installation of TensorFlow can be tricky on Mac OS with the Apple silicon chip. Though `venv` can 35 | provide a smooth experience, we invite you to refer to the [dedicated section](#install-tensorflow-2-for-mac-os-m1-chip) 36 | down below and consider using `conda` if some issues persist in alignment with the 37 | [Tensorflow Plugin page on Apple developer site](https://developer.apple.com/metal/tensorflow-plugin/). 38 | 39 | 40 | ### Conda 41 | 42 | The first step is to create and activate the virtual environment. 43 | 44 | ``` 45 | conda create --name ampligraph python=3.8 46 | source activate ampligraph 47 | ``` 48 | 49 | Once this is done, we can proceed with the installation of TensorFlow 2, which can be done through `pip` or `conda`. 50 | 51 | ``` 52 | pip install "tensorflow==2.9.0" 53 | 54 | or 55 | 56 | conda install "tensorflow==2.9.0" 57 | ``` 58 | 59 | ### Install TensorFlow 2 for Mac OS M1 chip 60 | 61 | When installing TensorFlow 2 for Mac OS with Apple silicon chip we recommend to use a conda environment. 62 | 63 | ``` 64 | conda create --name ampligraph python=3.8 65 | source activate ampligraph 66 | ``` 67 | 68 | After having created and activated the virtual environment, run the following to install Tensorflow. 69 | 70 | ``` 71 | conda install -c apple tensorflow-deps 72 | pip install --user tensorflow-macos==2.9.0 73 | pip install --user tensorflow-metal==0.6 74 | ``` 75 | 76 | In case of problems with the installation or for further details, refer to 77 | [Tensorflow Plugin page](https://developer.apple.com/metal/tensorflow-plugin/) on the official Apple developer website. 78 | 79 | ## Install AmpliGraph 80 | 81 | Once the installation of Tensorflow is complete, we can proceed with the installation of AmpliGraph. 82 | 83 | To install the latest stable release from pip: 84 | 85 | ``` 86 | pip install ampligraph 87 | ``` 88 | 89 | To sanity check the installation, run the following: 90 | 91 | ```python 92 | >>> import ampligraph 93 | >>> ampligraph.__version__ 94 | '2.1.0' 95 | ``` 96 | 97 | If instead you want the most recent development version, you can clone the repository from 98 | [GitHub](https://github.com/Accenture/AmpliGraph.git), install AmpliGraph from source and checkout the `develop` 99 | branch. In this way, your local working copy will be on the latest commit on the `develop` branch. 100 | 101 | ``` 102 | git clone https://github.com/Accenture/AmpliGraph.git 103 | cd AmpliGraph 104 | git checkout develop 105 | pip install -e . 106 | ``` 107 | Notice that the code snippet above installs the library in editable mode (`-e`). 
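In editable mode the `ampligraph` package on your Python path resolves to the cloned sources, so local changes take effect without reinstalling. As a quick, hedged check (the path below is only a placeholder for wherever you cloned the repository):

```python
>>> import ampligraph
>>> ampligraph.__file__  # with an editable install this resolves inside your local clone
'/path/to/AmpliGraph/ampligraph/__init__.py'
```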
108 | 109 | To sanity check the installation run the following: 110 | 111 | ```python 112 | >>> import ampligraph 113 | >>> ampligraph.__version__ 114 | '2.1-dev' 115 | ``` 116 | 117 | 118 | ## Support for TensorFlow 1.x 119 | For TensorFlow 1.x-compatible AmpliGraph, use [AmpliGraph 1.x](https://docs.ampligraph.org/en/1.4.0/), whose API are 120 | available cloning the [repository](https://github.com/Accenture/AmpliGraph.git) from GitHub and checking out the 121 | *ampligraph1/develop* branch. However, notice that the support for this version has been discontinued. 122 | 123 | Finally, if you want to use AmpliGraph 1.x APIs on top of Tensorflow 2, refer to the backward compatibility APIs 124 | provided on Ampligraph [compat](https://docs.ampligraph.org/en/2.0.0/ampligraph.latent_features.html#module-ampligraph.compat) 125 | module. -------------------------------------------------------------------------------- /docs/requirements_readthedocs.txt: -------------------------------------------------------------------------------- 1 | sphinx==5.0.2 2 | sphinxcontrib-bibtex 3 | tensorflow==2.9.0 -------------------------------------------------------------------------------- /docs/tutorials.md: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | 4 | For a comprehensive theoretical and hands-on overview of KGE models and hands-on AmpliGraph, check out our tutorials: 5 | [COLING-22 KGE4NLP Tutorial (Slides + Colab Notebook)](https://kge4nlp-coling22.github.io/) and [ECAI-20 Tutorial (Slides + Recording + Colab Notebook)](https://kge-tutorial-ecai2020.github.io/). 6 | 7 | The following Jupyter notebooks will guide you through the most important features of AmpliGraph: 8 | 9 | + [AmpliGraph basics](tutorials/AmpliGraphBasicsTutorial.md): training, saving and restoring a model, evaluating a model, 10 | discover new links, visualize embeddings. 11 | [[Jupyter notebook](https://github.com/Accenture/AmpliGraph/blob/master/docs/tutorials/AmpliGraphBasicsTutorial.ipynb)] 12 | [[Colab notebook](https://colab.research.google.com/drive/1rylqOnm992AdP9z1aW8metlKpPuBTRGD)] 13 | + [Link-based clustering and classification](tutorials/ClusteringAndClassificationWithEmbeddings.md): how to use the 14 | knowledge embeddings generated by a graph of international football matches in clustering and classification tasks. 15 | [[Jupyter notebook](https://github.com/Accenture/AmpliGraph/blob/master/docs/tutorials/ClusteringAndClassificationWithEmbeddings.ipynb)] 16 | [[Colab notebook](https://colab.research.google.com/drive/1QUphvcFvNsWyRZM_J5ahsLhEHJY4SjyS)] 17 | 18 | Additional examples and code snippets are [available here](examples.md). 
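For a flavour of what the basics tutorial covers, the sketch below trains a small model, then saves and restores it. This is a minimal illustration rather than tutorial code: the hyperparameter values are arbitrary small choices, the output name `'complex_fb15k_237'` is purely illustrative, and it assumes the `save_model`/`restore_model` helpers from `ampligraph.utils` take a model and a target name, as documented.

```python
# Minimal sketch: train, save and restore a knowledge graph embedding model.
from ampligraph.datasets import load_fb15k_237
from ampligraph.latent_features import ScoringBasedEmbeddingModel
from ampligraph.utils import save_model, restore_model

X = load_fb15k_237()

# Small ComplEx model: eta is the number of negatives per positive, k the embedding size.
model = ScoringBasedEmbeddingModel(eta=1, k=100, scoring_type='ComplEx')
model.compile(optimizer='adam', loss='multiclass_nll')
model.fit(X['train'], batch_size=10000, epochs=5, verbose=False)

save_model(model, 'complex_fb15k_237')              # illustrative output name
restored_model = restore_model('complex_fb15k_237')
```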
19 | 20 | 21 | If you reuse materials presented in the tutorials, cite as: 22 | 23 | 24 | ``` 25 | @misc{kge4nlp_tutorial_coling22, 26 | title = {Knowledge Graph Embeddings for NLP: From Theory to Practice}, 27 | url = {https://kge4nlp-coling22.github.io/}, 28 | author= {Luca Costabello and 29 | Adrianna Janik and 30 | Eda Bayram and 31 | Sumit Pai}, 32 | date = {2022-16-10}, 33 | note = {COLING 2022 Tutorials} 34 | } 35 | ``` 36 | 37 | 38 | ``` 39 | @misc{kge_tutorial_ecai20, 40 | title = {Knowledge Graph Embeddings Tutorial: From Theory to Practice}, 41 | url = {http://kge-tutorial-ecai-2020.github.io/}, 42 | author= {Luca Costabello and 43 | Sumit Pai and 44 | Adrianna Janik and 45 | Nick McCarthy}, 46 | shorttitle = {Knowledge Graph Embeddings Tutorial}, 47 | date = {2020-09-04}, 48 | note = {ECAI 2020 Tutorials} 49 | } 50 | ``` 51 | -------------------------------------------------------------------------------- /docs/tutorials/img/FootballGraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/tutorials/img/FootballGraph.png -------------------------------------------------------------------------------- /docs/tutorials/img/GoT_tensoboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/tutorials/img/GoT_tensoboard.png -------------------------------------------------------------------------------- /docs/tutorials/img/got-graphql-schema.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/tutorials/img/got-graphql-schema.jpg -------------------------------------------------------------------------------- /docs/tutorials/img/output_53_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/tutorials/img/output_53_0.png -------------------------------------------------------------------------------- /docs/tutorials/img/output_55_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/docs/tutorials/img/output_55_0.png -------------------------------------------------------------------------------- /notebooks/AmpliGraph-Tutorials/Discovery 1 - Discover Facts.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "f920e9ce", 6 | "metadata": {}, 7 | "source": [ 8 | "# Discover Facts\n", 9 | "The following example shows how to predict new concept that are likely to be true via the __ampligraph.discovery.discover_facts__ API." 
10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 21, 15 | "id": "f7d979c1", 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "import sys\n", 20 | "sys.path.append('../..')\n", 21 | "import os\n", 22 | "os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n", 23 | "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'\n", 24 | "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n", 25 | "import tensorflow as tf\n", 26 | "tf.get_logger().setLevel('ERROR')" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 25, 32 | "id": "dafa4c11", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "import ampligraph\n", 37 | "# Benchmark datasets are under ampligraph.datasets module\n", 38 | "from ampligraph.datasets import load_fb15k_237\n", 39 | "# load fb15k-237 dataset\n", 40 | "dataset = load_fb15k_237()" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 26, 46 | "id": "87ed1243", 47 | "metadata": {}, 48 | "outputs": [ 49 | { 50 | "data": { 51 | "text/plain": [ 52 | "" 53 | ] 54 | }, 55 | "execution_count": 26, 56 | "metadata": {}, 57 | "output_type": "execute_result" 58 | } 59 | ], 60 | "source": [ 61 | "from ampligraph.latent_features import ScoringBasedEmbeddingModel\n", 62 | "\n", 63 | "# Create, compile and fit the model\n", 64 | "model = ScoringBasedEmbeddingModel(eta=1, \n", 65 | " k=100,\n", 66 | " scoring_type='ComplEx')\n", 67 | "\n", 68 | "\n", 69 | "\n", 70 | "model.compile(optimizer='adam', \n", 71 | " loss='multiclass_nll')\n", 72 | "\n", 73 | "model.fit(dataset['train'],\n", 74 | " batch_size=10000,\n", 75 | " epochs=50,\n", 76 | " verbose=False)" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": 27, 82 | "id": "a3cc5988", 83 | "metadata": {}, 84 | "outputs": [ 85 | { 86 | "data": { 87 | "text/plain": [ 88 | "(array([['/m/0342h', '/location/country/form_of_government', '/m/09nqf'],\n", 89 | " ['/m/0cnk2q', '/location/country/form_of_government', '/m/01q03']],\n", 90 | " dtype=object),\n", 91 | " array([ 5.5, 54.5]))" 92 | ] 93 | }, 94 | "execution_count": 27, 95 | "metadata": {}, 96 | "output_type": "execute_result" 97 | } 98 | ], 99 | "source": [ 100 | "from ampligraph.discovery import discover_facts\n", 101 | "\n", 102 | "discover_facts(dataset['train'][:100], \n", 103 | " model, \n", 104 | " top_n=100, \n", 105 | " strategy='entity_frequency', \n", 106 | " max_candidates=100,\n", 107 | " target_rel='/location/country/form_of_government', \n", 108 | " seed=0)\n" 109 | ] 110 | } 111 | ], 112 | "metadata": { 113 | "kernelspec": { 114 | "display_name": "Python 3.10.6 ('base')", 115 | "language": "python", 116 | "name": "python3" 117 | }, 118 | "language_info": { 119 | "codemirror_mode": { 120 | "name": "ipython", 121 | "version": 3 122 | }, 123 | "file_extension": ".py", 124 | "mimetype": "text/x-python", 125 | "name": "python", 126 | "nbconvert_exporter": "python", 127 | "pygments_lexer": "ipython3", 128 | "version": "3.10.6" 129 | }, 130 | "vscode": { 131 | "interpreter": { 132 | "hash": "2e69f3670cdad0193847aaa0b77be56c05c951fcbdd384ff882dde0464f4de76" 133 | } 134 | } 135 | }, 136 | "nbformat": 4, 137 | "nbformat_minor": 5 138 | } 139 | -------------------------------------------------------------------------------- /notebooks/AmpliGraph-Tutorials/Discovery 4 - Query TopN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "04975d41", 6 | "metadata": {}, 7 | "source": [ 8 | "# Query Top-N\n", 9 | "The 
following example shows how to use the ampligraph.discovery.query_topn API to extract the top N candidate statements that are most likely to be true." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "id": "f7d979c1", 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "import sys\n", 20 | "import os\n", 21 | "os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n", 22 | "os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'\n", 23 | "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n", 24 | "import tensorflow as tf\n", 25 | "tf.get_logger().setLevel('ERROR')" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 2, 31 | "id": "dafa4c11", 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "import requests\n", 36 | "from ampligraph.datasets import load_from_csv\n", 37 | "# Game of Thrones relations dataset\n", 38 | "url = 'https://ampligraph.s3-eu-west-1.amazonaws.com/datasets/GoT.csv'\n", 39 | "open('GoT.csv', 'wb').write(requests.get(url).content)\n", 40 | "X = load_from_csv('.', 'GoT.csv', sep=',')" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 3, 46 | "id": "87ed1243", 47 | "metadata": {}, 48 | "outputs": [ 49 | { 50 | "name": "stdout", 51 | "output_type": "stream", 52 | "text": [ 53 | "Metal device set to: Apple M1 Pro\n", 54 | "\n", 55 | "systemMemory: 32.00 GB\n", 56 | "maxCacheSize: 10.67 GB\n", 57 | "\n" 58 | ] 59 | }, 60 | { 61 | "data": { 62 | "text/plain": [ 63 | "" 64 | ] 65 | }, 66 | "execution_count": 3, 67 | "metadata": {}, 68 | "output_type": "execute_result" 69 | } 70 | ], 71 | "source": [ 72 | "from ampligraph.latent_features import ScoringBasedEmbeddingModel\n", 73 | "\n", 74 | "# Create, compile and fit the model\n", 75 | "model = ScoringBasedEmbeddingModel(eta=5, \n", 76 | "                                   k=150,\n", 77 | "                                   scoring_type='DistMult')\n", 78 | "\n", 79 | "\n", 80 | "\n", 81 | "model.compile(optimizer='adam', loss='pairwise')\n", 82 | "model.fit(X,\n", 83 | "          batch_size=100,\n", 84 | "          epochs=20, \n", 85 | "          verbose=False)" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": 4, 91 | "id": "f531fdd4", 92 | "metadata": {}, 93 | "outputs": [ 94 | { 95 | "data": { 96 | "text/plain": [ 97 | "(array([['Eddard Stark', 'ALLIED_WITH', 'House Stark of Winterfell'],\n", 98 | "        ['Eddard Stark', 'ALLIED_WITH', 'House Westerling of the Crag'],\n", 99 | "        ['Eddard Stark', 'ALLIED_WITH', \"House Baratheon of Storm's End\"],\n", 100 | "        ['Eddard Stark', 'ALLIED_WITH', 'House Frey of the Crossing'],\n", 101 | "        ['Eddard Stark', 'ALLIED_WITH', 'House Karstark of Karhold']],\n", 102 | "       dtype='=1.14.3', 23 | 'pytest>=3.5.1', 24 | 'scikit-learn>=0.19.1', 25 | 'tqdm>=4.23.4', 26 | 'pandas>=0.23.1', 27 | 'sphinx==5.0.2', 28 | 'myst-parser==0.18.0', 29 | 'docutils<0.18', 30 | 'sphinx_rtd_theme==1.0.0', 31 | 'sphinxcontrib-bibtex==2.4.2', 32 | 'beautifultable>=0.7.0', 33 | 'pyyaml>=3.13', 34 | 'rdflib>=4.2.2', 35 | 'scipy==1.10.0', 36 | 'networkx>=2.3', 37 | 'flake8>=3.7.7', 38 | 'setuptools>=36', 39 | 'matplotlib>=3.7', 40 | 'docopt==0.6.2', 41 | 'schema==0.7.5' 42 | ], 43 | setup_requires=['pytest-runner'], 44 | tests_require=['pytest', 45 | 'pytest_mock>=3.10.0', 46 | 'mock>=5.0.1']) 47 | 48 | if __name__ == '__main__': 49 | setup(**setup_params) 50 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved.
2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | -------------------------------------------------------------------------------- /tests/ampligraph/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | -------------------------------------------------------------------------------- /tests/ampligraph/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | -------------------------------------------------------------------------------- /tests/ampligraph/datasets/test_data_indexer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from ampligraph.datasets.data_indexer import DataIndexer 9 | import numpy as np 10 | import pytest 11 | 12 | 13 | def data_generator(data): 14 | for elem in data: 15 | yield np.array(elem).reshape((1, 3)) 16 | 17 | np_array = np.array([['a', 'b', 'c'], ['c', 'b', 'd'], ['d', 'e', 'f'], ['f', 'e', 'c'], ['a', 'e', 'd']]) 18 | generator = lambda: data_generator(np_array) 19 | 20 | 21 | @pytest.fixture(params=[np_array, pytest.param(generator, marks=pytest.mark.skip("Can't use generators as parameters in fixtures."))]) 22 | def data_type(request): 23 | '''Returns an in-memory DataIndexer instance with example data.''' 24 | return request.param 25 | 26 | 27 | @pytest.fixture(params=['in_memory', 'sqlite', 'shelves']) 28 | def data_indexer(request, data_type): 29 | '''Returns an in-memory DataIndexer instance with example data.''' 30 | data_indexer = DataIndexer(data_type, backend=request.param) 31 | yield data_indexer 32 | data_indexer.clean() 33 | 34 | def test_get_max_ents_index(data_indexer): 35 | max_ents = data_indexer.backend._get_max_ents_index() 36 | assert max_ents == 3, "Max index should be 3 for 4 unique entities, instead got {}.".format(max_ents) 37 | 38 | 39 | def test_get_max_rels_index(data_indexer): 40 | print(data_indexer) 41 | max_rels = data_indexer.backend._get_max_rels_index() 42 | assert max_rels == 1, "Max index should be 1 for 2 unique relations, instead got {}.".format(max_rels) 43 | 44 | 45 | def test_get_entities_in_batches(data_indexer): 46 | for batch in data_indexer.get_entities_in_batches(batch_size=3): 47 | assert len(batch) == 3 48 | break 49 | for batch in data_indexer.get_entities_in_batches(): 50 | assert len(batch) == data_indexer.get_entities_count() 51 | 52 | 53 | def test_get_indexes(data_indexer): 54 | tmp = np.array([['a', 'b', 'c']]) 55 | indexes = data_indexer.get_indexes(tmp) 56 | assert np.shape(indexes) == np.shape(tmp), "returned indexes are not the same shape" 57 | 
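    # Illustration (added note, not part of the original assertions): with the module-level
    # example data, the indexer maps the four unique entities {a, c, d, f} to integer ids 0-3
    # and the two relations {b, e} to ids 0-1, so ['a', 'b', 'c'] comes back as a (1, 3) array
    # of small non-negative integers, e.g. [[0, 0, 1]]. The exact id assignment may vary
    # between backends, which is why only the shape and the integer dtype are asserted here.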
assert np.issubdtype(indexes.dtype, np.integer), "indexes are not integers" 58 | 59 | @pytest.mark.skip(reason="update not implemented for sqlite backend") 60 | def test_update_mappings(data_indexer): 61 |     new_data = np.array([['g', 'i', 'h'], ['g', 'i', 'a']]) 62 |     data_indexer.update_mappings(new_data) 63 |     assert data_indexer.backend.ents_length == 6, "entities size should be 6, two new added" 64 |     assert data_indexer.backend.rels_length == 3, "relations size should be 3, one new added" 65 | 66 | 67 | def test_get_starting_index_ents(data_indexer): 68 |     ind = data_indexer.backend._get_starting_index_ents() 69 |     assert ind == data_indexer.backend.ents_length, "index doesn't match entities length" 70 | 71 | 72 | def test_get_starting_index_rels(data_indexer): 73 |     ind = data_indexer.backend._get_starting_index_rels() 74 |     assert ind == data_indexer.backend.rels_length, "index doesn't match relations length" 75 | -------------------------------------------------------------------------------- /tests/ampligraph/datasets/test_source_identifier.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | import numpy as np 9 | import pytest 10 | import pandas as pd 11 | from ampligraph.datasets import load_csv, chunks 12 | from ampligraph.datasets.source_identifier import DataSourceIdentifier 13 | import os 14 | 15 | SCOPE = "function" 16 | 17 | def create_data(): 18 |     np_array = np.array([['a','b','c'],['c','b','d'],['d','e','f'],['f','e','c'],['a','e','d'], ['a','b','d']]) 19 |     df = pd.DataFrame(np_array) 20 |     df.to_csv('test.csv', index=False, header=False, sep='\t') 21 |     df.to_csv('test.txt', index=False, header=False, sep='\t') 22 |     df.to_csv('test.gz', index=False, header=False, sep='\t', compression='gzip') 23 |     return np_array, len(df) 24 | 25 | def clean_data(): 26 |     os.remove('test.csv') 27 |     os.remove('test.txt') 28 |     os.remove('test.gz') 29 | 30 | @pytest.fixture(params=['np_array', 'test.csv', 'test.gz', 'test.txt'], scope=SCOPE) 31 | def data_source(request): 32 |     np_array, _ = create_data() 33 |     yield request.param if request.param != 'np_array' else np_array 34 |     clean_data() 35 | 36 | @pytest.fixture(scope=SCOPE) 37 | def source_identifier(request, data_source): 38 |     '''Returns a SourceIdentifier instance.''' 39 |     src_identifier = DataSourceIdentifier(data_source) 40 |     yield src_identifier, data_source 41 | 42 | 43 | def test_load_csv(): 44 |     _, length = create_data() 45 |     data = load_csv('test.csv') 46 |     assert len(data) == length, "Loaded data differs from what it should be, got {}, expected {}.".format(len(data), length) 47 |     clean_data() 48 | 49 | def test_data_source_identifier(source_identifier): 50 |     src_identifier, data_src = source_identifier 51 |     if isinstance(data_src, str): 52 |         src = data_src.split('.')[-1] 53 |     elif isinstance(data_src, np.ndarray): 54 |         src = "iter" 55 |     else: 56 |         assert False, "Provided data source is not supported." 57 |     assert src == src_identifier.get_src(), "Identified source does not match the one provided."
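    # Usage sketch inferred from the assertions above (added illustration, not part of the
    # original module; triples_array stands for any in-memory numpy array of triples):
    #
    #     DataSourceIdentifier('test.csv').get_src()    # -> 'csv'
    #     DataSourceIdentifier('test.gz').get_src()     # -> 'gz'
    #     DataSourceIdentifier(triples_array).get_src() # -> 'iter'
    #
    # i.e. file sources are identified by their extension, while in-memory numpy arrays are
    # reported as 'iter' and handled by the loader exercised in the next test.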
58 | 59 | 60 | def test_data_source_identifier_fetch_loader(source_identifier): 61 | src_identifier, data_src = source_identifier 62 | loader = src_identifier.fetch_loader() 63 | data = loader(data_src) 64 | assert isinstance(data, np.ndarray) or isinstance(data, pd.DataFrame) or isinstance(data, type(chunks([]))), "Returned data should be either in numpy array or pandas data frame, instead got {}".format(type(data)) 65 | 66 | -------------------------------------------------------------------------------- /tests/ampligraph/datasets/test_sqlite_adapter.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from ampligraph.datasets.sqlite_adapter import SQLiteAdapter 9 | from ampligraph.datasets.source_identifier import DataSourceIdentifier 10 | import pytest 11 | import numpy as np 12 | import pandas as pd 13 | 14 | SCOPE = "function" 15 | np_array = np.array([['a','b','c'],['c','b','d'],['d','e','f'],['f','e','c'],['a','e','d'], ['a','b','d']]) 16 | indexed_arr = np.array([[0,0,1],[1,0,2],[2,1,3],[3,1,1],[0,1,2], [0,0,2]]) 17 | df = pd.DataFrame(np_array) 18 | df.to_csv('test.csv', index=False, header=False, sep='\t') 19 | 20 | 21 | @pytest.fixture(scope=SCOPE) 22 | def sqlite_adapter(request): 23 | '''Returns a SQLiteAdapter instance.''' 24 | src = DataSourceIdentifier('test.csv') 25 | backend = SQLiteAdapter('database.db', identifier=src) 26 | backend._load('test.csv') 27 | yield backend 28 | backend._clean() 29 | 30 | 31 | def test_sqlite_adapter_get_batch_generator(sqlite_adapter): 32 | batch_gen = sqlite_adapter._get_batch_generator() 33 | batch = next(batch_gen) 34 | assert np.shape(batch) == (1,3), "batch size is wrong, got {}".format(batch) 35 | 36 | 37 | def test_sqlite_adapter_get_data_size(sqlite_adapter): 38 | size = sqlite_adapter.get_data_size() 39 | assert size == 6, "Size is not equal to 6" 40 | -------------------------------------------------------------------------------- /tests/ampligraph/datasets/test_triples.nt: -------------------------------------------------------------------------------- 1 | . 2 | . 3 | . 4 | -------------------------------------------------------------------------------- /tests/ampligraph/discovery/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # -------------------------------------------------------------------------------- /tests/ampligraph/evaluation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/convkb.tmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Accenture/AmpliGraph/7d8567118a9a569730aee93ce3500dcbd19bff23/tests/ampligraph/latent_features/convkb.tmp -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/calibrate/test_calibrate.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features.layers.calibration import CalibrationLayer 10 | 11 | import pytest 12 | import tensorflow as tf 13 | import numpy as np 14 | 15 | 16 | def test_init(): 17 | calib_layer = CalibrationLayer(pos_size=5, positive_base_rate=0.5) 18 | 19 | calib_layer.build((10,10)) 20 | assert calib_layer.pos_size == calib_layer.neg_size, \ 21 | 'CalibrationLayer: pos_size and neg_size must be same if calibrating with corruptions' 22 | assert calib_layer.calib_w.numpy() == 0, 'CalibrationLayer: w not initialized correctly' 23 | assert calib_layer.calib_b.numpy() == 0, 'CalibrationLayer: b not initialized correctly' 24 | 25 | calib_layer = CalibrationLayer(pos_size=5, neg_size=5, calib_w=10, calib_b=10) 26 | 27 | calib_layer.build((10,10)) 28 | assert calib_layer.calib_w.numpy() == 10, 'CalibrationLayer (passed w): w not initialized correctly' 29 | assert calib_layer.calib_b.numpy() == 10, 'CalibrationLayer (passed b): b not initialized correctly' 30 | assert calib_layer.positive_base_rate == 0.5, 'Incorrect positive base rate' 31 | 32 | with pytest.raises(ValueError, match="Positive_base_rate must be a value between 0 and 1."): 33 | calib_layer = CalibrationLayer(pos_size=5, positive_base_rate=1.1) 34 | 35 | with pytest.raises(AssertionError, match="Positive size must be > 0."): 36 | calib_layer = CalibrationLayer(pos_size=0) 37 | 38 | 39 | def test_call(): 40 | calib_layer = CalibrationLayer(pos_size=5, neg_size=5, calib_w=10, calib_b=10) 41 | 42 | calib_layer.build((10,10)) 43 | 44 | out = calib_layer.call(scores_pos=tf.constant([-2,1,-1], dtype=tf.float32), 45 | scores_neg=tf.constant([10,11,12], dtype=tf.float32), training=0) 46 | assert (np.around(out.numpy(), 2) == np.array([1, 0, 0.5], dtype=np.float32)).all(), \ 47 | 'CalibrationLayer: calibration scores don\'t match' 48 | 49 | 50 | out1 = calib_layer.call(scores_pos=tf.constant([-2,1,-1], dtype=tf.float32), 51 | scores_neg=tf.constant([10,11,12], dtype=tf.float32), training=1) 52 | assert np.around(out1.numpy(), 2) == np.array([11.78], dtype=np.float32), \ 53 | 'CalibrationLayer: calibration scores 
don\'t match' 54 | 55 | calib_layer2 = CalibrationLayer(pos_size=5, positive_base_rate=0.5, calib_w=10, calib_b=10) 56 | calib_layer2.build((10,10)) 57 | out2 = calib_layer2.call(scores_pos=tf.constant([-2,1,-1], dtype=tf.float32), 58 | scores_neg=tf.constant([10,11,12], dtype=tf.float32), training=1) 59 | 60 | assert np.around(out1.numpy(), 2) == np.around(out2.numpy(), 2), \ 61 | 'CalibrationLayer: calibration scores don\'t match' 62 | -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/corruption_generation/test_CorruptionGenerationLayerTrain.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features.layers.corruption_generation import CorruptionGenerationLayerTrain 10 | 11 | import pytest 12 | import tensorflow as tf 13 | import numpy as np 14 | 15 | 16 | def test_call(): 17 | tf.random.set_seed(0) 18 | train_layer = CorruptionGenerationLayerTrain() 19 | out = train_layer.call(tf.constant([[1,0,5], [3, 0, 7], [1, 1, 9]]), 1000, 2) 20 | assert (out.numpy() == np.array([[760, 0, 5], [861, 0, 7], [1, 1, 39], [567, 0, 5], [3, 0, 147], [28, 1, 9]])).all(), \ 21 | "CorruptionGenerationLayerTrain: Corruptions not generated correctly" -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/scoring/test_AbstractScoringLayer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features.layers.scoring import DistMult 10 | 11 | import pytest 12 | import tensorflow as tf 13 | import numpy as np 14 | 15 | def test_compute_score(): 16 | model = DistMult(k=3) 17 | 18 | triples = [np.array([[1, 1, 1], [2, 2, 2]]).astype(np.float32), 19 | np.array([[10, 10, 10], [100, 100, 100]]).astype(np.float32), 20 | np.array([[3, 3, 3], [4, 4, 4]]).astype(np.float32)] 21 | 22 | mapping_dict = tf.lookup.experimental.DenseHashTable(tf.int32, tf.int32, -1, -1, -2) 23 | 24 | ent_matrix = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]).astype(np.float32) 25 | scores = np.around(model._compute_scores(triples).numpy(), 2) 26 | 27 | sub_corr_score = model._get_subject_corruption_scores(triples, ent_matrix) 28 | 29 | obj_corr_score = model._get_object_corruption_scores(triples, ent_matrix) 30 | 31 | ranks = model.get_ranks(triples, ent_matrix, 0, 4, tf.ragged.constant([], dtype=tf.int32), mapping_dict) 32 | assert (ranks.numpy() == np.array([[4, 3], [2, 1]], dtype=np.int32)).all(), 'Unfiltered Ranks not correct' 33 | 34 | ranks = model.get_ranks(triples, ent_matrix, 0, 4, tf.ragged.constant([[[0], [1]], [[2], [3]]], dtype=tf.int32), 35 | mapping_dict) 36 | 37 | assert (ranks.numpy() == np.array([[3, 2], [1, 0]], dtype=np.int32)).all(), '(s,o) Filtered Ranks not correct' 38 | 39 | ranks = model.get_ranks(triples, ent_matrix, 0, 4, tf.ragged.constant([[[0], [1]], [[2], [3]]], dtype=tf.int32), 40 | mapping_dict, corrupt_side='s') 41 | assert (ranks.numpy() == np.array([[3, 2]], dtype=np.int32)).all(), '(s) Filtered Ranks not correct' 42 | 43 | ranks = model.get_ranks(triples, ent_matrix, 0, 4, tf.ragged.constant([[[2], [3]]], dtype=tf.int32), 44 | mapping_dict, corrupt_side='o') 45 | assert (ranks.numpy() == np.array([[1, 0]], dtype=np.int32)).all(), '(o) Filtered Ranks not correct' 46 | 47 | ranks = model.get_ranks(triples, ent_matrix, 0, 4, tf.ragged.constant([], dtype=tf.int32), 48 | mapping_dict, corrupt_side='s') 49 | 50 | assert (ranks.numpy() == np.array([[4, 3]], dtype=np.int32)).all(), '(s) Unfiltered Ranks not correct' 51 | 52 | ranks = model.get_ranks(triples, ent_matrix, 0, 4, tf.ragged.constant([], dtype=tf.int32), 53 | mapping_dict, corrupt_side='o') 54 | assert (ranks.numpy() == np.array([[2, 1]], dtype=np.int32)).all(), '(o) Unfiltered Ranks not correct' -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/scoring/test_ComplEx.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features.layers.scoring import ComplEx 10 | 11 | import pytest 12 | import tensorflow as tf 13 | import numpy as np 14 | 15 | def test_compute_score(): 16 | model = ComplEx(k=3) 17 | 18 | triples = [np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 19 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 20 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32)] 21 | 22 | scores = np.around(model._compute_scores(triples).numpy(), 2) 23 | assert (scores == np.array([222., 117273.], 24 | dtype=np.float32)).all(), 'ComplEx: Scores don\'t match!' 25 | 26 | 27 | def test_get_subject_corruption_scores(): 28 | model = ComplEx(k=3) 29 | 30 | ent_matrix = np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32) 31 | triples = [np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 32 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 33 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32)] 34 | scores = np.around(model._get_subject_corruption_scores(triples, ent_matrix).numpy(), 2) 35 | assert (np.diag(scores) == np.array([222., 117273.], 36 | dtype=np.float32)).all(), 'ComplEx: Scores don\'t match!' 37 | 38 | def test_get_object_corruption_scores(): 39 | model = ComplEx(k=3) 40 | 41 | ent_matrix = np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32) 42 | triples = [np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 43 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 44 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32)] 45 | scores = np.around(model._get_object_corruption_scores(triples, ent_matrix).numpy(), 2) 46 | assert (np.diag(scores) == np.array([222., 117273.], 47 | dtype=np.float32)).all(), 'ComplEx: Scores don\'t match!' -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/scoring/test_DistMult.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features.layers.scoring import DistMult 10 | 11 | import pytest 12 | import tensorflow as tf 13 | import numpy as np 14 | 15 | def test_compute_score(): 16 | model = DistMult(k=7) 17 | 18 | triples = [np.array([[1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10]]).astype(np.float32), 19 | np.array([[5, 5, 5, 5, 5, 5, 5], [100, 100, 100, 100, 100, 100, 100]]).astype(np.float32), 20 | np.array([[4, 4, 4, 4, 4, 4, 4], [9, 9, 9, 9, 9, 9, 9]]).astype(np.float32)] 21 | 22 | scores = np.around(model._compute_scores(triples).numpy(), 2) 23 | assert (scores == np.array([140, 63000], 24 | dtype=np.float32)).all(), 'DistMult: Scores don\'t match!' 
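    # A worked check of the expected values (added note, not part of the original test):
    # DistMult scores a triple as sum_i s_i * r_i * o_i, so with k=7 identical components
    # per embedding the two triples above give 7 * (1 * 5 * 4) = 140 and
    # 7 * (10 * 100 * 9) = 63000, matching the assertion.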
25 | 26 | 27 | def test_get_subject_corruption_scores(): 28 | model = DistMult(k=7) 29 | 30 | ent_matrix = np.array([[1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10]]).astype(np.float32) 31 | triples = [np.array([[1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10]]).astype(np.float32), 32 | np.array([[5, 5, 5, 5, 5, 5, 5], [100, 100, 100, 100, 100, 100, 100]]).astype(np.float32), 33 | np.array([[4, 4, 4, 4, 4, 4, 4], [9, 9, 9, 9, 9, 9, 9]]).astype(np.float32)] 34 | scores = np.around(model._get_subject_corruption_scores(triples, ent_matrix).numpy(), 2) 35 | assert (np.diag(scores) == np.array([140, 63000], 36 | dtype=np.float32)).all(), 'DistMult: Scores don\'t match!' 37 | 38 | def test_get_object_corruption_scores(): 39 | model = DistMult(k=7) 40 | 41 | ent_matrix = np.array([[4, 4, 4, 4, 4, 4, 4], [9, 9, 9, 9, 9, 9, 9]]).astype(np.float32) 42 | triples = [np.array([[1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10]]).astype(np.float32), 43 | np.array([[5, 5, 5, 5, 5, 5, 5], [100, 100, 100, 100, 100, 100, 100]]).astype(np.float32), 44 | np.array([[4, 4, 4, 4, 4, 4, 4], [9, 9, 9, 9, 9, 9, 9]]).astype(np.float32)] 45 | scores = np.around(model._get_object_corruption_scores(triples, ent_matrix).numpy(), 2) 46 | assert (np.diag(scores) == np.array([140, 63000], 47 | dtype=np.float32)).all(), 'DistMult: Scores don\'t match!' -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/scoring/test_HolE.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features.layers.scoring import HolE 10 | 11 | import pytest 12 | import tensorflow as tf 13 | import numpy as np 14 | 15 | def test_compute_score(): 16 | model = HolE(k=3) 17 | 18 | triples = [np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 19 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 20 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32)] 21 | 22 | scores = np.around(model._compute_scores(triples).numpy(), 2) 23 | assert (scores == 2 * np.array([222., 117273.], dtype=np.float32) / 3.0).all(),\ 24 | 'HolE: Scores don\'t match!' 25 | 26 | 27 | def test_get_subject_corruption_scores(): 28 | model = HolE(k=3) 29 | 30 | ent_matrix = np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32) 31 | triples = [np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 32 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 33 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32)] 34 | scores = np.around(model._get_subject_corruption_scores(triples, ent_matrix).numpy(), 2) 35 | assert (np.diag(scores) == 2 * np.array([222., 117273.], dtype=np.float32) / 3.0).all(),\ 36 | 'HolE: Scores don\'t match!' 
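    # A worked note on the expected values (added, not part of the original test):
    # [222., 117273.] are exactly the ComplEx scores asserted in test_ComplEx.py for the
    # same embeddings; the HolE scores checked here are 2/k times those ComplEx scores
    # (the two scoring functions are known to be equivalent up to this constant factor),
    # hence the expected 2 * [222., 117273.] / 3.0 with k=3.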
37 | 38 | def test_get_object_corruption_scores(): 39 | model = HolE(k=3) 40 | 41 | ent_matrix = np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32) 42 | triples = [np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 43 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 44 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32)] 45 | scores = np.around(model._get_object_corruption_scores(triples, ent_matrix).numpy(), 2) 46 | assert (np.diag(scores) == 2 * np.array([222., 117273.], dtype=np.float32) / 3.0).all(),\ 47 | 'HolE: Scores don\'t match!' -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/scoring/test_RotatE.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features.layers.scoring import RotatE 10 | 11 | import pytest 12 | import tensorflow as tf 13 | import numpy as np 14 | 15 | 16 | def test_compute_score(): 17 | model = RotatE(k=3, max_rel_size=2) 18 | 19 | triples = [ 20 | np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 21 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 22 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32) 23 | ] 24 | 25 | scores = np.around(model._compute_scores(triples).numpy(), 2) 26 | assert (scores == np.array([-28.03, -94.19], dtype=np.float32)).all(),\ 27 | 'RotatE: Scores don\'t match!' 28 | 29 | 30 | def test_get_subject_corruption_scores(): 31 | model = RotatE(k=3, max_rel_size=2) 32 | 33 | ent_matrix = np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32) 34 | triples = [ 35 | np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 36 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 37 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32) 38 | ] 39 | 40 | scores = np.around(model._get_subject_corruption_scores(triples, ent_matrix).numpy(), 2) 41 | assert (np.diag(scores) == np.array([-28.03, -94.19], dtype=np.float32)).all(),\ 42 | 'RotatE: Scores don\'t match!' 43 | 44 | 45 | def test_get_object_corruption_scores(): 46 | model = RotatE(k=3, max_rel_size=2) 47 | 48 | ent_matrix = np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32) 49 | triples = [ 50 | np.array([[1, 1, 1, 2, 2, 2], [10, 10, 10, 11, 11, 11]]).astype(np.float32), 51 | np.array([[5, 5, 5, 3, 3, 3], [100, 100, 100, 101, 101, 101]]).astype(np.float32), 52 | np.array([[4, 4, 4, 6, 6, 6], [9, 9, 9, 19, 19, 19]]).astype(np.float32) 53 | ] 54 | 55 | scores = np.around(model._get_object_corruption_scores(triples, ent_matrix).numpy(), 2) 56 | assert (np.diag(scores) == np.array([-28.03, -94.19], dtype=np.float32)).all(),\ 57 | 'RotatE: Scores don\'t match!' -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/scoring/test_TransE.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 
2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features.layers.scoring import TransE 10 | 11 | import pytest 12 | import tensorflow as tf 13 | import numpy as np 14 | 15 | def test_compute_score(): 16 | model = TransE(k=7) 17 | 18 | triples = [np.array([[1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10]]).astype(np.float32), 19 | np.array([[13, 13, 13, 13, 13, 13, 13], [100, 100, 100, 100, 100, 100, 100]]).astype(np.float32), 20 | np.array([[4, 4, 4, 4, 4, 4, 9], [90, 90, 90, 90, 90, 90, 90]]).astype(np.float32)] 21 | 22 | scores = np.around(model._compute_scores(triples).numpy(), 2) 23 | assert (scores == np.array([-65., -140.], 24 | dtype=np.float32)).all(), 'TransE: Scores don\'t match!' 25 | 26 | 27 | def test_get_subject_corruption_scores(): 28 | model = TransE(k=7) 29 | 30 | ent_matrix = np.array([[1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10]]).astype(np.float32) 31 | triples = [np.array([[1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10]]).astype(np.float32), 32 | np.array([[13, 13, 13, 13, 13, 13, 13], [100, 100, 100, 100, 100, 100, 100]]).astype(np.float32), 33 | np.array([[4, 4, 4, 4, 4, 4, 9], [90, 90, 90, 90, 90, 90, 90]]).astype(np.float32)] 34 | scores = np.around(model._get_subject_corruption_scores(triples, ent_matrix).numpy(), 2) 35 | assert (np.diag(scores) == np.array([-65., -140.], 36 | dtype=np.float32)).all(), 'TransE: Scores don\'t match!' 37 | 38 | def test_get_object_corruption_scores(): 39 | model = TransE(k=7) 40 | 41 | ent_matrix = np.array([[4, 4, 4, 4, 4, 4, 9], [90, 90, 90, 90, 90, 90, 90]]).astype(np.float32) 42 | triples = [np.array([[1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10]]).astype(np.float32), 43 | np.array([[13, 13, 13, 13, 13, 13, 13], [100, 100, 100, 100, 100, 100, 100]]).astype(np.float32), 44 | np.array([[4, 4, 4, 4, 4, 4, 9], [90, 90, 90, 90, 90, 90, 90]]).astype(np.float32)] 45 | scores = np.around(model._get_object_corruption_scores(triples, ent_matrix).numpy(), 2) 46 | assert (np.diag(scores) == np.array([-65., -140.], 47 | dtype=np.float32)).all(), 'TransE: Scores don\'t match!' -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/layers/test_predictions.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | import tensorflow as tf 10 | import numpy as np 11 | from ampligraph.datasets import load_fb15k_237 12 | from ampligraph.latent_features import ScoringBasedEmbeddingModel 13 | from ampligraph.latent_features import PairwiseLoss 14 | from ampligraph.utils import save_model, restore_model 15 | 16 | 17 | def test_reproducible_predictions(): 18 |     X = np.array([['a', 'x', 'b'], 19 |                   ['a', 'y', 'c'], 20 |                   ['a', 'y', 'd'], 21 |                   ['a', 'x', 'e'], 22 |                   ['e', 'x', 'b'], 23 |                   ['b', 'y', 'd'], 24 |                   ['c', 'x', 'e']]) 25 | 26 |     model = ScoringBasedEmbeddingModel(k=50, 27 |                                        eta=1, 28 |                                        scoring_type='TransE', 29 |                                        seed=0) 30 | 31 |     adam = tf.keras.optimizers.Adam(learning_rate=0.001) 32 | 33 |     loss = PairwiseLoss(loss_params={'margin': 0.5}) 34 | 35 |     model.compile(optimizer=adam, 36 |                   loss=loss, 37 |                   entity_relation_initializer='glorot_uniform', 38 |                   entity_relation_regularizer='L2') 39 | 40 |     model.fit(X, 41 |               batch_size=10, 42 |               epochs=5, 43 |               verbose=True) 44 | 45 |     assert (model.predict(X[:2, ]) == np.array([-2.2272136, -2.429057], dtype=np.float32)).all(), \ 46 |         'Prediction scores have changed.' 47 | 48 | 49 | def test_reproducible_predictions_fb15k237(): 50 |     X = load_fb15k_237() 51 | 52 |     model = ScoringBasedEmbeddingModel(k=50, eta=1, scoring_type='TransE', seed=0) 53 | 54 |     adam = tf.keras.optimizers.Adam(learning_rate=0.001) 55 |     loss = PairwiseLoss(loss_params={'margin': 0.5}) 56 | 57 |     model.compile(optimizer=adam, 58 |                   loss=loss, 59 |                   entity_relation_initializer='glorot_uniform', 60 |                   entity_relation_regularizer='L2') 61 | 62 |     model.fit(X['train'], 63 |               batch_size=30000, 64 |               epochs=5, 65 |               verbose=True) 66 | 67 |     print(model.predict(X['test'][:2, ])) 68 | 69 |     assert (model.predict(X['test'][:2, ]) == np.array([-0.3415385, -0.4203454], dtype=np.float32)).all(), \ 70 |         'Prediction scores have changed.' 71 | 72 | 73 | def test_reproducible_predictions_restored_model(): 74 | 75 |     X = np.array([['a', 'x', 'b'], 76 |                   ['a', 'y', 'c'], 77 |                   ['a', 'y', 'd'], 78 |                   ['a', 'x', 'e'], 79 |                   ['e', 'x', 'b'], 80 |                   ['b', 'y', 'd'], 81 |                   ['c', 'x', 'e']]) 82 | 83 |     model = ScoringBasedEmbeddingModel(k=50, 84 |                                        eta=1, 85 |                                        scoring_type='TransE', 86 |                                        seed=0) 87 |     adam = tf.keras.optimizers.Adam(learning_rate=0.001) 88 |     loss = PairwiseLoss(loss_params={'margin': 0.5}) 89 |     model.compile(optimizer=adam, 90 |                   loss=loss, 91 |                   entity_relation_initializer='glorot_uniform', 92 |                   entity_relation_regularizer='L2') 93 |     model.fit(X, 94 |               batch_size=10, 95 |               epochs=5, 96 |               verbose=True) 97 | 98 |     save_model(model, 'test_reproducible_predictions_restored_model') 99 | 100 |     model_restored = restore_model(model_name_path='test_reproducible_predictions_restored_model') 101 | 102 |     assert (model_restored.predict(X[:2, ]) == np.array([-2.2272136, -2.429057], dtype=np.float32)).all(), \ 103 |         'Prediction scores from restored model do not match the originals' -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/test_initializers.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0.
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.datasets import load_fb15k_237 10 | from ampligraph.latent_features import ScoringBasedEmbeddingModel 11 | import numpy as np 12 | import tensorflow as tf 13 | 14 | 15 | def test_initializers(): 16 | dataset = load_fb15k_237() 17 | 18 | model = ScoringBasedEmbeddingModel(eta=2, 19 | k=10, 20 | scoring_type='TransE') 21 | 22 | unique_ent_len = len(set(dataset['train'][:10, 0]).union(set(dataset['train'][:10, 2]))) 23 | init_ent = tf.constant_initializer( 24 | value=np.ones(shape=(unique_ent_len, 10), dtype=np.float32) 25 | ) 26 | 27 | unique_rel_len = len(set(dataset['train'][:10, 1])) 28 | init_rel = tf.constant_initializer( 29 | value=np.ones(shape=(unique_rel_len, 10), dtype=np.float32) 30 | ) 31 | 32 | model.compile(optimizer=tf.optimizers.Adam(learning_rate=1e-10), loss='nll', entity_relation_initializer=[init_ent, init_rel]) 33 | model.fit(dataset['train'][:10], batch_size=10, epochs=1) 34 | 35 | assert np.all(model.encoding_layer.ent_emb.numpy().round() == np.float32(1)), 'Entity Initializer not working!' 36 | 37 | assert np.all(model.encoding_layer.rel_emb.numpy().round() == np.float32(1)), 'Relation Initializer not working!' 38 | 39 | 40 | model = ScoringBasedEmbeddingModel(eta=2, 41 | k=10, 42 | scoring_type='TransE') 43 | 44 | init = tf.random_normal_initializer(mean=0.0, stddev=0.001, seed=117) 45 | model.compile(optimizer=tf.optimizers.Adam(learning_rate=1e-10), loss='nll', entity_relation_initializer=init) 46 | model.fit(dataset['train'][:10], batch_size=10, epochs=1) 47 | 48 | assert np.round(np.mean(model.encoding_layer.ent_emb.numpy()), 3) == np.float32(0), 'Entity Initializer not working! Mean should be 0' 49 | assert np.round(np.std(model.encoding_layer.ent_emb.numpy()), 3) == np.float32(0.001), 'Entity Initializer not working! Std should be 0.001' 50 | -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/test_optimizers.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.latent_features import optimizers 10 | import pytest 11 | import tensorflow as tf 12 | import numpy as np 13 | from tensorflow.keras.optimizers import Adagrad, Adam 14 | 15 | 16 | def test_optimizer_adam(): 17 | ''' test by passing a string''' 18 | adam = optimizers.get('Adam') 19 | adam.set_partitioned_training() 20 | ent = tf.Variable(np.array([[1, 2], [3, 4]], dtype=np.float32), trainable=True) 21 | rel = tf.Variable(np.array([[1, 2], [3, 4]], dtype=np.float32), trainable=True) 22 | 23 | with tf.GradientTape() as tape: 24 | loss = tf.reduce_sum(ent * rel) 25 | 26 | adam.minimize(loss, ent, rel, tape) 27 | curr_weights = adam.get_weights() 28 | 29 | # step + 2 hyperparams * 2 trainable vars 30 | assert len(curr_weights) == (1 + adam.get_hyperparam_count() * adam.num_optimized_vars), \ 31 | 'Adam: Lengths dont match!' 32 | 33 | assert adam.get_iterations() == 1, 'Adam: Iteration count doesnt match!' 
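    # A worked note (added, not part of the original test): Keras' Adam keeps two slot
    # tensors per trainable variable (the first- and second-moment estimates, m and v).
    # Assuming the wrapper reports those two as its hyperparameters, the length check above
    # works out to 1 (iteration counter) + 2 hyperparams * 2 variables = 5 weight tensors.
    # test_entity_relation_hyperparameters further down relies on the same layout when it
    # reads indices 1 and 3 (entity slots) and 2 and 4 (relation slots) from get_weights().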
34 | 35 | adam.set_weights(curr_weights) 36 | new_weights = adam.get_weights() 37 | 38 | # test whether all params are same 39 | out = [np.all(i==j) for i, j in zip(curr_weights, new_weights)] 40 | assert np.all(out), 'Adam: Weights are not the same!' 41 | 42 | 43 | def test_optimizer_adagrad(): 44 | ''' test the wrapping functionality around keras optimizer''' 45 | adagrad = optimizers.get(Adagrad(learning_rate = 0.0001)) 46 | adagrad.set_partitioned_training() 47 | ent = tf.Variable(np.array([[1, 2], [3, 4]], dtype=np.float32), trainable=True) 48 | rel = tf.Variable(np.array([[1, 2], [3, 4]], dtype=np.float32), trainable=True) 49 | 50 | with tf.GradientTape() as tape: 51 | loss = tf.reduce_sum(ent * rel) 52 | 53 | adagrad.minimize(loss, ent, rel, tape) 54 | curr_weights = adagrad.get_weights() 55 | # step + 2 hyperparams * 2 trainable vars 56 | assert len(curr_weights) == (1 + adagrad.get_hyperparam_count() * adagrad.num_optimized_vars), \ 57 | 'Adagrad: Lengths dont match!' 58 | 59 | adagrad.set_weights(curr_weights) 60 | new_weights = adagrad.get_weights() 61 | 62 | # test whether all params are same 63 | out = [np.all(i==j) for i, j in zip(curr_weights, new_weights)] 64 | assert np.all(out), 'Adagrad: Weights are not the same!' 65 | 66 | 67 | def test_entity_relation_hyperparameters(): 68 | '''test the getters and setters of entity relation hyperparams''' 69 | adam = optimizers.get(Adam(learning_rate = 0.0001)) 70 | ent = tf.Variable(np.array([[1, 2], [3, 4]], dtype=np.float32), trainable=True) 71 | rel = tf.Variable(np.array([[1, 2], [3, 4]], dtype=np.float32), trainable=True) 72 | 73 | with tf.GradientTape() as tape: 74 | loss = tf.reduce_sum(ent * rel) 75 | 76 | adam.minimize(loss, ent, rel, tape) 77 | 78 | curr_weights = adam.get_weights() 79 | ent_hyp, rel_hyp = adam.get_entity_relation_hyperparams() 80 | 81 | assert (curr_weights[1] == ent_hyp[0]).all() and (curr_weights[3] == ent_hyp[1]).all(), \ 82 | 'ent weights are not correct!' 83 | assert (curr_weights[2] == rel_hyp[0]).all() and (curr_weights[4] == rel_hyp[1]).all(), \ 84 | 'rel weights are not correct!' 85 | -------------------------------------------------------------------------------- /tests/ampligraph/latent_features/test_regularizer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 
4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | from ampligraph.datasets import load_fb15k_237 10 | from ampligraph.latent_features import ScoringBasedEmbeddingModel 11 | import numpy as np 12 | import tensorflow as tf 13 | 14 | 15 | def test_regularizer(): 16 | dataset = load_fb15k_237() 17 | 18 | model = ScoringBasedEmbeddingModel(eta=2, 19 | k=10, 20 | scoring_type='TransE') 21 | 22 | unique_ent_len = len(set(dataset['train'][:10, 0]).union(set(dataset['train'][:10, 2]))) 23 | init_ent = tf.constant_initializer( 24 | value=np.ones(shape=(unique_ent_len, 10), dtype=np.float32) 25 | ) 26 | 27 | unique_rel_len = len(set(dataset['train'][:10, 1])) 28 | init_rel = tf.constant_initializer( 29 | value=np.ones(shape=(unique_rel_len, 10), dtype=np.float32) 30 | ) 31 | 32 | model.compile(optimizer=tf.optimizers.SGD(learning_rate=1e-10), loss='nll', 33 | entity_relation_initializer=[init_ent, init_rel], 34 | entity_relation_regularizer=tf.keras.regularizers.L2(1e10)) 35 | model.fit(dataset['train'][:10], batch_size=10, epochs=1) 36 | 37 | assert np.all(model.encoding_layer.ent_emb.numpy().round() != np.float32(1)), 'Entity Regularizer not working!' 38 | assert np.all(model.encoding_layer.rel_emb.numpy().round() != np.float32(1)), 'Relation Regularizer not working!' 39 | -------------------------------------------------------------------------------- /tests/ampligraph/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | 9 | -------------------------------------------------------------------------------- /tests/ampligraph/utils/test_profiling.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019-2023 The AmpliGraph Authors. All Rights Reserved. 2 | # 3 | # This file is Licensed under the Apache License, Version 2.0. 4 | # A copy of the Licence is available in LICENCE, or at: 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | from ampligraph.utils.profiling import get_memory_size, get_human_readable_size, timing_and_memory 9 | import time 10 | import pytest 11 | import numpy as np 12 | 13 | @pytest.mark.skip(reason="may not be a reliable way for measuring memory used...") 14 | def test_get_memory_size(): 15 | pre_size = get_memory_size() 16 | post_size = None 17 | # create table of a certain size and make sure difference in 18 | # occupied memory is visable with get_memory_size function 19 | size = int(0.1 * 1024 * 1024 * 1024) # 0.1 of 1 Gb 20 | tab = b'0' * size 21 | post_size = get_memory_size() 22 | 23 | assert(post_size - pre_size >= size) 24 | 25 | 26 | def test_get_human_readable_size(): 27 | size_in_gb = 1.210720 28 | result1, result2 = get_human_readable_size(1300000000) 29 | assert(np.round(result1,5) == size_in_gb and result2 == "GB") 30 | 31 | 32 | def test_timing_and_memory_logging(): 33 | @timing_and_memory 34 | def mock_fcn(**kwargs): 35 | time.sleep(1.0) 36 | test_logs = {"MOCK_FCN": None} 37 | logs = {} 38 | mock_fcn(log=logs) 39 | assert(logs.keys() == test_logs.keys()) 40 | 41 | --------------------------------------------------------------------------------
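A minimal usage sketch of the profiling helpers exercised in tests/ampligraph/utils/test_profiling.py above (an added illustration, not an official example from the repository): per the test, the timing_and_memory decorator is assumed to record an entry in the dict passed through the log= keyword, keyed by the upper-cased name of the decorated function, and get_human_readable_size converts a byte count into a (value, unit) pair. The function my_step and its body are made up for the sketch.

from ampligraph.utils.profiling import timing_and_memory, get_human_readable_size

@timing_and_memory
def my_step(**kwargs):
    # stand-in for real work; **kwargs lets the decorator pass the log dict through
    return sum(range(1_000_000))

logs = {}
my_step(log=logs)                              # afterwards logs contains a 'MY_STEP' entry
size, unit = get_human_readable_size(1300000000)
print(size, unit)                              # roughly 1.21 GB, as asserted in the test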