├── .github └── workflows │ └── CI_build.yml ├── CITATION.cff ├── LICENSE ├── README.md ├── __version__.py ├── custom_functions ├── library_search.py ├── networking.py ├── plotting_functions.py ├── pubchem_lookup.py └── similarity_matrix.py ├── notebooks ├── iomega-1-mgf_to_cleaned_data.ipynb ├── iomega-10-spectra-networking.ipynb ├── iomega-2-split_data_into_subsets.ipynb ├── iomega-3-classical-spectra-similarities.ipynb ├── iomega-4-fingerprint-based-similarities.ipynb ├── iomega-5-train-spec2vec-model.ipynb ├── iomega-5b-compare_model_iterations.ipynb ├── iomega-5b-train-spec2vec-model-all-gnps.ipynb ├── iomega-6-compute-spec2vec-similarities.ipynb ├── iomega-7-compare-the-different-scores-alternatives.ipynb ├── iomega-7-compare-the-different-scores.ipynb ├── iomega-7-compare_different_classical_score_parameters.ipynb ├── iomega-8-library-matching-pre_revision.ipynb ├── iomega-8-library-matching-scenario1.ipynb ├── iomega-8-library-matching-scenario2.ipynb ├── iomega-8-library-matching-scenario3-known-query-compounds_ge5.ipynb ├── iomega-9-unknown-compound-search-mod-cos.ipynb ├── iomega-9-unknown-compound-search-only-annotated-spectra.ipynb ├── iomega-9-unknown-compound-search.ipynb ├── iomega-extra-classical-spectra-similarities-performance-analysis-synthetic-data.ipynb ├── iomega-extra-evaluate-retraining-effect-5000subset.ipynb ├── iomega-extra-get-spec2vec-embeddings.ipynb ├── iomega-in-depths-spectrum-comparions.ipynb └── iomega-inspect_model_variability.ipynb ├── setup.py └── tests └── test_library_matching.py /.github/workflows/CI_build.yml: -------------------------------------------------------------------------------- 1 | name: CI Build 2 | 3 | on: 4 | push: 5 | pull_request: 6 | types: [opened, reopened] 7 | 8 | jobs: 9 | 10 | basic_checks: 11 | name: Basic code checks / python-${{ matrix.python-version }} / ${{ matrix.os }} 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | os: ['ubuntu-latest', 'macos-latest', 'windows-latest'] 17 | python-version: ['3.7', '3.8'] 18 | steps: 19 | - uses: actions/checkout@v2 20 | - name: Set up Python ${{ matrix.python-version }} 21 | uses: actions/setup-python@v1 22 | with: 23 | python-version: ${{ matrix.python-version }} 24 | - name: Python info 25 | run: | 26 | which python 27 | python --version 28 | - name: Install dependencies 29 | run: | 30 | python -m pip install --upgrade pip 31 | pip install -e .[dev] 32 | - name: Show pip list 33 | run: | 34 | pip list 35 | - name: Run unit tests 36 | run: | 37 | pytest 38 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # YAML 1.2 2 | --- 3 | abstract: "Python library for fuzzy comparison of mass spectrum data and other Python objects." 4 | authors: 5 | - 6 | affiliation: "Netherlands eScience Center" 7 | family-names: Huber 8 | given-names: Florian 9 | orcid: "https://orcid.org/0000-0002-3535-9406" 10 | - 11 | affiliation: "Netherlands eScience Center" 12 | family-names: Ridder 13 | given-names: Lars 14 | orcid: "https://orcid.org/0000-0002-7635-9533" 15 | - 16 | affiliation: "University of Glasgow" 17 | family-names: Rogers 18 | given-names: Simon 19 | orcid: "https://orcid.org/0000-0003-3578-4477" 20 | - 21 | affiliation: "Wageningen University and Research" 22 | family-names: Hooft 23 | name-particle: van der 24 | given-names: Justin J. J. 
25 | orcid: "https://orcid.org/0000-0002-9340-5511" 26 | 27 | cff-version: "1.1.0" 28 | license: "Apache-2.0" 29 | message: "If you use this software, please cite it using these metadata." 30 | repository-code: "https://github.com/matchms/matchms" 31 | title: Spec2Vec 32 | ... 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "{}" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | 204 | 205 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![GitHub](https://img.shields.io/github/license/iomega/spec2vec_gnps_data_analysis) ![GitHub Workflow Status](https://img.shields.io/github/workflow/status/iomega/spec2vec_gnps_data_analysis/CI%20Build) 2 | 3 | # spec2vec_gnps_data_analysis 4 | Analysis and benchmarking of mass spectra similarity measures using gnps data set. 
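
A minimal sketch of the core Spec2Vec workflow used throughout the notebooks (assuming recent `matchms` and `spec2vec` versions installed as described below; file names are placeholders):

```python
import gensim
from matchms.importing import load_from_mgf
from matchms.filtering import default_filters, normalize_intensities
from spec2vec import Spec2Vec, SpectrumDocument

# Load and minimally clean MS/MS spectra (file name is a placeholder)
spectrums = [normalize_intensities(default_filters(s))
             for s in load_from_mgf("gnps_spectra.mgf")]

# Represent each spectrum as a "document" of peak words
documents = [SpectrumDocument(s, n_decimals=2) for s in spectrums]

# Load a pretrained Word2Vec model (see "Download pre-trained models" below)
model = gensim.models.Word2Vec.load("spec2vec_model.model")

# Compute all-vs-all Spec2Vec similarities
spec2vec = Spec2Vec(model=model, intensity_weighting_power=0.5)
similarity_matrix = spec2vec.matrix(documents, documents)
```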
5 | 
6 | If you use **spec2vec** for your research, please cite the following references:
7 | 
8 | F Huber, L Ridder, S Verhoeven, JH Spaaks, F Diblen, S Rogers, JJJ van der Hooft, "Spec2Vec: Improved mass spectral similarity scoring through learning of structural relationships", bioRxiv, https://doi.org/10.1101/2020.08.11.245928
9 | 
10 | (and if you use **matchms** as well:
11 | F. Huber, S. Verhoeven, C. Meijer, H. Spreeuw, E. M. Villanueva Castilla, C. Geng, J.J.J. van der Hooft, S. Rogers, A. Belloum, F. Diblen, J.H. Spaaks, (2020). matchms - processing and similarity evaluation of mass spectrometry data. Journal of Open Source Software, 5(52), 2411, https://doi.org/10.21105/joss.02411 )
12 | 
13 | Thanks!
14 | 
15 | ## Tutorial on matchms and Spec2Vec
16 | Probably the easiest way to learn how to run Spec2Vec is to follow our tutorial on `matchms` and `Spec2Vec`.
17 | 
18 | + [Part I - Import and process MS/MS data using matchms](https://blog.esciencecenter.nl/build-your-own-mass-spectrometry-analysis-pipeline-in-python-using-matchms-part-i-d96c718c68ee)
19 | + [Part II - Compute spectral similarity using Spec2Vec](https://blog.esciencecenter.nl/build-a-mass-spectrometry-analysis-pipeline-in-python-using-matchms-part-ii-spec2vec-8aa639571018)
20 | + [Part III - Create molecular networks from Spec2Vec similarities](https://blog.esciencecenter.nl/build-a-mass-spectrometry-analysis-pipeline-in-python-using-matchms-part-iii-molecular-91891248ee34)
21 | 
22 | 
23 | ## Create environment
24 | Spec2vec currently works with Python 3.7 and 3.8. It might also work with earlier versions, but we have not tested those.
25 | ```
26 | conda create --name spec2vec_analysis python=3.7 # or 3.8 if you prefer
27 | conda activate spec2vec_analysis
28 | conda install --channel nlesc --channel bioconda --channel conda-forge spec2vec
29 | pip install jupyter
30 | ```
31 | 
32 | ## Clone this repository and run notebooks
33 | ```
34 | git clone https://github.com/iomega/spec2vec_gnps_data_analysis
35 | cd spec2vec_gnps_data_analysis
36 | jupyter notebook
37 | ```
38 | 
39 | ## Download data
40 | - Original data was obtained from GNPS: https://gnps-external.ucsd.edu/gnpslibrary/ALL_GNPS.json
41 | - The cleaned and processed GNPS dataset of positive ionization mode spectra (raw data accessed on 2020-05-11) can be found on Zenodo: https://zenodo.org/record/3978072
42 | 
43 | ## Download pre-trained models
44 | Pretrained Word2Vec models to be used with Spec2Vec can be found on Zenodo:
45 | - Model trained on __UniqueInchikey__ subset (12,797 spectra): https://zenodo.org/record/3978054
46 | - Model trained on __AllPositive__ set of all positive ionization mode spectra (after filtering): https://zenodo.org/record/4173596
47 | 
--------------------------------------------------------------------------------
/__version__.py:
--------------------------------------------------------------------------------
1 | __version__ = '0.1.0'
2 | 
--------------------------------------------------------------------------------
/custom_functions/library_search.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | import numpy as np
3 | import pandas as pd
4 | from tqdm import tqdm
5 | from gensim.models.basemodel import BaseTopicModel
6 | from matchms.similarity import CosineGreedy, ModifiedCosine, PrecursorMzMatch
7 | from spec2vec import SpectrumDocument
8 | from spec2vec import Spec2Vec
9 | 
10 | 
11 | def library_matching(documents_query: List[SpectrumDocument],
12 |                      documents_library: List[SpectrumDocument],
13 |                      model: BaseTopicModel,
14 |                      presearch_based_on: List[str] = ["precursor_mz", "spec2vec-top10"],
15 |                      ignore_non_annotated: bool = True,
16 |                      include_scores=["spec2vec", "cosine", "modcosine"],
17 |                      intensity_weighting_power: float = 0.5,
18 |                      allowed_missing_percentage: float = 0,
19 |                      cosine_tol: float = 0.005,
20 |                      min_matches: int = 6,
21 |                      mass_tolerance: float = 2.0,
22 |                      mass_tolerance_type: str = "ppm"):
23 |     """Select potential spectrum matches from a given spectrum library.
24 | 
25 |     Suitable candidates are pre-selected by 1) top-n Spec2Vec similarity, 2) matching
26 |     precursor mass (within the given tolerance), and/or 3) top-n modified cosine similarity.
27 |     For later matching routines, additional scores (cosine, modified cosine)
28 |     are added as well.
29 | 
30 |     Args:
31 |     --------
32 |     documents_query:
33 |         List containing all spectrum documents that should be queried against the library.
34 |     documents_library:
35 |         List containing all library spectrum documents.
36 |     model:
37 |         Pretrained Word2Vec model.
38 |     presearch_based_on:
39 |         List with strings to specify which measures to use for the presearch.
40 |         This can include 'precursor_mz', 'spec2vec-topX', and/or 'modcos-topX' (with X the number of top hits to keep, e.g. 'spec2vec-top10').
41 |     ignore_non_annotated: bool, optional
42 |         If True, only annotated spectra will be considered for matching.
43 |         Default = True.
44 |     cosine_tol: float, optional
45 |         Set tolerance for the cosine and modified cosine score. Default = 0.005
46 |     mass_tolerance
47 |         Specify tolerance for a mass match.
48 |     mass_tolerance_type
49 |         Choose between "ppm" (relative) and "Dalton" (absolute) tolerance type.
50 |     """
51 | 
52 |     # Initializations
53 |     found_matches = []
54 |     m_mass_matches = None
55 |     m_spec2vec_similarities = None
56 |     m_modcos_similarities = None
57 | 
58 |     def get_metadata(documents):
59 |         metadata = []
60 |         for doc in documents:
61 |             metadata.append(doc._obj.get("smiles"))
62 |         return metadata
63 | 
64 |     library_spectra_metadata = get_metadata(documents_library)
65 |     if ignore_non_annotated:
66 |         # Get array of all ids for spectra with smiles
67 |         library_ids = np.asarray([i for i, x in enumerate(library_spectra_metadata) if x])
68 |     else:
69 |         library_ids = np.arange(len(documents_library))
70 | 
71 |     allowed_presearch_type = ["precursor_mz", "spec2vec-top", "modcos-top"]
72 |     msg = "Presearch must include one of: " + ", ".join(allowed_presearch_type)
73 |     assert np.any([(x in y) for x in allowed_presearch_type for y in presearch_based_on]), msg
74 | 
75 |     # 1. 
Search for top-n Spec2Vec matches ------------------------------------ 76 | if np.any(["spec2vec" in x for x in presearch_based_on]): 77 | top_n = int([x.split("top")[1] for x in presearch_based_on if "spec2vec" in x][0]) 78 | print(f"Pre-selection includes spec2vec top {top_n}.") 79 | spec2vec = Spec2Vec(model=model, intensity_weighting_power=intensity_weighting_power, 80 | allowed_missing_percentage=allowed_missing_percentage, 81 | progress_bar=True) 82 | m_spec2vec_similarities = spec2vec.matrix([documents_library[i] for i in library_ids], 83 | documents_query) 84 | 85 | # Select top_n similarity values: 86 | selection_spec2vec = np.argpartition(m_spec2vec_similarities, -top_n, axis=0)[-top_n:, :] 87 | else: 88 | selection_spec2vec = np.empty((0, len(documents_query)), dtype="int") 89 | 90 | # 2. Search for precursor_mz based matches --------------------------------- 91 | if "precursor_mz" in presearch_based_on: 92 | print(f"Pre-selection includes mass matches within {mass_tolerance} {mass_tolerance_type}.") 93 | mass_matching = PrecursorMzMatch(tolerance=mass_tolerance, 94 | tolerance_type=mass_tolerance_type) 95 | m_mass_matches = mass_matching.matrix([documents_library[i]._obj for i in library_ids], 96 | [x._obj for x in documents_query]) 97 | selection_massmatch = [] 98 | for i in range(len(documents_query)): 99 | selection_massmatch.append(np.where(m_mass_matches[:, i] == 1)[0]) 100 | else: 101 | selection_massmatch = np.empty((len(documents_query), 0), dtype="int") 102 | 103 | # 3. Search for top-n modified cosine matches ------------------------------------ 104 | if np.any(["modcos" in x for x in presearch_based_on]): 105 | top_n = int([x.split("top")[1] for x in presearch_based_on if "modcos" in x][0]) 106 | print(f"Pre-selection includes modified cosine top {top_n}.") 107 | modcos = ModifiedCosine(tolerance=cosine_tol) 108 | 109 | n_rows = len(library_ids) 110 | n_cols = len(documents_query) 111 | m_modcos_similarities = np.zeros([n_rows, n_cols], dtype=np.float64) 112 | m_modcos_matches = np.zeros([n_rows, n_cols], dtype=np.float64) 113 | for i_ref, reference in enumerate(tqdm([documents_library[i]._obj for i in library_ids])): 114 | for i_query, query in enumerate([x._obj for x in documents_query]): 115 | score = modcos.pair(reference, query) 116 | m_modcos_similarities[i_ref][i_query] = score[0] 117 | m_modcos_matches[i_ref][i_query] = score[1] 118 | 119 | # Select top_n similarity values: 120 | m_modcos_selected = m_modcos_similarities.copy() 121 | m_modcos_selected[m_modcos_matches < min_matches] = 0 122 | selection_modcos = np.argpartition(m_modcos_selected, -top_n, axis=0)[-top_n:, :] 123 | else: 124 | selection_modcos = np.empty((0, len(documents_query)), dtype="int") 125 | 126 | # 4. 
Combine found matches ------------------------------------------------ 127 | if "cosine" in include_scores: 128 | print("Calculate cosine score for selected candidates.") 129 | if "modcosine" in include_scores: 130 | print("Calculate modified cosine score for selected candidates.") 131 | 132 | for i in tqdm(range(len(documents_query))): 133 | s2v_top_ids = selection_spec2vec[:, i] 134 | mass_match_ids = selection_massmatch[i] 135 | modcos_ids = selection_modcos[:, i] 136 | 137 | all_match_ids = np.unique(np.concatenate((s2v_top_ids, mass_match_ids, modcos_ids))) 138 | 139 | if len(all_match_ids) > 0: 140 | if "cosine" in include_scores: 141 | # Get cosine score for found matches 142 | cosine_similarity = CosineGreedy(tolerance=cosine_tol) 143 | cosine_scores = [] 144 | for match_id in library_ids[all_match_ids]: 145 | cosine_scores.append(cosine_similarity.pair(documents_library[match_id]._obj, 146 | documents_query[i]._obj)) 147 | else: 148 | cosine_scores = len(all_match_ids) * ["not calculated"] 149 | 150 | if m_modcos_similarities is not None: 151 | mod_cosine_scores0 = [x for x in m_modcos_similarities[all_match_ids, i]] 152 | mod_cosine_scores1 = [x for x in m_modcos_matches[all_match_ids, i]] 153 | mod_cosine_scores = list(zip(mod_cosine_scores0, mod_cosine_scores1)) 154 | elif "modcosine" in include_scores: 155 | # Get modified cosine score for found matches 156 | mod_cosine_similarity = ModifiedCosine(tolerance=cosine_tol) 157 | mod_cosine_scores = [] 158 | for match_id in library_ids[all_match_ids]: 159 | mod_cosine_scores.append(mod_cosine_similarity.pair(documents_library[match_id]._obj, 160 | documents_query[i]._obj)) 161 | else: 162 | mod_cosine_scores = len(all_match_ids) * ["not calculated"] 163 | 164 | matches_df = pd.DataFrame({"cosine_score": [x["score"] for x in cosine_scores], 165 | "cosine_matches": [x["matches"] for x in cosine_scores], 166 | "mod_cosine_score": [x["score"] for x in mod_cosine_scores], 167 | "mod_cosine_matches": [x["matches"] for x in mod_cosine_scores]}, 168 | index=library_ids[all_match_ids]) 169 | 170 | if m_mass_matches is not None: 171 | matches_df["mass_match"] = m_mass_matches[all_match_ids, i] 172 | 173 | if m_spec2vec_similarities is not None: 174 | matches_df["s2v_score"] = m_spec2vec_similarities[all_match_ids, i] 175 | elif "spec2vec" in include_scores: 176 | spec2vec_similarity = Spec2Vec(model=model, 177 | intensity_weighting_power=intensity_weighting_power, 178 | allowed_missing_percentage=allowed_missing_percentage) 179 | spec2vec_scores = [] 180 | for match_id in library_ids[all_match_ids]: 181 | spec2vec_scores.append(spec2vec_similarity.pair(documents_library[match_id], 182 | documents_query[i])) 183 | matches_df["s2v_score"] = spec2vec_scores 184 | found_matches.append(matches_df.fillna(0)) 185 | else: 186 | found_matches.append([]) 187 | 188 | return found_matches 189 | -------------------------------------------------------------------------------- /custom_functions/networking.py: -------------------------------------------------------------------------------- 1 | # Import libraries 2 | import numpy as np 3 | import networkx as nx 4 | from community import community_louvain 5 | from networkx.algorithms.connectivity import minimum_st_edge_cut # , minimum_st_node_cut 6 | from networkx.algorithms.flow import shortest_augmenting_path 7 | import pandas as pd 8 | from matplotlib import pyplot as plt 9 | import matplotlib 10 | 11 | # ---------------------------------------------------------------------------- 12 | # ---------------- 
Graph / networking related functions ----------------------
13 | # ----------------------------------------------------------------------------
14 | 
15 | 
16 | def create_network(similars_idx,
17 |                    similars,
18 |                    max_links=10,
19 |                    cutoff=0.7,
20 |                    link_method='single'):
21 |     """
22 |     Function to create network from given top-n similarity values.
23 | 
24 |     Args:
25 |     --------
26 |     similars_idx: numpy array
27 |         Array with indices of top-n most similar nodes.
28 |     similars: numpy array
29 |         Array with similarity values of top-n most similar nodes.
30 |     max_links: int
31 |         Maximum number of links to add per node. Default = 10.
32 |         Due to incoming links, total number of links per node can be higher.
33 |     cutoff: float
34 |         Threshold for given similarities. Edges/Links will only be made for
35 |         similarities > cutoff. Default = 0.7.
36 |     link_method: str
37 |         Choose between 'single' and 'mutual'. 'single' will add all links based
38 |         on individual nodes. 'mutual' will only add links if that link appears
39 |         in the given top-n list for both nodes.
40 |     """
41 | 
42 |     dimension = similars_idx.shape[0]
43 | 
44 |     # Initialize network graph, add nodes
45 |     msnet = nx.Graph()
46 |     msnet.add_nodes_from(np.arange(0, dimension))
47 | 
48 |     # Add edges based on global threshold for weights
49 |     for i in range(0, dimension):
50 |         idx = np.where(similars[i, :] > cutoff)[0][:max_links]
51 |         if link_method == "single":
52 |             new_edges = [(i, int(similars_idx[i, x]), float(similars[i, x]))
53 |                          for x in idx if similars_idx[i, x] != i]
54 |         elif link_method == "mutual":
55 |             new_edges = [(i, int(similars_idx[i, x]), float(similars[i, x]))
56 |                          for x in idx
57 |                          if similars_idx[i, x] != i and i in similars_idx[int(similars_idx[i, x]), :]
58 |                          ]
59 |         else:
60 |             raise ValueError("Link method not known.")
61 |         msnet.add_weighted_edges_from(new_edges)
62 | 
63 |     return msnet
64 | 
65 | 
66 | def sample_cuts(graph, max_steps=1000, max_cuts=1):
67 |     """ Function to help find critical links in the given graph.
68 |     Critical links here are links which -once removed- would disconnect considerable
69 |     parts of the network. Those links are searched for by counting minimum cuts between
70 |     a large number of node pairs (up to max_steps pairs will be explored).
71 |     If more pairs exist than max_steps allows to explore, pick max_steps random pairs.
72 | 
73 |     Args:
74 |     -------
75 |     graph: networkx graph
76 |         Graph of individual cluster (created using networkx).
77 |     max_steps
78 |         Up to max_steps pairs will be explored to search for cuts. Default = 1000.
79 |     max_cuts
80 |         Maximum number of links allowed to be cut. Default = 1.
81 |     """
82 | 
83 |     num_nodes = graph.number_of_nodes()
84 |     # num_edges = graph.number_of_edges()
85 | 
86 |     # Make list of all pairs within graph
87 |     nodes = np.array(graph.nodes)
88 |     pairs = np.array(np.meshgrid(nodes, nodes)).T
89 |     remove_diagonal = np.array([(i * num_nodes + i) for i in range(num_nodes)])
90 |     pairs = np.delete(pairs.reshape(-1, 2), remove_diagonal, axis=0)
91 | 
92 |     sampled_cuts = []
93 |     if pairs.shape[0] <= max_steps:
94 |         max_steps = pairs.shape[0]
95 |     else:
96 |         # If more pairs exist than max_steps allows to explore, pick max_steps random pairs.
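        # Sample without replacement so that each node pair is considered at most once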
97 | choices = np.random.choice(np.arange(pairs.shape[0]), 98 | max_steps, 99 | replace=False) 100 | pairs = pairs[choices, :] 101 | 102 | for pair in pairs: 103 | cuts = minimum_st_edge_cut(graph, 104 | pair[0], 105 | pair[1], 106 | flow_func=shortest_augmenting_path) 107 | # nx.node_connectivity(graphs[4], 592, 376) 108 | # cuts = nx.minimum_st_edge_cut(graph, pair[0], pair[1]) 109 | # cuts = nx.minimum_edge_cut(graph, pair[0], pair[1])#, flow_func=shortest_augmenting_path) 110 | if len(cuts) <= max_cuts: 111 | sampled_cuts.append(cuts) 112 | 113 | return sampled_cuts 114 | 115 | 116 | def weak_link_finder(graph, max_steps=1000, max_cuts=1): 117 | """ Function to detect critical links in the given graph. 118 | Critical links here are links which -once removed- would disconnect considerable 119 | parts of the network. Those links are searched for by counting minimum cuts between 120 | a large number of node pairs (up to max_steps pairs will be explored). 121 | If more pairs exist than max_steps allows to explore, pick max_steps random pairs. 122 | 123 | Args: 124 | ------- 125 | graph: networkx graph 126 | Graph of individual cluster (created using networkx). 127 | max_steps 128 | Up to max_steps pairs will be explored to search for cuts. Default = 1000. 129 | max_cuts 130 | Maximum numbers of links allowed to be cut. Default = 1. 131 | """ 132 | 133 | sampled_cuts = sample_cuts(graph, max_steps=max_steps, max_cuts=max_cuts) 134 | 135 | sampled_cuts_len = [len(x) for x in sampled_cuts] 136 | proposed_cuts = [] 137 | for min_cuts in list(set(sampled_cuts_len)): 138 | sampled_cuts_select = [ 139 | list(x)[:min_cuts] for x in sampled_cuts if len(x) == min_cuts 140 | ] 141 | 142 | sampled_cuts_select = np.array(sampled_cuts_select) 143 | # Sort array 144 | if min_cuts > 1: 145 | sampled_cuts_select = np.sort(np.sort(sampled_cuts_select, axis=2), 146 | axis=1) 147 | else: 148 | sampled_cuts_select = np.sort(sampled_cuts_select, axis=2) 149 | 150 | # Find unique cuts and count occurences 151 | cuts_unique, cuts_count = row_counts( 152 | sampled_cuts_select.reshape(-1, min_cuts * 2)) 153 | 154 | # Return most promising cuts 155 | proposed_cuts.append((min_cuts, cuts_unique, cuts_count)) 156 | 157 | return proposed_cuts 158 | 159 | 160 | def dilate_cluster(graph_main, 161 | similars_idx, 162 | similars, 163 | max_cluster_size=100, 164 | min_cluster_size=10, 165 | max_per_node=1, 166 | max_per_cluster=None, 167 | min_weight=0.5): 168 | """ Add more links to clusters that are < min_cluster_size. 169 | This function is in particular made to avoid small remaining clusters or singletons. 170 | 171 | Will only add links if they won't lead to clusters > max_cluster_size, 172 | and if the links have weights > min_weight. 173 | Starts iteratively from highest weight links that are not yet part of the network 174 | (out of given top-n links). 175 | 176 | Args: 177 | -------- 178 | graph_main: networkx graph 179 | Graph, e.g. made using create_network() function. Based on networkx. 180 | similars_idx: numpy array 181 | Array with indices of top-n most similar nodes. 182 | similars: numpy array 183 | Array with similarity values of top-n most similar nodes. 184 | max_cluster_size: int 185 | Maximum desired size of clusters. Default = 100. 186 | min_cluster_size: int 187 | Minimum desired size of clusters. Default = 10. 188 | max_per_node: int 189 | Only add the top max_addition ones per cluster. Default = 1. 190 | max_per_cluster: int, None 191 | Only add the top max_addition ones per cluster. 
Ignore if set to None. Default = None.
192 |     min_weight: float
193 |         Set minimum weight to be considered for making a link. Default = 0.5.
194 |     """
195 | 
196 |     links_added = []
197 | 
198 |     # Split graph into separate clusters
199 |     graphs = list(nx.connected_component_subgraphs(graph_main))
200 | 
201 |     for graph in graphs:
202 |         cluster_size = len(graph.nodes)
203 |         if cluster_size < min_cluster_size:
204 |             best_scores = []
205 |             potential_links = []
206 | 
207 |             for ID in graph.nodes:
208 |                 nodes_connected = []
209 |                 for key in graph[ID].keys():
210 |                     nodes_connected.append(key)
211 | 
212 |                 potential_new_links = [(i, x)
213 |                                        for i, x in enumerate(similars_idx[ID])
214 |                                        if x not in nodes_connected and x != ID]
215 |                 best_score_arr = similars[ID][[
216 |                     x[0] for x in potential_new_links
217 |                 ]]
218 |                 select = np.where(
219 |                     best_score_arr >= min_weight)[0][:max_per_node]
220 |                 # if best_score >= min_weight:
221 |                 if select.shape[0] > 0:
222 |                     for s in select:
223 |                         best_scores.append(best_score_arr[s])
224 |                         potential_link = (ID,
225 |                                           [x[1]
226 |                                            for x in potential_new_links][s])
227 |                         potential_links.append(potential_link)
228 | 
229 |             if max_per_cluster is None:
230 |                 selected_candidates = np.argsort(best_scores)[::-1]
231 |             else:
232 |                 # Only add the top max_per_cluster candidates
233 |                 selected_candidates = np.argsort(
234 |                     best_scores)[::-1][:max_per_cluster]
235 | 
236 |             for ID in selected_candidates:
237 |                 # node_id = list(graph.nodes)[ID]
238 |                 node_id = potential_links[ID][0]
239 | 
240 |                 # Only add link if no cluster > max_cluster_size is formed by it
241 |                 if (len(
242 |                         nx.node_connected_component(graph_main,
243 |                                                     potential_links[ID][1])) +
244 |                         cluster_size) <= max_cluster_size:
245 |                     # Actual adding of new links
246 |                     graph_main.add_edge(node_id,
247 |                                         potential_links[ID][1],
248 |                                         weight=best_scores[ID])
249 |                     links_added.append((node_id, potential_links[ID][1]))
250 |                     # Update cluster_size to keep track of growing clusters
251 |                     cluster_size = len(
252 |                         nx.node_connected_component(graph_main,
253 |                                                     potential_links[ID][1]))
254 | 
255 |     return graph_main, links_added
256 | 
257 | 
258 | def erode_clusters(graph_main, max_cluster_size=100, keep_weights_above=0.8):
259 |     """ Remove links from clusters that are > max_cluster_size.
260 |     This function is in particular made to break up overly large clusters.
261 | 
262 |     Links will only be removed if their weight is below keep_weights_above.
263 |     Removal starts iteratively from the lowest-weight link of every cluster
264 |     that exceeds max_cluster_size.
265 | 
266 |     Args:
267 |     --------
268 |     graph_main: networkx graph
269 |         Graph, e.g. made using create_network() function. Based on networkx.
270 |     max_cluster_size: int
271 |         Maximum desired size of clusters. Default = 100.
272 |     keep_weights_above: float
273 |         Set threshold above which weights will not be removed. Default = 0.8.
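    Returns:
        The modified main graph and the list of removed links.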
274 |     """
275 | 
276 |     links_removed = []
277 | 
278 |     # Split graph into separate clusters
279 |     graphs = list(nx.connected_component_subgraphs(graph_main))
280 | 
281 |     for graph in graphs:
282 |         cluster_size = len(graph.nodes)
283 |         while cluster_size > max_cluster_size:
284 | 
285 |             edges = list(graph.edges)
286 |             edges_weights = np.array(
287 |                 [graph[x[0]][x[1]]['weight'] for x in edges])
288 | 
289 |             weakest_edge = edges_weights.argsort()[0]
290 |             if edges_weights[weakest_edge] >= keep_weights_above:
291 |                 break  # Weakest remaining edge is above threshold -> stop eroding this cluster
292 |             print("Remove edge:", edges[weakest_edge][0], edges[weakest_edge][1])
293 |             graph.remove_edge(edges[weakest_edge][0],
294 |                               edges[weakest_edge][1])
295 |             graph_main.remove_edge(edges[weakest_edge][0],
296 |                                    edges[weakest_edge][1])
297 |             links_removed.append(edges[weakest_edge])
298 | 
299 |             # If link removal caused split of cluster:
300 |             if not nx.is_connected(graph):
301 |                 subgraphs = list(nx.connected_component_subgraphs(graph))
302 |                 print("Getting from cluster with", len(graph.nodes),
303 |                       "nodes, to clusters with",
304 |                       [len(x.nodes) for x in subgraphs], "nodes.")
305 |                 idx1 = np.argmax([len(x.nodes) for x in subgraphs])
306 |                 graph = subgraphs[idx1]  # keep largest subcluster here
307 | 
308 |             cluster_size = len(graph.nodes)
309 | 
310 |     return graph_main, links_removed
311 | 
312 | 
313 | def add_intra_cluster_links(graph_main, m_sim, min_weight=0.5, max_links=20):
314 |     """ Add links within each separate cluster if weights above min_weight.
315 | 
316 |     Args:
317 |     -------
318 |     graph_main: networkx graph
319 |         Graph, e.g. made using create_network() function. Based on networkx.
320 |     m_sim: numpy array
321 |         2D array with all reference similarity values between all-vs-all nodes.
322 |     min_weight: float
323 |         Set minimum weight to be considered for making a link. Default = 0.5.
324 |     """
325 |     # Split graph into separate clusters
326 |     graphs = list(nx.connected_component_subgraphs(graph_main))
327 | 
328 |     for graph in graphs:
329 |         nodes = list(graph.nodes)
330 |         nodes0 = nodes.copy()
331 |         for node in nodes:
332 |             del nodes0[0]
333 |             weights = m_sim[node, nodes0]
334 |             weights_select = weights.argsort()[::-1][:max_links]
335 |             weights_select = weights_select[weights[weights_select] >= min_weight]
336 |             new_edges = [(node, nodes0[x], weights[x]) for x in weights_select]
337 | 
338 |             graph_main.add_weighted_edges_from(new_edges)
339 | 
340 |     return graph_main
341 | 
342 | 
343 | def split_cluster(graph_main,
344 |                   max_cluster_size=100,
345 |                   min_cluster_size=10,
346 |                   max_search_steps=1000,
347 |                   max_cuts=1,
348 |                   multiple_cuts_per_level=True):
349 |     """
350 |     Function to split clusters at weak links.
351 | 
352 |     Args:
353 |     ---------
354 |     graph_main: networkx graph
355 |         Graph, e.g. made using create_network() function. Based on networkx.
356 |     max_cluster_size: int
357 |         Maximum desired size of clusters. Default = 100.
358 |     min_cluster_size: int
359 |         Minimum desired size of clusters. Default = 10.
360 |     max_search_steps
361 |         Up to max_search_steps pairs will be explored to search for cuts. Default = 1000.
362 |     max_cuts
363 |         Maximum number of links allowed to be cut. Default = 1.
364 |     multiple_cuts_per_level
365 |         If True, allow multiple cuts to be done per level and run. Default = True. 
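    Returns the modified main graph and the list of removed links.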
366 | """ 367 | 368 | # Split graph into separate clusters 369 | graphs = list(nx.connected_component_subgraphs(graph_main)) 370 | 371 | links_removed = [] 372 | for i, graph in enumerate(graphs): 373 | if len(graph.nodes) > max_cluster_size: 374 | # Detect potential weak links 375 | weak_links = weak_link_finder(graph, 376 | max_steps=max_search_steps, 377 | max_cuts=max_cuts) 378 | 379 | split_done = False 380 | j = 0 381 | new_graph = graph.copy() 382 | while not split_done and j < len(weak_links): 383 | 384 | # Test best candidates 385 | 386 | new_graph_testing = new_graph.copy() 387 | pairs = weak_links[j][1] 388 | pair_counts = weak_links[j][2] 389 | pairs = pairs[pair_counts.argsort()[::-1]] 390 | # print(i,j, pairs) 391 | 392 | # ---------------------------------------------- 393 | # Check if pairs have already been removed in former iteration 394 | # ---------------------------------------------- 395 | pairs_still_present = [] 396 | for i, pair in enumerate(pairs): 397 | all_edges_present = True 398 | for m in range(int(pairs.shape[1] / 2)): 399 | edge = (pair[m * 2], pair[m * 2 + 1]) 400 | if edge not in new_graph_testing.edges: 401 | all_edges_present = False 402 | if all_edges_present: 403 | pairs_still_present.append(i) 404 | pairs_still_present = list(set(pairs_still_present)) 405 | pairs = pairs[ 406 | pairs_still_present] # Remove pairs which have been cut out already 407 | 408 | # ---------------------------------------------- 409 | # Test removing proposed links for all pairs 410 | # ---------------------------------------------- 411 | if len(pairs) > 0: 412 | min_size_after_cutting = [] 413 | for pair in pairs: 414 | new_graph_testing = new_graph.copy() 415 | 416 | # Remove edges in pair 417 | for m in range(int(pairs.shape[1] / 2)): 418 | new_graph_testing.remove_edge( 419 | pair[m * 2], pair[m * 2 + 1]) 420 | 421 | # Check if created subclustes are big enough: 422 | subgraphs = list( 423 | nx.connected_component_subgraphs( 424 | new_graph_testing)) 425 | min_size_after_cutting.append( 426 | min([len(x.nodes) for x in subgraphs])) 427 | 428 | # Select best partition of graph (creating most similar sized subclusters) 429 | min_size_after_cutting = np.array(min_size_after_cutting) 430 | best_partition = np.argmax(min_size_after_cutting) 431 | else: 432 | min_size_after_cutting = [0] 433 | best_partition = 0 434 | 435 | # ---------------------------------------------- 436 | # Actual removal of links 437 | # ---------------------------------------------- 438 | if min_size_after_cutting[best_partition] >= min_cluster_size: 439 | new_graph_testing = new_graph.copy() 440 | pair = pairs[best_partition] 441 | 442 | # Remove edges in selected pair 443 | for m in range(int(pairs.shape[1] / 2)): 444 | # Remove edge from current cluster: 445 | new_graph_testing.remove_edge(pair[m * 2], 446 | pair[m * 2 + 1]) 447 | # Remove edge from main graph: 448 | graph_main.remove_edge(pair[m * 2], pair[m * 2 + 1]) 449 | links_removed.append((pair[m * 2], pair[m * 2 + 1])) 450 | subgraphs = list( 451 | nx.connected_component_subgraphs(new_graph_testing)) 452 | 453 | if int(pairs.shape[1] / 2) > 1: 454 | print("Removed", int(pairs.shape[1] / 2), "edges:", 455 | pair) 456 | else: 457 | print("Removed", int(pairs.shape[1] / 2), "edge:", 458 | pair) 459 | 460 | print("Getting from cluster with", len(new_graph.nodes), 461 | "nodes, to clusters with", 462 | [len(x.nodes) for x in subgraphs], "nodes.") 463 | idx1 = np.argmax([len(x.nodes) for x in subgraphs]) 464 | new_graph = subgraphs[idx1] # keep 
largest subcluster here 465 | 466 | if len(new_graph.nodes) <= max_cluster_size: 467 | split_done = True 468 | else: 469 | pass 470 | 471 | # Check if more suited cuts are expected for the same number of cuts 472 | if len(min_size_after_cutting) > 1: 473 | idx = np.argsort(min_size_after_cutting)[::-1][1] 474 | if min_size_after_cutting[ 475 | idx] >= min_cluster_size and multiple_cuts_per_level: 476 | pass 477 | else: 478 | j += 1 479 | else: 480 | j += 1 481 | 482 | return graph_main, links_removed 483 | 484 | 485 | # ---------------------------------------------------------------------------- 486 | # ---------------------- Functions to refine network ------------------------- 487 | # ---------------------------------------------------------------------------- 488 | 489 | 490 | def refine_network(graph_main, 491 | similars_idx, 492 | similars, 493 | weigh_bounds=(0.6, 1), 494 | filename=None, 495 | max_cluster_size=100, 496 | min_cluster_size=10, 497 | max_search_steps=1000, 498 | max_cuts=2, 499 | max_split_iterations=10, 500 | basic_splitting=True, 501 | dilation=False): 502 | """ 503 | Args: 504 | ------- 505 | """ 506 | # Split graph into separate clusters 507 | graphs = list(nx.connected_component_subgraphs(graph_main)) 508 | 509 | links_removed = [] 510 | links_added = [] 511 | 512 | # n_cluster = len(graphs) 513 | cluster_max = np.max([len(x.nodes) for x in graphs]) 514 | counter = 0 515 | 516 | print(20 * '---') 517 | while cluster_max > max_cluster_size and counter < max_split_iterations: 518 | print("Splitting iteration:", counter + 1, "Max cluster size =", 519 | cluster_max, '\n') 520 | graph_main, links = split_cluster(graph_main.copy(), 521 | max_cluster_size=max_cluster_size, 522 | min_cluster_size=min_cluster_size, 523 | max_search_steps=max_search_steps, 524 | max_cuts=max_cuts, 525 | multiple_cuts_per_level=True) 526 | links_removed.extend(links) 527 | 528 | # Split updated graph into separate clusters 529 | graphs = list(nx.connected_component_subgraphs(graph_main)) 530 | cluster_max = np.max([len(x.nodes) for x in graphs]) 531 | counter += 1 532 | 533 | if basic_splitting: 534 | print(20 * '---') 535 | print("Extra splitting step to sanitize clusters.") 536 | graph_main, links = split_cluster( 537 | graph_main, 538 | max_cluster_size=2 * 539 | min_cluster_size, # ! here we try to 'sanitize most clusters' 540 | min_cluster_size=min_cluster_size, 541 | max_search_steps=max_search_steps, 542 | max_cuts=1, 543 | multiple_cuts_per_level=False) 544 | links_removed.extend(links) 545 | 546 | if dilation: 547 | print(20 * '---') 548 | print("Runing dilation function for smaller clusters <", 549 | min_cluster_size) 550 | graph_main, links = dilate_cluster(graph_main, 551 | similars_idx, 552 | similars, 553 | max_cluster_size=max_cluster_size, 554 | min_cluster_size=min_cluster_size, 555 | min_weight=weigh_bounds[0]) 556 | links_added.extend(links) 557 | 558 | if filename is not None: 559 | # Export graph for drawing (e.g. 
using Cytoscape) 560 | nx.write_graphml(graph_main, filename) 561 | print("Network stored as graphml file under: ", filename) 562 | 563 | return graph_main, links_added, links_removed 564 | 565 | 566 | # ---------------------------------------------------------------------------- 567 | # -------------------- Functions to evaluate networks ------------------------ 568 | # ---------------------------------------------------------------------------- 569 | 570 | 571 | def evaluate_clusters(graph_main, m_sim_ref): 572 | """ Evaluate separate clusters of network based on given reference matrix. 573 | 574 | Args: 575 | ------- 576 | graph_main: networkx graph 577 | Graph, e.g. made using create_network() function. Based on networkx. 578 | m_sim_ref: numpy array 579 | 2D array with all reference similarity values between all-vs-all nodes. 580 | """ 581 | 582 | # Split graph into separate clusters 583 | graphs = list(nx.connected_component_subgraphs(graph_main)) 584 | 585 | num_nodes = [] 586 | num_edges = [] 587 | ref_sim_mean_edges = [] 588 | ref_sim_var_edges = [] 589 | ref_sim_mean_nodes = [] 590 | ref_sim_var_nodes = [] 591 | 592 | # Loop through clusters 593 | for graph in graphs: 594 | num_nodes.append(len(graph.nodes)) 595 | if len(graph.edges) > 0: # no edges for singletons 596 | num_edges.append(len(graph.edges)) 597 | 598 | edges = list(graph.edges) 599 | mol_sim_edges = np.array([m_sim_ref[x] for x in edges]) 600 | mol_sim_edges = np.nan_to_num(mol_sim_edges) 601 | ref_sim_mean_edges.append(np.mean(mol_sim_edges)) 602 | ref_sim_var_edges.append(np.var(mol_sim_edges)) 603 | else: 604 | num_edges.append(0) 605 | ref_sim_mean_edges.append(0) 606 | ref_sim_var_edges.append(0) 607 | 608 | nodes = list(graph.nodes) 609 | mean_mol_sims = [] 610 | for node in nodes: 611 | mean_mol_sims.append(m_sim_ref[node, nodes]) 612 | 613 | ref_sim_mean_nodes.append(np.mean(mean_mol_sims)) 614 | ref_sim_var_nodes.append(np.var(mean_mol_sims)) 615 | 616 | zipped = zip(num_nodes, num_edges, ref_sim_mean_edges, ref_sim_var_edges, ref_sim_mean_nodes, ref_sim_var_nodes) 617 | cluster_data = pd.DataFrame(list(zipped), columns=['num_nodes', 'num_edges', 'ref_sim_mean_edges', 618 | 'ref_sim_var_edges', 'ref_sim_mean_nodes', 'ref_sim_var_nodes']) 619 | return cluster_data 620 | 621 | 622 | def evaluate_clusters_louvain(graph_main, m_sim_ref, resolution=1.0): 623 | """ Cluster given network using Louvain algorithm. 624 | Then evaluate clusters of network based on given reference matrix. 625 | 626 | Args: 627 | ------- 628 | graph_main: networkx.Graph 629 | Graph, e.g. made using create_network() function. Based on networkx. 630 | m_sim_ref: numpy array 631 | 2D array with all reference similarity values between all-vs-all nodes. 632 | resolution: float 633 | Louvain algorithm resolution parameter. Will change size of communities. 
634 | See also: https://python-louvain.readthedocs.io/en/latest/api.html Default=1.0 635 | """ 636 | plt.style.use('ggplot') 637 | # Find clusters using Louvain algorithm (and python-louvain library) 638 | communities = community_louvain.best_partition(graph_main, 639 | weight='weight', 640 | resolution=resolution) 641 | nx.set_node_attributes(graph_main, communities, 'modularity') 642 | 643 | clusters = [] 644 | for cluster_id in set(communities.values()): 645 | cluster = [ 646 | nodes for nodes in communities.keys() 647 | if communities[nodes] == cluster_id 648 | ] 649 | clusters.append(cluster) 650 | 651 | num_nodes = [] 652 | ref_sim_mean_nodes = [] 653 | ref_sim_var_nodes = [] 654 | 655 | for cluster in clusters: 656 | num_nodes.append(len(cluster)) 657 | mean_mol_sims = [] 658 | for node in cluster: 659 | mean_mol_sims.append(m_sim_ref[node, cluster]) 660 | 661 | ref_sim_mean_nodes.append(np.mean(mean_mol_sims)) 662 | ref_sim_var_nodes.append(np.var(mean_mol_sims)) 663 | 664 | cluster_data = pd.DataFrame( 665 | list(zip(num_nodes, ref_sim_mean_nodes, ref_sim_var_nodes)), 666 | columns=['num_nodes', 'ref_sim_mean_nodes', 'ref_sim_var_nodes']) 667 | 668 | return graph_main, cluster_data 669 | 670 | 671 | # ---------------------------------------------------------------------------- 672 | # --------------------- Graph related plotting functions --------------------- 673 | # ---------------------------------------------------------------------------- 674 | def plots_cluster_evaluations(cluster_data_collection, 675 | m_sim_ref, 676 | total_num_nodes, 677 | size_bins, 678 | labels, 679 | title, 680 | filename=None): 681 | """ Plot cluster sizes and mean node similarity. 682 | 683 | Args: 684 | -------- 685 | cluster_data_collection: list 686 | List of cluster data for all scenarios to be plotted. 687 | m_sim_ref: numpy array 688 | 2D array with all reference similarity values between all-vs-all nodes. 689 | total_num_nodes: int 690 | Total number of nodes of graph. 691 | size_bins: list of int 692 | List of bins for cluster sizes. 693 | labels: list of str 694 | List of labels for all scenarios in list of cluster_data_collection. 695 | title: str 696 | Title for plot. Default = None 697 | filename: str 698 | If not None: save figure to file with given name. 
699 | """ 700 | 701 | plt.style.use('ggplot') 702 | fig = plt.figure(figsize=(12, 5)) 703 | ax = plt.subplot(111) 704 | 705 | num_plots = len(cluster_data_collection) 706 | cmap = matplotlib.cm.get_cmap('inferno') # 'Spectral') 707 | bins = [0] + [x + 1 for x in size_bins] 708 | x_labels = ['<' + str(bins[1])] 709 | x_labels += [ 710 | str(bins[i]) + '-' + str(bins[i + 1] - 1) 711 | for i in range(1, 712 | len(bins) - 2) 713 | ] 714 | x_labels += ['>' + str(bins[-2] - 1)] 715 | 716 | for count, cluster_data in enumerate(cluster_data_collection): 717 | 718 | num_elements = [] 719 | mean_edge_sim = [] 720 | mean_node_sim = [] 721 | for i in range(len(bins) - 1): 722 | num_elements.append( 723 | np.sum(cluster_data[(cluster_data['num_nodes'] <= bins[i + 1]) 724 | & (cluster_data['num_nodes'] > bins[i])] 725 | ['num_nodes'].values)) 726 | 727 | if 'ref_sim_mean_edges' in cluster_data.columns: 728 | mean_edge_sim.append( 729 | np.mean( 730 | cluster_data[(cluster_data['num_nodes'] <= bins[i + 1]) 731 | & (cluster_data['num_nodes'] > bins[i])] 732 | ['ref_sim_mean_edges'].values)) 733 | 734 | mean_node_sim.append( 735 | np.mean(cluster_data[(cluster_data['num_nodes'] <= bins[i + 1]) 736 | & (cluster_data['num_nodes'] > bins[i])] 737 | ['ref_sim_mean_nodes'].values)) 738 | 739 | num_elements[0] = total_num_nodes - np.sum(num_elements[1:]) 740 | if 'ref_sim_mean_edges' in cluster_data.columns: 741 | if np.isnan(mean_edge_sim[0]): 742 | mean_edge_sim[0] = 0 743 | 744 | plt.scatter(x_labels, 745 | mean_node_sim, 746 | s=num_elements, 747 | facecolor="None", 748 | edgecolors=[cmap(count / num_plots)], 749 | lw=3, 750 | alpha=0.7, 751 | label=labels[count]) 752 | 753 | plt.xlabel('cluster size') 754 | plt.ylabel('mean molecular similarity of nodes in cluster') 755 | chartbox = ax.get_position() 756 | ax.set_position( 757 | [chartbox.x0, chartbox.y0, chartbox.width * 0.8, chartbox.height]) 758 | lgnd = ax.legend(loc='upper center', bbox_to_anchor=(1.12, 1)) 759 | for i in range(num_plots): 760 | lgnd.legendHandles[i]._sizes = [30] 761 | 762 | plt.title(title) 763 | 764 | # Save figure to file 765 | if filename is not None: 766 | plt.savefig(filename, dpi=600) 767 | 768 | 769 | def plot_clustering_performance(data_collection, 770 | labels, 771 | thres_well=0.5, 772 | thres_poor=0.5, 773 | title=None, 774 | filename=None, 775 | size_xy=(8, 5)): 776 | """ Plot cluster evaluations for all conditions found in data_collection. 777 | Cluster will be classified as "well clustered" if the mean(similarity) across 778 | all nodes is > thres_well. Or as "poorly clustered" if < thres_poor. 779 | Clusters with only one node (singletons) will be counted as "non-clustered". 780 | 781 | Args: 782 | -------- 783 | data_collection: list of pandas.DataFrame() 784 | List of DataFrames as created by evaluate_clusters(). 785 | labels: list 786 | List of labels for the different conditions found in data_collection. 787 | thres_well: float 788 | Threshold above which clusters will be classified as "well clustered". Default = 0.5. 789 | thres_poor: float 790 | Threshold below which clusters will be classified as "poorly clustered". Default = 0.5. 791 | title: str 792 | Title for plot. Default = None 793 | filename: str 794 | If not none: save figure to file with given name. 795 | size_xy: tuple 796 | Figure size. Default is (8,5). 
797 | """ 798 | plt.style.use('ggplot') 799 | performance_data = [] 800 | ymax = np.sum(data_collection[0]['num_nodes'].values) # total_num_nodes 801 | legend_labels = [ 802 | "average structural similarity >= {:.2}".format(thres_well), 803 | "average structural similarity < {:.2}".format(thres_poor), 804 | 'non-clustered nodes' 805 | ] 806 | 807 | for cluster_data in data_collection: 808 | nodes_clustered_well = np.sum( 809 | cluster_data[(cluster_data['num_nodes'] > 1) 810 | & (cluster_data['ref_sim_mean_nodes'] >= thres_well)] 811 | ['num_nodes'].values) 812 | nodes_clustered_poor = np.sum( 813 | cluster_data[(cluster_data['num_nodes'] > 1) 814 | & (cluster_data['ref_sim_mean_nodes'] < thres_poor)] 815 | ['num_nodes'].values) 816 | nodes_not_clustered = np.sum( 817 | cluster_data[(cluster_data['num_nodes'] < 2)]['num_nodes'].values) 818 | 819 | performance_data.append( 820 | [nodes_clustered_well, nodes_clustered_poor, nodes_not_clustered]) 821 | 822 | fig = plt.figure(figsize=size_xy) 823 | ax = plt.subplot(111) 824 | ax.set_aspect(aspect=5) 825 | plt.plot(labels, [x[0] / ymax for x in performance_data], 826 | 'o-', 827 | color='crimson', 828 | label=legend_labels[0]) 829 | plt.plot(labels, [x[1] / ymax for x in performance_data], 830 | 'o-', 831 | color='teal', 832 | label=legend_labels[1]) 833 | plt.plot(labels, [x[2] / ymax for x in performance_data], 834 | 'o-', 835 | color='darkblue', 836 | alpha=0.6, 837 | label=legend_labels[2]) 838 | plt.title(title) 839 | plt.ylabel("Fraction of total nodes") 840 | plt.xlabel("networking conditions") 841 | plt.ylim(-0.03, 1) 842 | plt.legend() 843 | 844 | # Place legend 845 | # chartbox = ax.get_position() 846 | # ax.set_position([chartbox.x0, chartbox.y0, chartbox.width*0.8, chartbox.height]) 847 | # ax.legend(loc='upper center', bbox_to_anchor=(1.25, 1)) 848 | 849 | # Save figure to file 850 | if filename is not None: 851 | plt.savefig(filename, dpi=600) 852 | 853 | 854 | def plot_cluster(g, filename=None): 855 | """ Very basic plotting function to inspect small to medium sized clusters (or networks). 856 | 857 | Args: 858 | -------- 859 | g: networkx.Graph 860 | Networkx generated graph containing nodes and edges. 861 | filename: str 862 | If not none: save figure to file with given name. 863 | """ 864 | if len(g.nodes) > 1: 865 | edges = [(u, v) for (u, v, d) in g.edges(data=True)] 866 | weights = [d['weight'] for (u, v, d) in g.edges(data=True)] 867 | weights = weights - 0.95 * np.min(weights) 868 | weights = weights / np.max(weights) 869 | 870 | # Positions for all nodes 871 | pos = nx.spring_layout(g) 872 | 873 | plt.figure(figsize=(12, 12)) 874 | 875 | # Nodes 876 | nx.draw_networkx_nodes(g, pos, node_size=100) 877 | 878 | # Edges 879 | nx.draw_networkx_edges(g, 880 | pos, 881 | edgelist=edges, 882 | width=4 * weights, 883 | alpha=0.5) 884 | 885 | # Labels 886 | nx.draw_networkx_labels(g, pos, font_size=5, font_family='sans-serif') 887 | 888 | plt.axis('off') 889 | plt.show() 890 | 891 | if filename is not None: 892 | plt.savefig(filename, dpi=600) 893 | else: 894 | print("Given graph has not enough nodes to plot network.") 895 | 896 | 897 | # ---------------------------------------------------------------------------- 898 | # -------------------------- Small helper functions -------------------------- 899 | # ---------------------------------------------------------------------------- 900 | 901 | 902 | def row_counts(array): 903 | """ 904 | Function to find unique rows and count their occurences. 
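    Returns a tuple (unique_rows, counts).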
905 |     """
906 |     dt = np.dtype((np.void, array.dtype.itemsize * array.shape[1]))
907 |     b = np.ascontiguousarray(array).view(dt)
908 |     unq, cnt = np.unique(b, return_counts=True)
909 |     unq = unq.view(array.dtype).reshape(-1, array.shape[1])
910 | 
911 |     return unq, cnt
912 | 
--------------------------------------------------------------------------------
/custom_functions/plotting_functions.py:
--------------------------------------------------------------------------------
1 | """Plotting functions for spec2vec"""
2 | import numpy as np
3 | from matchms.similarity.spectrum_similarity_functions import collect_peak_pairs
4 | from matplotlib import pyplot as plt
5 | from scipy import spatial
6 | from rdkit import Chem
7 | from rdkit.Chem import Draw
8 | from IPython.display import SVG, display
9 | from spec2vec import SpectrumDocument
10 | from matchms.filtering import normalize_intensities
11 | from matchms.filtering import select_by_mz
12 | from matchms.filtering import select_by_relative_intensity
13 | 
14 | 
15 | def plot_precentile(arr_ref, arr_sim, num_bins=1000, show_top_percentile=1.0,
16 |                     ignore_diagonal=False):
17 |     """ Plot top percentile (as specified by show_top_percentile) of best results
18 |     in arr_sim and compare against reference values in arr_ref.
19 | 
20 |     Args:
21 |     -------
22 |     arr_ref: numpy array
23 |         Array of reference values to evaluate the quality of arr_sim.
24 |     arr_sim: numpy array
25 |         Array of similarity values to evaluate.
26 |     num_bins: int
27 |         Number of bins to divide data (default = 1000)
28 |     show_top_percentile: float
29 |         Plot the top 'show_top_percentile' percent of all similarity values in arr_sim. Default = 1.0
30 |     ignore_diagonal: bool -- If True, exclude the diagonal of arr_ref (set to NaN) first. Default = False.
31 |     """
32 |     def _ignore_reference_nans(arr_ref, arr_sim):
33 |         assert arr_ref.shape == arr_sim.shape, "Expected two arrays of identical shape."
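        # Keep only those entries for which the reference value is defined
        # (np.nan entries, e.g. an ignored diagonal, are dropped from both arrays).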
34 |         idx_not_nans = np.where(~np.isnan(arr_ref))
35 |         arr_sim = arr_sim[idx_not_nans]
36 |         arr_ref = arr_ref[idx_not_nans]
37 |         return arr_ref, arr_sim
38 | 
39 |     if ignore_diagonal:
40 |         np.fill_diagonal(arr_ref, np.nan)
41 | 
42 |     arr_ref, arr_sim = _ignore_reference_nans(arr_ref, arr_sim)
43 | 
44 |     start = int(arr_sim.shape[0] * show_top_percentile / 100)
45 |     idx = np.argpartition(arr_sim, -start)
46 |     starting_point = arr_sim[idx[-start]]
47 |     if starting_point == 0:
48 |         print("Not enough datapoints != 0 above the given top percentile.")
49 | 
50 |     # Remove all data below show_top_percentile
51 |     low_as = np.where(arr_sim < starting_point)[0]
52 | 
53 |     length_selected = arr_sim.shape[0] - low_as.shape[0]  # start+1
54 | 
55 |     data = np.zeros((2, length_selected))
56 |     data[0, :] = np.delete(arr_sim, low_as)
57 |     data[1, :] = np.delete(arr_ref, low_as)
58 |     data = data[:, np.lexsort((data[1, :], data[0, :]))]
59 | 
60 |     ref_score_cum = []
61 | 
62 |     for i in range(num_bins):
63 |         low = int(i * length_selected / num_bins)
64 |         # high = int((i+1) * length_selected/num_bins)
65 |         ref_score_cum.append(np.mean(data[1, low:]))
66 |     ref_score_cum = np.array(ref_score_cum)
67 |     x_percentiles = (show_top_percentile / num_bins * (1 + np.arange(num_bins)))[::-1]
68 | 
69 |     fig, ax = plt.subplots(figsize=(6, 6))
70 |     plt.plot(
71 |         x_percentiles,
72 |         ref_score_cum,
73 |         color='black')
74 |     plt.xticks(np.linspace(0, show_top_percentile, 5),
75 |                ["{:.2f}%".format(x) for x in np.linspace(0, show_top_percentile, 5)])
76 |     plt.xlabel("Top percentile of spectral similarity score g(s,s')")
77 |     plt.ylabel("Mean molecular similarity (f(t,t') within that percentile)")
78 | 
79 |     return ref_score_cum
80 | 
81 | 
82 | def plot_spectra_comparison(spectrum1_in, spectrum2_in,
83 |                             model,
84 |                             intensity_weighting_power=0.5,
85 |                             num_decimals=2,
86 |                             min_mz=5,
87 |                             max_mz=500,
88 |                             intensity_threshold=0.01,
89 |                             method='cosine',
90 |                             tolerance=0.005,
91 |                             wordsim_cutoff=0.5,
92 |                             circle_size=5,
93 |                             circle_scaling='wordsim',
94 |                             padding=10,
95 |                             display_molecules=False,
96 |                             figsize=(12, 12),
97 |                             filename=None):
98 |     """ In-depth visual comparison of spectral similarity scores,
99 |     calculated based on cosine/mod.cosine and Spec2Vec.
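    The figure combines a central peak-vs-peak word similarity map with the two
    spectra drawn along the top and right axes (plus, optionally, the two
    molecular structures).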
100 | 
101 |     Parameters
102 |     ----------
103 |     method: str
104 |         'cosine' or 'modcos' (modified cosine score)
105 |     circle_scaling: str
106 |         Scale circles based on 'wordsim' or 'peak_product'
107 |     """
108 | 
109 |     def apply_filters(s):
110 |         s = normalize_intensities(s)
111 |         s = select_by_mz(s, mz_from=min_mz, mz_to=max_mz)
112 |         s = select_by_relative_intensity(s, intensity_from=intensity_threshold)
113 |         s.losses = None
114 |         return s
115 | 
116 |     spectrum1 = apply_filters(spectrum1_in)
117 |     spectrum2 = apply_filters(spectrum2_in)
118 | 
119 |     plt.style.use("seaborn-white")  # alternative: 'ggplot'
120 |     plot_colors = ['darkcyan', 'purple']
121 | 
122 |     # Definitions for the axes
123 |     left, width = 0.1, 0.6
124 |     bottom, height = 0.1, 0.6
125 |     spacing = 0.01
126 | 
127 |     rect_wordsim = [left, bottom, width, height]
128 |     rect_specx = [left, bottom + height + spacing, width, 0.2]
129 |     rect_specy = [left + width + spacing, bottom, 0.25, height]
130 | 
131 |     document1 = SpectrumDocument(spectrum1, n_decimals=num_decimals)
132 |     document2 = SpectrumDocument(spectrum2, n_decimals=num_decimals)
133 | 
134 |     # Remove words/peaks that are not in dictionary
135 |     select1 = np.asarray([i for i, word in enumerate(document1.words) if word in model.wv.vocab])
136 |     select2 = np.asarray([i for i, word in enumerate(document2.words) if word in model.wv.vocab])
137 |     peaks1 = np.asarray(spectrum1.peaks[:]).T
138 |     peaks2 = np.asarray(spectrum2.peaks[:]).T
139 |     peaks1 = peaks1[select1, :]
140 |     peaks2 = peaks2[select2, :]
141 |     min_peaks1 = np.min(peaks1[:, 0])
142 |     min_peaks2 = np.min(peaks2[:, 0])
143 |     max_peaks1 = np.max(peaks1[:, 0])
144 |     max_peaks2 = np.max(peaks2[:, 0])
145 |     possible_grid_points = np.arange(0, 2000, 50)
146 |     grid_points1 = possible_grid_points[(possible_grid_points > min_peaks1 - padding) \
147 |                                         & (possible_grid_points < max_peaks1 + padding)]
148 |     grid_points2 = possible_grid_points[(possible_grid_points > min_peaks2 - padding) \
149 |                                         & (possible_grid_points < max_peaks2 + padding)]
150 | 
151 |     word_vectors1 = model.wv[[document1.words[x] for x in select1]]
152 |     word_vectors2 = model.wv[[document2.words[x] for x in select2]]
153 | 
154 |     csim_words = 1 - spatial.distance.cdist(word_vectors1, word_vectors2, 'cosine')
155 |     csim_words[csim_words < wordsim_cutoff] = 0  # Remove values below cutoff
156 |     print("Word similarity range: {:.3f} to {:.3f}".format(np.min(csim_words), np.max(csim_words)))
157 | 
158 |     # Plot spectra
159 |     # -------------------------------------------------------------------------
160 |     fig = plt.figure(figsize=figsize)
161 |     # Word similarity plot (central)
162 |     ax_wordsim = plt.axes(rect_wordsim)
163 |     ax_wordsim.tick_params(direction='in', top=True, right=True)
164 |     # Spectra plot (top)
165 |     ax_specx = plt.axes(rect_specx)
166 |     ax_specx.tick_params(direction='in', labelbottom=False)
167 |     # Spectra plot 2 (right)
168 |     ax_specy = plt.axes(rect_specy)
169 |     ax_specy.tick_params(direction='in', labelleft=False)
170 | 
171 |     # Spec2Vec similarity plot:
172 |     # -------------------------------------------------------------------------
173 |     data_x = []
174 |     data_y = []
175 |     data_z = []
176 |     data_peak_product = []
177 |     for i in range(len(select1)):
178 |         for j in range(len(select2)):
179 |             data_x.append(peaks1[i, 0])
180 |             data_y.append(peaks2[j, 0])
181 |             data_z.append(csim_words[i, j])
182 |             data_peak_product.append(peaks1[i, 1] * peaks2[j, 1])
183 | 
184 |     # Sort by word similarity
185 |     data_x = np.array(data_x)
186 |     data_y = np.array(data_y)
187 |     data_z = np.array(data_z)
188 |     data_peak_product = np.array(data_peak_product)
189 |     idx = np.lexsort((data_x, data_y, data_z))
190 | 
191 |     cm = plt.cm.get_cmap('RdYlBu_r')  # alternatives: 'YlOrRd', 'RdBu_r'
192 | 
193 |     # Plot word similarities
194 |     if circle_scaling == 'peak_product':
195 |         wordsimplot = ax_wordsim.scatter(data_x[idx],
196 |                                          data_y[idx],
197 |                                          s=100 * circle_size *
198 |                                          (0.01 + data_peak_product[idx]**2),
199 |                                          marker="o",
200 |                                          c=data_z[idx],
201 |                                          cmap=cm,
202 |                                          alpha=0.6)
203 |     elif circle_scaling == 'wordsim':
204 |         wordsimplot = ax_wordsim.scatter(data_x[idx],
205 |                                          data_y[idx],
206 |                                          s=100 * circle_size *
207 |                                          (0.01 + data_z[idx]**2),
208 |                                          marker="o",
209 |                                          c=data_z[idx],
210 |                                          cmap=cm,
211 |                                          alpha=0.6)
212 | 
213 |     # (Modified) Cosine similarity plot:
214 |     # -------------------------------------------------------------------------
215 |     if method == 'cosine':
216 |         score_classical, used_matches = cosine_score(spectrum1, spectrum2, tolerance, modified_cosine=False)
217 |     elif method == 'modcos':
218 |         score_classical, used_matches = cosine_score(spectrum1, spectrum2, tolerance, modified_cosine=True)
219 |     else:
220 |         raise ValueError("Given method unknown. Use 'cosine' or 'modcos'.")
221 | 
222 |     idx1, idx2, _ = zip(*used_matches)
223 |     cosine_x = []
224 |     cosine_y = []
225 |     for i in range(len(idx1)):
226 |         if idx1[i] in select1 and idx2[i] in select2:
227 |             cosine_x.append(peaks1[idx1[i], 0])
228 |             cosine_y.append(peaks2[idx2[i], 0])
229 | 
230 |     # Plot (mod.) cosine similarities
231 |     ax_wordsim.scatter(cosine_x, cosine_y, s=100, c='black', marker=(5, 2))
232 |     ax_wordsim.set_xlim(min_peaks1 - padding, max_peaks1 + padding)
233 |     ax_wordsim.set_ylim(min_peaks2 - padding, max_peaks2 + padding)
234 |     ax_wordsim.set_xlabel('spectrum 1 - fragment mz', fontsize=16)
235 |     ax_wordsim.set_ylabel('spectrum 2 - fragment mz', fontsize=16)
236 |     ax_wordsim.tick_params(labelsize=13)
237 |     ax_wordsim.set_xticks(grid_points1)
238 |     ax_wordsim.set_yticks(grid_points2)
239 |     ax_wordsim.grid(True)
240 | 
241 |     # Plot spectra 1
242 |     ax_specx.vlines(peaks1[:, 0], [0], peaks1[:, 1], color=plot_colors[0])
243 |     ax_specx.plot(peaks1[:, 0], peaks1[:, 1], '.')  # Stem ends
244 |     ax_specx.plot([peaks1[:, 0].max(), peaks1[:, 0].min()], [0, 0],
245 |                   '--')  # Middle bar
246 |     ax_specx.set_xlim(min_peaks1 - padding, max_peaks1 + padding)
247 |     ax_specx.set_yticks([0, 0.25, 0.5, 0.75, 1])
248 |     ax_specx.set_xticks(grid_points1)
249 |     ax_specx.set_ylabel('peak intensity (relative)', fontsize=16)
250 |     ax_specx.tick_params(labelsize=13)
251 | 
252 |     ax_specx.grid(True)
253 | 
254 |     # Plot spectra 2
255 |     ax_specy.hlines(peaks2[:, 0], [0], peaks2[:, 1], color=plot_colors[1])
256 |     ax_specy.plot(peaks2[:, 1], peaks2[:, 0], '.')  # Stem ends
257 |     ax_specy.plot([0, 0], [peaks2[:, 0].min(), peaks2[:, 0].max()],
258 |                   '--')  # Middle bar
259 |     ax_specy.set_ylim(min_peaks2 - padding, max_peaks2 + padding)
260 |     ax_specy.set_xticks([0, 0.25, 0.5, 0.75, 1])
261 |     ax_specy.set_yticks(grid_points2)
262 |     ax_specy.set_xlabel('peak intensity (relative)', fontsize=16)
263 |     ax_specy.tick_params(labelsize=13)
264 | 
265 |     ax_specy.grid(True)
266 | 
267 |     fig.colorbar(wordsimplot, ax=ax_specy)
268 |     if filename is not None:
269 |         plt.savefig(filename)
270 |     plt.show()
271 | 
272 |     # Plot molecules
273 |     # -------------------------------------------------------------------------
274 |     if display_molecules:
275 |         smiles = [spectrum1.get("smiles"), spectrum2.get("smiles")]
276 |         molecules = [Chem.MolFromSmiles(x) for x in smiles]
277 |         display(Draw.MolsToGridImage(molecules, molsPerRow=2, subImgSize=(400, 400)))
278 | 
279 | 
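# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes a trained gensim Word2Vec model and a list of matchms spectra are
# already available; the file names below are placeholders, not real data files.
#
#     import gensim
#     from matchms.importing import load_from_json
#
#     model = gensim.models.Word2Vec.load("spec2vec_model.model")  # placeholder path
#     spectrums = load_from_json("cleaned_spectra.json")  # placeholder path
#     plot_spectra_comparison(spectrums[0], spectrums[1],
#                             model,
#                             method='modcos',
#                             circle_scaling='wordsim',
#                             display_molecules=True)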
280 | # def scour_svg_cleaning(target, source, env=[]):
281 | #     """ Use scour to clean an svg file.
282 | 
283 | #     """
284 | #     options = scour.generateDefaultOptions()
285 | 
286 | #     # override defaults for max cleansing
287 | #     options.enable_viewboxing = True
288 | #     options.strip_comments = True
289 | #     options.strip_ids = True
290 | #     options.remove_metadata = True
291 | #     options.indent_type = None
292 | #     options.shorten_ids = True
293 | 
294 | #     if 'SCOUR_OPTIONS' in env:
295 | #         options.__dict__.update(env['SCOUR_OPTIONS'])
296 | 
297 | #     instream = open(source, 'rb')
298 | #     outstream = open(target, 'wb')
299 | 
300 | #     scour.start(options, instream, outstream)
301 | 
302 | 
303 | # def plot_molecules(smiles_lst, filename=None):
304 | #     """ Plot molecule from smile(s).
305 | #     Uses Scour to clean rdkit svg.
306 | 
307 | #     filename: str
308 | #         If filename is given, molecules will be saved to filename.
309 | #     """
310 | #     if not isinstance(smiles_lst, list):
311 | #         smiles_lst = [smiles_lst]
312 | #     for i, smiles in enumerate(smiles_lst):
313 | #         temp_file = "draw_mols_temp.svg"
314 | #         mol = Chem.MolFromSmiles(smiles)
315 | #         Draw.MolToFile(mol, temp_file)
316 | 
317 | #         # Clean svg using scour
318 | #         if filename is not None:
319 | #             file = filename.split('.svg')[0] + str(i) + '.svg'
320 | #         else:
321 | #             file = "draw_mols_temp_corr.svg"
322 | #         scour(file, temp_file, [])
323 | 
324 | #         # Display cleaned svg
325 | #         display(SVG(filename=temp_file))
326 | 
327 | 
328 | def cosine_score(spectrum1, spectrum2, tolerance, modified_cosine=False):
329 |     """Calculate the (modified) cosine similarity score between two spectra.
330 | 
331 |     Parameters
332 |     ----------
333 |     spectrum1 : matchms-type Spectrum
334 |         First input spectrum (peak intensities must be normalized).
335 |     spectrum2 : matchms-type Spectrum
336 |         Second input spectrum (peak intensities must be normalized).
337 |     tolerance : float
338 |         Maximum allowed m/z difference for two peaks to count as a matching pair.
339 |     modified_cosine : bool
340 |         If True, also pair peaks that are shifted by the precursor mass difference.
341 | 
342 |     Returns
343 |     -------
344 |     Tuple of the cosine score and the list of peak matches used to compute it.
345 |     """
346 |     def get_peaks_arrays():
347 |         """Get peaks mz and intensities as numpy array."""
348 |         spec1 = np.vstack((spectrum1.peaks.mz, spectrum1.peaks.intensities)).T
349 |         spec2 = np.vstack((spectrum2.peaks.mz, spectrum2.peaks.intensities)).T
350 |         assert max(spec1[:, 1]) <= 1, ("Input spectrum1 is not normalized. ",
351 |                                        "Apply 'normalize_intensities' filter first.")
352 |         assert max(spec2[:, 1]) <= 1, ("Input spectrum2 is not normalized. ",
353 |                                        "Apply 'normalize_intensities' filter first.")
354 |         return spec1, spec2
355 | 
356 |     def get_matching_pairs():
357 |         """Get pairs of peaks that match within the given tolerance."""
358 |         zero_pairs = collect_peak_pairs(spec1, spec2, tolerance, shift=0.0)
359 |         if modified_cosine:
360 |             message = "Precursor_mz missing. Apply 'add_precursor_mz' filter first."
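            # The modified cosine additionally pairs peaks shifted by the difference
            # of the two precursor masses, so both spectra need a precursor_mz entry.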
361 | assert spectrum1.get("precursor_mz") and spectrum2.get("precursor_mz"), message 362 | mass_shift = spectrum1.get("precursor_mz") - spectrum2.get("precursor_mz") 363 | nonzero_pairs = collect_peak_pairs(spec1, spec2, tolerance, shift=mass_shift) 364 | unsorted_matching_pairs = zero_pairs + nonzero_pairs 365 | else: 366 | unsorted_matching_pairs = zero_pairs 367 | return sorted(unsorted_matching_pairs, key=lambda x: x[2], reverse=True) 368 | 369 | def calc_score(): 370 | """Calculate cosine similarity score.""" 371 | used1 = set() 372 | used2 = set() 373 | score = 0.0 374 | used_matches = [] 375 | for match in matching_pairs: 376 | if not match[0] in used1 and not match[1] in used2: 377 | score += match[2] 378 | used1.add(match[0]) # Every peak can only be paired once 379 | used2.add(match[1]) # Every peak can only be paired once 380 | used_matches.append(match) 381 | # Normalize score: 382 | score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2)) 383 | return score, used_matches 384 | 385 | spec1, spec2 = get_peaks_arrays() 386 | matching_pairs = get_matching_pairs() 387 | return calc_score() 388 | -------------------------------------------------------------------------------- /custom_functions/pubchem_lookup.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | import pubchempy as pcp 4 | import numpy as np 5 | from matchms.utils import is_valid_inchikey 6 | 7 | 8 | def pubchem_metadata_lookup(spectrum_in, name_search_depth=10, formula_search=False, 9 | min_formula_length=6, formula_search_depth=25, verbose=1): 10 | """ 11 | 12 | Parameters 13 | ---------- 14 | spectrum_in 15 | Matchms type spectrum as input. 16 | name_search_depth: int 17 | How many of the most relevant name matches to explore deeper. Default = 10. 
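    formula_search: bool
        If True, also run a formula-based search when the name-based search gives no match. Default = False.
    min_formula_length: int
        Minimum length of the formula string required to attempt a formula search. Default = 6.
    formula_search_depth: int
        How many of the formula search results to explore deeper. Default = 25.
    verbose: int
        Verbosity level (1 = print main results, 2 = print additional details). Default = 1.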
18 | 19 | """ 20 | if spectrum_in is None: 21 | return None 22 | 23 | spectrum = spectrum_in.clone() 24 | if is_valid_inchikey(spectrum.get("inchikey")): 25 | return spectrum 26 | 27 | def _plausible_name(compound_name): 28 | return (isinstance(compound_name, str) and len(compound_name) > 4) 29 | 30 | compound_name = spectrum.get("compound_name") 31 | if not _plausible_name(compound_name): 32 | return spectrum 33 | 34 | # Start pubchem search 35 | inchi = spectrum.get("inchi") 36 | parent_mass = spectrum.get("parent_mass") 37 | if isinstance(parent_mass, np.ndarray): 38 | parent_mass = parent_mass[0] 39 | formula = spectrum.get("formula") 40 | 41 | # 1) Search for matching compound name 42 | results_pubchem = pubchem_name_search(compound_name, name_search_depth=name_search_depth, 43 | verbose=verbose) 44 | 45 | if len(results_pubchem) > 0: 46 | 47 | # 1a) Search for matching inchi 48 | if likely_has_inchi(inchi): 49 | inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_inchi_match(results_pubchem, inchi, 50 | verbose=verbose) 51 | # 1b) Search for matching mass 52 | if not likely_has_inchi(inchi) or inchikey_pubchem is None: 53 | inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_mass_match(results_pubchem, parent_mass, 54 | verbose=verbose) 55 | 56 | if inchikey_pubchem is not None and inchi_pubchem is not None: 57 | logging.info("Matching compound name: %s", compound_name) 58 | if verbose >= 1: 59 | print(f"Matching compound name: {compound_name}") 60 | spectrum.set("inchikey", inchikey_pubchem) 61 | spectrum.set("inchi", inchi_pubchem) 62 | spectrum.set("smiles", smiles_pubchem) 63 | return spectrum 64 | 65 | elif verbose >= 2: 66 | print(f"No matches found for compound name: {compound_name}") 67 | 68 | # 2) Search for matching formula 69 | if formula_search and formula and len(formula) >= min_formula_length: 70 | results_pubchem = pubchem_formula_search(formula, formula_search_depth=formula_search_depth, 71 | verbose=verbose) 72 | 73 | if len(results_pubchem) > 0: 74 | 75 | # 2a) Search for matching inchi 76 | if likely_has_inchi(inchi): 77 | inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_inchi_match(results_pubchem, inchi) 78 | # 2b) Search for matching mass 79 | if inchikey_pubchem is None: 80 | inchi_pubchem, inchikey_pubchem, smiles_pubchem = find_pubchem_mass_match(results_pubchem, parent_mass) 81 | 82 | if inchikey_pubchem is not None and inchi_pubchem is not None: 83 | logging.info("Matching formula: %s", formula) 84 | if verbose >= 1: 85 | print(f"Matching formula: {formula}") 86 | spectrum.set("inchikey", inchikey_pubchem) 87 | spectrum.set("inchi", inchi_pubchem) 88 | spectrum.set("smiles", smiles_pubchem) 89 | return spectrum 90 | 91 | elif verbose >= 2: 92 | print(f"No matches found for formula: {formula}") 93 | 94 | return spectrum 95 | 96 | 97 | def likely_has_inchi(inchi): 98 | """Quick test to avoid excess in-depth testing""" 99 | if inchi is None: 100 | return False 101 | inchi = inchi.strip('"') 102 | regexp = r"(InChI=1|1)(S\/|\/)[0-9, A-Z, a-z,\.]{2,}\/(c|h)[0-9]" 103 | if not re.search(regexp, inchi): 104 | return False 105 | return True 106 | 107 | 108 | def likely_inchi_match(inchi_1, inchi_2, min_agreement=3): 109 | """Try to match defective inchi to non-defective ones. 110 | 111 | Compares inchi parts seperately. Match is found if at least the first 112 | 'min_agreement' parts are a good enough match. 113 | The main 'defects' this method accounts for are missing '-' in the inchi. 
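    For example, an inchi stored without any of its '-' characters will still
    match its correctly formatted counterpart, since '-' is stripped from both
    inchi before the parts are compared.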
114 |     In addition, differences between '-', '+', and '?' will be ignored.
115 | 
116 |     Parameters
117 |     ----------
118 |     inchi_1: str
119 |         inchi of molecule.
120 |     inchi_2: str
121 |         inchi of molecule.
122 |     min_agreement: int
123 |         Minimum number of first parts that MUST be a match between both input
124 |         inchi to finally consider it a match. Default is min_agreement=3.
125 |     """
126 |     if min_agreement < 2:
127 |         print("Warning! 'min_agreement' < 2 has no discriminative power. Should be >= 2.")
128 |     if min_agreement == 2:
129 |         print("Warning! 'min_agreement' == 2 has little discriminative power",
130 |               "(only looking at the structural formula). Better use > 2.")
131 |     agreement = 0
132 | 
133 |     # Remove spaces and '"' to account for different notations.
134 |     # Remove everything with little discriminative power.
135 |     ignore_lst = ['"', ' ', '-', '+', '?']
136 |     for ignore in ignore_lst:
137 |         inchi_1 = inchi_1.replace(ignore, '')
138 |         inchi_2 = inchi_2.replace(ignore, '')
139 | 
140 |     # Split inchi in parts.
141 |     inchi_1_parts = inchi_1.split('/')
142 |     inchi_2_parts = inchi_2.split('/')
143 | 
144 |     # Check if both inchi have sufficient parts (separated by '/')
145 |     if len(inchi_1_parts) >= min_agreement and len(
146 |             inchi_2_parts) >= min_agreement:
147 |         # Count how many parts agree well
148 |         for i in range(min_agreement):
149 |             agreement += (inchi_1_parts[i] == inchi_2_parts[i])
150 | 
151 |     if agreement == min_agreement:
152 |         return True
153 |     else:
154 |         return False
155 | 
156 | 
157 | def likely_inchikey_match(inchikey_1, inchikey_2, min_agreement=1):
158 |     """Try to match inchikeys.
159 | 
160 |     Compares inchikey parts separately. Match is found if at least the first
161 |     'min_agreement' parts are a good enough match.
162 | 
163 |     Parameters
164 |     ----------
165 |     inchikey_1: str
166 |         inchikey of molecule.
167 |     inchikey_2: str
168 |         inchikey of molecule.
169 |     min_agreement: int
170 |         Minimum number of first parts that MUST be a match between both input
171 |         inchikey to finally consider it a match. Default is min_agreement=1.
172 |     """
173 |     if min_agreement not in [1, 2, 3]:
174 |         print("Warning! 'min_agreement' should be 1, 2, or 3.")
175 |     agreement = 0
176 | 
177 |     # Harmonize strings
178 |     inchikey_1 = inchikey_1.upper().replace('"', '').replace(' ', '')
179 |     inchikey_2 = inchikey_2.upper().replace('"', '').replace(' ', '')
180 | 
181 |     # Split inchikey in parts.
182 |     inchikey_1_parts = inchikey_1.split('-')
183 |     inchikey_2_parts = inchikey_2.split('-')
184 | 
185 |     # Check if both inchikey have sufficient parts (separated by '-')
186 |     if len(inchikey_1_parts) >= min_agreement and len(
187 |             inchikey_2_parts) >= min_agreement:
188 |         # Count how many parts mostly agree
189 |         for i in range(min_agreement):
190 |             agreement += (inchikey_1_parts[i] == inchikey_2_parts[i])
191 | 
192 |     return agreement == min_agreement
193 | 
194 | 
195 | def pubchem_name_search(compound_name: str, name_search_depth=10, verbose=1):
196 |     """Search PubChem for the given compound name."""
197 |     results_pubchem = pcp.get_compounds(compound_name,
198 |                                         'name',
199 |                                         listkey_count=name_search_depth)
200 |     if verbose >= 2:
201 |         print("Found at least", len(results_pubchem),
202 |               "compounds of that name on PubChem.")
203 |     return results_pubchem
204 | 
205 | 
206 | def pubchem_formula_search(compound_formula: str, formula_search_depth=25, verbose=1):
207 |     """Search PubChem for the given compound formula."""
208 |     sids_pubchem = pcp.get_sids(compound_formula,
209 |                                 'formula',
210 |                                 listkey_count=formula_search_depth)
211 | 
212 |     results_pubchem = []
213 |     for sid in sids_pubchem:
214 |         result = pcp.Compound.from_cid(sid['CID'])
215 |         results_pubchem.append(result)
216 | 
217 |     if verbose >= 2:
218 |         print(f"Found at least {len(results_pubchem)} compounds with formula: {compound_formula}.")
219 |     return results_pubchem
220 | 
221 | 
222 | def find_pubchem_inchi_match(results_pubchem,
223 |                              inchi,
224 |                              min_inchi_match=3,
225 |                              verbose=1):
226 |     """Search the PubChem results for an inchi match.
227 |     Checks whether any result inchi can be matched to the (possibly defective) input inchi.
228 | 
229 | 
230 |     Outputs found inchi and found inchikey (will be None if none is found).
231 | 
232 |     Parameters
233 |     ----------
234 |     results_pubchem: list
235 |         List of name search results (pubchempy Compound objects) from PubChem.
236 |     inchi: str
237 |         Inchi (correct, or defective...). Set to None to ignore.
238 |     min_inchi_match: int
239 |         Minimum number of first parts that MUST be a match between both input
240 |         inchi to finally consider it a match. Default is min_inchi_match=3.
241 |     """
242 | 
243 |     inchi_pubchem = None
244 |     inchikey_pubchem = None
245 |     smiles_pubchem = None
246 | 
247 |     # Loop through the first 'name_search_depth' results found on PubChem. Stop once the first match is found.
248 |     for result in results_pubchem:
249 |         inchi_pubchem = '"' + result.inchi + '"'
250 |         inchikey_pubchem = result.inchikey
251 |         smiles_pubchem = result.isomeric_smiles
252 |         if smiles_pubchem is None:
253 |             smiles_pubchem = result.canonical_smiles
254 | 
255 |         match_inchi = likely_inchi_match(inchi, inchi_pubchem,
256 |                                          min_agreement=min_inchi_match)
257 | 
258 |         if match_inchi:
259 |             logging.info("Matching inchi: %s", inchi)
260 |             if verbose >= 1:
261 |                 print(f"Found matching compound for inchi: {inchi} (Pubchem: {inchi_pubchem})")
262 |             break
263 | 
264 |     if not match_inchi:
265 |         inchi_pubchem = None
266 |         inchikey_pubchem = None
267 |         smiles_pubchem = None
268 | 
269 |         if verbose >= 2:
270 |             print("No matches found for inchi", inchi, "\n")
271 | 
272 |     return inchi_pubchem, inchikey_pubchem, smiles_pubchem
273 | 
274 | 
275 | def find_pubchem_mass_match(results_pubchem,
276 |                             parent_mass,
277 |                             mass_tolerance=2.0,
278 |                             verbose=1):
279 |     """Search the PubChem results for a match in molecular mass.
280 |     Checks whether any result's exact mass lies within 'mass_tolerance' of the input parent mass.
281 | 
282 | 
283 |     Outputs found inchi and found inchikey (will be None if none is found).
284 | 
285 |     Parameters
286 |     ----------
287 |     results_pubchem: list
288 |         List of name search results (pubchempy Compound objects) from PubChem.
289 |     parent_mass: float
290 |         Spectrum's guessed parent mass.
291 |     mass_tolerance: float
292 |         Acceptable mass difference between query compound and pubchem result.
293 |     """
294 |     inchi_pubchem = None
295 |     inchikey_pubchem = None
296 |     smiles_pubchem = None
297 | 
298 |     for result in results_pubchem:
299 |         inchi_pubchem = '"' + result.inchi + '"'
300 |         inchikey_pubchem = result.inchikey
301 |         smiles_pubchem = result.isomeric_smiles
302 |         if smiles_pubchem is None:
303 |             smiles_pubchem = result.canonical_smiles
304 | 
305 |         pubchem_mass = result.exact_mass  # compare each candidate, not only the first result
306 |         match_mass = (np.abs(pubchem_mass - parent_mass) <= mass_tolerance)
307 | 
308 |         if match_mass:
309 |             logging.info("Matching molecular weight %s vs parent mass of %s",
310 |                          str(np.round(pubchem_mass, 1)),
311 |                          str(np.round(parent_mass, 1)))
312 |             if verbose >= 1:
313 |                 print(f"Matching molecular weight ({pubchem_mass:.1f} vs parent mass of {parent_mass:.1f})")
314 |             break
315 | 
316 |     if not match_mass:
317 |         inchi_pubchem = None
318 |         inchikey_pubchem = None
319 |         smiles_pubchem = None
320 | 
321 |         if verbose >= 2:
322 |             print(f"No matches found for mass {parent_mass} Da")
323 | 
324 |     return inchi_pubchem, inchikey_pubchem, smiles_pubchem
325 | 
--------------------------------------------------------------------------------
/custom_functions/similarity_matrix.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | 
4 | def all_vs_all_similarity_matrix(spectrums, similarity_function,
5 |                                  filename=None, safety_points=None):
6 |     """Calculate similarity matrix of all spectrums vs all spectrums.
7 | 
8 |     Args:
9 |     ----
10 |     spectrums: list of matchms-type spectrums to compare with each other.
11 |     similarity_function: matchms similarity measure providing a .pair() method.
12 |     filename: if given, save the resulting matrices to this file. Default = None.
13 |     safety_points: if given, number of intermediate saves along the way. Default = None.
14 |     """
15 | 
16 |     total_num_calculations = int((len(spectrums)**2)/2 + 0.5 * len(spectrums))
17 |     if safety_points is not None:
18 |         # Save matrix at regular intervals along the process
19 |         safety_interval = int(total_num_calculations/safety_points)
20 | 
21 |     similarities = np.zeros((len(spectrums), len(spectrums)))
22 |     num_matches = np.zeros((len(spectrums), len(spectrums)))
23 | 
24 |     count = 0
25 |     for i in range(len(spectrums)):
26 |         for j in range(i, len(spectrums)):
27 |             score, matches = similarity_function.pair(spectrums[i], spectrums[j])
28 |             similarities[i, j] = score
29 |             num_matches[i, j] = matches
30 |             count += 1
31 |             # Show progress
32 |             if (count+1) % 10000 == 0:
33 |                 print("\r", "About {:.3f}% of similarity scores calculated.".format(100 * count/total_num_calculations), end="")
34 | 
35 |             # Create safety points
36 |             if filename is not None and safety_points is not None:
37 |                 if (count+1) % safety_interval == 0:
38 |                     safety_filename = filename.split(".")[0] + "safety"
39 |                     np.save(safety_filename + ".npy", similarities)
40 |                     np.save(safety_filename + "_matches.npy", num_matches)
41 | 
42 |     # Symmetric matrix --> fill
43 |     for i in range(1, len(spectrums)):
44 |         for j in range(i):
45 |             similarities[i, j] = similarities[j, i]
46 |             num_matches[i, j] = num_matches[j, i]
47 | 
48 |     # Save final results
49 |     if filename is not None:
50 |         np.save(filename, similarities)
51 |         np.save(filename.split(".")[0] + "_matches.npy", num_matches)
52 | 
53 |     return similarities, num_matches
54 | 
--------------------------------------------------------------------------------
/notebooks/iomega-2-split_data_into_subsets.ipynb:
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Iomega workflow\n", 8 | "## Split cleaned data into subsets\n", 9 | "Here we split the previously cleaned dataset (>150,000 spectra) into various subsets for further analysis." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import os\n", 19 | "import sys\n", 20 | "\n", 21 | "ROOT = os.path.dirname(os.getcwd())\n", 22 | "path_data = os.path.join(ROOT, 'data')" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "### Import data from fully pre-processed data" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": { 36 | "scrolled": true 37 | }, 38 | "outputs": [], 39 | "source": [ 40 | "# Load fully processed dataset\n", 41 | "from matchms.importing import load_from_json\n", 42 | "\n", 43 | "filename = os.path.join(path_data,'gnps_all_cleand_by_matchms_and_pubchem_lookups.json')\n", 44 | "reference_spectrums = load_from_json(filename)" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "### Select positive mode spectra only" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 3, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "reference_spectrums_positive = [s.clone() for s in reference_spectrums if s.get(\"ionmode\") == \"positive\"]" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 4, 66 | "metadata": {}, 67 | "outputs": [ 68 | { 69 | "name": "stdout", 70 | "output_type": "stream", 71 | "text": [ 72 | "Number of spectra: 112956\n", 73 | "Inchis: 92997 -- 16071 unique\n", 74 | "Smiles: 92964 -- 20540 unique\n", 75 | "Inchikeys: 92954 -- 13717 unique (first 14 characters)\n" 76 | ] 77 | } 78 | ], 79 | "source": [ 80 | "def count_annotations(spectra):\n", 81 | " inchi_lst = []\n", 82 | " smiles_lst = []\n", 83 | " inchikey_lst = []\n", 84 | " for i, spec in enumerate(spectra):\n", 85 | " inchi_lst.append(spec.get(\"inchi\"))\n", 86 | " smiles_lst.append(spec.get(\"smiles\"))\n", 87 | " inchikey = spec.get(\"inchikey\")\n", 88 | " if inchikey is None:\n", 89 | " inchikey = spec.get(\"inchikey_inchi\")\n", 90 | " inchikey_lst.append(inchikey)\n", 91 | "\n", 92 | " inchi_count = sum([1 for x in inchi_lst if x])\n", 93 | " smiles_count = sum([1 for x in smiles_lst if x])\n", 94 | " inchikey_count = sum([1 for x in inchikey_lst if x])\n", 95 | " print(\"Inchis:\", inchi_count, \"--\", len(set(inchi_lst)), \"unique\")\n", 96 | " print(\"Smiles:\", smiles_count, \"--\", len(set(smiles_lst)), \"unique\")\n", 97 | " print(\"Inchikeys:\", inchikey_count, \"--\", \n", 98 | " len(set([x[:14] for x in inchikey_lst if x])), \"unique (first 14 characters)\")\n", 99 | " \n", 100 | "print(\"Number of spectra:\", len(reference_spectrums_positive))\n", 101 | "count_annotations(reference_spectrums_positive)" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 6, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "from matchms.exporting import save_as_json\n", 111 | "\n", 112 | "filename = os.path.join(path_data,'gnps_positive_ionmode_cleaned_by_matchms_and_lookups.json')\n", 113 | "save_as_json(reference_spectrums_positive, filename)" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": 3, 119 | 
"metadata": {}, 120 | "outputs": [ 121 | { 122 | "name": "stderr", 123 | "output_type": "stream", 124 | "text": [ 125 | "RDKit WARNING: [20:50:33] Enabling RDKit 2019.09.3 jupyter extensions\n" 126 | ] 127 | } 128 | ], 129 | "source": [ 130 | "# or load results\n", 131 | "\"\"\"from matchms.importing import load_from_json\n", 132 | "\n", 133 | "filename = os.path.join(path_data,'gnps_positive_ionmode_cleaned_by_matchms_and_lookups.json')\n", 134 | "reference_spectrums_positive = load_from_json(filename)\"\"\"" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "metadata": {}, 140 | "source": [ 141 | "### Select subset of unique InchiKeys\n", 142 | "Create strongly reduced dataset to limit computation times for benchmarking.\n", 143 | "For every unique InchiKey (only considering first 14 characters), only one spectrum will be picked. In cases where many spectra exit for the same InchiKey, the one with most peaks above an relative intensity threshold is selected." 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 4, 149 | "metadata": {}, 150 | "outputs": [], 151 | "source": [ 152 | "import numpy\n", 153 | "\n", 154 | "def count_higher_peaks(spectrum, threshold = 0.1):\n", 155 | " return numpy.sum(spectrum.peaks.intensities/spectrum.peaks.intensities.max() >= threshold)" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 5, 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "# Get collection/dictionary of inchikeys\n", 165 | "inchikey_collection = {}\n", 166 | "for i, spec in enumerate(reference_spectrums_positive):\n", 167 | " inchikey = spec.get(\"inchikey\")\n", 168 | " if inchikey:\n", 169 | " if inchikey[:14] in inchikey_collection:\n", 170 | " inchikey_collection[inchikey[:14]] += [i]\n", 171 | " else:\n", 172 | " inchikey_collection[inchikey[:14]] = [i]" 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": 6, 178 | "metadata": {}, 179 | "outputs": [ 180 | { 181 | "data": { 182 | "text/plain": [ 183 | "13717" 184 | ] 185 | }, 186 | "execution_count": 6, 187 | "metadata": {}, 188 | "output_type": "execute_result" 189 | } 190 | ], 191 | "source": [ 192 | "len(inchikey_collection)" 193 | ] 194 | }, 195 | { 196 | "cell_type": "markdown", 197 | "metadata": {}, 198 | "source": [ 199 | "#### Actual picking of the unique InchiKey spectra" 200 | ] 201 | }, 202 | { 203 | "cell_type": "code", 204 | "execution_count": 31, 205 | "metadata": {}, 206 | "outputs": [], 207 | "source": [ 208 | "import numpy as np\n", 209 | "\n", 210 | "intensity_thres = 0.01\n", 211 | "n_peaks_required = 10\n", 212 | "ID_picks = []\n", 213 | "\n", 214 | "inchikey14_unique = [x for x in inchikey_collection.keys()]\n", 215 | "\n", 216 | "# Loop through all unique inchiques (considering 14 first characters)\n", 217 | "for inchikey14 in inchikey14_unique:\n", 218 | " specIDs = np.array(inchikey_collection[inchikey14])\n", 219 | " if specIDs.size == 1:\n", 220 | " ID_picks.append(specIDs[0])\n", 221 | " else:\n", 222 | " # Step 1 - select spectrum with sufficient peaks (e.g. 
10 with intensity 0.01)\n", 223 | " num_peaks = np.array([count_higher_peaks(reference_spectrums_positive[specID], intensity_thres) for specID in specIDs])\n", 224 | " sufficient_peaks = np.where(num_peaks >= n_peaks_required)[0]\n", 225 | " if sufficient_peaks.size == 0:\n", 226 | " sufficient_peaks = np.where(num_peaks == max(num_peaks))[0]\n", 227 | " step1IDs = specIDs[sufficient_peaks]\n", 228 | "\n", 229 | " # Step 2 - select best spectrum qualities (according to gnps measure). 1 > 2 > 3\n", 230 | " qualities = np.array([int(reference_spectrums_positive[specID].get(\"library_class\")) for specID in step1IDs])\n", 231 | " step2IDs = step1IDs[np.where(qualities == min(qualities))[0]]\n", 232 | "\n", 233 | " # Step 3 Select the ones with most peaks > threshold\n", 234 | " num_peaks = np.array([count_higher_peaks(reference_spectrums_positive[specID], intensity_thres) for specID in step2IDs])\n", 235 | " pick = np.argmax(num_peaks)\n", 236 | " ID_picks.append(step2IDs[pick])" 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "execution_count": 32, 242 | "metadata": {}, 243 | "outputs": [ 244 | { 245 | "data": { 246 | "text/plain": [ 247 | "13717" 248 | ] 249 | }, 250 | "execution_count": 32, 251 | "metadata": {}, 252 | "output_type": "execute_result" 253 | } 254 | ], 255 | "source": [ 256 | "#Check if indeed correct number of unique inchikeys:\n", 257 | "test_inchikeys14 = []\n", 258 | "for ID in ID_picks:\n", 259 | " test_inchikeys14.append(reference_spectrums_positive[ID].get(\"inchikey\")[:14])\n", 260 | " \n", 261 | "len(set(test_inchikeys14))" 262 | ] 263 | }, 264 | { 265 | "cell_type": "code", 266 | "execution_count": 40, 267 | "metadata": {}, 268 | "outputs": [], 269 | "source": [ 270 | "import json\n", 271 | "filename = os.path.join(path_data,'unique_inchikeys_positive_ionmode_IDs200519.json')\n", 272 | "with open(filename, 'w') as f:\n", 273 | " json.dump([int(x) for x in ID_picks], f)" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": 41, 279 | "metadata": {}, 280 | "outputs": [ 281 | { 282 | "data": { 283 | "text/plain": [ 284 | "13717" 285 | ] 286 | }, 287 | "execution_count": 41, 288 | "metadata": {}, 289 | "output_type": "execute_result" 290 | } 291 | ], 292 | "source": [ 293 | "spectrums_unique_inchikeys_positive = [reference_spectrums_positive[ID].clone() for ID in ID_picks]\n", 294 | "len(spectrums_unique_inchikeys_positive)" 295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": 42, 300 | "metadata": {}, 301 | "outputs": [ 302 | { 303 | "name": "stdout", 304 | "output_type": "stream", 305 | "text": [ 306 | "Inchis: 13717 -- 13717 unique\n", 307 | "Smiles: 13717 -- 13674 unique\n", 308 | "Inchikeys: 13717 -- 13717 unique (first 14 characters)\n" 309 | ] 310 | } 311 | ], 312 | "source": [ 313 | "count_annotations(spectrums_unique_inchikeys_positive)" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 43, 319 | "metadata": {}, 320 | "outputs": [], 321 | "source": [ 322 | "from matchms.exporting import save_as_json\n", 323 | "\n", 324 | "filename = os.path.join(path_data,'gnps_positive_ionmode_unique_inchikey_cleaned_by_matchms_and_lookups.json')\n", 325 | "save_as_json(spectrums_unique_inchikeys_positive, filename)" 326 | ] 327 | } 328 | ], 329 | "metadata": { 330 | "kernelspec": { 331 | "display_name": "Python 3", 332 | "language": "python", 333 | "name": "python3" 334 | }, 335 | "language_info": { 336 | "codemirror_mode": { 337 | "name": "ipython", 338 | "version": 3 339 | }, 340 | 
"file_extension": ".py", 341 | "mimetype": "text/x-python", 342 | "name": "python", 343 | "nbconvert_exporter": "python", 344 | "pygments_lexer": "ipython3", 345 | "version": "3.7.6" 346 | } 347 | }, 348 | "nbformat": 4, 349 | "nbformat_minor": 2 350 | } 351 | -------------------------------------------------------------------------------- /notebooks/iomega-4-fingerprint-based-similarities.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Iomega workflow\n", 8 | "## Calculate molecular fingerprint based similarities\n", 9 | "Calculate all-vs-all similarity matrices for the data subset \"Unique InchiKeys\" (>12,000 spectra) using molecular fingerprints." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import os\n", 19 | "import sys\n", 20 | "\n", 21 | "path_data = os.path.join(os.path.dirname(os.getcwd()), 'data')" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "### Import pre-processed data subset \"Unique InchiKeys\"" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "metadata": {}, 35 | "outputs": [ 36 | { 37 | "name": "stdout", 38 | "output_type": "stream", 39 | "text": [ 40 | "number of spectra: 13717\n" 41 | ] 42 | } 43 | ], 44 | "source": [ 45 | "from matchms.importing import load_from_json\n", 46 | "\n", 47 | "filename = os.path.join(path_data,'gnps_positive_ionmode_unique_inchikey_cleaned_by_matchms_and_lookups.json')\n", 48 | "spectrums = load_from_json(filename)\n", 49 | "\n", 50 | "print(\"number of spectra:\", len(spectrums))" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "### Post-process spectra\n", 58 | "+ Normalize spectrum\n", 59 | "+ Remove peaks outside m/z ratios between 0 and 1000.0\n", 60 | "+ Discard spectra with less then 10 remaining peaks (to make it consistent with later spec2vec analysis)\n", 61 | "+ Remove peaks with relative intensity lower than 0.01" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 3, 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "from matchms.filtering import normalize_intensities\n", 71 | "from matchms.filtering import require_minimum_number_of_peaks\n", 72 | "from matchms.filtering import select_by_mz\n", 73 | "from matchms.filtering import select_by_relative_intensity" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 4, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "def post_process(s):\n", 83 | " s = normalize_intensities(s)\n", 84 | " s = select_by_mz(s, mz_from=0, mz_to=1000)\n", 85 | " s = require_minimum_number_of_peaks(s, n_required=10)\n", 86 | " s = select_by_relative_intensity(s, intensity_from=0.01, intensity_to=1.0)\n", 87 | " return s\n", 88 | "\n", 89 | "# apply filters to the data\n", 90 | "spectrums = [post_process(s) for s in spectrums]\n", 91 | "\n", 92 | "# omit spectrums that didn't qualify for analysis\n", 93 | "spectrums = [s for s in spectrums if s is not None]" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 5, 99 | "metadata": {}, 100 | "outputs": [ 101 | { 102 | "name": "stdout", 103 | "output_type": "stream", 104 | "text": [ 105 | "Remaining number of spectra: 12797\n" 106 | ] 107 | } 108 | ], 109 | "source": [ 110 | "print(\"Remaining number of spectra:\", 
len(spectrums))" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": {}, 116 | "source": [ 117 | "## Derive molecular fingerprints\n", 118 | "+ Fingerprints will be derived from smiles if possible, otherwise from inchi\n", 119 | "+ Different fingerprint types can be selected: ``daylight``, ``morgan1``, ``morgan2``, ``morgan3`` (all using rdkit)\n", 120 | "+ vector size is specified with ``nbits``, here set to 2048\n", 121 | "--> will be used with ``Jaccard index``" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": 6, 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "from matchms.filtering.add_fingerprint import add_fingerprint\n", 131 | "\n", 132 | "spectrums = [add_fingerprint(s, fingerprint_type=\"daylight\", nbits=2048) for s in spectrums]" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 9, 138 | "metadata": {}, 139 | "outputs": [ 140 | { 141 | "name": "stdout", 142 | "output_type": "stream", 143 | "text": [ 144 | "543 no fingerprint\n", 145 | "1246 no fingerprint\n" 146 | ] 147 | } 148 | ], 149 | "source": [ 150 | "for i, spec1 in enumerate(spectrums):\n", 151 | " if spec1.get(\"fingerprint\") is None:\n", 152 | " print(i, \"no fingerprint\")\n", 153 | " elif spec1.get(\"fingerprint\").sum() < 1:\n", 154 | " print(i, \"weird\")\n", 155 | " " 156 | ] 157 | }, 158 | { 159 | "cell_type": "markdown", 160 | "metadata": {}, 161 | "source": [ 162 | "# Calculate similarity score matrices\n", 163 | "+ Similarities between all possible pairs of spectra will be calculated. This will give a similarity score matrix of size 12,797 x 12,797." 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": 10, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "from matchms.similarity import FingerprintSimilarity\n", 173 | "\n", 174 | "similarity_measure = FingerprintSimilarity(similarity_measure=\"jaccard\")\n", 175 | "scores_mol_similarity = similarity_measure.matrix(spectrums, spectrums)\n", 176 | "\n", 177 | "#start 22:42 end 22:49\n", 178 | "import numpy as np\n", 179 | "filename = os.path.join(path_data, \"similarities_daylight2048_jaccard.npy\")\n", 180 | "np.save(filename, scores_mol_similarity)" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": 31, 186 | "metadata": {}, 187 | "outputs": [ 188 | { 189 | "name": "stdout", 190 | "output_type": "stream", 191 | "text": [ 192 | "40 \n", 193 | "\n", 194 | "-------------------- CC[C@@]1([C@H](C(=C2C(=CC3=C2C(=C4C(=O)C=CC(=O)C4=C3O)O)[C@H]1O[C@@H]5C[C@@H]([C@H]([C@@H](O5)C)N(C)C)O)O)[C@H]6C(=O)C7=C([C@H]([C@@]6(CC)O[C@@H]8C[C@H]([C@@H]([C@H](O8)C)O)OC)O[C@@H]9C[C@@H]([C@H]([C@@H](O9)C)N(C)C)O)C(=[N+]=[N-])C1=C(C2=C(C(=O)C=CC2=O)C(=C71)O)O)O[C@@H]1C[C@H]([C@@H]([C@H](O1)C)O)OC\n", 195 | "-------------------- InChI=1S/C68H82N4O24/c1-13-67(95-40-22-36(87-11)57(79)26(5)91-40)52(63(85)43-29(65(67)93-38-20-34(77)55(71(7)8)24(3)89-38)19-28-42(43)60(82)45-31(74)16-15-30(73)44(45)59(28)81)53-64(86)49-48-50(62(84)47-33(76)18-17-32(75)46(47)61(48)83)54(70-69)51(49)66(94-39-21-35(78)56(72(9)10)25(4)90-39)68(53,14-2)96-41-23-37(88-12)58(80)27(6)92-41/h15-19,24-27,34-41,52-53,55-58,65-66,77-85H,13-14,20-23H2,1-12H3/t24-,25-,26+,27+,34-,35-,36+,37+,38+,39+,40+,41+,52-,53-,55-,56-,57+,58+,65+,66+,67-,68-/m0/s1\n", 196 | "---------- AVVUVGNOUBNZKQ\n", 197 | "---------- NZZSDJHUISSTSC\n", 198 | "294 \n", 199 | "\n", 200 | "-------------------- 
CC[C@@]1([C@H](C(=C2C(=CC3=C2C(=C4C(=O)C=CC(=O)C4=C3O)O)[C@H]1O[C@@H]5C[C@@H]([C@H]([C@@H](O5)C)N(C)C)O)O)[C@H]6C(=O)C7=C([C@H]([C@@]6(CC)O[C@@H]8C[C@H]([C@@H]([C@H](O8)C)OC)OC)O[C@@H]9C[C@@H]([C@H]([C@@H](O9)C)N(C)C)O)C(=[N+]=[N-])C1=C(C2=C(C(=O)C=CC2=O)C(=C71)O)O)O[C@@H]1C[C@H]([C@@H]([C@H](O1)C)O)OC\n", 201 | "-------------------- InChI=1S/C69H84N4O24/c1-14-68(96-41-23-37(87-11)58(80)27(5)92-41)53(63(85)44-30(66(68)94-39-21-35(78)56(72(7)8)25(3)90-39)20-29-43(44)60(82)46-32(75)17-16-31(74)45(46)59(29)81)54-64(86)50-49-51(62(84)48-34(77)19-18-33(76)47(48)61(49)83)55(71-70)52(50)67(95-40-22-36(79)57(73(9)10)26(4)91-40)69(54,15-2)97-42-24-38(88-12)65(89-13)28(6)93-42/h16-20,25-28,35-42,53-54,56-58,65-67,78-85H,14-15,21-24H2,1-13H3/t25-,26-,27+,28+,35-,36-,37+,38+,39+,40+,41+,42+,53-,54-,56-,57-,58+,65+,66+,67+,68-,69-/m0/s1\n", 202 | "---------- DNWGQXWLEKIRHJ\n", 203 | "---------- QJVZZPREOYBOFG\n", 204 | "295 \n", 205 | "\n", 206 | "-------------------- CC[C@@]1([C@H](C(=C2C(=CC3=C2C(=C4C(=O)C=CC(=O)C4=C3O)O)[C@H]1O[C@@H]5C[C@@H]([C@H]([C@@H](O5)C)N(C)C)O)O)[C@H]6C(=O)C7=C([C@H]([C@@]6(CC)O[C@H]8C[C@@H]([C@H]([C@@H](O8)C)OC)OC)O[C@@H]9C[C@@H]([C@H]([C@@H](O9)C)N(C)C)O)C(=[N+]=[N-])C1=C(C2=C(C(=O)C=CC2=O)C(=C71)O)O)O[C@H]1C[C@@H]([C@H]([C@@H](O1)C)OC)OC\n", 207 | "-------------------- InChI=1S/C70H86N4O24/c1-15-69(97-42-24-38(87-11)65(89-13)28(5)93-42)54(63(85)45-31(67(69)95-40-22-36(79)57(73(7)8)26(3)91-40)21-30-44(45)60(82)47-33(76)18-17-32(75)46(47)59(30)81)55-64(86)51-50-52(62(84)49-35(78)20-19-34(77)48(49)61(50)83)56(72-71)53(51)68(96-41-23-37(80)58(74(9)10)27(4)92-41)70(55,16-2)98-43-25-39(88-12)66(90-14)29(6)94-43/h17-21,26-29,36-43,54-55,57-58,65-68,79-85H,15-16,22-25H2,1-14H3/t26-,27-,28-,29-,36-,37-,38-,39-,40+,41+,42-,43-,54-,55-,57-,58-,65-,66-,67+,68+,69-,70-/m0/s1\n", 208 | "---------- UOWMHMSOUAAHTP\n", 209 | "---------- UUFARPZDPLRTPW\n", 210 | "958 \n", 211 | "\n", 212 | "-------------------- O=C1OCC(=O)N2NCCCC2C(=O)NCC(=O)N2C(C(=O)N3C(C(=O)NC1)CCCN3)CCCN2\n", 213 | "-------------------- InChI=1S/C31H47ClN8O9/c1-18(2)8-5-4-6-9-21-28(46)40-24(12-19(32)14-34-40)29(47)39-22(10-7-11-33-39)27(45)37-31(3,17-41)30(48)49-16-25(43)38-23(26(44)36-21)13-20(42)15-35-38/h4-6,8,18-24,33-35,41-42H,7,9-17H2,1-3H3,(H,36,44)(H,37,45)\n", 214 | "---------- JQWUSEVRGIXWTP\n", 215 | "---------- WZHWJPRBJXEKMW\n", 216 | "964 \n", 217 | "\n", 218 | "-------------------- CC[C@]1([C@@H](c2[c-](c3c(c2C(=O)[C@@H]1[C@H]4C(=O)c5c([c-](c6c5C(=O)c7c(ccc(c7C6=O)O)O)[N+]#N)[C@H]([C@@]4(CC)O[C@H]8C[C@@H]([C@H]([C@@H](O8)C)O)OC)O[C@H]9C[C@H]([C@@H]([C@H](O9)C)N(C)C)O)C(=O)c1c(ccc(c1C3=O)O)O)[N+]#N)O[C@H]1C[C@H]([C@@H]([C@H](O1)C)N(C)C)O)O[C@H]1C[C@@H]([C@H]([C@@H](O1)C)O)OC\n", 219 | "-------------------- InChI=1S/C68H80N6O24/c1-13-67(97-37-21-33(89-11)57(81)25(5)93-37)51(63(87)45-43-47(61(85)41-29(77)17-15-27(75)39(41)59(43)83)53(71-69)49(45)65(67)95-35-19-31(79)55(73(7)8)23(3)91-35)52-64(88)46-44-48(62(86)42-30(78)18-16-28(76)40(42)60(44)84)54(72-70)50(46)66(96-36-20-32(80)56(74(9)10)24(4)92-36)68(52,14-2)98-38-22-34(90-12)58(82)26(6)94-38/h15-18,23-26,31-38,51-52,55-58,65-66,75-82H,13-14,19-22H2,1-12H3/t23-,24-,25+,26+,31-,32-,33+,34+,35+,36+,37+,38+,51+,52+,55-,56-,57+,58+,65-,66-,67+,68+/m1/s1\n", 220 | "---------- PNIOKHKXUGHMAV\n", 221 | "---------- VGDUHSQTOJRQNB\n", 222 | "965 \n", 223 | "\n", 224 | "-------------------- CC1OC(CC(O7)=O)C7C(C(C3=C2C(O)=C(C4=CC(O)=C(C(C(C(O8)C(CC8=O)OC6C)=C6C5=O)=O)C5=C4O)C=C3O)=O)=C1C2=O\n", 225 | "-------------------- 
InChI=1S/C32H22O14/c1-7-17-23(31-13(43-7)5-15(35)45-31)29(41)19-11(33)3-9(25(37)21(19)27(17)39)10-4-12(34)20-22(26(10)38)28(40)18-8(2)44-14-6-16(36)46-32(14)24(18)30(20)42/h3-4,7-8,13-14,31-32,39-42H,5-6H2,1-2H3\n", 226 | "---------- QCDBYUUEYOONDY\n", 227 | "---------- FYMVLOXRVLSINK\n", 228 | "1247 \n", 229 | "\n", 230 | "-------------------- CCC=CCc1nc2ccccc2c(O)c1C\n", 231 | "-------------------- InChI=1S/C14H15NO/c1-2-3-4-7-11-10-14(16)12-8-5-6-9-13(12)15-11/h3-6,8-10H,2,7H2,1H3,(H,15,16)\n", 232 | "---------- KRQABMVYTPDCNA\n", 233 | "---------- STKKKXHSAFYHBA\n", 234 | "4859 \n", 235 | "\n", 236 | "-------------------- COC1=CC2=C(C=C1)C3=C(N2)C(C)=NCC3\n", 237 | "-------------------- InChI=1S/C13H14N2O/c1-8-13-11(5-6-14-8)10-4-3-9(16-2)7-12(10)15-13/h3-4,7,14H,5-6H2,1-2H3\n", 238 | "---------- QJOZJXNKVMFAET\n", 239 | "---------- RERZNCLIYCABFS\n", 240 | "4908 \n", 241 | "\n", 242 | "-------------------- COC1=C(O)C2=C3C(C4=C(C2=O)C(C)(C)C(C)O4)=C(C)C=C(O)C3=C1O\n", 243 | "-------------------- InChI=1S/C20H20O6/c1-7-6-9(21)11-12-10(7)18-14(20(3,4)8(2)26-18)15(22)13(12)17(24)19(25-5)16(11)23/h6,8,22-24H,1-5H3\n", 244 | "---------- KMPAOJFBQSXEAI\n", 245 | "---------- JLAHJKGDLZKPQY\n", 246 | "5102 \n", 247 | "\n", 248 | "-------------------- CCC(=O)C(=O)C1=C(O)C=C(OC1=O)\\C=C\\C\n", 249 | "-------------------- InChI=1S/C12H12O5/c1-3-5-7-6-9(14)10(12(16)17-7)11(15)8(13)4-2/h3,5-6,15H,4H2,1-2H3/b5-3+,11-10?\n", 250 | "---------- CDTMNHHLRVCBBE\n", 251 | "---------- CEVGSSZDDUFFQW\n", 252 | "5226 \n", 253 | "\n", 254 | "-------------------- COC(=O)C12OC3=C(C(=O)C1=C(O)CCC2O)C(O)=C4C5C=CC6(CC4=C3)C(=O)C7=C(C(O)=CC(C)=C7)C(O)=C6C5=O\n", 255 | "-------------------- InChI=1S/C31H24O11/c1-11-7-14-20(16(33)8-11)26(37)23-24(35)13-5-6-30(23,28(14)39)10-12-9-17-21(25(36)19(12)13)27(38)22-15(32)3-4-18(34)31(22,42-17)29(40)41-2/h5-9,13,18,33-34,36-38H,3-4,10H2,1-2H3\n", 256 | "---------- XCWGCTNGDUDAMO\n", 257 | "---------- KIWTVJVARRPBPR\n", 258 | "5423 \n", 259 | "\n", 260 | "-------------------- COC1=C(C(=CC(C)=C1)C(O)=O)C2=C(O)C(=O)C3=C(C=CC=C3O)C2=O\n", 261 | "-------------------- InChI=1S/C19H14O7/c1-8-6-10(19(24)25)14(12(7-8)26-2)15-16(21)9-4-3-5-11(20)13(9)17(22)18(15)23/h3-7,20-21H,1-2H3,(H,24,25)\n", 262 | "---------- CPRXVUOMCYXPHA\n", 263 | "---------- PBKZJIMGHNPKBJ\n", 264 | "5695 \n", 265 | "\n", 266 | "-------------------- COC1=C(SC)C(\\C=N\\O)=NC(=C1)C2=NC=CC=C2\n", 267 | "-------------------- InChI=1S/C13H13N3O2S/c1-18-12-7-10(9-5-3-4-6-14-9)16-11(8-15-17)13(12)19-2/h3-8,16H,1-2H3\n", 268 | "---------- FXPSZLHFJDHOMI\n", 269 | "---------- NQGMIPUYCWIEAW\n", 270 | "5719 \n", 271 | "\n", 272 | "-------------------- CCCC(=O)C1=C(O)C(C)=C(O)C(CC2=C(O)C(C)(C)C(O)=C(C(C)=O)C2=O)=C1O\n", 273 | "-------------------- InChI=1S/C22H26O8/c1-6-7-13(24)15-17(26)9(2)16(25)11(18(15)27)8-12-19(28)14(10(3)23)21(30)22(4,5)20(12)29/h25-29H,6-8H2,1-5H3\n", 274 | "---------- PPRFIMGXDRYLGD\n", 275 | "---------- XRWVZSPWRNDJNS\n", 276 | "8445 \n", 277 | "\n", 278 | "-------------------- CNC(=O)OC1=CC=CC(=C1)N=CN(C)C\n", 279 | "-------------------- InChI=1S/C11H15N3O2.ClH/c1-12-11(15)16-10-6-4-5-9(7-10)13-8-14(2)3;/h4-8H,1-3H3,(H,12,15);1H/b13-8+;\n", 280 | "---------- MYPKGPZHHQEODQ\n", 281 | "---------- RMFNNCGOSPBBAD\n", 282 | "10711 \n", 283 | "\n", 284 | "-------------------- CC1C2C(CC(=C3C(C2OC1=O)C(=CC3=O)C)C)O.O\n", 285 | "-------------------- InChI=1S/C15H18O4/c1-6-4-10(17)13-8(3)15(18)19-14(13)12-7(2)5-9(16)11(6)12/h5,8,10,12-14,17H,4H2,1-3H3\n", 286 | "---------- YMUOZXZDDBRJEP\n", 287 | 
"---------- ADUHAEBXPKBNDK\n", 288 | "11145 \n", 289 | "\n", 290 | "-------------------- [Cl-1]\n", 291 | "-------------------- InChI=1S/C25H30N3.ClH/c1-26(2)22-13-7-19(8-14-22)25(20-9-15-23(16-10-20)27(3)4)21-11-17-24(18-12-21)28(5)6;/h7-18H,1-6H3;1H/q+1;/p-1\n", 292 | "---------- ZXJXZNDDNMQXFV\n", 293 | "---------- VEXZGXHMUGYJMC\n", 294 | "12656 \n", 295 | "\n", 296 | "-------------------- CCC1=C(C2=NC1=CC3=C(C4=C([N-]3)C(=C5C(C(C(=N5)C=C6C(=C(C(=C2)[N-]6)C=C)C)C)CCC(=O)OCC=C(C)CCCC(C)CCCC(C)CCCC(C)C)C(C4=O)C(=O)OC)C)C.[Mg+2]\n", 297 | "-------------------- InChI=1S/C55H73N4O5.Mg/c1-13-39-35(8)42-28-44-37(10)41(24-25-48(60)64-27-26-34(7)23-17-22-33(6)21-16-20-32(5)19-15-18-31(3)4)52(58-44)50-51(55(62)63-12)54(61)49-38(11)45(59-53(49)50)30-47-40(14-2)36(9)43(57-47)29-46(39)56-42;/h13,26,28-33,37,41,51H,1,14-25,27H2,2-12H3,(H-,56,57,58,59,61);/q-1;+2/p-1\n", 298 | "---------- SYHZSPMCAROBPW\n", 299 | "---------- ATNHDLDRLWWWCB\n", 300 | "12657 \n", 301 | "\n", 302 | "-------------------- CCC1=C(C2=NC1=CC3=C(C4=C([N-]3)C(=C5C(C(C(=N5)C=C6C(=C(C(=C2)[N-]6)C=C)C)C)CCC(=O)OCC=C(C)CCCC(C)CCCC(C)CCCC(C)C)C(C4=O)C(=O)OC)C)C=O.[Mg+2]\n", 303 | "-------------------- InChI=1S/C55H71N4O6.Mg/c1-12-38-35(8)42-27-43-36(9)40(23-24-48(61)65-26-25-34(7)22-16-21-33(6)20-15-19-32(5)18-14-17-31(3)4)52(58-43)50-51(55(63)64-11)54(62)49-37(10)44(59-53(49)50)28-46-39(13-2)41(30-60)47(57-46)29-45(38)56-42;/h12,25,27-33,36,40,51H,1,13-24,26H2,2-11H3,(H-,56,57,58,59,60,62);/q-1;+2/p-1\n", 304 | "---------- MSLKMRUEVOYOOZ\n", 305 | "---------- NSMUHPMZFPKNMZ\n" 306 | ] 307 | } 308 | ], 309 | "source": [ 310 | "for i, spec in enumerate(spectrums):\n", 311 | " inchikey_smiles = None\n", 312 | " inchikey_inchi = None\n", 313 | " inchi = spec.get(\"inchi\")\n", 314 | " if inchi:\n", 315 | " inchikey_inchi = mol_converter(inchi, \"inchi\", \"inchikey\")\n", 316 | " smiles = spec.get(\"smiles\")\n", 317 | " if smiles:\n", 318 | " inchikey_smiles = mol_converter(smiles, \"smiles\", \"inchikey\")\n", 319 | " if inchikey_inchi and inchikey_smiles:\n", 320 | " if not inchikey_inchi[:14] == inchikey_smiles[:14]:\n", 321 | " print(i, \"\\n\")\n", 322 | " print(10* \"--\", smiles)\n", 323 | " print(10* \"--\", inchi)\n", 324 | " print(5* \"--\", inchikey_inchi[:14])\n", 325 | " print(5* \"--\", inchikey_smiles[:14])" 326 | ] 327 | }, 328 | { 329 | "cell_type": "markdown", 330 | "metadata": {}, 331 | "source": [ 332 | "## Derive different type of molecular fingerprints\n", 333 | "+ Here: ``morgan3``\n", 334 | "+ Then using ``Dice Similarity Coefficient``" 335 | ] 336 | }, 337 | { 338 | "cell_type": "code", 339 | "execution_count": 11, 340 | "metadata": {}, 341 | "outputs": [], 342 | "source": [ 343 | "spectrums = [add_fingerprint(s, fingerprint_type=\"morgan3\", nbits=2048) for s in spectrums]" 344 | ] 345 | }, 346 | { 347 | "cell_type": "markdown", 348 | "metadata": {}, 349 | "source": [ 350 | "### Calculate similarity score matrices" 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "execution_count": 12, 356 | "metadata": {}, 357 | "outputs": [], 358 | "source": [ 359 | "similarity_measure = FingerprintSimilarity(similarity_measure=\"jaccard\")\n", 360 | "scores_mol_similarity = similarity_measure.matrix(spectrums, spectrums)\n", 361 | "\n", 362 | "import numpy as np\n", 363 | "filename = os.path.join(path_data, \"similarities_morgan3_2048_dice.npy\")\n", 364 | "np.save(filename, scores_mol_similarity)" 365 | ] 366 | } 367 | ], 368 | "metadata": { 369 | "kernelspec": { 370 | "display_name": "Python 3", 371 | 
"language": "python", 372 | "name": "python3" 373 | }, 374 | "language_info": { 375 | "codemirror_mode": { 376 | "name": "ipython", 377 | "version": 3 378 | }, 379 | "file_extension": ".py", 380 | "mimetype": "text/x-python", 381 | "name": "python", 382 | "nbconvert_exporter": "python", 383 | "pygments_lexer": "ipython3", 384 | "version": "3.7.6" 385 | } 386 | }, 387 | "nbformat": 4, 388 | "nbformat_minor": 2 389 | } 390 | -------------------------------------------------------------------------------- /notebooks/iomega-6-compute-spec2vec-similarities.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Compute spec2vec similarities on mass spectra dataset" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import os\n", 17 | "import sys\n", 18 | "import gensim\n", 19 | "import numpy as np\n", 20 | "\n", 21 | "ROOT = os.path.dirname(os.getcwd())\n", 22 | "#path_data = os.path.join(ROOT, 'data')\n", 23 | "path_data = 'C:\\\\OneDrive - Netherlands eScience Center\\\\Project_Wageningen_iOMEGA\\\\matchms\\\\data\\\\'\n", 24 | "sys.path.insert(0, ROOT)" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "### Import pre-processed dataset \"Unique InchiKeys\"" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 2, 37 | "metadata": {}, 38 | "outputs": [ 39 | { 40 | "name": "stdout", 41 | "output_type": "stream", 42 | "text": [ 43 | "number of spectra: 13717\n" 44 | ] 45 | } 46 | ], 47 | "source": [ 48 | "from matchms.importing import load_from_json\n", 49 | "\n", 50 | "filename = os.path.join(path_data,'gnps_positive_ionmode_unique_inchikey_cleaned_by_matchms_and_lookups.json')\n", 51 | "spectrums = load_from_json(filename)\n", 52 | "\n", 53 | "print(\"number of spectra:\", len(spectrums))" 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": {}, 59 | "source": [ 60 | "### Post-processing of data" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 3, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "from matchms.filtering import normalize_intensities\n", 70 | "from matchms.filtering import require_minimum_number_of_peaks\n", 71 | "from matchms.filtering import select_by_mz\n", 72 | "from matchms.filtering import select_by_relative_intensity\n", 73 | "from matchms.filtering import reduce_to_number_of_peaks\n", 74 | "from matchms.filtering import add_losses" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 4, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "def post_process(s):\n", 84 | " s = normalize_intensities(s)\n", 85 | " s = select_by_mz(s, mz_from=0, mz_to=1000)\n", 86 | " s = require_minimum_number_of_peaks(s, n_required=10)\n", 87 | " s = reduce_to_number_of_peaks(s, n_required=10, ratio_desired=0.5)\n", 88 | " if s is None:\n", 89 | " return None\n", 90 | " s_remove_low_peaks = select_by_relative_intensity(s, intensity_from=0.001)\n", 91 | " if len(s_remove_low_peaks.peaks) >= 10:\n", 92 | " s = s_remove_low_peaks\n", 93 | " \n", 94 | " s = add_losses(s, loss_mz_from=5.0, loss_mz_to=200.0)\n", 95 | " return s\n", 96 | "\n", 97 | "# apply post processing steps to the data\n", 98 | "spectrums_postprocessed = [post_process(s) for s in spectrums]\n", 99 | "\n", 100 | "# omit spectrums that didn't qualify for analysis\n", 101 | 
"spectrums_postprocessed = [s for s in spectrums_postprocessed if s is not None]" 102 | ] 103 | }, 104 | { 105 | "cell_type": "markdown", 106 | "metadata": {}, 107 | "source": [ 108 | "### Load pretrained spec2vec model" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": 5, 114 | "metadata": {}, 115 | "outputs": [ 116 | { 117 | "ename": "FileNotFoundError", 118 | "evalue": "[Errno 2] No such file or directory: 'C:\\\\OneDrive - Netherlands eScience Center\\\\Project_Wageningen_iOMEGA\\\\matchms\\\\data\\\\trained_models\\\\spec2vec_UniqueInchikeys_ratio05_filtered_201101_iter_15.model'", 119 | "output_type": "error", 120 | "traceback": [ 121 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", 122 | "\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", 123 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;31m# Load pretrained model\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgensim\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodels\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mWord2Vec\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel_file\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", 124 | "\u001b[1;32m~\\Anaconda3\\envs\\matchms-dev\\lib\\site-packages\\gensim\\models\\word2vec.py\u001b[0m in \u001b[0;36mload\u001b[1;34m(cls, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1139\u001b[0m \"\"\"\n\u001b[0;32m 1140\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1141\u001b[1;33m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mWord2Vec\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcls\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1142\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1143\u001b[0m \u001b[1;31m# for backward compatibility for `max_final_vocab` feature\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 125 | "\u001b[1;32m~\\Anaconda3\\envs\\matchms-dev\\lib\\site-packages\\gensim\\models\\base_any2vec.py\u001b[0m in \u001b[0;36mload\u001b[1;34m(cls, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1228\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1229\u001b[0m \"\"\"\n\u001b[1;32m-> 1230\u001b[1;33m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mBaseWordEmbeddingsModel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcls\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1231\u001b[0m \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[1;34m'ns_exponent'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1232\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mns_exponent\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0.75\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 126 | "\u001b[1;32m~\\Anaconda3\\envs\\matchms-dev\\lib\\site-packages\\gensim\\models\\base_any2vec.py\u001b[0m in \u001b[0;36mload\u001b[1;34m(cls, fname_or_handle, **kwargs)\u001b[0m\n\u001b[0;32m 600\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 601\u001b[0m \"\"\"\n\u001b[1;32m--> 602\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mBaseAny2VecModel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcls\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname_or_handle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 603\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 604\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0msave\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfname_or_handle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 127 | "\u001b[1;32m~\\Anaconda3\\envs\\matchms-dev\\lib\\site-packages\\gensim\\utils.py\u001b[0m in \u001b[0;36mload\u001b[1;34m(cls, fname, mmap)\u001b[0m\n\u001b[0;32m 433\u001b[0m \u001b[0mcompress\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msubname\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mSaveLoad\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_adapt_by_suffix\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 434\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 435\u001b[1;33m \u001b[0mobj\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0munpickle\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 436\u001b[0m \u001b[0mobj\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_load_specials\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmmap\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcompress\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msubname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 437\u001b[0m \u001b[0mlogger\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"loaded %s\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 128 | "\u001b[1;32m~\\Anaconda3\\envs\\matchms-dev\\lib\\site-packages\\gensim\\utils.py\u001b[0m in \u001b[0;36munpickle\u001b[1;34m(fname)\u001b[0m\n\u001b[0;32m 1393\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1394\u001b[0m \"\"\"\n\u001b[1;32m-> 1395\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'rb'\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1396\u001b[0m \u001b[1;31m# Because of loading from S3 load can't be used (missing 
readline in smart_open)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1397\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0msys\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mversion_info\u001b[0m \u001b[1;33m>\u001b[0m \u001b[1;33m(\u001b[0m\u001b[1;36m3\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 129 | "\u001b[1;32m~\\Anaconda3\\envs\\matchms-dev\\lib\\site-packages\\smart_open\\smart_open_lib.py\u001b[0m in \u001b[0;36mopen\u001b[1;34m(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)\u001b[0m\n\u001b[0;32m 187\u001b[0m \u001b[0mbuffering\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mbuffering\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 188\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mencoding\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 189\u001b[1;33m \u001b[0merrors\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0merrors\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 190\u001b[0m )\n\u001b[0;32m 191\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mfobj\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 130 | "\u001b[1;32m~\\Anaconda3\\envs\\matchms-dev\\lib\\site-packages\\smart_open\\smart_open_lib.py\u001b[0m in \u001b[0;36m_shortcut_open\u001b[1;34m(uri, mode, ignore_ext, buffering, encoding, errors)\u001b[0m\n\u001b[0;32m 360\u001b[0m \u001b[0mopen_kwargs\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'errors'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 361\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 362\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0m_builtin_open\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlocal_path\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbuffering\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mbuffering\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mopen_kwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 363\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 364\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", 131 | "\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'C:\\\\OneDrive - Netherlands eScience Center\\\\Project_Wageningen_iOMEGA\\\\matchms\\\\data\\\\trained_models\\\\spec2vec_UniqueInchikeys_ratio05_filtered_201101_iter_15.model'" 132 | ] 133 | } 134 | ], 135 | "source": [ 136 | "path_models = os.path.join(path_data, \"trained_models\")\n", 137 | "model_file = os.path.join(path_models, \"spec2vec_UniqueInchikeys_ratio05_filtered_iter_50.model\")\n", 138 | "\n", 139 | "# Load pretrained model\n", 140 | "model = gensim.models.Word2Vec.load(model_file)" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 6, 146 | "metadata": {}, 147 | "outputs": [], 148 | "source": [ 149 | "from spec2vec import Spec2Vec\n", 150 | "from spec2vec import SpectrumDocument " 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": 11, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "documents = [SpectrumDocument(s, 
n_decimals=2) for s in spectrums_postprocessed]" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 12, 165 | "metadata": {}, 166 | "outputs": [ 167 | { 168 | "data": { 169 | "text/plain": [ 170 | "['peak@289.29',\n", 171 | " 'peak@295.55',\n", 172 | " 'peak@298.49',\n", 173 | " 'peak@317.32',\n", 174 | " 'peak@319.66',\n", 175 | " 'peak@324.48',\n", 176 | " 'peak@325.32',\n", 177 | " 'peak@339.79',\n", 178 | " 'peak@343.95',\n", 179 | " 'peak@347.02',\n", 180 | " 'peak@347.91',\n", 181 | " 'peak@361.15',\n", 182 | " 'peak@361.84',\n", 183 | " 'peak@364.23',\n", 184 | " 'peak@364.86',\n", 185 | " 'peak@365.85',\n", 186 | " 'peak@368.22',\n", 187 | " 'peak@368.97',\n", 188 | " 'peak@375.07',\n", 189 | " 'peak@375.75',\n", 190 | " 'peak@382.75',\n", 191 | " 'peak@384.20',\n", 192 | " 'peak@390.57',\n", 193 | " 'peak@394.05',\n", 194 | " 'peak@397.11',\n", 195 | " 'peak@404.42',\n", 196 | " 'peak@411.09',\n", 197 | " 'peak@413.78',\n", 198 | " 'peak@427.67',\n", 199 | " 'peak@436.19',\n", 200 | " 'peak@443.27',\n", 201 | " 'peak@446.27',\n", 202 | " 'peak@447.75',\n", 203 | " 'peak@455.25',\n", 204 | " 'peak@456.11',\n", 205 | " 'peak@457.54',\n", 206 | " 'peak@464.29',\n", 207 | " 'peak@469.87',\n", 208 | " 'peak@471.06',\n", 209 | " 'peak@475.26',\n", 210 | " 'peak@476.14',\n", 211 | " 'peak@476.98',\n", 212 | " 'peak@478.89',\n", 213 | " 'peak@479.98',\n", 214 | " 'peak@483.24',\n", 215 | " 'peak@487.21',\n", 216 | " 'peak@488.16',\n", 217 | " 'peak@491.19',\n", 218 | " 'peak@494.28',\n", 219 | " 'peak@495.65',\n", 220 | " 'peak@498.41',\n", 221 | " 'peak@503.03',\n", 222 | " 'peak@504.34',\n", 223 | " 'peak@505.15',\n", 224 | " 'peak@510.18',\n", 225 | " 'peak@512.17',\n", 226 | " 'peak@513.27',\n", 227 | " 'peak@514.96',\n", 228 | " 'peak@515.92',\n", 229 | " 'peak@520.97',\n", 230 | " 'peak@521.82',\n", 231 | " 'peak@523.17',\n", 232 | " 'peak@529.04',\n", 233 | " 'peak@530.99',\n", 234 | " 'peak@532.38',\n", 235 | " 'peak@534.58',\n", 236 | " 'peak@538.00',\n", 237 | " 'peak@539.22',\n", 238 | " 'peak@540.67',\n", 239 | " 'peak@548.06',\n", 240 | " 'peak@554.12',\n", 241 | " 'peak@556.03',\n", 242 | " 'peak@557.29',\n", 243 | " 'peak@558.00',\n", 244 | " 'peak@559.94',\n", 245 | " 'peak@561.33',\n", 246 | " 'peak@564.12',\n", 247 | " 'peak@564.95',\n", 248 | " 'peak@566.44',\n", 249 | " 'peak@571.33',\n", 250 | " 'peak@572.05',\n", 251 | " 'peak@575.22',\n", 252 | " 'peak@577.10',\n", 253 | " 'peak@579.65',\n", 254 | " 'peak@580.94',\n", 255 | " 'peak@582.11',\n", 256 | " 'peak@583.46',\n", 257 | " 'peak@585.24',\n", 258 | " 'peak@598.17',\n", 259 | " 'peak@599.35',\n", 260 | " 'peak@600.38',\n", 261 | " 'peak@602.27',\n", 262 | " 'peak@609.30',\n", 263 | " 'peak@613.42',\n", 264 | " 'peak@622.21',\n", 265 | " 'peak@623.02',\n", 266 | " 'peak@623.99',\n", 267 | " 'peak@625.22',\n", 268 | " 'peak@638.30',\n", 269 | " 'peak@640.27',\n", 270 | " 'peak@641.24',\n", 271 | " 'peak@646.10',\n", 272 | " 'peak@649.28',\n", 273 | " 'peak@651.53',\n", 274 | " 'peak@657.13',\n", 275 | " 'peak@658.09',\n", 276 | " 'peak@659.42',\n", 277 | " 'peak@663.39',\n", 278 | " 'peak@668.33',\n", 279 | " 'peak@669.36',\n", 280 | " 'peak@680.22',\n", 281 | " 'peak@681.99',\n", 282 | " 'peak@685.96',\n", 283 | " 'peak@691.65',\n", 284 | " 'peak@693.23',\n", 285 | " 'peak@694.31',\n", 286 | " 'peak@696.33',\n", 287 | " 'peak@697.13',\n", 288 | " 'peak@709.46',\n", 289 | " 'peak@710.80',\n", 290 | " 'peak@711.74',\n", 291 | " 'peak@714.07',\n", 292 | " 'peak@715.58',\n", 293 | " 
'peak@723.27',\n", 294 | " 'peak@724.08',\n", 295 | " 'peak@725.49',\n", 296 | " 'peak@728.35',\n", 297 | " 'peak@735.81',\n", 298 | " 'peak@738.35',\n", 299 | " 'peak@744.37',\n", 300 | " 'peak@747.46',\n", 301 | " 'peak@753.27',\n", 302 | " 'peak@761.61',\n", 303 | " 'peak@764.46',\n", 304 | " 'peak@765.28',\n", 305 | " 'peak@769.28',\n", 306 | " 'peak@770.33',\n", 307 | " 'peak@771.39',\n", 308 | " 'peak@787.43',\n", 309 | " 'peak@796.14',\n", 310 | " 'peak@797.23',\n", 311 | " 'peak@806.56',\n", 312 | " 'peak@808.44',\n", 313 | " 'peak@811.64',\n", 314 | " 'peak@812.30',\n", 315 | " 'peak@813.15',\n", 316 | " 'peak@817.22',\n", 317 | " 'peak@820.27',\n", 318 | " 'peak@821.29',\n", 319 | " 'peak@823.36',\n", 320 | " 'peak@824.62',\n", 321 | " 'peak@828.52',\n", 322 | " 'peak@830.41',\n", 323 | " 'peak@831.31',\n", 324 | " 'peak@832.11',\n", 325 | " 'peak@833.18',\n", 326 | " 'peak@835.21',\n", 327 | " 'peak@836.08',\n", 328 | " 'peak@838.52',\n", 329 | " 'peak@839.46',\n", 330 | " 'peak@845.61',\n", 331 | " 'peak@847.43',\n", 332 | " 'peak@848.13',\n", 333 | " 'peak@851.38',\n", 334 | " 'peak@852.37',\n", 335 | " 'peak@853.27',\n", 336 | " 'peak@865.60',\n", 337 | " 'peak@866.30',\n", 338 | " 'peak@867.19',\n", 339 | " 'peak@868.37',\n", 340 | " 'peak@869.33',\n", 341 | " 'peak@871.56',\n", 342 | " 'peak@877.14',\n", 343 | " 'peak@880.22',\n", 344 | " 'peak@883.44',\n", 345 | " 'peak@888.17',\n", 346 | " 'peak@889.28',\n", 347 | " 'peak@892.13',\n", 348 | " 'peak@893.47',\n", 349 | " 'peak@895.61',\n", 350 | " 'peak@899.01',\n", 351 | " 'peak@901.35',\n", 352 | " 'peak@902.33',\n", 353 | " 'peak@909.42',\n", 354 | " 'peak@910.52',\n", 355 | " 'peak@911.53',\n", 356 | " 'peak@914.31',\n", 357 | " 'peak@915.22',\n", 358 | " 'peak@918.67',\n", 359 | " 'peak@919.40',\n", 360 | " 'peak@921.12',\n", 361 | " 'peak@922.21',\n", 362 | " 'peak@925.06',\n", 363 | " 'peak@931.13',\n", 364 | " 'peak@932.35',\n", 365 | " 'peak@933.52',\n", 366 | " 'peak@935.49',\n", 367 | " 'peak@936.55',\n", 368 | " 'peak@937.59',\n", 369 | " 'peak@938.47',\n", 370 | " 'peak@939.62',\n", 371 | " 'peak@946.26',\n", 372 | " 'peak@949.37',\n", 373 | " 'peak@950.28',\n", 374 | " 'peak@951.55',\n", 375 | " 'peak@953.40',\n", 376 | " 'peak@954.49',\n", 377 | " 'peak@963.69',\n", 378 | " 'peak@964.52',\n", 379 | " 'peak@965.19',\n", 380 | " 'peak@982.22',\n", 381 | " 'loss@16.35',\n", 382 | " 'loss@17.02',\n", 383 | " 'loss@17.85',\n", 384 | " 'loss@27.05',\n", 385 | " 'loss@28.14',\n", 386 | " 'loss@29.99',\n", 387 | " 'loss@31.26',\n", 388 | " 'loss@32.17',\n", 389 | " 'loss@35.28',\n", 390 | " 'loss@41.92',\n", 391 | " 'loss@43.07',\n", 392 | " 'loss@43.95',\n", 393 | " 'loss@44.99',\n", 394 | " 'loss@46.05',\n", 395 | " 'loss@48.02',\n", 396 | " 'loss@49.19',\n", 397 | " 'loss@50.41',\n", 398 | " 'loss@56.48',\n", 399 | " 'loss@59.33',\n", 400 | " 'loss@60.42',\n", 401 | " 'loss@62.14',\n", 402 | " 'loss@62.87',\n", 403 | " 'loss@66.32',\n", 404 | " 'loss@67.23',\n", 405 | " 'loss@70.01',\n", 406 | " 'loss@71.02',\n", 407 | " 'loss@72.12',\n", 408 | " 'loss@79.21',\n", 409 | " 'loss@80.19',\n", 410 | " 'loss@82.53',\n", 411 | " 'loss@85.93',\n", 412 | " 'loss@88.07',\n", 413 | " 'loss@89.41',\n", 414 | " 'loss@92.26',\n", 415 | " 'loss@93.37',\n", 416 | " 'loss@98.10',\n", 417 | " 'loss@101.32',\n", 418 | " 'loss@104.40',\n", 419 | " 'loss@109.98',\n", 420 | " 'loss@112.21',\n", 421 | " 'loss@113.17',\n", 422 | " 'loss@114.35',\n", 423 | " 'loss@115.24',\n", 424 | " 'loss@115.94',\n", 425 | " 'loss@128.27',\n", 426 | " 
'loss@129.17',\n", 427 | "  'loss@130.16',\n", 428 | "  'loss@133.41',\n", 429 | "  'loss@134.11',\n", 430 | "  'loss@135.93',\n", 431 | "  'loss@142.08',\n", 432 | "  'loss@143.02',\n", 433 | "  'loss@145.46',\n", 434 | "  'loss@146.33',\n", 435 | "  'loss@148.36',\n", 436 | "  'loss@149.43',\n", 437 | "  'loss@150.23',\n", 438 | "  'loss@151.13',\n", 439 | "  'loss@153.02',\n", 440 | "  'loss@156.92',\n", 441 | "  'loss@158.18',\n", 442 | "  'loss@160.25',\n", 443 | "  'loss@161.27',\n", 444 | "  'loss@164.32',\n", 445 | "  'loss@168.39',\n", 446 | "  'loss@169.24',\n", 447 | "  'loss@169.90',\n", 448 | "  'loss@173.10',\n", 449 | "  'loss@174.98',\n", 450 | "  'loss@184.31',\n", 451 | "  'loss@185.40',\n", 452 | "  'loss@194.11']" 453 | ] 454 | }, 455 | "execution_count": 12, 456 | "metadata": {}, 457 | "output_type": "execute_result" 458 | } 459 | ], 460 | "source": [ 461 | "documents[0].words" 462 | ] 463 | }, 464 | { 465 | "cell_type": "markdown", 466 | "metadata": {}, 467 | "source": [ 468 | "## Actual score calculation\n", 469 | "+ Using ``Spec2Vec`` with ``intensity_weighting_power=0.5``.\n", 470 | "+ Calculate matrix of all-vs-all similarity scores." 471 | ] 472 | }, 473 | { 474 | "cell_type": "code", 475 | "execution_count": 13, 476 | "metadata": {}, 477 | "outputs": [], 478 | "source": [ 479 | "spec2vec_similarity = Spec2Vec(model, intensity_weighting_power=0.5)\n", 480 | "similarity_matrix = spec2vec_similarity.matrix(documents, documents, is_symmetric=True)" 481 | ] 482 | }, 483 | { 484 | "cell_type": "markdown", 485 | "metadata": {}, 486 | "source": [ 487 | "## Store similarity matrix" 488 | ] 489 | }, 490 | { 491 | "cell_type": "code", 492 | "execution_count": 14, 493 | "metadata": {}, 494 | "outputs": [], 495 | "source": [ 496 | "filename = os.path.join(path_data,'similarities_spec2vec_2dec_15iter.npy')\n", 497 | "np.save(filename, similarity_matrix)" 498 | ] 499 | }, 500 | { 501 | "cell_type": "markdown", 502 | "metadata": {}, 503 | "source": [ 504 | "---" 505 | ] 506 | }, 507 | { 508 | "cell_type": "markdown", 509 | "metadata": {}, 510 | "source": [ 511 | "# Same but now with model trained on all positive ionmode spectra\n", 512 | "(or, more precisely, all that had >= 10 peaks)" 513 | ] 514 | }, 515 | { 516 | "cell_type": "markdown", 517 | "metadata": {}, 518 | "source": [ 519 | "### Load pretrained spec2vec model" 520 | ] 521 | }, 522 | { 523 | "cell_type": "code", 524 | "execution_count": 2, 525 | "metadata": {}, 526 | "outputs": [], 527 | "source": [ 528 | "path_models = os.path.join(path_data, \"trained_models\")\n", 529 | "model_file = os.path.join(path_models, \"spec2vec_AllPositive_ratio05_filtered_201101_iter_15.model\")\n", 530 | "\n", 531 | "# Load pretrained model\n", 532 | "model = gensim.models.Word2Vec.load(model_file)" 533 | ] 534 | }, 535 | { 536 | "cell_type": "code", 537 | "execution_count": 9, 538 | "metadata": {}, 539 | "outputs": [ 540 | { 541 | "data": { 542 | "text/plain": [ 543 | "115910" 544 | ] 545 | }, 546 | "execution_count": 9, 547 | "metadata": {}, 548 | "output_type": "execute_result" 549 | } 550 | ], 551 | "source": [ 552 | "len(model.wv.vocab)" 553 | ] 554 | }, 555 | { 556 | "cell_type": "code", 557 | "execution_count": 7, 558 | "metadata": {}, 559 | "outputs": [], 560 | "source": [ 561 | "from spec2vec import Spec2Vec\n", 562 | "from spec2vec import SpectrumDocument " 563 | ] 564 | }, 565 | { 566 | "cell_type": "code", 567 | "execution_count": 8, 568 | "metadata": {}, 569 | "outputs": [], 570 | "source": [ 571 | "documents = [SpectrumDocument(s, n_decimals=2) for
s in spectrums_postprocessed]" 572 | ] 573 | }, 574 | { 575 | "cell_type": "code", 576 | "execution_count": 9, 577 | "metadata": {}, 578 | "outputs": [ 579 | { 580 | "data": { 581 | "text/plain": [ 582 | "['peak@289.29',\n", 583 | " 'peak@295.55',\n", 584 | " 'peak@298.49',\n", 585 | " 'peak@317.32',\n", 586 | " 'peak@319.66',\n", 587 | " 'peak@324.48',\n", 588 | " 'peak@325.32',\n", 589 | " 'peak@339.79',\n", 590 | " 'peak@343.95',\n", 591 | " 'peak@347.02']" 592 | ] 593 | }, 594 | "execution_count": 9, 595 | "metadata": {}, 596 | "output_type": "execute_result" 597 | } 598 | ], 599 | "source": [ 600 | "documents[0].words[:10]" 601 | ] 602 | }, 603 | { 604 | "cell_type": "markdown", 605 | "metadata": {}, 606 | "source": [ 607 | "## Actual score calculation\n", 608 | "+ Using ``Spec2Vec`` with ``intensity_weighting_power=0.5``.\n", 609 | "+ Calculate matrix of all-vs-all similarity scores." 610 | ] 611 | }, 612 | { 613 | "cell_type": "code", 614 | "execution_count": 10, 615 | "metadata": {}, 616 | "outputs": [], 617 | "source": [ 618 | "spec2vec_similarity = Spec2Vec(model, intensity_weighting_power=0.5)\n", 619 | "\n", 620 | "similarity_matrix = spec2vec_similarity.matrix(documents, documents, is_symmetric=True)" 621 | ] 622 | }, 623 | { 624 | "cell_type": "markdown", 625 | "metadata": {}, 626 | "source": [ 627 | "## Store similarity matrix" 628 | ] 629 | }, 630 | { 631 | "cell_type": "code", 632 | "execution_count": 11, 633 | "metadata": {}, 634 | "outputs": [], 635 | "source": [ 636 | "filename = os.path.join(path_data,'similarities_spec2vec_2dec_AllPositiveModel_15iter_201101.npy')\n", 637 | "np.save(filename, similarity_matrix)" 638 | ] 639 | } 640 | ], 641 | "metadata": { 642 | "kernelspec": { 643 | "display_name": "Python 3", 644 | "language": "python", 645 | "name": "python3" 646 | }, 647 | "language_info": { 648 | "codemirror_mode": { 649 | "name": "ipython", 650 | "version": 3 651 | }, 652 | "file_extension": ".py", 653 | "mimetype": "text/x-python", 654 | "name": "python", 655 | "nbconvert_exporter": "python", 656 | "pygments_lexer": "ipython3", 657 | "version": "3.7.6" 658 | } 659 | }, 660 | "nbformat": 4, 661 | "nbformat_minor": 4 662 | } 663 | -------------------------------------------------------------------------------- /notebooks/iomega-extra-get-spec2vec-embeddings.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Derive spec2vec embeddings of MS/MS spectra" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "### Imports" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import os\n", 24 | "import sys\n", 25 | "import gensim\n", 26 | "import numpy as np\n", 27 | "\n", 28 | "ROOT = os.path.dirname(os.getcwd())\n", 29 | "#path_data = os.path.join(ROOT, 'data')\n", 30 | "path_data = 'C:\\\\OneDrive - Netherlands eScience Center\\\\Project_Wageningen_iOMEGA\\\\matchms\\\\data\\\\'\n", 31 | "sys.path.insert(0, ROOT)" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "### Import dataset to create embeddings from, here: pre-processed dataset \"Unique InchiKeys\"" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 2, 44 | "metadata": {}, 45 | "outputs": [ 46 | { 47 | "name": "stdout", 48 | "output_type": "stream", 49 | "text": [ 50 | "number of 
spectra: 13717\n" 51 | ] 52 | } 53 | ], 54 | "source": [ 55 | "from matchms.importing import load_from_json\n", 56 | "\n", 57 | "filename = os.path.join(path_data,'gnps_positive_ionmode_unique_inchikey_cleaned_by_matchms_and_lookups.json')\n", 58 | "spectrums = load_from_json(filename)\n", 59 | "\n", 60 | "print(\"number of spectra:\", len(spectrums))" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "### Post-processing of data" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 3, 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "from matchms.filtering import normalize_intensities\n", 77 | "from matchms.filtering import require_minimum_number_of_peaks\n", 78 | "from matchms.filtering import select_by_mz\n", 79 | "from matchms.filtering import select_by_relative_intensity\n", 80 | "from matchms.filtering import reduce_to_number_of_peaks\n", 81 | "from matchms.filtering import add_losses" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 4, 87 | "metadata": {}, 88 | "outputs": [], 89 | "source": [ 90 | "def post_process(s):\n", 91 | " s = normalize_intensities(s)\n", 92 | " s = select_by_mz(s, mz_from=0, mz_to=1000)\n", 93 | " s = require_minimum_number_of_peaks(s, n_required=10)\n", 94 | " s = reduce_to_number_of_peaks(s, n_required=10, ratio_desired=0.5)\n", 95 | " if s is None:\n", 96 | " return None\n", 97 | " s_remove_low_peaks = select_by_relative_intensity(s, intensity_from=0.001)\n", 98 | " if len(s_remove_low_peaks.peaks) >= 10:\n", 99 | " s = s_remove_low_peaks\n", 100 | " \n", 101 | " s = add_losses(s, loss_mz_from=5.0, loss_mz_to=200.0)\n", 102 | " return s\n", 103 | "\n", 104 | "# apply post processing steps to the data\n", 105 | "spectrums_postprocessed = [post_process(s) for s in spectrums]\n", 106 | "\n", 107 | "# omit spectrums that didn't qualify for analysis\n", 108 | "spectrums_postprocessed = [s for s in spectrums_postprocessed if s is not None]" 109 | ] 110 | }, 111 | { 112 | "cell_type": "markdown", 113 | "metadata": {}, 114 | "source": [ 115 | "### Load pretrained spec2vec model\n", 116 | "- See for instance: https://doi.org/10.5281/zenodo.4173596 (model pretrained on AllPositive dataset)" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 5, 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "path_models = os.path.join(path_data, \"trained_models\")\n", 126 | "model_file = os.path.join(path_models, \"spec2vec_AllPositive_ratio05_filtered_201101_iter_15.model\")\n", 127 | "\n", 128 | "# Load pretrained model\n", 129 | "model = gensim.models.Word2Vec.load(model_file)" 130 | ] 131 | }, 132 | { 133 | "cell_type": "markdown", 134 | "metadata": {}, 135 | "source": [ 136 | "### Create spectrum \"documents\"" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": 6, 142 | "metadata": {}, 143 | "outputs": [], 144 | "source": [ 145 | "from spec2vec import Spec2Vec\n", 146 | "from spec2vec import SpectrumDocument " 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": 7, 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "documents = [SpectrumDocument(s, n_decimals=2) for s in spectrums_postprocessed]" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 8, 161 | "metadata": {}, 162 | "outputs": [ 163 | { 164 | "data": { 165 | "text/plain": [ 166 | "['peak@289.29',\n", 167 | " 'peak@295.55',\n", 168 | " 'peak@298.49',\n", 169 | " 'peak@317.32',\n", 
170 | " 'peak@319.66',\n", 171 | " 'peak@324.48',\n", 172 | " 'peak@325.32',\n", 173 | " 'peak@339.79',\n", 174 | " 'peak@343.95',\n", 175 | " 'peak@347.02',\n", 176 | " 'peak@347.91',\n", 177 | " 'peak@361.15',\n", 178 | " 'peak@361.84',\n", 179 | " 'peak@364.23',\n", 180 | " 'peak@364.86',\n", 181 | " 'peak@365.85',\n", 182 | " 'peak@368.22',\n", 183 | " 'peak@368.97',\n", 184 | " 'peak@375.07',\n", 185 | " 'peak@375.75']" 186 | ] 187 | }, 188 | "execution_count": 8, 189 | "metadata": {}, 190 | "output_type": "execute_result" 191 | } 192 | ], 193 | "source": [ 194 | "documents[0].words[:20]" 195 | ] 196 | }, 197 | { 198 | "cell_type": "markdown", 199 | "metadata": {}, 200 | "source": [ 201 | "### Derive embeddings" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": 9, 207 | "metadata": {}, 208 | "outputs": [], 209 | "source": [ 210 | "from tqdm.notebook import tqdm # optional, just to get a progress bar\n", 211 | "from spec2vec.vector_operations import calc_vector" 212 | ] 213 | }, 214 | { 215 | "cell_type": "code", 216 | "execution_count": 10, 217 | "metadata": {}, 218 | "outputs": [ 219 | { 220 | "name": "stdout", 221 | "output_type": "stream", 222 | "text": [ 223 | "Embedding vector size: 300\n" 224 | ] 225 | }, 226 | { 227 | "data": { 228 | "application/vnd.jupyter.widget-view+json": { 229 | "model_id": "4b56a308996b48188fb2fff85e8b787f", 230 | "version_major": 2, 231 | "version_minor": 0 232 | }, 233 | "text/plain": [ 234 | "HBox(children=(HTML(value=''), FloatProgress(value=0.0, max=12797.0), HTML(value='')))" 235 | ] 236 | }, 237 | "metadata": {}, 238 | "output_type": "display_data" 239 | }, 240 | { 241 | "name": "stdout", 242 | "output_type": "stream", 243 | "text": [ 244 | "\n" 245 | ] 246 | } 247 | ], 248 | "source": [ 249 | "intensity_weighting_power = 0.5\n", 250 | "allowed_missing_percentage = 10 # specify the maximum (weighted) fraction of the spectrum that is allowed to be missing\n", 251 | "\n", 252 | "vector_size = model.vector_size\n", 253 | "print(f\"Embedding vector size: {vector_size}\")\n", 254 | "\n", 255 | "embeddings_spec2vec = np.zeros((len(documents), vector_size), dtype=\"float\")\n", 256 | "for i, doc in enumerate(tqdm(documents)):\n", 257 | " embeddings_spec2vec[i, 0:vector_size] = calc_vector(model, doc,\n", 258 | " intensity_weighting_power,\n", 259 | " allowed_missing_percentage)" 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": 12, 265 | "metadata": {}, 266 | "outputs": [ 267 | { 268 | "data": { 269 | "text/plain": [ 270 | "(12797, 300)" 271 | ] 272 | }, 273 | "execution_count": 12, 274 | "metadata": {}, 275 | "output_type": "execute_result" 276 | } 277 | ], 278 | "source": [ 279 | "embeddings_spec2vec.shape" 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": 20, 285 | "metadata": {}, 286 | "outputs": [ 287 | { 288 | "name": "stdout", 289 | "output_type": "stream", 290 | "text": [ 291 | "[42.0178, -43.2054, 34.7513, -107.5347, -3.2445, -76.6327, -11.3103, -36.3136, 4.8236, 41.0216, 39.7589, -3.3456, 25.8479, -39.3929, -16.6266, -38.0523, 20.2984, 37.2182, 16.1859, 42.593, 33.4526, -22.1193, 72.8967, 13.4784, -9.9983, 28.5276, -21.382, -4.9061, -1.5627, 16.9605, -54.0134, -28.2718, 9.271, 33.7729, 32.5119, 1.4593, 3.954, 33.8745, -0.9841, -10.5822, 31.8189, -17.6984, 44.6887, -39.6979, 4.4911, -27.5185, -15.1705, 36.0776, 17.4914, 47.657, -37.9565, -2.4548, 0.2419, 41.5399, -51.2658, 19.3386, -44.8592, 7.5528, -20.032, -12.4599, -6.3517, -3.3403, -29.8746, 0.0414, -16.2784, 
9.1359, 14.9801, -6.6536, 74.3326, -24.2418, 6.308, 26.0182, -27.0743, -6.403, 30.0604, -2.4306, -25.09, 58.094, 11.1743, 18.9769, -45.2443, 49.2554, 8.8223, -8.9952, -30.1558, 10.2108, -43.2419, -24.7698, 6.6931, 48.0061, 16.3499, 64.5272, 35.6992, 61.1264, 16.8335, 11.5313, 76.2697, 10.0867, 39.2198, -19.8674, -9.7124, -8.2465, -15.4243, -5.7536, -18.4063, -26.6288, 8.6747, -15.5598, 31.7884, 11.6019, 34.7642, -17.3149, 19.2221, 1.0544, -9.3589, -21.299, 2.8585, -59.1753, -46.7396, -27.7369, -35.5156, 1.4075, 22.8252, -52.6066, 21.0821, -63.915, 14.5507, 12.4017, 28.144, 91.5688, 8.9935, 56.2665, 12.4462, 3.0205, -15.1616, -47.5922, 35.9081, -15.7173, 49.47, 16.2656, 15.5834, -0.4051, -4.3243, -10.2834, 27.2632, -15.4928, -46.9121, 35.2568, 16.7973, -4.4028, 8.4896, -32.6048, -12.4744, -14.9225, 22.1052, -11.8415, 12.4949, -13.0579, 0.1762, 48.4055, 2.0232, 52.3739, 22.929, -2.531, -11.4015, -49.9718, 36.4911, -36.9411, -4.4625, -26.0895, -19.0099, 12.5625, -25.3182, 28.4458, -1.6888, -38.6568, -41.8601, 11.0498, 2.5899, 13.7508, 13.0312, -12.3197, 19.5758, 41.3145, 3.1327, -39.0884, -31.2428, -26.8675, 7.1927, 0.5592, 63.0551, -14.5367, -24.8314, -18.4052, 17.4689, 9.9714, 2.3212, 39.4402, 33.1608, 5.5943, 6.8414, -15.5299, -17.2614, 49.91, 23.3151, -0.6259, -4.4055, -0.0824, 1.5579, -16.4322, -9.0503, -14.4429, -15.841, 36.2084, -35.6031, 27.5839, 11.126, -38.1742, -36.9712, -0.7543, -13.6165, 6.1344, 10.9474, 27.0495, -7.7824, 20.9525, 22.6501, -20.2698, 13.0354, -1.565, 45.386, 3.791, -58.341, 9.3911, -7.5399, -10.1706, 4.0885, 12.5856, -19.9361, -27.1453, 17.7503, -31.0948, 16.2705, -7.2276, 0.2205, -2.5979, -39.0949, -30.1345, -9.0955, -8.0222, 1.8287, -16.2799, -17.6804, -2.9488, 30.8147, -2.1604, 44.6537, 26.612, 22.5985, 23.6299, 39.4961, 36.3558, 26.1925, -12.3952, -8.4688, 25.1523, 37.8104, 0.8851, 11.3241, -24.0909, -14.2435, 2.416, 4.2172, -22.5194, -12.3099, 36.8767, 12.3774, -9.5058, -7.7588, 24.5484, 2.2826, 1.1877, 7.5718, -15.6152, 77.5607, -12.2692, -10.7108, -1.7481, 21.2851, 14.5501, -8.5288, -3.2972, -29.6102, -59.1496, 81.3534, -27.9671, 12.1701, 16.4148, -70.3633, -35.641]\n" 292 | ] 293 | } 294 | ], 295 | "source": [ 296 | "print([np.round(x, 4) for x in embeddings_spec2vec[0,:]])" 297 | ] 298 | } 299 | ], 300 | "metadata": { 301 | "kernelspec": { 302 | "display_name": "Python 3", 303 | "language": "python", 304 | "name": "python3" 305 | }, 306 | "language_info": { 307 | "codemirror_mode": { 308 | "name": "ipython", 309 | "version": 3 310 | }, 311 | "file_extension": ".py", 312 | "mimetype": "text/x-python", 313 | "name": "python", 314 | "nbconvert_exporter": "python", 315 | "pygments_lexer": "ipython3", 316 | "version": "3.7.7" 317 | } 318 | }, 319 | "nbformat": 4, 320 | "nbformat_minor": 4 321 | } 322 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | 4 | from setuptools import setup 5 | from setuptools import find_packages 6 | 7 | here = os.path.abspath(os.path.dirname(__file__)) 8 | 9 | version = {} 10 | with open(os.path.join(here, "__version__.py")) as f: 11 | exec(f.read(), version) 12 | 13 | with open("README.md") as readme_file: 14 | readme = readme_file.read() 15 | 16 | setup( 17 | name="spec2vec_gnps_analysis", 18 | version=version["__version__"], 19 | description="Additional functions for matchms and Spec2Vec.", 20 | long_description=readme, 21 | author="Netherlands eScience
Center", 22 | author_email="", 23 | url="https://github.com/iomega/ms2query", 24 | packages=find_packages(), 25 | include_package_data=True, 26 | license="Apache Software License 2.0", 27 | zip_safe=False, 28 | test_suite="tests", 29 | python_requires='>=3.7', 30 | install_requires=[ 31 | "matchms>=0.6.2", 32 | "numpy", 33 | "pandas", 34 | "spec2vec", 35 | "networkx", 36 | "gensim", 37 | ], 38 | extras_require={"dev": ["bump2version", 39 | "isort>=4.2.5,<5", 40 | "prospector[with_pyroma]", 41 | "pytest", 42 | "pytest-cov", 43 | "sphinx>=3.0.0,!=3.2.0,<4.0.0", 44 | "sphinx_rtd_theme", 45 | "sphinxcontrib-apidoc", 46 | "yapf",], 47 | } 48 | ) 49 | -------------------------------------------------------------------------------- /tests/test_library_matching.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import numpy as np 4 | import pytest 5 | from matchms import Spectrum 6 | from spec2vec import Spec2Vec 7 | from spec2vec import SpectrumDocument 8 | #path_root = os.path.dirname(os.path.__file__) 9 | path_root = os.path.dirname(os.getcwd()) 10 | sys.path.insert(0, os.path.join(path_root, "custom_functions")) 11 | from custom_functions.library_search import library_matching 12 | 13 | 14 | def test_library_matching(): 15 | spectrum_1 = Spectrum(mz=np.array([100, 150, 200.]), 16 | intensities=np.array([0.7, 0.2, 0.1]), 17 | metadata={'precursor_mz': 500.5}) 18 | spectrum_2 = Spectrum(mz=np.array([100, 140, 190.]), 19 | intensities=np.array([0.4, 0.2, 0.1]), 20 | metadata={'precursor_mz': 500.11}) 21 | spectrum_3 = Spectrum(mz=np.array([100, 140, 190.]), 22 | intensities=np.array([0.3, 0.5, 0.2]), 23 | metadata={'precursor_mz': 501.1}) 24 | spectrum_4 = Spectrum(mz=np.array([97.5, 137.5, 200.]), 25 | intensities=np.array([0.8, 0.5, 0.4]), 26 | metadata={'precursor_mz': 500.1}) 27 | documents_library = [SpectrumDocument(s) for s in [spectrum_1, spectrum_2, spectrum_3]] 28 | documents_query = [SpectrumDocument(spectrum_4)] 29 | found_matches = library_matching(documents_query, documents_library, 30 | model=None, 31 | presearch_based_on=["precursor_mz"], 32 | include_scores=["cosine", "modcosine"], 33 | ignore_non_annotated=False, 34 | intensity_weighting_power=0.5, 35 | allowed_missing_percentage=5.0, 36 | cosine_tol=2.0, 37 | mass_tolerance=2.0, 38 | mass_tolerance_type="Dalton") 39 | 40 | scores_cosine = found_matches[0].values[:,0] 41 | expected_scores_cosine = np.array([0.05312127152597306, 0.0, 0.0]) 42 | scores_modcos = found_matches[0].values[:,2] 43 | expected_scores_modcos = np.array([0.05312127152597306, 0.0, 0.7757282939050968]) 44 | assert list(scores_cosine) == [pytest.approx(x, 1e-6) for x in expected_scores_cosine], \ 45 | "Expected different scores." 46 | assert list(scores_modcos) == [pytest.approx(x, 1e-6) for x in expected_scores_modcos], \ 47 | "Expected different mod. cosine scores." 48 | assert np.all(found_matches[0].values[:,3] == np.array([1, 0, 2])), \ 49 | "Expected different number of matches" 50 | assert np.all(found_matches[0].values[:,4]), "Expected all mass matches to be True" 51 | --------------------------------------------------------------------------------