├── .gitattributes ├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── deploy.sh ├── docs └── pictures │ └── Meth_comp.png ├── meta.yaml ├── mkdocs.yml ├── pycoMeth ├── CGI_Finder.py ├── Comp_Report.py ├── CoordGen.py ├── FileParser.py ├── Meth_Comp.py ├── Meth_Seg.py ├── __init__.py ├── __main__.py ├── common.py ├── loader.py ├── meth_seg │ ├── __init__.py │ ├── emissions.py │ ├── hmm.py │ ├── math.py │ ├── postprocessing.py │ ├── segment.py │ └── segments_csv_io.py └── templates │ ├── CpG_Interval.html.j2 │ └── CpG_summary.html.j2 ├── setup.py ├── versipy.yaml ├── versipy_history.txt └── versipy_templates ├── README.md ├── meta.yaml ├── pycoMeth └── __init__.py └── setup.py /.gitattributes: -------------------------------------------------------------------------------- 1 | tests/* linguist-documentation 2 | docs/formatter.rb linguist-documentation=false 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | tests/ 106 | 107 | # demo_results 108 | docs/*/results/ 109 | docs/*/data/ 110 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | language: python 3 | python: 3.7 4 | branches: 5 | only: 6 | - main 7 | - dev 8 | before_install: 9 | - pip install -U importlib_metadata 10 | install: 11 | - pip install pycometh 12 | script: true 13 | deploy: 14 | - provider: pypi 15 | username: __token__ 16 | server: https://test.pypi.org/legacy/ 17 | on: 18 | branch: dev 19 | password: 20 | secure: 
lfdlhjA11y6VEvlSx2NnxXV2JQjKCkHcM51VBARgrGBkMouY2arXTEyWZwVXNYa7caC6ekk/BdcIooINtcLzQpHlfPq2tI5cwzVspGlRAInhL+4AdNl20YBoOS3BhLQPWCd+jcqNkCrvErI1FNDQI4DB18Na+21CJpNXuj22RH1QaABDiZuYC4mKU4oOg1VuT+Nse5m0qSQ7WJiWskVA5qumb9BL76DgFgW37n9SuOctdevHYx4ebgSP7Tvz81XUZx89B6qCwFJu6Bzi+tMOpKjx1jAaRvjW0ttMX5/hSXgFKEUz6fzPuxkAZgj6mfV3SCQM1q2zImT9geC8eap/SEUkG8q4jYRqjBlspIr7eIQWpNMlQDZchQSBYs0bRUODiF31HrCwXVFWK1xHwbzB5rXt4R9bv4uvhkWIvVH+ACcC5GYAxJslInROY6M99Tt8HpMGsrBGmU1bT+RRYWadsyA4pmg/BgCaK3Fw2Y8mIEhBxsohsgTwdZfDn4RGOp+8FzshQe/j5r1LNFJUD5G+YWvbplV5ySPE3if+/4nLA3LDrDB931Ys0duvkUqe3IeioneiB+2DOUHhjmEDY9dOd+dju0dDeF0PL4TcJrlXxOo8oVRq3CZz/Q08ubHj3JaqDatP6UmTm04kEes8uenl8vwggGjUb3gZEgZWSqVS/Jk= 21 | - provider: pypi 22 | username: __token__ 23 | on: 24 | branch: main 25 | password: 26 | secure: BQq5jbwy70rRR9jngGFSvPhBH2sVB/ntUxQ2TYD0Khcl0C+UOU/NCDYpxW9cB/ZoMnVWGeiKgDVOQrRFmcQk8JT49xctk9awaCxczKgJ5SfOv9lwIIpdZQ+d7QPDgI9P0lhoTfaIFZx00rRKtYr07g0mzGOBKDOdMn/22dOjp7CRfIPVfDOPQc5mySI6R9hHqYGaCIMmpzFhV5Q2+lxpYqK8f9usO5jkySXdP5boRrd+RYCEaP47IuIsr7zNXEkhWQSbDx35gV7JKe5g2HfZUBu9SI8GFbD02ncnpLRUQFLd7aUIx7oxnIhVUd/S6tlYKNfQyWuzAcEcRqxTMZMUj8AGlA7P1BpYJvU2FsLtX5Pf1jJwWTht07I9UpOMEhccJHvB1oDcmICKUB8wQFEKBYAWd3CbBGIXv7L5e/iTGG3J9cpmoN2XqzhUd3JBGBnKzy92cdSX/2vdw6XjL8NB1T5EVCsc/1vRrjIfALwEDxzX78Jo0oz9sq8AyHeTZWCTFI/q186FfSO8BXEuGY5AdhgWYbwB1mWW/QICgWgv+2S1swBorgVJ790eBleLIQk93PtwITom3Fv4x7tHWZwF1mfuwawEGdWEuo/vdM4T24wvU69DeT9rUs7sNbirWeOCaHVyakb3GEIqwhZEU+PxkIUMC0rt14tEynff5hiXsdU= -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # pycoMeth Changelog 2 | 3 | ### 2022/06/09 v2.2.1 4 | * Meth_Seg now allows filtering which read groups are used in the segmentation (parameter `--read_groups_to_include`) 5 | * Multiple bugfixes 6 | 7 | ### 2022/05/16 v2.2.0 8 | * Implemented Fisher's exact test for two-sample testing and the chi-squared test for multi-sample testing (with Fisher's exact test as a post-hoc test) 9 | 10 | ### 2022/03/07 v2.1.1 11 | * Implemented post-hoc testing and reporting for multi-sample tests 12 | * Multiple bugfixes and performance improvements 13 | 14 | ### 2022/02/16 v2.0.0 15 | * First release of pycoMeth 2.0, using the [MetH5 format](http://github.com/snajder-r/MetH5Format) as input. 16 | * Meth_Seg is now a core component of pycoMeth, allowing for de novo methylation segmentation 17 | * Additional testing modes: paired-CpG methylation rate testing as well as unpaired read-methylation rate testing 18 | * CpG_Aggregate and Interval_Aggregate have been removed, as random access to methylation calls in the MetH5 format makes them unnecessary 19 | * Meth_Comp can now run on multiple CPUs.
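For illustration only: the two-sample and multi-sample tests introduced in v2.2.0 correspond to standard contingency-table tests available from SciPy (already a pycoMeth dependency). The sketch below uses hypothetical methylated/unmethylated call counts and is not pycoMeth's internal implementation:

    from scipy.stats import chi2_contingency, fisher_exact

    # Hypothetical (methylated, unmethylated) call counts per sample
    sample_a = [45, 55]
    sample_b = [20, 80]
    sample_c = [30, 70]

    # Two samples: Fisher's exact test on the 2x2 contingency table
    odds_ratio, p_two = fisher_exact([sample_a, sample_b])

    # More than two samples: chi-squared test on the full table,
    # with pairwise Fisher's exact tests as post-hoc follow-up
    chi2, p_multi, dof, expected = chi2_contingency([sample_a, sample_b, sample_c])

    print(f"2-sample Fisher p={p_two:.3g}, multi-sample chi-squared p={p_multi:.3g}")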
20 | 21 | ### 2020/07/16 v-0.4.14 22 | 23 | * Switch from orca to kaleido for static export 24 | * Export a summary of intervals in the table file for all intervals, not just for top hits 25 | * Add new plot for the distance between CpG islands and the closest TSS 26 | 27 | ### 2020/07/09 v-0.4.8 28 | 29 | * Speed improvement to Comp_Report 30 | * Add interactive API to Comp_Report 31 | * Add ability to export static plots using orca 32 | 33 | ### 2020/01/15 v-0.4.5 34 | 35 | * Add tabular text reports to Comp_Report 36 | * Add (default) option to write out all the intervals in Meth_Comp with the reasons why they were excluded from or included in the DM analysis 37 | * Improve the Comp_Report summary table and include new fields from Meth_Comp 38 | * Tidy the output folder structure for reports generated by Comp_Report 39 | * Add chromosome ideogram plot to the summary report 40 | * A FASTA reference is now required to run Comp_Report 41 | 42 | ### 2020/04/01 v-0.4.0 43 | 44 | * General improvement of logging message output 45 | * Implement fancy color logger 46 | * Add position tracking within intervals 47 | * Add Comp_Report module to generate HTML reports of significant candidates 48 | 49 | ### 2019/10/09 v-0.1.0 50 | 51 | * Deep refactoring of Freq_meth_calculate to Aggregate 52 | * Major doc update, including reorganisation of the doc folders 53 | * Implement autodoc from docstrings 54 | * Fix and test CLI 55 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior.
39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at aleg@ebi.ac.uk. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to pycoMeth 2 | 3 | First of all, thanks for considering contributing to `pycoMeth`! 👍 It's people like you that make it rewarding for us to work on `pycoMeth`. 4 | 5 | `pycoMeth` is an open source project, maintained by publicly funded academic researchers. 6 | 7 | [repo]: https://github.com/a-slide/pycoMeth 8 | [issues]: https://github.com/a-slide/pycoMeth/issues 9 | [new_issue]: https://github.com/a-slide/pycoMeth/issues/new 10 | [code_of_conduct]: https://github.com/a-slide/pycoMeth/blob/master/docs/code_of_conduct.md 11 | 12 | ## Code of conduct 13 | 14 | Please note that this project is released with a [Contributor Code of Conduct][code_of_conduct]. By participating in this project you agree to abide by its terms. 15 | 16 | ## How you can contribute 17 | 18 | There are several ways you can contribute to this project. If you want to know more about why and how to contribute to open source projects like this one, see this [Open Source Guide](https://opensource.guide/how-to-contribute/). 19 | 20 | ### Share the love ❤️ 21 | 22 | Think `pycoMeth` is useful? Let others discover it, by telling them in person, via Twitter or a blog post. 23 | 24 | Using `pycoMeth` for a paper you are writing? Please cite it. 25 | 26 | ### Ask a question ⁉️ 27 | 28 | Using `pycoMeth` and got stuck? Browse the documentation to see if you can find a solution. 
29 | 30 | Still stuck? Post your question as an [issue on GitHub][new_issue]. 31 | 32 | While we cannot offer user support, we'll do our best to address it, as questions often lead to better documentation or the discovery of bugs. 33 | 34 | 35 | ### Propose an idea 💡 36 | 37 | Have an idea for a new `pycoMeth` feature? Take a look at the [issue list][issues] to see whether it has already been suggested. If not, suggest your idea as an [issue on GitHub][new_issue]. While we can't promise to implement your idea, it helps to: 38 | 39 | * Explain in detail how it would work. 40 | * Keep the scope as narrow as possible. 41 | 42 | See below if you want to contribute code for your idea as well. 43 | 44 | ### Report a bug 🐛 45 | 46 | Using `pycoMeth` and discovered a bug? That's annoying! Don't let others have the same experience and report it as an [issue on GitHub][new_issue] so we can fix it. A good bug report makes it easier for us to do so, so please include: 47 | 48 | * Your operating system name and version (e.g. Mac OS 10.13.6). 49 | * Any details about your local setup that might be helpful in troubleshooting. 50 | * Detailed steps to reproduce the bug. 51 | 52 | ### Improve the documentation 📖 53 | 54 | Noticed a typo on the website? Think a function could use a better example? Good documentation makes all the difference, so your help improving it is very welcome! 55 | 56 | 1. Fork [pycoMeth][repo] and clone it to your computer. To learn more about this process, see [this guide](https://guides.github.com/activities/forking/). 57 | 2. Edit the relevant documentation file and submit a pull request. We will review your changes and include the fix in the next release. 58 | 59 | ### Contribute code 📝 60 | 61 | Care to fix bugs or implement new functionality for `pycoMeth`? Awesome! 👏 Have a look at the [issue list][issues] and leave a comment on the things you want to work on. See also the development guidelines below. 62 | 63 | ## Development guidelines 64 | 65 | We try to follow the [GitHub flow](https://guides.github.com/introduction/flow/) for development and the [PEP 8](https://www.python.org/dev/peps/pep-0008/) Style Guide for Python Code. 66 | 67 | 1. Fork [pycoMeth][repo] and clone it to your computer. To learn more about this process, see [this guide](https://guides.github.com/activities/forking/). 68 | 69 | 2. If you have forked and cloned the project before and it has been a while since you worked on it, [pull changes from the original repo](https://help.github.com/articles/merging-an-upstream-repository-into-your-fork/) to your clone by using `git pull upstream master`. 70 | 71 | 3. Make your changes and test the modified code. 72 | 73 | 4. Commit and push your changes. 74 | 75 | 5. Submit a [pull request](https://guides.github.com/activities/forking/#making-a-pull-request). 76 | 77 | 78 | --- 79 | 80 | This file was adapted from a template created by [peterdesmet](https://gist.github.com/peterdesmet/e90a1b0dc17af6c12daf6e8b2f044e7c). 81 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | # GNU GENERAL PUBLIC LICENSE 2 | **Version 3, 29 June 2007** 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | ## Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works.
12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works.By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users.We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors.You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price.Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights.Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received.You must make sure that they, too, receive 37 | or can get the source code.And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software.For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so.This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software.The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable.Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products.If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary.To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | ## TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 
74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License.Each licensee is addressed as "you"."Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy.The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy.Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies.Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License.If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it."Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form.A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 
133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities.However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work.For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met.This License explicitly affirms your unlimited 159 | permission to run the unmodified Program.The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work.This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force.You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright.Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below.Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 7. 219 | This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy.This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged.This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit.Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source.This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge.You need not require recipients to copy the 279 | Corresponding Source along with the object code.If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source.Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling.In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage.For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product.A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source.The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information.But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed.Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law.If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it.(Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.)You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10.If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term.If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License.Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License.If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program.Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance.However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work.These actions infringe copyright if you do 443 | not accept this License.Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License.You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations.If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License.For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based.The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version.For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement).To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients."Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 
520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License.You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License.If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all.For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work.The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time.Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number.If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation.If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions.However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW.EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE.THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU.SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program.It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | {one line to give the program's name and a brief idea of what it does.} 635 | Copyright (C) {year}{name of author} 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 
641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program.If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | {project}Copyright (C) {year}{fullname} 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type show w. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type show c' for details. 659 | 660 | The hypothetical commands show w' and show c' should show the appropriate 661 | parts of the General Public License.Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs.If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library.If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License.But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![pycoMeth](https://pmbio.github.io/pycoMeth/images/pycoMeth_long.png) 2 | 3 | [![GitHub license](https://img.shields.io/github/license/a-slide/pycoMeth.svg)](https://github.com/a-slide/pycoMeth/blob/master/LICENSE) 4 | [![Language](https://img.shields.io/badge/Language-Python3.7+-yellow.svg)](https://www.python.org/) 5 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7373424.svg)](https://doi.org/10.5281/zenodo.7373424) 6 | 7 | [![PyPI version](https://badge.fury.io/py/pycoMeth.svg)](https://badge.fury.io/py/pycoMeth) 8 | [![PyPI downloads](https://pepy.tech/badge/pycoMeth)](https://pepy.tech/project/pycoMeth) 9 | [![Anaconda Version](https://anaconda.org/snajder-r/pycometh/badges/version.svg)](https://anaconda.org/snajder-r/pycometh) 10 | [![Anaconda Downloads](https://anaconda.org/snajder-r/pycometh/badges/downloads.svg)](https://anaconda.org/snajder-r/pycometh) 11 | 12 | --- 13 | Version in this branch: 2.2.2 14 | 15 | --- 16 | 17 | **DNA methylation analysis downstream to Nanopolish for Oxford Nanopore DNA sequencing datasets** 18 | 19 | `pycoMeth` can be used for further analyses starting from the output files generated by [`Nanopolish call-methylation`](https://github.com/jts/nanopolish). The package contains a suite of tools to **find CpG islands**, **segment methylome**, and to perform a **differential methylation analysis** across multiple samples. 20 | 21 | `pycoMeth` generates extensive tabulated reports and BED files which can be loaded in a genome browser. 
In addition, an interactive HTML report of differentially 22 | methylated intervals/islands can also be generated at the end of the analysis. 23 | 24 | [`Methplotlib`](https://github.com/wdecoster/methplotlib) developed by [Wouter De Coster](https://twitter.com/wouter_decoster) is an excellent complementary tool to visualise and explore methylation status at specific loci. 25 | 26 | Please be aware that `pycoMeth` is a research package that is still under development. The API, command line interface, and implementation might change without backward compatibility. 27 | 28 | --- 29 | ### Installation 30 | 31 | Install either using conda: 32 | 33 | conda install -c snajder-r -c bioconda -c conda-forge pycometh 34 | 35 | Or using pip: 36 | 37 | pip install pycometh 38 | 39 | ### Documentation 40 | 41 | More detailed usage documentation can be found at https://pmbio.github.io/pycoMeth/ 42 | 43 | ### pycoMeth workflow 44 | 45 | ![Workflow](https://pmbio.github.io/pycoMeth/images/overview.svg) 46 | 47 | 48 | ### pycoMeth example HTML report 49 | 50 | [Example HTML report 1](https://pmbio.github.io/pycoMeth/examples/pycoMeth_summary_report.html) 51 | 52 | [Example HTML interval report](https://pmbio.github.io/pycoMeth/examples/interval_reports/interval_0020_chr19-44756986-44758607.html) 53 | 54 | --- 55 | 56 | ### Citing 57 | 58 | The repository is archived at Zenodo: https://doi.org/10.5281/zenodo.6637645 59 | 60 | If you find pycoMeth useful, please cite our paper, originally released as a preprint: 61 | 62 | Snajder, Rene H., Oliver Stegle, and Marc Jan Bonder. 2022. "PycoMeth: A Toolbox for Differential Methylation Testing from Nanopore Methylation Calls." bioRxiv. https://doi.org/10.1101/2022.02.16.480699. 63 | 64 | @ARTICLE{Snajder2023-wd, 65 | title = "pycoMeth: a toolbox for differential methylation testing from Nanopore methylation calls", 66 | author = "Snajder, Rene and Leger, Adrien and Stegle, Oliver and Bonder, Marc Jan", 67 | journal = "Genome Biol.", volume = 24, number = 1, pages = "83", month = apr, year = 2023, 68 | } 69 | 70 | 71 | 72 | ### Authors 73 | 74 | * Rene Snajder (rene.snajder@gmail.com) 75 | * Adrien Leger 76 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # -*- coding: utf-8 -*- 3 | 4 | repostring="" 5 | if [[ $# -eq 1 ]]; then 6 | if [ "$1" = "test" ]; then 7 | repostring="--repository testpypi" 8 | fi 9 | fi 10 | 11 | echo "Compile package from setup.py" 12 | python setup.py sdist 13 | 14 | echo "Uploading to PyPI..." 15 | twine upload $repostring dist/* 16 | 17 | if [[ ! -z $repostring ]]; then 18 | echo "Testing complete - not attempting to upload to conda" 19 | exit 20 | fi 21 | 22 | echo "Build noarch package for conda..." 23 | conda-build meta.yaml --python 3.7 --output-folder conda_build -c bioconda -c conda-forge -c plotly -c snajder-r --no-include-recipe --no-test 24 | 25 | echo "Logging in to conda..." 26 | anaconda login 27 | 28 | echo "Deploying to conda..."
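# The upload below pushes every package tarball produced by conda-build above.
# Note that without bash's globstar option, `**` matches a single path level
# (like `*`), which still finds the noarch package written to conda_build/noarch/.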
29 | anaconda upload conda_build/**/*.tar.bz2
30 |
31 | echo "Cleaning up"
32 |
33 | rm -Rf dist
34 | rm -Rf conda_build
35 | rm -Rf build
36 | rm -Rf *.egg-info
37 | rm -Rf .eggs
38 |
39 | exit 0
40 |
-------------------------------------------------------------------------------- /docs/pictures/Meth_comp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PMBio/pycoMeth/8e81e0799a365ae65f42bafa28121218b1f8f1c3/docs/pictures/Meth_comp.png -------------------------------------------------------------------------------- /meta.yaml: --------------------------------------------------------------------------------
1 | {% set version = "2.2.2" %}
2 | {% set name = "pycoMeth" %}
3 |
4 | package:
5 | name: {{ name|lower }}
6 | version: {{ version }}
7 |
8 | source:
9 | path: dist/{{ name }}-{{ version }}.tar.gz
10 |
11 | build:
12 | number: 0
13 | script: "pip install {{ name }}-{{ version }}.tar.gz --no-deps --ignore-installed -vv "
14 | entry_points:
15 | - pycoMeth=pycoMeth.__main__:main
16 | - pycometh=pycoMeth.__main__:main
17 | noarch: "python"
18 |
19 | requirements:
20 | build:
21 | - python>=3.7
22 | - pip>=19.2.1
23 | - ripgrep>=11.0.1
24 | - cython
25 | run:
26 | - numpy==1.22.2
27 | - scipy==1.4.1
28 | - statsmodels==0.13.2
29 | - pandas==1.4.1
30 | - Jinja2==3.0.3
31 | - plotly==5.6.0
32 | - pyfaidx==0.6.4
33 | - tqdm==4.62.3
34 | - colorlog==6.6.0
35 | - nbformat==5.1.3
36 | - meth5>=1.1.1
37 |
38 | test:
39 | imports:
40 | - pycoMeth.FileParser
41 | - pycoMeth.CoordGen
42 | - pycoMeth.Meth_Comp
43 | - pycoMeth.Meth_Seg
44 | - pycoMeth.Comp_Report
45 | commands:
46 | - pycoMeth Meth_Comp --help
47 | - pycoMeth Meth_Seg --help
48 | - pycoMeth Comp_Report --help
49 | - pycoMeth CGI_Finder --help
50 |
51 | about:
52 | home: "https://github.com/snajder-r/pycoMeth"
53 | license: "MIT"
54 | summary: "DNA methylation analysis for Oxford Nanopore DNA sequencing datasets downstream of Nanopolish"
55 |
-------------------------------------------------------------------------------- /mkdocs.yml: --------------------------------------------------------------------------------
1 | # Required python packages to generate the documentation
2 | # * mkdocs
3 | # * mkdocs-material
4 | # * pygments
5 | # * pymdown-extensions
6 | # * mknotebooks
7 |
8 | # Project information
9 | site_name: 'pycoMeth'
10 | site_description: 'Python package for nanopore DNA methylation analysis downstream of Nanopolish'
11 | site_author: 'Adrien Leger'
12 | site_url: 'https://a-slide.github.io/pycoMeth/'
13 |
14 | # Repository
15 | repo_name: 'a-slide/pycoMeth'
16 | repo_url: 'https://github.com/a-slide/pycoMeth'
17 |
18 | # Copyright
19 | copyright: 'Copyright © 2019 Adrien Leger'
20 |
21 | # Navigation menu
22 | nav:
23 | - Home: index.md
24 | - Installation instructions: installation.md
25 | - CpG_Aggregate Usage:
26 | - Using CpG_Aggregate: CpG_Aggregate/usage.md
27 | - jupyter API usage: CpG_Aggregate/API_usage.ipynb
28 | - Command line usage: CpG_Aggregate/CLI_usage.ipynb
29 | - Interval_Aggregate Usage:
30 | - Using Interval_Aggregate: Interval_Aggregate/usage.md
31 | - jupyter API usage: Interval_Aggregate/API_usage.ipynb
32 | - Command line usage: Interval_Aggregate/CLI_usage.ipynb
33 | - Meth_Comp Usage:
34 | - Using Meth_Comp: Meth_Comp/usage.md
35 | - jupyter API usage: Meth_Comp/API_usage.ipynb
36 | - Command line usage: Meth_Comp/CLI_usage.ipynb
37 | - Comp_Report Usage:
38 | - Using Comp_Report: Comp_Report/usage.md
39 | - jupyter API
usage: Comp_Report/API_usage.ipynb
40 | - Command line usage: Comp_Report/CLI_usage.ipynb
41 | - CGI_Finder Usage:
42 | - Using CGI_Finder: CGI_Finder/usage.md
43 | - jupyter API usage: CGI_Finder/API_usage.ipynb
44 | - Command line usage: CGI_Finder/CLI_usage.ipynb
45 | - Contributing: contributing.md
46 | - Code of conduct: code_of_conduct.md
47 | - Licence: licence.md
48 | - Changelog: changelog.md
49 |
50 | # Theme customization
51 | theme:
52 | name: 'material'
53 | highlightjs: true
54 | font: false
55 |
56 | markdown_extensions:
57 | - admonition
58 | - codehilite
59 | - pymdownx.betterem:
60 | smart_enable: all
61 | - pymdownx.caret
62 | - pymdownx.critic
63 | - pymdownx.details
64 | - pymdownx.emoji:
65 | emoji_generator: !!python/name:pymdownx.emoji.to_svg
66 | - pymdownx.inlinehilite
67 | - pymdownx.magiclink:
68 | repo_url_shorthand: true
69 | user: a-slide
70 | repo: pycoMeth
71 | - pymdownx.mark
72 | - pymdownx.smartsymbols
73 | - pymdownx.superfences
74 | - pymdownx.tasklist:
75 | custom_checkbox: true
76 | - pymdownx.tilde
77 |
78 | plugins:
79 | - mknotebooks
80 |
-------------------------------------------------------------------------------- /pycoMeth/CGI_Finder.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
4 | # Standard library imports
5 | from collections import OrderedDict, namedtuple, Counter
6 |
7 | # Third party imports
8 | from tqdm import trange
9 |
10 | # Third party imports
11 | from pyfaidx import Fasta
12 |
13 | # Local imports
14 | from pycoMeth.common import *
15 |
16 | # ~~~~~~~~~~~~~~~~~~~~~~~~CpG_Comp MAIN CLASS~~~~~~~~~~~~~~~~~~~~~~~~#
17 |
18 |
19 | def CGI_Finder(
20 | ref_fasta_fn: str,
21 | output_tsv_fn: str = None,
22 | output_bed_fn: str = None,
23 | merge_gap: int = 0,
24 | min_win_len: int = 200,
25 | min_CG_freq: float = 0.5,
26 | min_obs_CG_ratio: float = 0.6,
27 | verbose: bool = False,
28 | quiet: bool = False,
29 | progress: bool = False,
30 | **kwargs
31 | ):
32 | """
33 | Simple method to find putative CpG islands in DNA sequences by using a sliding window and merging
34 | overlapping windows satisfying the CpG island definition.
35 | Results can be saved in bed and tsv format
36 | * ref_fasta_fn
37 | Reference file used for alignment in Fasta format (ideally already indexed with samtools faidx)
38 | * output_bed_fn
39 | Path to write a summary result file in BED format (At least 1 output file is required)
40 | * output_tsv_fn
41 | Path to write a more extensive result report in TSV format (At least 1 output file is required)
42 | * merge_gap
43 | Merge nearby CpG islands within a given distance in bases
44 | * min_win_len
45 | Length of the minimal window containing CpG; used as the sliding window length
46 | * min_CG_freq
47 | Minimal C+G frequency in a window to be counted as a valid CpG island
48 | * min_obs_CG_ratio
49 | Minimal observed CG dinucleotide frequency over the expected distribution in a window to be counted as a valid CpG island
50 | """
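# Worked example of the island criteria (illustrative numbers): a 200 bp window
# containing 60 Cs, 50 Gs and 12 CpG dinucleotides has CG_freq = (60 + 50) / 200 = 0.55
# and obs/exp = 12 / (60 * 50 / 200) = 0.8, so it satisfies the default thresholds
# (min_CG_freq = 0.5, min_obs_CG_ratio = 0.6).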
51 |
52 | # Init method
53 | opt_summary_dict = opt_summary(local_opt=locals())
54 | log = get_logger(name="pycoMeth_CGI_Finder", verbose=verbose, quiet=quiet)
55 |
56 | log.warning("Checking options and input files")
57 | log_dict(opt_summary_dict, log.debug, "Options summary")
58 |
59 | # Init collections
60 | counter = Counter()
61 |
62 | # At least one output file is required, otherwise it doesn't make any sense
63 | log.debug("Checking required output")
64 | if not output_bed_fn and not output_tsv_fn:
65 | raise pycoMethError("At least 1 output file is required (-t or -b)")
66 |
67 | log.warning("Parsing reference fasta file")
68 | try:
69 | with Fasta(ref_fasta_fn) as fasta_fp:
70 | with CGI_Writer(bed_fn=output_bed_fn, tsv_fn=output_tsv_fn, verbose=verbose) as writer:
71 |
72 | # Iterate over reference sequences in fasta file
73 | for seq in fasta_fp:
74 |
75 | # Hard copy of seq and cast to lower case
76 | seq_name = seq.name
77 | seq = str(seq).lower()
78 | log.info("Parsing Reference sequence: {}".format(seq_name))
79 | counter["Number of reference sequences"] += 1
80 |
81 | # Loop control counters
82 | valid_win_start = valid_win_end = 0
83 | previous_valid = False
84 |
85 | # Compute and evaluate first window
86 | c_count = g_count = cg_count = 0
87 | for i in range(min_win_len):
88 | if seq[i] == "c":
89 | c_count += 1
90 | elif seq[i] == "g":
91 | g_count += 1
92 | if seq[i : i + 2] == "cg":
93 | cg_count += 1
94 |
95 | if valid_window(c_count, g_count, cg_count, min_win_len, min_CG_freq, min_obs_CG_ratio):
96 | counter["Valid minimal size windows"] += 1
97 | valid_win_start = 0
98 | valid_win_end = min_win_len  # the first window spans [0, min_win_len)
99 | previous_valid = True
100 |
101 | for i in trange(
102 | 1,
103 | len(seq) - min_win_len,
104 | unit=" bases",
105 | unit_scale=True,
106 | desc="\tProgress",
107 | disable=not progress,
108 | ):
109 |
110 | # Decrement counters based on changes at previous start
111 | prev_start = i - 1
112 | if seq[prev_start] == "c":
113 | c_count -= 1
114 | elif seq[prev_start] == "g":
115 | g_count -= 1
116 | if seq[prev_start : prev_start + 2] == "cg":
117 | cg_count -= 1
118 |
119 | # Increment counters based on changes at new end
120 | end = i + min_win_len
121 | if seq[end] == "c":
122 | c_count += 1
123 | elif seq[end] == "g":
124 | g_count += 1
125 | if seq[end - 1 : end + 1] == "cg":
126 | cg_count += 1
127 |
128 | # Evaluate windows
129 | if valid_window(c_count, g_count, cg_count, min_win_len, min_CG_freq, min_obs_CG_ratio):
130 | counter["Valid minimal size windows"] += 1
131 |
132 | # Previous overlapping window was valid => extend end
133 | if previous_valid:
134 | valid_win_end = i + min_win_len
135 |
136 | # Special case where reaching end with valid window => save window
137 | if i == len(seq) - min_win_len - 1:
138 | counter["Valid merged windows"] += 1
139 | win_len, win_cg_count, win_cg_freq, win_obs_exp = compute_win(
140 | seq[valid_win_start:valid_win_end]
141 | ) # Sometimes valid merged windows do not exactly match the definition of a proper CpG island
142 | writer.write(
143 | seq_name,
144 | valid_win_start,
145 | valid_win_end,
146 | win_len,
147 | win_cg_count,
148 | win_cg_freq,
149 | win_obs_exp,
150 | )
151 |
152 | # Not overlapping a previous valid
window => Start new one 153 | else: 154 | valid_win_start = i 155 | valid_win_end = i + min_win_len 156 | 157 | previous_valid = True 158 | 159 | # A previous overlapping windows was valid and getting out of the valid window or reaching end of sequence => save window 160 | elif i > (valid_win_end + merge_gap) or i == len(seq) - min_win_len - 1: 161 | if previous_valid: 162 | counter["Valid merged windows"] += 1 163 | win_len, win_cg_count, win_cg_freq, win_obs_exp = compute_win( 164 | seq[valid_win_start:valid_win_end] 165 | ) 166 | writer.write( 167 | seq_name, 168 | valid_win_start, 169 | valid_win_end, 170 | win_len, 171 | win_cg_count, 172 | win_cg_freq, 173 | win_obs_exp, 174 | ) 175 | 176 | previous_valid = False 177 | 178 | finally: 179 | # Print counters 180 | log_dict(counter, log.info, "Results summary") 181 | 182 | 183 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~Comp_Writer HELPER CLASS~~~~~~~~~~~~~~~~~~~~~~~~~~~# 184 | class CGI_Writer: 185 | """Extract data for valid sites and write to BED and/or TSV file""" 186 | 187 | def __init__(self, bed_fn=None, tsv_fn=None, verbose=True): 188 | """""" 189 | self.log = get_logger(name="Comp_Writer", verbose=verbose) 190 | self.bed_fn = bed_fn 191 | self.tsv_fn = tsv_fn 192 | self.bed_fp = self._init_bed() if bed_fn else None 193 | self.tsv_fp = self._init_tsv() if tsv_fn else None 194 | 195 | # ~~~~~~~~~~~~~~PUBLIC METHODS~~~~~~~~~~~~~~# 196 | def write(self, chrom, start, end, length, n_cpg, cg_freq, obs_exp): 197 | """""" 198 | if self.bed_fn: 199 | self._write_bed(chrom, start, end) 200 | if self.tsv_fn: 201 | self._write_tsv(chrom, start, end, length, n_cpg, cg_freq, obs_exp) 202 | 203 | def __enter__(self): 204 | self.log.debug("Opening Writer") 205 | return self 206 | 207 | def __exit__(self, exception_type, exception_val, trace): 208 | self.log.debug("Closing Writer") 209 | for fp in (self.bed_fp, self.tsv_fp): 210 | try: 211 | fp.close() 212 | except: 213 | pass 214 | 215 | # ~~~~~~~~~~~~~~PRIVATE METHODS~~~~~~~~~~~~~~# 216 | def _init_bed(self): 217 | """Open BED file and write file header""" 218 | self.log.debug("Initialise output bed file") 219 | mkbasedir(self.bed_fn, exist_ok=True) 220 | fp = open(self.bed_fn, "w") 221 | fp.write("track name=CpG_islands\n") 222 | return fp 223 | 224 | def _write_bed(self, chrom, start, end): 225 | """Write line to BED file""" 226 | self.bed_fp.write("{}\t{}\t{}\n".format(chrom, start, end)) 227 | 228 | def _init_tsv(self): 229 | """Open TSV file and write file header""" 230 | self.log.debug("Initialise output tsv file") 231 | mkbasedir(self.tsv_fn, exist_ok=True) 232 | fp = open(self.tsv_fn, "w") 233 | fp.write("chromosome\tstart\tend\tlength\tnum_CpG\tCG_freq\tobs_exp_freq\n") 234 | return fp 235 | 236 | def _write_tsv(self, chrom, start, end, length, n_cpg, cg_freq, obs_exp): 237 | """Write line to TSV file""" 238 | self.tsv_fp.write( 239 | "{}\t{}\t{}\t{}\t{}\t{:.3f}\t{:.3f}\n".format(chrom, start, end, length, n_cpg, cg_freq, obs_exp) 240 | ) 241 | 242 | 243 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~Helper Functions~~~~~~~~~~~~~~~~~~~~~~~~~~~# 244 | def valid_window(c_count, g_count, cg_count, min_win_len=200, min_CG_freq=0.5, min_obs_CG_ratio=0.6): 245 | cg_freq = (c_count + g_count) / min_win_len 246 | if cg_freq < min_CG_freq: 247 | return False 248 | 249 | # Safely compute obs_exp in case of no Cs or Gs on windows 250 | try: 251 | obs_exp = cg_count / (c_count * g_count / min_win_len) 252 | except ZeroDivisionError: 253 | obs_exp = 0 254 | 255 | if obs_exp < min_obs_CG_ratio: 256 | return False 257 | 258 | 
return True
259 |
260 |
261 | def compute_win(win_seq):
262 | win_len = len(win_seq)
263 | c_count = g_count = cg_count = 0
264 |
265 | for j in range(win_len):
266 | if win_seq[j] == "c":
267 | c_count += 1
268 | elif win_seq[j] == "g":
269 | g_count += 1
270 | if win_seq[j : j + 2] == "cg":
271 | cg_count += 1
272 |
273 | cg_freq = (c_count + g_count) / win_len
274 | if c_count == 0 or g_count == 0:
275 | obs_exp = 0
276 | else:
277 | obs_exp = cg_count / (c_count * g_count / win_len)
278 |
279 | return (win_len, cg_count, cg_freq, obs_exp)
280 |
-------------------------------------------------------------------------------- /pycoMeth/CoordGen.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # ~~~~~~~~~~~~~~IMPORTS~~~~~~~~~~~~~~#
4 | # Standard library imports
5 | from __future__ import annotations
6 | import os
7 | from collections import OrderedDict, namedtuple, Counter
8 |
9 |
10 | # Third party imports
11 | from pyfaidx import Fasta
12 |
13 | # Local imports
14 | from pycoMeth.common import *
15 |
16 | # ~~~~~~~~~~~~~~CLASS~~~~~~~~~~~~~~#
17 | class CoordGen:
18 | def __init__(self, ref_fasta_fn, verbose: bool = False, quiet: bool = False):
19 | """
20 | Initialise CoordGen
21 | * ref_fasta_fn
22 | Reference file used for alignment in Fasta format (ideally already indexed with samtools faidx)
23 | """
24 | # Init logger
25 | self.log = get_logger(name="pycoMeth_CpG_Comp", verbose=verbose, quiet=quiet)
26 |
27 | # Import chr list
28 | self.chr_name_id = OrderedDict()
29 | self.chr_name_len = OrderedDict()
30 |
31 | self.log.debug("Loading Fasta index:{}".format(ref_fasta_fn))
32 | with Fasta(ref_fasta_fn) as fa:
33 | for i, ref in enumerate(fa):
34 | self.chr_name_id[ref.name] = i
35 | self.chr_name_len[ref.name] = len(ref)
36 |
37 | self.log.debug("Found {} reference sequences".format(len(self.chr_name_id)))
38 |
39 | def __iter__(self):
40 | for name in self.chr_name_id.keys():
41 | yield ("Name:{}\tID:{}\tLength:{}".format(name, self.chr_name_id[name], self.chr_name_len[name]))
42 |
43 | def __call__(self, chr_name, start, end) -> Coord:
44 | """
45 | Check passed coordinates and generate a Coord object containing a chromosome id, start, and end.
46 | The chromosome id is an integer corresponding to the order of the reference in the index.
47 | The returned objects are thus easily sortable.
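Illustrative example (sequence name hypothetical): if "chr1" is the first
record in the fasta index, coordgen("chr1", 100, 200) returns a Coord with
chr_id 0, so sorted() orders Coord objects by reference order, then by start and end.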
48 | * chr_name
49 | Name of the chromosome
50 | * start
51 | Start of the interval (has to be between 0 and chromosome length)
52 | * end
53 | End of the interval (has to be between start and chromosome length)
54 | """
55 | # Check Chr
56 | if chr_name not in self.chr_name_id:
57 | raise CoordTupleError("Invalid chromosome name: {}".format(chr_name))
58 |
59 | # Extract chromosome len and id
60 | chr_len = self.chr_name_len[chr_name]
61 | chr_id = self.chr_name_id[chr_name]
62 |
63 | # Check Start
64 | try:
65 | start = int(start)
66 | except (ValueError, TypeError):
67 | raise CoordTupleError("Start coordinate is not a valid integer: {} ".format(start))
68 | if start < 0 or start > chr_len:
69 | raise CoordTupleError("Invalid value for start coordinate: {} [0:{}]".format(start, chr_len))
70 |
71 | # Check End
72 | try:
73 | end = int(end)
74 | except (ValueError, TypeError):
75 | raise CoordTupleError("End coordinate is not a valid integer: {} ".format(end))
76 | if end < start or end > chr_len:
77 | raise CoordTupleError("Invalid value for end coordinate: {} [{}:{}]".format(end, start, chr_len))
78 |
79 | return Coord(chr_id, chr_name, start, end)
80 |
81 |
82 | class Coord:
83 | def __init__(self, chr_id, chr_name, start, end):
84 | self.chr_id = chr_id
85 | self.chr_name = chr_name
86 | self.start = start
87 | self.end = end
88 |
89 | def __repr__(self):
90 | return "chr_name:{}, start:{}, end:{}".format(self.chr_name, self.start, self.end)
91 |
92 | def __str__(self):
93 | return "{}:{}-{}".format(self.chr_name, self.start, self.end)
94 |
95 | def __hash__(self):
96 | return hash((self.chr_id, self.start, self.end))
97 |
98 | @property
99 | def center(self):
100 | return self.start + (self.end - self.start) / 2
101 |
102 | # Comparison methods
103 | def __eq__(self, other):
104 | return (self.chr_id, self.start, self.end) == (other.chr_id, other.start, other.end)
105 |
106 | def __ne__(self, other):
107 | return (self.chr_id, self.start, self.end) != (other.chr_id, other.start, other.end)
108 |
109 | def __lt__(self, other):
110 | return (self.chr_id, self.start, self.end) < (other.chr_id, other.start, other.end)
111 |
112 | def __gt__(self, other):
113 | return (self.chr_id, self.start, self.end) > (other.chr_id, other.start, other.end)
114 |
115 | def __le__(self, other):
116 | return (self.chr_id, self.start, self.end) <= (other.chr_id, other.start, other.end)
117 |
118 | def __ge__(self, other):
119 | return (self.chr_id, self.start, self.end) >= (other.chr_id, other.start, other.end)
120 |
121 | def center_comp(self, other):
122 | # different chromosome
123 | if other.chr_id < self.chr_id:
124 | return "lower"
125 | elif other.chr_id > self.chr_id:
126 | return "greater"
127 | # same chromosome
128 | else:
129 | if other.center < self.start:
130 | return "lower"
131 | elif other.center > self.end:
132 | return "greater"
133 | else:
134 | return "inside"
135 |
136 |
137 | class CoordTupleError(Exception):
138 | """ Basic exception class for CoordGen coordinate errors """
139 |
140 | pass
141 |
-------------------------------------------------------------------------------- /pycoMeth/FileParser.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # ~~~~~~~~~~~~~~IMPORTS~~~~~~~~~~~~~~#
4 | # Standard library imports
5 | import os
6 | from collections import *
7 | import gzip
8 | from glob import iglob
9 |
10 | # Local imports
11 | from pycoMeth.common import *
12 |
13 | # ~~~~~~~~~~~~~~CLASS~~~~~~~~~~~~~~#
14 |
15 |
16 | class FileParser:
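"""Streaming parser for one or several (optionally gzipped) field-delimited
files: yields each data line as a namedtuple, skips comment lines, and keeps
per-file parsing counters."""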
17 | def __init__(
18 | self,
19 | fn,
20 | label="",
21 | colnames=False,
22 | first_line_header=True,
23 | sep="\t",
24 | comment="#",
25 | auto_numeric=False,
26 | include_byte_len=False,
27 | dtypes={},
28 | force_dtypes=False,
29 | force_col_len=True,
30 | verbose=False,
31 | quiet=False,
32 | **kwargs
33 | ):
34 | """
35 | Open a parser for one or several field-delimited files
36 | * fn
37 | Path to a field-delimited file (a list of paths or a glob pattern is also accepted)
38 | * label
39 | Label for the file or file group
40 | * colnames
41 | List of column names to use if they are not in the first file line
42 | * sep
43 | field separator
44 | * comment
45 | skip any line starting with this string
46 | * auto_numeric
47 | Try to automatically cast field values to int or float
48 | * include_byte_len
49 | Append the byte length of each line to the parsed fields
50 | * dtypes
51 | Dict mapping fields (based on colnames) to a given python type to cast them into
52 | * force_dtypes
53 | Raise an error if type casting fails
54 | * kwargs
55 | Allow passing extra options such as verbose, quiet and progress
56 | """
57 |
58 | # Init logger and counter
59 | self.log = get_logger(name="pycoMeth_FileParser", verbose=verbose, quiet=quiet)
60 | self.counter = Counter()
61 |
62 | # Save self variables
63 | self.label = label
64 | self.sep = sep
65 | self.comment = comment
66 | self.first_line_header = first_line_header
67 | self.include_byte_len = include_byte_len
68 | self.auto_numeric = auto_numeric
69 | self.force_dtypes = force_dtypes
70 | self.force_col_len = force_col_len
71 |
72 | # Input file opening
73 | self.f_list = self._open_files(fn)
74 | if len(self.f_list) == 0:
75 | raise ValueError(f"File does not exist {fn}")
76 |
77 | # Init extra private variables
78 | self._previous_index = -1
79 | self._current_index = 0
80 | self._header_len = 0
81 | self._current = None
82 | self._previous = None
83 |
84 | # Define colnames based on provided list of names
85 | if colnames and isinstance(colnames, (list, tuple)):
86 | self.colnames = colnames
87 |
88 | # Define colnames based on file header.
Needs to be identical across all files
89 | elif first_line_header:
90 | self.colnames = []
91 | for fn, fp in self.f_list:
92 | if not self.colnames:
93 | self.log.debug("Reading header from file: {}".format(fn))
94 | self.colnames = self._get_first_line_header(fp)
95 | elif self.colnames != self._get_first_line_header(fp):
96 | raise FileParserError("Inconsistent headers between input files {}".format(fn))
97 | self.log.debug("Column names from header: '{}'".format(" / ".join(self.colnames)))
98 | else:
99 | raise ValueError("Invalid column name option")
100 |
101 | # Define input file type:
102 | if all_in(["chromosome", "start", "end", "sequence", "num_motifs", "median_llr", "llr_list"], self.colnames):
103 | self.input_type = "CpG_Aggregate"
104 | elif all_in(["chromosome", "start", "end", "num_motifs", "median_llr", "llr_list", "pos_list"], self.colnames):
105 | self.input_type = "Interval_Aggregate"
106 | elif all_in(
107 | [
108 | "chromosome",
109 | "strand",
110 | "start",
111 | "end",
112 | "read_name",
113 | "log_lik_ratio",
114 | "log_lik_methylated",
115 | "log_lik_unmethylated",
116 | ],
117 | self.colnames,
118 | ):
119 | self.input_type = "call_methylation"
120 | else:
121 | self.input_type = "unknown"
122 | self.log.debug("Input file type: {}".format(self.input_type))
123 |
124 | # Save initial number of columns
125 | self.ncols = len(self.colnames)
126 |
127 | # Define custom namedtuple to be returned as a line
128 | if include_byte_len:
129 | self.colnames.append("byte_len")
130 | self.lt = namedtuple("lt", self.colnames)
131 |
132 | # Set types to try to cast data in
133 | self.dtypes_index = self._set_types(dtypes)
134 |
135 | # ~~~~~~~~~~~~~~MAGIC AND PROPERTY METHODS~~~~~~~~~~~~~~#
136 |
137 | def __len__(self):
138 | size = 0
139 | for fn, fp in self.f_list:
140 | size += int(os.path.getsize(fn))
141 | return size - self._header_len
142 |
143 | def __enter__(self):
144 | return self
145 |
146 | def close(self):
147 | for fn, fp in self.f_list:
148 | try:
149 | self.log.debug("Closing file:{}".format(fn))
150 | fp.close()
151 | except Exception as E:
152 | self.log.warning(E)
153 |
154 | def __exit__(self, exception_type, exception_val, trace):
155 | self.close()
156 |
157 | def __iter__(self):
158 | for i, (fn, fp) in enumerate(self.f_list):
159 | self.log.debug("Starting to parse file {}".format(fn))
160 | self._current_index = i
161 | for line in fp:
162 | self.counter["Lines Parsed"] += 1
163 | if line.startswith(self.comment):
164 | self.counter["Comment lines skipped"] += 1
165 | continue
166 | try:
167 | line = self._parse_line(line)
168 | self._previous = self._current
169 | self._current = line
170 | self.counter["Line successfully parsed"] += 1
171 | yield line
172 | except (FileParserError, TypeError) as E:
173 | self.counter["Malformed or Invalid Lines"] += 1
174 | self.log.debug(E)
175 | self.log.debug("File {}: Invalid line {}".format(fn, line))
176 | self.log.debug("End of file: {}".format(fn))
177 | self.log.debug("All files done")
178 |
179 | # ~~~~~~~~~~~~~~PUBLIC METHODS~~~~~~~~~~~~~~#
180 |
181 | def current(self):
182 | return self._current
183 |
184 | def previous(self):
185 | return self._previous
186 |
187 | def next(self):
188 | # try to read a line
189 | while True:
190 | try:
191 | fn, fp = self.f_list[self._current_index]
192 | if self._current_index > self._previous_index:
193 | self.log.debug("Starting to parse file {}".format(fn))
194 | self._previous_index = self._current_index
195 | line = next(fp)
196 | if
line.startswith(self.comment):
197 | self.counter["Comment lines skipped"] += 1
198 | continue
199 | line = self._parse_line(line)
200 | self._previous = self._current
201 | self._current = line
202 | self.counter["Line successfully parsed"] += 1
203 | return line
204 |
205 | # If one of the files is finished, start the next one
206 | except StopIteration:
207 | self.log.debug("End of file: {}".format(fn))
208 | self._current_index += 1
209 |
210 | # End condition once all files were read
211 | except IndexError:
212 | self.log.debug("All files done")
213 | raise StopIteration
214 |
215 | except FileParserError:
216 | self.counter["Malformed or Invalid Lines"] += 1
217 |
218 | # ~~~~~~~~~~~~~~PRIVATE METHODS~~~~~~~~~~~~~~#
219 |
220 | def _get_first_line_header(self, fp):
221 | header_line = next(fp)
222 | self._header_len += len(header_line)
223 | return header_line.rstrip().split(self.sep)
224 |
225 | def _parse_line(self, line):
226 | byte_len = len(line)
227 | line = line.rstrip().split(self.sep)
228 |
229 | # if the length of the line is inconsistent with the header
230 | if len(line) != self.ncols:
231 | # Raise an error if required (default)
232 | if self.force_col_len:
233 | raise FileParserError("Invalid Number of fields found")
234 | # Else truncate extra fields
235 | else:
236 | line = line[: self.ncols]
237 |
238 | # Try to autocast to int or float
239 | if self.auto_numeric:
240 | for i in range(len(line)):
241 | val = line[i]
242 | try:
243 | line[i] = int(val)
244 | except ValueError:
245 | try:
246 | line[i] = float(val)
247 | except ValueError:
248 | pass
249 |
250 | # Cast values according to provided types
251 | elif self.dtypes_index:
252 | for i, dtype in self.dtypes_index.items():
253 | try:
254 | line[i] = dtype(line[i])
255 | except Exception:
256 | if self.force_dtypes:
257 | raise FileParserError("Cannot cast field to required type")
258 |
259 | # Add byte length if needed
260 | if self.include_byte_len:
261 | line.append(byte_len)
262 |
263 | # Return namedtuple
264 | return self.lt(*line)
265 |
266 | def _set_types(self, dtypes):
267 | """"""
268 | dtypes_index = OrderedDict()
269 | if dtypes:
270 | for i, name in enumerate(self.colnames):
271 | if name in dtypes:
272 | dtypes_index[i] = dtypes[name]
273 | return dtypes_index
274 |
275 | def _open_files(self, fn_list):
276 | """Transparently open files from paths, lists or glob patterns, gzipped or not"""
277 | f_list = []
278 |
279 | # Cast single file or regex to list
280 | if isinstance(fn_list, str):
281 | fn_list = [fn_list]
282 |
283 | if isinstance(fn_list, (list, tuple, set)):
284 | for fn_regex in fn_list:
285 | for fn in iglob(fn_regex):
286 | self.counter["Input files"] += 1
287 | if fn.endswith(".gz"):
288 | self.log.debug("Opening file {} in gzip mode".format(fn))
289 | fp = gzip.open(fn, "rt")
290 | else:
291 | self.log.debug("Opening file {} in normal mode".format(fn))
292 | fp = open(fn, "r")
293 | f_list.append((fn, fp))
294 |
295 | return f_list
296 |
297 | else:
298 | raise ValueError("Invalid file type")
299 |
300 |
301 | class FileParserError(Exception):
302 | """ Basic exception class for FileParserError """
303 |
304 | pass
305 |
-------------------------------------------------------------------------------- /pycoMeth/Meth_Comp.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
4 | # Standard library imports
5 | import itertools
6 | import random
7 | from typing
import IO, List, Dict, Any, Generator
8 | from math import sqrt
9 | import fileinput
10 |
11 | # Third party imports
12 | from tqdm import tqdm
13 | import numpy as np
14 | import pandas as pd
15 | from scipy.stats import kruskal, mannwhitneyu, wilcoxon, fisher_exact, chi2_contingency
16 | from statsmodels.stats.multitest import multipletests
17 | from multiprocessing import Pool
18 | from meth5 import MetH5File
19 |
20 | # Local imports
21 | from pycoMeth.common import *
22 | from pycoMeth.FileParser import FileParser
23 | from pycoMeth.CoordGen import CoordGen, Coord
24 | from pycoMeth.loader import MetH5Loader
25 |
26 | # ~~~~~~~~~~~~~~~~~ Multiprocessing Worker methods ~~~~~~~~~~~~~~~~~~~~~~~~~~#
27 |
28 |
29 | class MethCompWorker:
30 | def __init__(
31 | self,
32 | h5_read_groups_key,
33 | sample_id_list,
34 | h5_file_list,
35 | min_abs_llr,
36 | min_samples,
37 | pvalue_method,
38 | min_num_reads_per_interval,
39 | pvalue_threshold,
40 | hypothesis,
41 | do_independent_hypothesis_weighting,
42 | ):
43 | self.min_abs_llr = min_abs_llr
44 | self.min_samples = min_samples
45 | self.pvalue_method = pvalue_method
46 | self.min_pval = np.nextafter(float(0), float(1))
47 | self.min_num_reads_per_interval = min_num_reads_per_interval
48 | self.sample_hf_files: Dict[str, MetH5File] = {}
49 | self.llr_threshold = 2.0 # TODO expose parameter
50 | self.min_diff_bs = 0.25 # TODO expose parameter
51 | self.pvalue_threshold = pvalue_threshold
52 | self.hypothesis = hypothesis
53 | self.do_independent_hypothesis_weighting = do_independent_hypothesis_weighting
54 |
55 | self.loader = MetH5Loader(h5_read_groups_key, sample_id_list, h5_file_list)
56 |
57 | def compute_ihw_weight(self, test_values: List[List[float]]) -> float:
58 | flat_list = [v for vl in test_values for v in vl]
59 | mean = sum(flat_list) / len(flat_list)
60 | variance = sum((v - mean) ** 2 for v in flat_list) / (len(flat_list) - 1)
61 | std = sqrt(variance)
62 | return std
63 |
64 | def compute_site_betascores(
65 | self, raw_pos_list: List[List[Any]], raw_llr_list: List[List[float]]
66 | ) -> List[List[float]]:
67 | """
68 | Computes beta scores (methylation frequency) per site, from two parallel lists:
69 | :param raw_pos_list: position of each llr, used to group calls by site
70 | :param raw_llr_list: one list of log-likelihood ratios per sample to be compared
71 | :return: one list of per-site methylation frequencies per sample
72 | """
73 | unique_pos = list(set().union(*[set(pos) for pos in raw_pos_list]))
74 | met_list = [
75 | [sum(llr > self.llr_threshold for s_pos, llr in zip(poss, llrs) if s_pos == pos) for pos in unique_pos]
76 | for poss, llrs in zip(raw_pos_list, raw_llr_list)
77 | ]
78 | nonambig_list = [
79 | [
80 | sum(abs(llr) > self.llr_threshold for s_pos, llr in zip(s_poss, llrs) if s_pos == pos)
81 | for pos in unique_pos
82 | ]
83 | for s_poss, llrs in zip(raw_pos_list, raw_llr_list)
84 | ]
85 | is_valid_idx = [True for _ in nonambig_list[0]]
86 | for na in nonambig_list:
87 | is_valid_idx = [a and b for a, b in zip(is_valid_idx, [n > 0 for n in na])]
88 | met_list = [[b for b, i in zip(met, is_valid_idx) if i] for met in met_list]
89 | nonambig_list = [[b for b, i in zip(nonambig, is_valid_idx) if i] for nonambig in nonambig_list]
90 | bs_list = [[m / na for m, na in zip(met, nonambig)] for met, nonambig in zip(met_list, nonambig_list)]
91 | return bs_list
92 |
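# Worked example for the site beta scores above (illustrative numbers): with
# llr_threshold = 2.0, calls with llrs [3.1, -4.0, 0.5] at one site yield 1
# methylated call (llr > 2.0) out of 2 confident calls (|llr| > 2.0), hence a
# beta score of 1 / 2 = 0.5; the 0.5 call is ambiguous and is ignored.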
93 | def compute_read_betascores(
94 | self, raw_read_list: List[List[Any]], raw_llr_list: List[List[float]]
95 | ) -> List[List[float]]:
96 | """
97 | Computes beta scores (methylation frequency) per read, from two parallel lists:
98 | :param raw_read_list: read name for each llr, used to group calls by read
99 | :param raw_llr_list: one list of log-likelihood ratios per sample to be compared
100 | :return: one list of per-read methylation rates per sample
101 | """
102 | num_samples = len(raw_llr_list)
103 | bs_list = []
104 | for sample in range(num_samples):
105 | unique_reads = list(set(raw_read_list[sample]))
106 | if len(unique_reads) > 1000:
107 | # Excessive coverage indicates problematic mapping - subsample to avoid a huge memory spike
108 | unique_reads = [r for r in unique_reads if random.random() < 100 / len(unique_reads)]
109 | met_list = [
110 | sum(
111 | llr > self.llr_threshold
112 | for read, llr in zip(raw_read_list[sample], raw_llr_list[sample])
113 | if read == r
114 | )
115 | for r in unique_reads
116 | ]
117 | nonambig_list = [
118 | sum(
119 | abs(llr) > self.llr_threshold
120 | for read, llr in zip(raw_read_list[sample], raw_llr_list[sample])
121 | if read == r
122 | )
123 | for r in unique_reads
124 | ]
125 | bs = [m / na for m, na in zip(met_list, nonambig_list) if na > 0]
126 | bs_list.append(bs)
127 | return bs_list
128 |
129 | def compute_contingency_table(self, raw_llr_list: List[List[float]]) -> List[List[float]]:
130 | """
131 | Computes a contingency table, as for a Fisher exact test, by thresholding llrs and counting
132 | methylated/unmethylated calls for each sample
133 | :param raw_llr_list: one list of log-likelihood ratios per sample to be compared
134 | :return: contingency table of shape (n_samples, 2)
135 | """
136 | num_samples = len(raw_llr_list)
137 | contingency_table = []
138 | for sample in range(num_samples):
139 | n_met = sum(llr > self.llr_threshold for llr in raw_llr_list[sample])
140 | n_called = sum(abs(llr) > self.llr_threshold for llr in raw_llr_list[sample])
141 | n_unmet = n_called - n_met
142 | contingency_table.append([n_met, n_unmet])
143 | return contingency_table
144 |
145 | def compute_posthoc_test(self, test_values):
146 | try:
147 | posthoc_pvalue_list = []
148 | for sample_one in range(len(test_values)):
149 | values_one = test_values[sample_one]
150 | if self.pvalue_method == "chi_squared":
151 | # If the original test was a chi_squared test, we use Fisher's exact test as the post-hoc test
152 | # on the corresponding rows of the contingency table
153 | values_others = [sum(v[i] for j, v in enumerate(test_values) if j != sample_one) for i in range(2)]
154 | _, pvalue = fisher_exact([values_one, values_others])
155 | elif self.pvalue_method == "KW":
156 | # Merge sample values before and after
157 | values_others = [v for vl in test_values[:sample_one] for v in vl]
158 | values_others += [v for vl in test_values[sample_one + 1 :] for v in vl]
159 | if abs(np.mean(values_others) - np.mean(values_one)) > 0.25: # FIXME expose parameter
160 | _, pvalue = mannwhitneyu(values_one, values_others)
161 | else:
162 | pvalue = 1
163 | else:
164 | raise ValueError("Internal error: Attempted to compute post-hoc when not appropriate")
165 |
166 | posthoc_pvalue_list.append(pvalue)
167 | except ValueError:
168 | posthoc_pvalue_list = [1.0] * len(test_values)
169 | return posthoc_pvalue_list
170 |
171 | def compute_pvalue(self, interval, label_list, raw_llr_list, raw_pos_list, raw_reads_list):
172 | counters_to_increase = []
173 | res = OrderedDict()
174 | res["chromosome"] = interval.chr_name
175 | res["start"] = interval.start
176 | res["end"] = interval.end
177 | avg_coverage = [len(pos) / len(set(pos)) for pos in raw_pos_list]
178 | non_ambig_llr_count = [sum(1 for l in llrs if abs(l) > self.llr_threshold) for
llrs in raw_llr_list] 179 | pos_count = [sum(1 for l in llrs if l > self.llr_threshold) for llrs in raw_llr_list] 180 | n_samples = sum(1 for c in non_ambig_llr_count if c > 0) 181 | overall_bs_list = [pos / total for pos, total in zip(pos_count, non_ambig_llr_count) if total > 0] 182 | 183 | # Collect median llr 184 | med_llr_list = [np.median(llrs) for llrs in raw_llr_list] 185 | 186 | # "Lazy load" this variable as a slight performance boon 187 | read_beta_scores = None 188 | 189 | if self.hypothesis == "llr_diff": 190 | test_values = raw_llr_list 191 | elif self.hypothesis == "bs_diff": 192 | if self.pvalue_method == "paired": 193 | test_values = self.compute_site_betascores(raw_pos_list, raw_llr_list) 194 | else: 195 | read_beta_scores = self.compute_read_betascores(raw_reads_list, raw_llr_list) 196 | test_values = read_beta_scores 197 | elif self.hypothesis == "count_dependency": 198 | test_values = self.compute_contingency_table(raw_llr_list) 199 | 200 | if n_samples < self.min_samples: 201 | # Not enough samples 202 | comment = "Insufficient samples" 203 | pvalue = np.nan 204 | elif all(len(vals) < 3 for vals in test_values) and self.pvalue_method == "paired": 205 | comment = "Insufficient coverage" 206 | pvalue = np.nan 207 | # Sufficient samples and effect size 208 | else: 209 | comment = "Valid" 210 | 211 | # Update counters result table 212 | counters_to_increase.append(comment) 213 | 214 | if len(overall_bs_list) > 0: 215 | difference = np.diff(overall_bs_list).tolist() 216 | else: 217 | difference = [] 218 | if comment == "Valid": 219 | post_hoc_pvalues = [] 220 | try: 221 | # Run stat test 222 | if self.pvalue_method == "KW": 223 | statistics, pvalue = kruskal(*test_values) 224 | if pvalue < self.pvalue_threshold: 225 | post_hoc_pvalues = self.compute_posthoc_test(test_values) 226 | elif self.pvalue_method == "MW": 227 | statistics, pvalue = mannwhitneyu(test_values[0], test_values[1], alternative="two-sided") 228 | elif self.pvalue_method == "paired": 229 | statistics, pvalue = wilcoxon(test_values[0], test_values[1]) 230 | elif self.pvalue_method == "fisher_exact": 231 | statistics, pvalue = fisher_exact(test_values) 232 | elif self.pvalue_method == "chi_squared": 233 | statistics, pvalue, _, _ = chi2_contingency(test_values) 234 | if pvalue < self.pvalue_threshold: 235 | post_hoc_pvalues = self.compute_posthoc_test(test_values) 236 | except ValueError: 237 | # This happens for example if all values are equal in mannwhitneyu 238 | pvalue = 1 239 | 240 | # Fix and categorize p-values 241 | if pvalue is np.nan or pvalue is None or pvalue > 1 or pvalue < 0: 242 | counters_to_increase.append("Sites with invalid pvalue") 243 | # Correct very low pvalues to minimal float size 244 | elif pvalue == 0: 245 | pvalue = self.min_pval 246 | 247 | # Compute statistic used for independent hypothesis weighting 248 | if self.do_independent_hypothesis_weighting: 249 | if read_beta_scores is None: 250 | read_beta_scores = self.compute_read_betascores(raw_reads_list, raw_llr_list) 251 | ihw_weight = self.compute_ihw_weight(read_beta_scores) 252 | 253 | res["pvalue"] = pvalue 254 | res["adj_pvalue"] = np.nan 255 | res["n_samples"] = n_samples 256 | if self.do_independent_hypothesis_weighting: 257 | res["ihw_weight"] = ihw_weight 258 | res["labels"] = list_to_str(label_list) 259 | res["med_llr_list"] = list_to_str(med_llr_list) 260 | res["raw_llr_list"] = list_to_str(raw_llr_list) 261 | res["difference"] = list_to_str(difference) 262 | if self.pvalue_method in {"KW", "chi_squared"}: 263 | 
res["post_hoc_pvalues"] = list_to_str(post_hoc_pvalues) 264 | res["comment"] = comment 265 | res["raw_pos_list"] = list_to_str(raw_pos_list) 266 | res["avg_coverage"] = list_to_str(avg_coverage) 267 | res["unique_cpg_pos"] = len(set(itertools.chain.from_iterable(raw_pos_list))) 268 | else: 269 | res["pvalue"] = np.nan 270 | res["adj_pvalue"] = np.nan 271 | res["n_samples"] = 0 272 | if self.do_independent_hypothesis_weighting: 273 | res["ihw_weight"] = 0.0 274 | res["labels"] = "[]" 275 | res["med_llr_list"] = "[]" 276 | res["raw_llr_list"] = "[]" 277 | res["comment"] = comment 278 | res["difference"] = "[]" 279 | if self.pvalue_method in {"KW", "chi_squared"}: 280 | res["post_hoc_pvalues"] = "[]" 281 | res["raw_pos_list"] = "[]" 282 | res["avg_coverage"] = "[]" 283 | res["unique_cpg_pos"] = "[]" 284 | 285 | return res, counters_to_increase 286 | 287 | def __call__(self, interval): 288 | try: 289 | label_list, raw_llr_list, raw_pos_list, raw_read_list = self.loader.read_raw_llrs(interval) 290 | return self.compute_pvalue(interval, label_list, raw_llr_list, raw_pos_list, raw_read_list) 291 | except: 292 | import traceback 293 | 294 | print(traceback.format_exc()) 295 | raise 296 | 297 | 298 | def initializer(args: Dict): 299 | """Initializes a worker object at the beginning when the 300 | multiprocessing pool is created and puts it in the global 301 | namespace.""" 302 | global worker 303 | worker = MethCompWorker(**args) 304 | 305 | 306 | def worker_function(*args): 307 | """Calls the work function of the worker object in the global 308 | namespace.""" 309 | return worker(*args) 310 | 311 | 312 | # ~~~~~~~~~~~~~~~~~~~~~~~~CpG_Comp MAIN CLASS~~~~~~~~~~~~~~~~~~~~~~~~# 313 | 314 | 315 | def Meth_Comp( 316 | h5_file_list: [str], 317 | ref_fasta_fn: str, 318 | read_groups_key: str = None, 319 | interval_bed_fn: str = None, 320 | output_bed_fn: str = None, 321 | output_tsv_fn: str = None, 322 | interval_size: int = 1000, 323 | min_num_reads_per_interval: int = 10, 324 | max_missing: int = 0, 325 | min_abs_llr: float = 2, 326 | sample_id_list: [str] = None, 327 | pvalue_adj_method: str = "fdr_bh", 328 | pvalue_threshold: float = 0.01, 329 | only_tested_sites: bool = False, 330 | verbose: bool = False, 331 | quiet: bool = False, 332 | progress: bool = False, 333 | paired_test: bool = False, 334 | worker_processes: int = 4, 335 | hypothesis: str = "bs_diff", 336 | do_independent_hypothesis_weighting: bool = False, 337 | **kwargs, 338 | ): 339 | """Compare methylation values for each CpG positions or intervals 340 | between n samples and perform a statistical test to evaluate if the 341 | positions are significantly different. For 2 samples a Mann_Whitney 342 | test is performed otherwise multiples samples are compared with a 343 | Kruskal Wallis test. pValues are adjusted for multiple tests using 344 | the Benjamini & Hochberg procedure for controlling the false 345 | discovery rate. 346 | 347 | * h5_file_list 348 | A list of MetH5 files containing methylation llr 349 | * read_groups_key 350 | Key in h5 file containing read groups to be used. 
(optional)
351 | * ref_fasta_fn
352 | Reference file used for alignment in Fasta format (ideally already indexed with samtools faidx)
353 | * interval_bed_fn
354 | SORTED bed file containing **non-overlapping** intervals to bin CpG data into (Optional) (can be gzipped)
355 | * interval_size
356 | Size of the sliding window from which to aggregate CpG site data if no BED file is provided
357 | * min_num_reads_per_interval
358 | Minimum number of reads per sample per interval. The entire interval will be discarded if one sample
359 | does not have sufficient coverage.
360 | * output_bed_fn
361 | Path to write a summary result file in BED format (At least 1 output file is required) (can be gzipped)
362 | * output_tsv_fn
363 | Path to write a more extensive result report in TSV format (At least 1 output file is required) (can be gzipped)
364 | * max_missing
365 | Maximum number of samples allowed to be missing for the test to still be performed
366 | * min_abs_llr
367 | Minimal llr boundary for negative and positive median llr.
368 | The test is only performed if at least one sample has a median llr above this value (methylated) and one sample has a median llr below its negative (unmethylated)
369 | * sample_id_list
370 | List of sample ids used to annotate results in the tsv file
371 | * pvalue_adj_method
372 | Method to use for pValue multiple test adjustment
373 | * pvalue_threshold
374 | Alpha parameter (family-wise error rate) for pValue adjustment
375 | * paired_test
376 | Use a paired test on site beta scores instead of an unpaired test on llrs
377 | * only_tested_sites
378 | Do not include sites that were not tested because of insufficient samples or effect size in the report
379 | * worker_processes
380 | Number of processes to be launched
381 | * hypothesis
382 | "llr_diff" if the hypothesis is a shift in llrs, "bs_diff" if the hypothesis is a shift in mean read methylation
383 | rate, or "count_dependency" if the hypothesis is dependencies between groups in the contingency table of
384 | methylated/unmethylated calls
385 | * do_independent_hypothesis_weighting
386 | Whether to include independent hypothesis weighting in the p-value adjustment
387 | """
388 | # Init method
389 | opt_summary_dict = opt_summary(local_opt=locals())
390 | log = get_logger(name="pycoMeth_CpG_Comp", verbose=verbose, quiet=quiet)
391 |
392 | log.warning("Checking options and input files")
393 | log_dict(opt_summary_dict, log.debug, "Options summary")
394 |
395 | # At least one output file is required, otherwise it doesn't make any sense
396 | log.debug("Checking required output")
397 |
398 | if not output_bed_fn and not output_tsv_fn:
399 | raise pycoMethError("At least 1 output file is required (-t or -b)")
400 |
401 | sample_id_list = MetH5Loader.interpret_sample_ids_from_arguments(sample_id_list, read_groups_key, h5_file_list)
402 |
403 | all_samples = len(sample_id_list)
404 |
405 | min_samples = all_samples - max_missing
406 |
407 | coordgen = CoordGen(ref_fasta_fn, verbose, quiet)
408 | log_list(coordgen, log.debug, "Coordinate reference summary")
409 |
410 | if interval_bed_fn:
411 | log.debug("Bed annotation generator")
412 |
413 | def intervals_gen_fun():
414 | return bed_intervals_gen(coordgen=coordgen, interval_bed_fn=interval_bed_fn)
415 |
416 | else:
417 | log.debug("Sliding window generator")
418 |
419 | def intervals_gen_fun():
420 | return sliding_intervals_gen(coordgen=coordgen, interval_size=interval_size)
421 |
422 | # Go through intervals once (should be cheap) to count how many we are investigating
423 | num_intervals = sum(1 for _ in intervals_gen_fun())
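# Counting up front lets the tqdm progress bar report an accurate total, but it
# exhausts the generator, which is why a fresh one is created below.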
424 | # Recreate the intervals generator
425 | intervals_gen = intervals_gen_fun()
426 |
427 | # 3+ samples = Kruskal-Wallis test
428 | if all_samples >= 3:
429 | if hypothesis == "count_dependency":
430 | log.debug("Multiple comparison mode for count dependencies (Chi-squared test)")
431 | pvalue_method = "chi_squared"
432 | min_samples = 3
433 | else:
434 | pvalue_method = "KW"
435 | log.debug("Multiple comparison mode (Kruskal-Wallis test)")
436 | if min_samples < 3:
437 | log.debug("Automatically raising the minimum number of samples to 3")
438 | min_samples = 3
439 | # 2 samples = Mann-Whitney test
440 | elif all_samples == 2:
441 | if min_samples:
442 | log.debug("No missing samples allowed for two-sample comparison")
443 | min_samples = 2
444 | if paired_test:
445 | pvalue_method = "paired"
446 | log.debug("Paired comparison mode (Wilcoxon)")
447 | else:
448 | if hypothesis == "count_dependency":
449 | log.debug("Pairwise comparison mode for count dependencies (Fisher exact test)")
450 | pvalue_method = "fisher_exact"
451 | else:
452 | pvalue_method = "MW"
453 | log.debug("Pairwise comparison mode (Mann-Whitney test)")
454 | else:
455 | raise pycoMethError("Meth_Comp needs at least 2 input files")
456 |
457 | log.warning("Opening H5 files")
458 | try:
459 | # Define StatsResults to collect valid sites and perform stats
460 | stats_results = StatsResults(
461 | pvalue_adj_method=pvalue_adj_method,
462 | pvalue_threshold=pvalue_threshold,
463 | only_tested_sites=only_tested_sites,
464 | do_independent_hypothesis_weighting=do_independent_hypothesis_weighting,
465 | )
466 |
467 | log.info("Starting asynchronous file parsing")
468 | with tqdm(
469 | total=num_intervals,
470 | unit=" intervals",
471 | unit_scale=True,
472 | desc="\tProgress",
473 | disable=not progress,
474 | ) as pbar:
475 |
476 | log.info("Launching %d worker processes" % worker_processes)
477 | if worker_processes == 1:
478 | initializer(
479 | dict(
480 | h5_read_groups_key=read_groups_key,
481 | sample_id_list=sample_id_list,
482 | h5_file_list=h5_file_list,
483 | min_abs_llr=min_abs_llr,
484 | min_samples=min_samples,
485 | pvalue_method=pvalue_method,
486 | min_num_reads_per_interval=min_num_reads_per_interval,
487 | pvalue_threshold=pvalue_threshold,
488 | hypothesis=hypothesis,
489 | do_independent_hypothesis_weighting=do_independent_hypothesis_weighting,
490 | )
491 | )
492 | else:
493 | pool = Pool(
494 | worker_processes,
495 | initializer=initializer,
496 | initargs=[
497 | dict(
498 | h5_read_groups_key=read_groups_key,
499 | sample_id_list=sample_id_list,
500 | h5_file_list=h5_file_list,
501 | min_abs_llr=min_abs_llr,
502 | min_samples=min_samples,
503 | pvalue_method=pvalue_method,
504 | min_num_reads_per_interval=min_num_reads_per_interval,
505 | pvalue_threshold=pvalue_threshold,
506 | hypothesis=hypothesis,
507 | do_independent_hypothesis_weighting=do_independent_hypothesis_weighting,
508 | )
509 | ],
510 | )
511 |
512 | # Continue reading lines from all files
513 | log.debug("Starting deep parsing")
514 | fp_done = 0
515 |
516 | # Init file writer
517 | with Comp_Writer(
518 | bed_fn=output_bed_fn,
519 | tsv_fn=output_tsv_fn,
520 | verbose=verbose,
521 | output_raw_lists=False,
522 | with_ihw_weight=do_independent_hypothesis_weighting,
523 | with_posthoc_test=pvalue_method in {"KW", "chi_squared"},
524 | ) as writer:
525 | try:
526 |
527 | def callback(*args):
528 | result_line = stats_results.callback(*(args[0]))
529 | writer.write(result_line)
530 | pbar.update(1)
531 |
532 | abort = False
533 |
534 | def
error_callback(err): 535 | log.critical("Error in worker thread ") 536 | log.critical(str(err)) 537 | nonlocal abort 538 | abort = True 539 | 540 | async_results = [] 541 | # TODO perhaps perform this in batches (e.g. submit 10k intervals, 542 | # wait for all to finish, then submit the next 10k, etc...) 543 | # instead of submitting every interval into the queue and then waiting 544 | # for all of them to finish. 545 | # That would allow for a more reasonable timeout. 546 | for interval in intervals_gen: 547 | if abort: 548 | raise pycoMethError("Aborting due to error in worker thread") 549 | if worker_processes == 1: 550 | callback(worker(interval)) 551 | else: 552 | ar = pool.apply_async( 553 | worker_function, 554 | args=[interval], 555 | callback=callback, 556 | error_callback=error_callback, 557 | ) 558 | async_results.append(ar) 559 | if worker_processes > 1: 560 | for i, ar in enumerate(async_results): 561 | if abort: 562 | break 563 | ar.wait(timeout=3 * 24 * 3600) 564 | except: 565 | writer.abort() 566 | raise 567 | # Exit condition 568 | if not stats_results.res_list: 569 | log.info("No valid p-Value could be computed") 570 | else: 571 | # Convert results to dataframe and correct pvalues for multiple tests 572 | log.info("Adjust pvalues") 573 | stats_results.multitest_adjust() 574 | 575 | rewriter = Comp_ReWriter([f for f in (output_bed_fn, output_tsv_fn) if f is not None]) 576 | rewriter.write_adjusted_pvalues(stats_results.res_list) 577 | finally: 578 | # Print counters 579 | log_dict(stats_results.counter, log.info, "Results summary") 580 | 581 | 582 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~StatsResults HELPER CLASS~~~~~~~~~~~~~~~~~~~~~~~~~~~# 583 | 584 | 585 | class DictList: 586 | def __init__(self, universal_key): 587 | self.dict = {} 588 | self.universal_key = universal_key 589 | pass 590 | 591 | def __len__(self): 592 | if len(self.dict) == 0: 593 | return 0 594 | 595 | return len(self.dict[self.universal_key]) 596 | 597 | def __getitem__(self, i): 598 | class ListAccessor: 599 | def __init__(innerSelf): 600 | innerSelf.i = i 601 | 602 | def __getitem__(innerSelf, key): 603 | return self.dict[key][innerSelf.i] 604 | 605 | def __setitem__(innerSelf, key, value): 606 | self.dict[key][innerSelf.i] = value 607 | 608 | return ListAccessor() 609 | 610 | def __setitem__(self, i, d: Dict): 611 | for key, val in d.items(): 612 | self.dict[key][i] = val 613 | 614 | def append(self, d): 615 | if len(self.dict) == 0: 616 | self.dict = {key: [val] for key, val in d.items()} 617 | else: 618 | if len(set(d.keys()).intersection(set(self.dict.keys()))) != len(d.keys()): 619 | raise ValueError("All keys must be present in all entries") 620 | for key, val in d.items(): 621 | self.dict[key].append(val) 622 | 623 | def __iter__(self): 624 | for i in range(len(self)): 625 | yield self[i] 626 | 627 | 628 | class StatsResults: 629 | def __init__( 630 | self, 631 | pvalue_adj_method="fdr_bh", 632 | pvalue_threshold=0.01, 633 | only_tested_sites=False, 634 | do_independent_hypothesis_weighting=True, 635 | ): 636 | """""" 637 | # Save self variables 638 | self.pvalue_adj_method = pvalue_adj_method 639 | self.pvalue_threshold = pvalue_threshold 640 | self.only_tested_sites = only_tested_sites 641 | self.do_independent_hypothesis_weighting = do_independent_hypothesis_weighting 642 | 643 | # Init self collections 644 | self.res_list = DictList("pvalue") 645 | self.counter = Counter() 646 | 647 | # Get minimal non-zero float value 648 | self.min_pval = np.nextafter(float(0), float(1)) 649 | 650 | # 
~~~~~~~~~~~~~~MAGIC AND PROPERTY METHODS~~~~~~~~~~~~~~#
651 |
652 | def __repr__(self):
653 | return dict_to_str(self.counter)
654 |
655 | def __len__(self):
656 | return len(self.res_list)
657 |
658 | def __iter__(self):
659 | for i in self.res_list:
660 | yield i
661 |
662 | # ~~~~~~~~~~~~~~PUBLIC METHODS~~~~~~~~~~~~~~#
663 |
664 | def callback(self, res, counters_to_increase):
665 | # Filter out non-tested sites if required (identity check against np.nan would fail after pickling)
666 | if self.only_tested_sites and np.isnan(res["pvalue"]):
667 | return
668 |
669 | for c in counters_to_increase:
670 | self.counter[c] += 1
671 | reduced_res = {
672 | "pvalue": res["pvalue"],
673 | "adj_pvalue": res["adj_pvalue"],
674 | "comment": res["comment"],
675 | }
676 | if "ihw_weight" in res:
677 | reduced_res["ihw_weight"] = res["ihw_weight"]
678 | self.res_list.append(reduced_res)
679 | return res
680 |
681 | def multitest_adjust(self):
682 | """"""
683 | # Collect non-nan pvalues
684 | pvalue_idx = []
685 | pvalue_list = []
686 | ihw_weight_list = []
687 |
688 | for i, res in enumerate(self.res_list):
689 | if not np.isnan(res["pvalue"]):
690 | pvalue_idx.append(i)
691 | pvalue_list.append(res["pvalue"])
692 | if self.do_independent_hypothesis_weighting:
693 | ihw_weight_list.append(res["ihw_weight"])
694 |
695 | # Adjust values
696 | if len(pvalue_list) == 0:
697 | return
698 |
699 | if self.do_independent_hypothesis_weighting:
700 | # Re-center weights (they must average to ~1); staying within [0, 2] is most faithful to the nominal FDR
701 | mean_weight = sum(ihw_weight_list) / len(ihw_weight_list)
702 | ihw_weight_list = [w - mean_weight for w in ihw_weight_list]
703 | min_weight = max(abs(min(ihw_weight_list)), 0.01)
704 | ihw_weight_list = [max(w / min_weight + 1, 0.01) for w in ihw_weight_list]
705 | # Weight p-values
706 | pvalue_list = [p / w for p, w in zip(pvalue_list, ihw_weight_list)]
707 |
708 | adj_pvalue_list = multipletests(
709 | pvals=pvalue_list,
710 | alpha=self.pvalue_threshold,
711 | method=self.pvalue_adj_method,
712 | )[1]
713 |
714 | # add adjusted values to appropriate category
715 | for i, adj_pvalue in zip(pvalue_idx, adj_pvalue_list):
716 |
717 | # Fix and categorize p-values
718 | if adj_pvalue is None or np.isnan(adj_pvalue) or adj_pvalue > 1 or adj_pvalue < 0:
719 | adj_pvalue = 1.0
720 | comment = "Non-significant pvalue"
721 |
722 | elif adj_pvalue <= self.pvalue_threshold:
723 | # Correct very low pvalues to minimal float size
724 | if adj_pvalue == 0:
725 | adj_pvalue = self.min_pval
726 | # update counter if pval is still significant after adjustment
727 | comment = "Significant pvalue"
728 | else:
729 | comment = "Non-significant pvalue"
730 |
731 | # update counters and update comment and adj p-value
732 | if self.res_list[i]["comment"] == "Valid":
733 | # Overwriting comment, but only if it was a site that was tested
734 | # (not if it is a site that was excluded for coverage or other reasons)
735 | self.counter[comment] += 1
736 | self.res_list[i]["comment"] = comment
737 | self.res_list[i]["adj_pvalue"] = adj_pvalue
738 |
739 |
740 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~Comp_Writer HELPER CLASS~~~~~~~~~~~~~~~~~~~~~~~~~~~#
741 | class Comp_Writer:
742 | """Extract data for valid sites and write to BED and/or TSV file."""
743 |
744 | def __init__(
745 | self,
746 | bed_fn=None,
747 | tsv_fn=None,
748 | verbose=True,
749 | output_raw_lists=False,
750 | with_ihw_weight=False,
751 | with_posthoc_test=False,
752 | ):
753 | """"""
754 | self.log = get_logger(name="Comp_Writer", verbose=verbose)
755 | self.bed_fn = bed_fn
756 |
self.tsv_fn = tsv_fn 757 | self.output_raw_lists = output_raw_lists 758 | self.with_ihw_weight = with_ihw_weight 759 | self.with_posthoc_test = with_posthoc_test 760 | 761 | # Init file pointers 762 | self.bed_fp = self._init_bed() if bed_fn else None 763 | self.tsv_fp = self._init_tsv() if tsv_fn else None 764 | 765 | # Color score table 766 | self.colors = OrderedDict() 767 | self.colors[10] = "10,7,35" 768 | self.colors[9] = "32,12,74" 769 | self.colors[8] = "60,9,101" 770 | self.colors[7] = "87,15,109" 771 | self.colors[6] = "112,25,110" 772 | self.colors[5] = "137,34,105" 773 | self.colors[4] = "163,43,97" 774 | self.colors[3] = "187,55,84" 775 | self.colors[2] = "209,70,67" 776 | self.colors[1] = "230,230,230" 777 | self.colors[0] = "230,230,230" 778 | self.aborted = False 779 | 780 | # ~~~~~~~~~~~~~~PUBLIC METHODS~~~~~~~~~~~~~~# 781 | 782 | def write(self, res): 783 | """""" 784 | if self.bed_fn: 785 | self._write_bed(res) 786 | if self.tsv_fn: 787 | self._write_tsv(res) 788 | 789 | def __enter__(self): 790 | self.log.debug("Opening Writer") 791 | return self 792 | 793 | def __exit__(self, exception_type, exception_val, trace): 794 | self.log.debug("Closing Writer") 795 | for fp in (self.bed_fp, self.tsv_fp): 796 | try: 797 | fp.close() 798 | if self.aborted: 799 | # There was an error - delete the partial file 800 | os.remove(fp.name) 801 | except: 802 | pass 803 | 804 | def abort(self): 805 | self.aborted = True 806 | 807 | # ~~~~~~~~~~~~~~PRIVATE METHODS~~~~~~~~~~~~~~# 808 | def _init_bed(self): 809 | """Open BED file and write file header.""" 810 | self.log.debug("Initialise output bed file") 811 | mkbasedir(self.bed_fn, exist_ok=True) 812 | fp = gzip.open(self.bed_fn, "wt") if self.bed_fn.endswith(".gz") else open(self.bed_fn, "w") 813 | # Write header line 814 | fp.write("track name=meth_comp itemRgb=On\n") 815 | return fp 816 | 817 | def _write_bed(self, res): 818 | """Write line to BED file.""" 819 | # Log transform pvalue and cast to int 820 | if np.isnan(res["adj_pvalue"]): 821 | score = 0 822 | else: 823 | score = int(-np.log10(res["adj_pvalue"])) 824 | # Define color for bed file 825 | color = self.colors.get(score, self.colors[10]) 826 | # Write line 827 | res_line = [ 828 | res["chromosome"], 829 | res["start"], 830 | res["end"], 831 | ".", 832 | score, 833 | ".", 834 | res["start"], 835 | res["end"], 836 | color, 837 | ] 838 | self.bed_fp.write(str_join(res_line, sep="\t", line_end="\n")) 839 | 840 | def _init_tsv(self): 841 | """Open TSV file and write file header.""" 842 | self.log.debug("Initialise output tsv file") 843 | mkbasedir(self.tsv_fn, exist_ok=True) 844 | fp = gzip.open(self.tsv_fn, "wt") if self.tsv_fn.endswith(".gz") else open(self.tsv_fn, "w") 845 | # Write header line 846 | 847 | self.header = [ 848 | "chromosome", 849 | "start", 850 | "end", 851 | "n_samples", 852 | "pvalue", 853 | "adj_pvalue", 854 | "unique_cpg_pos", 855 | "labels", 856 | "med_llr_list", 857 | "difference", 858 | ] 859 | if self.with_posthoc_test: 860 | self.header = self.header + ["post_hoc_pvalues"] 861 | if self.output_raw_lists: 862 | self.header = self.header + ["raw_llr_list", "raw_pos_list"] 863 | if self.with_ihw_weight: 864 | self.header = self.header + ["ihw_weight"] 865 | self.header = self.header + [ 866 | "avg_coverage", 867 | "comment", 868 | ] 869 | fp.write(str_join(self.header, sep="\t", line_end="\n")) 870 | return fp 871 | 872 | def _write_tsv(self, res): 873 | """Write line to TSV file.""" 874 | res_line = [res[k] for k in self.header] 875 | 
self.tsv_fp.write(str_join(res_line, sep="\t", line_end="\n"))
876 | 
877 | 
878 | class Comp_ReWriter:
879 |     """Reads delimited files with a header line, and rewrites the file
880 |     in-place using python 3's fileinput module.
881 | 
882 |     This way we don't have to hold the entire file in memory or copy it
883 |     around.
884 |     """
885 | 
886 |     def __init__(self, filenames: List[str], separators: List[str] = None):
887 |         """
888 |         :param filenames: The list of filenames to modify
889 |         :param separators: The separators for each file. Can be None (default), in
890 |             which case tab is assumed. If provided, it must be the same
891 |             length as filenames
892 |         """
893 | 
894 |         self.filenames = filenames
895 |         if separators is None:
896 |             self.separators = ["\t"] * len(self.filenames)
897 |         else:
898 |             assert len(separators) == len(self.filenames)
899 |             self.separators = separators
900 | 
901 |     def write_adjusted_pvalues(self, res_list):
902 |         for sep, filename in zip(self.separators, self.filenames):
903 |             if filename is not None:
904 |                 is_bed = filename.endswith("bed")
905 |                 with fileinput.input(filename, inplace=True) as fi_fp:
906 |                     for res, line in zip((None, *res_list), fi_fp):
907 |                         line = line.strip()
908 |                         if res is None:
909 |                             header = {k: i for i, k in enumerate(line.split(sep))}
910 |                             print(line)
911 |                         else:
912 |                             updated_line = line.split(sep)
913 |                             if is_bed:
914 |                                 if np.isnan(res["adj_pvalue"]) or res["adj_pvalue"] <= 0:
915 |                                     score = 0
916 |                                 else:
917 |                                     score = int(-np.log10(res["adj_pvalue"]))
918 | 
919 |                                 updated_line[4] = str(score)
920 |                             else:
921 |                                 updated_line[header["adj_pvalue"]] = str(res["adj_pvalue"])
922 |                                 updated_line[header["comment"]] = res["comment"]
923 |                             print(sep.join(updated_line))
924 | 
925 | 
926 | def read_readgroups_file(readgroups_file: IO):
927 |     """Reads a file that assigns reads to read groups (such as haplotypes,
928 |     samples, clusters, etc.)
929 | 
930 |     :param readgroups_file: path to the tab-separated file
931 |     :return: pandas dataframe with columns "read_name", "group" and "group_set"
932 |     """
933 |     # Loading
934 |     try:
935 |         read_groups = pd.read_csv(
936 |             readgroups_file,
937 |             sep="\t",
938 |             header=0,
939 |             dtype={"read_name": str, "group": int, "group_set": "category"},
940 |         )
941 |     except Exception as e:
942 |         raise pycoMethError("Unable to read read groups file", e)
943 | 
944 |     # Validation
945 |     if len(read_groups.columns) == 2:
946 |         should_colnames = ["read_name", "group"]
947 |     elif len(read_groups.columns) == 3:
948 |         should_colnames = ["read_name", "group", "group_set"]
949 |     else:
950 |         raise pycoMethError("Invalid number of columns in read groups file (should be 2 or 3)")
951 | 
952 |     if not all([col in read_groups.columns for col in should_colnames]):
953 |         raise pycoMethError("Invalid column names in read groups file (should be %s)" % ", ".join(should_colnames))
954 | 
955 |     # Finished validation, now add group_set column if not present
956 |     if "group_set" not in read_groups.columns:
957 |         read_groups["group_set"] = 1
958 | 
959 |     read_groups = read_groups.set_index("read_name")
960 | 
961 |     return read_groups
962 | 
963 | 
964 | def sliding_intervals_gen(coordgen, interval_size=1000) -> Generator[Coord, None, None]:
965 |     """Generate sliding window coordinate intervals over the entire
966 |     reference genome provided."""
967 |     for chr_name, chr_len in coordgen.chr_name_len.items():
968 |         for start in range(0, chr_len, interval_size):
969 |             end = start + interval_size if start + interval_size <= chr_len else chr_len
970 |             yield
(coordgen(chr_name, start, end)) 971 | 972 | 973 | def bed_intervals_gen(coordgen, interval_bed_fn) -> Generator[Coord, None, None]: 974 | """Generate coordinate intervals corresponding to the provided bed 975 | file.""" 976 | with FileParser( 977 | fn=interval_bed_fn, 978 | colnames=["chrom", "start", "end"], 979 | dtypes={"start": int, "end": int}, 980 | force_col_len=False, 981 | comment="track", 982 | quiet=True, 983 | ) as bed: 984 | for line in bed: 985 | ct = coordgen(line.chrom, line.start, line.end) 986 | yield (ct) 987 | -------------------------------------------------------------------------------- /pycoMeth/Meth_Seg.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import IO, List 3 | from multiprocessing import Queue, Process 4 | import logging 5 | 6 | import tqdm 7 | import numpy as np 8 | from meth5 import MetH5File, MethlyationValuesContainer 9 | from meth5.sparse_matrix import SparseMethylationMatrixContainer 10 | 11 | from pycoMeth.meth_seg.segments_csv_io import SegmentsWriterBED, SegmentsWriterBedGraph 12 | from pycoMeth.meth_seg.segment import segment 13 | from pycoMeth.common import pycoMethError, get_logger 14 | 15 | 16 | def worker_segment(input_queue: Queue, output_queue: Queue, max_segments_per_window: int): 17 | import warnings 18 | 19 | warnings.filterwarnings("ignore") 20 | 21 | while True: 22 | job = input_queue.get() 23 | if job is None: 24 | break 25 | 26 | sparse_matrix, fraction = job 27 | llrs = sparse_matrix.met_matrix 28 | 29 | if sparse_matrix.shape[1] <= 1: 30 | # Too few CpG-sites. Nothing to segment. 31 | segmentation = np.zeros(sparse_matrix.shape[1]) 32 | result_tuple = ( 33 | llrs, 34 | segmentation, 35 | sparse_matrix.genomic_coord, 36 | sparse_matrix.genomic_coord_end, 37 | sparse_matrix.read_samples, 38 | ) 39 | else: 40 | # Perform segmentation 41 | segmentation = segment(sparse_matrix, max_segments_per_window) 42 | result_tuple = ( 43 | llrs, 44 | segmentation, 45 | sparse_matrix.genomic_coord, 46 | sparse_matrix.genomic_coord_end, 47 | sparse_matrix.read_samples, 48 | ) 49 | output_queue.put((result_tuple, fraction)) 50 | 51 | 52 | def worker_output( 53 | output_queue: Queue, 54 | out_tsv_file: IO, 55 | out_bedgraph_filebase: str, 56 | chromosome: str, 57 | read_groups_keys: str, 58 | print_diff_met: bool, 59 | quiet: bool, 60 | ): 61 | writers = [SegmentsWriterBED(out_tsv_file, chromosome)] 62 | if out_bedgraph_filebase is not None: 63 | writers.append(SegmentsWriterBedGraph(out_bedgraph_filebase, chromosome)) 64 | with tqdm.tqdm(total=100) as pbar: 65 | while True: 66 | res = output_queue.get() 67 | if res is None: 68 | break 69 | seg_result, fraction = res 70 | llrs, segments, genomic_starts, genomic_ends, samples = seg_result 71 | 72 | for writer in writers: 73 | writer.write_segments_llr( 74 | llrs, segments, genomic_starts, genomic_ends, samples, compute_diffmet=print_diff_met 75 | ) 76 | pbar.update(fraction) 77 | pbar.n = 100 78 | pbar.refresh() 79 | 80 | 81 | def load_met_matrix( 82 | filename: str, 83 | values_container: MethlyationValuesContainer, 84 | read_groups_keys: List[str], 85 | read_groups_to_include: List[str], 86 | ) -> SparseMethylationMatrixContainer: 87 | met_matrix: SparseMethylationMatrixContainer = values_container.to_sparse_methylation_matrix( 88 | read_read_names=False, read_groups_key=read_groups_keys 89 | ) 90 | if met_matrix.shape[0] == 0: 91 | return met_matrix 92 | 93 | if read_groups_keys is None: 94 | """Read groups are read 
names (read-level mode)""" 95 | met_matrix.read_samples = np.array([f"{filename}" for _ in met_matrix.read_names]) 96 | else: 97 | if read_groups_to_include is not None: 98 | """Filter out only allowed read groups""" 99 | idx = np.array([r in read_groups_to_include for r in met_matrix.read_samples]) 100 | met_matrix = met_matrix.get_submatrix_from_read_mask(idx) 101 | """Read groups are read from h5 file""" 102 | met_matrix.read_samples = np.array([f"{filename}_{sn}" for sn in met_matrix.read_samples]) 103 | return met_matrix 104 | 105 | 106 | def worker_reader( 107 | m5files: List[Path], 108 | chunk_size: int, 109 | chromosome: str, 110 | window_size: int, 111 | input_queue: Queue, 112 | chunks: List[int], 113 | progress_per_chunk: float, 114 | read_groups_keys: List[str], 115 | read_groups_to_include: List[str], 116 | ): 117 | firstfile = m5files[0] 118 | with MetH5File(firstfile, "r", chunk_size=chunk_size) as m5: 119 | chrom_container = m5[chromosome] 120 | 121 | for chunk in chunks: 122 | values_container = chrom_container.get_chunk(chunk) 123 | chunk_start, chunk_end = values_container.chromosome.h5group["chunk_ranges"][chunk] 124 | met_matrix = load_met_matrix(firstfile.name, values_container, read_groups_keys, read_groups_to_include) 125 | 126 | for other_m5file in m5files[1:]: 127 | with MetH5File(other_m5file, "r", chunk_size=chunk_size) as other_m5: 128 | other_values_container = other_m5[chromosome].get_values_in_range(chunk_start, chunk_end) 129 | other_met_matrix = load_met_matrix( 130 | other_m5file.name, other_values_container, read_groups_keys, read_groups_to_include 131 | ) 132 | if met_matrix.met_matrix.shape[0] == 0: 133 | # First file had no data in the requested samples 134 | met_matrix = other_met_matrix 135 | elif other_met_matrix.met_matrix.shape[0] > 0: 136 | met_matrix = met_matrix.merge(other_met_matrix, sample_names_mode="keep") 137 | 138 | if read_groups_keys is None and len(m5files) == 1: 139 | met_matrix.read_samples = met_matrix.read_names 140 | total_sites = len(met_matrix.genomic_coord) 141 | num_windows = (total_sites // window_size) + 1 142 | progress_per_window = progress_per_chunk / num_windows 143 | for window_start in range(0, total_sites + 1, window_size): 144 | window_end = window_start + window_size 145 | logging.debug(f"Submitting window {window_start}-{window_end}") 146 | sub_matrix = met_matrix.get_submatrix(window_start, window_end) 147 | input_queue.put((sub_matrix, progress_per_window)) 148 | 149 | 150 | def validate_chromosome_selection(m5file: Path, chromosome: str, chunk_size: int): 151 | with MetH5File(m5file, "r", chunk_size=chunk_size) as m5: 152 | if chromosome not in m5.get_chromosomes(): 153 | raise ValueError(f"Chromosome {chromosome} not found in m5 file.") 154 | 155 | 156 | def validate_chunk_selection(m5file: Path, chromosome: str, chunk_size: int, chunks: List[int]): 157 | with MetH5File(m5file, "r", chunk_size=chunk_size) as m5: 158 | num_chunks = m5[chromosome].get_number_of_chunks() 159 | if max(chunks) >= m5[chromosome].get_number_of_chunks(): 160 | raise ValueError(f"Chunk {max(chunks)} not in chromosome. 
Must be in range {0}-{num_chunks - 1}")
161 | 
162 | 
163 | def Meth_Seg(
164 |     h5_file_list: [Path],
165 |     output_tsv_fn: str,
166 |     chromosome: str,
167 |     chunk_size: int = int(5e4),
168 |     chunks: [int] = None,
169 |     workers: int = 1,
170 |     reader_workers: int = 1,
171 |     progress: bool = False,
172 |     window_size: int = 300,
173 |     max_segments_per_window: int = 10,
174 |     read_groups_keys: [str] = None,
175 |     read_groups_to_include: [str] = None,
176 |     print_diff_met: bool = False,
177 |     output_bedgraph_fn: str = None,
178 |     verbose: bool = False,
179 |     quiet: bool = False,
180 |     **kwargs,
181 | ):
182 |     """
183 |     Methylation segmentation method implemented as a Bayesian changepoint detection algorithm
184 |     * h5_file_list
185 |         A list of MetH5 files containing methylation LLRs
186 |     * chromosome
187 |         The chromosome to segment
188 |     * chunk_size
189 |         Number of LLRs per chunk - for best performance, should be a multiple of the chunk size used when creating the h5 files.
190 |         Default is the same as the default for creating meth5 files.
191 |     * chunks
192 |         List of chunk IDs, or None if all chunks of the chromosome are to be segmented
193 |     * workers
194 |         Number of worker processes
195 |     * reader_workers
196 |         Number of reader worker processes
197 |     * progress
198 |         True if a progress bar is desired
199 |     * output_tsv_fn
200 |         Output TSV file
201 |     * window_size
202 |         Window size for segmentation in number of CpG calling sites. Default: 300.
203 |         Increasing this increases the memory requirement
204 |     * max_segments_per_window
205 |         Maximum number of segments per window. Should probably be somewhere between 8 and 20.
206 |         The larger the number, the more expensive the computation.
207 |     * read_groups_keys
208 |         If read groups should be considered (e.g. haplotype), pass the read group key. You can provide more than one.
209 |     * print_diff_met
210 |         Whether the output TSV file should contain the methylation rate difference between samples
211 |     * output_bedgraph_fn
212 |         Base name for bedgraphs to be written. One bedgraph per sample/read_group will be created.
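    For example, a hypothetical invocation segmenting chromosome chr1 of two
    MetH5 files with 4 segmentation workers and haplotype read groups (file
    names and the read group key are placeholders, assuming the pycometh
    console entry point is installed):

        pycometh Meth_Seg -i sample_1.h5 sample_2.h5 -c chr1 -t chr1_segments.tsv -p 4 -r haplotype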
213 | """ 214 | log = get_logger(name="pycoMeth_Meth_Seg", verbose=verbose, quiet=quiet) 215 | log.debug("Checking options and input files") 216 | if read_groups_to_include is not None and read_groups_keys is None: 217 | raise pycoMethError("read_groups_to_include defined, but missing read_groups_keys parameter") 218 | 219 | input_queue = Queue(maxsize=workers * 5) 220 | output_queue = Queue(maxsize=workers * 100) 221 | 222 | for m5file in h5_file_list: 223 | validate_chromosome_selection(m5file, chromosome, chunk_size) 224 | 225 | firstm5 = h5_file_list[0] 226 | if chunks is None: 227 | # No chunks have been provided, take all 228 | with MetH5File(firstm5, mode="r", chunk_size=chunk_size) as f: 229 | chunks = list(range(f[chromosome].get_number_of_chunks())) 230 | else: 231 | # flatten chunk list, since we allow a list of chunks or a list of chunk ranges 232 | # (which are converted to lists in parsing) 233 | chunks = [chunk for subchunks in chunks for chunk in ([subchunks] if isinstance(subchunks, int) else subchunks)] 234 | 235 | validate_chunk_selection(firstm5, chromosome, chunk_size, chunks) 236 | 237 | # sort and make unique 238 | chunks = sorted(list(set(chunks))) 239 | progress_per_chunk = 100 / len(chunks) 240 | 241 | segmentation_processes = [Process(target=worker_segment, args=(input_queue, output_queue, max_segments_per_window))] 242 | for p in segmentation_processes: 243 | p.start() 244 | 245 | reader_workers = min(reader_workers, len(chunks)) 246 | chunk_per_process = np.array_split(chunks, reader_workers) 247 | reader_processes = [ 248 | Process( 249 | target=worker_reader, 250 | args=( 251 | h5_file_list, 252 | chunk_size, 253 | chromosome, 254 | window_size, 255 | input_queue, 256 | p_chunks, 257 | progress_per_chunk, 258 | read_groups_keys, 259 | read_groups_to_include, 260 | ), 261 | ) 262 | for p_chunks in chunk_per_process 263 | ] 264 | for p in reader_processes: 265 | p.start() 266 | 267 | output_process = Process( 268 | target=worker_output, 269 | args=( 270 | output_queue, 271 | output_tsv_fn, 272 | output_bedgraph_fn, 273 | chromosome, 274 | read_groups_keys, 275 | print_diff_met, 276 | not progress, 277 | ), 278 | ) 279 | output_process.start() 280 | 281 | for p in reader_processes: 282 | p.join() 283 | 284 | # Deal poison pills to segmentation workers 285 | for p in segmentation_processes: 286 | input_queue.put(None) 287 | 288 | for p in segmentation_processes: 289 | p.join() 290 | 291 | # Deal poison pill to writer worker 292 | output_queue.put(None) 293 | output_process.join() 294 | -------------------------------------------------------------------------------- /pycoMeth/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Define self package variable 4 | __version__ = "2.2.2" 5 | __description__ = "Differential methylation calling suite for Nanopore methylation calls PycoMeth" 6 | -------------------------------------------------------------------------------- /pycoMeth/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | # ~~~~~~~~~~~~~~IMPORTS~~~~~~~~~~~~~~# 5 | # Standard library imports 6 | import argparse 7 | import sys 8 | 9 | # Local imports 10 | import pycoMeth as pkg 11 | from pycoMeth.common import * 12 | from pycoMeth.Meth_Comp import Meth_Comp 13 | from pycoMeth.CGI_Finder import CGI_Finder 14 | from pycoMeth.Comp_Report import Comp_Report 15 | from pycoMeth.Meth_Seg 
import Meth_Seg 16 | 17 | # ~~~~~~~~~~~~~~TOP LEVEL ENTRY POINT~~~~~~~~~~~~~~# 18 | def main(args=None): 19 | """ Main entry point for pycoMeth command line interface""" 20 | # Parser and subparsers for command 21 | parser = argparse.ArgumentParser(description=pkg.__description__) 22 | parser.add_argument("--version", action="version", version="{} v{}".format(pkg.__name__, pkg.__version__)) 23 | subparsers = parser.add_subparsers(description="%(prog)s implements the following subcommands", dest="subcommands") 24 | subparsers.required = True 25 | 26 | # Meth_Comp subparser 27 | f = Meth_Comp 28 | sp_met = subparsers.add_parser("Meth_Comp", description=doc_func(f)) 29 | sp_met.set_defaults(func=f) 30 | sp_met_io = sp_met.add_argument_group("Input/Output options") 31 | arg_from_docstr(sp_met_io, f, "h5_file_list", "i") 32 | arg_from_docstr(sp_met_io, f, "ref_fasta_fn", "f") 33 | arg_from_docstr(sp_met_io, f, "read_groups_key", "r") 34 | arg_from_docstr(sp_met_io, f, "interval_bed_fn", "a") 35 | arg_from_docstr(sp_met_io, f, "output_bed_fn", "b") 36 | arg_from_docstr(sp_met_io, f, "output_tsv_fn", "t") 37 | arg_from_docstr(sp_met_io, f, "interval_size", "n") 38 | arg_from_docstr(sp_met_io, f, "min_num_reads_per_interval", "c") 39 | sp_met_ms = sp_met.add_argument_group("Misc options") 40 | arg_from_docstr(sp_met_ms, f, "max_missing", "m") 41 | arg_from_docstr(sp_met_ms, f, "worker_processes", "w") 42 | arg_from_docstr(sp_met_ms, f, "min_abs_llr", "l") 43 | arg_from_docstr(sp_met_ms, f, "sample_id_list", "s") 44 | arg_from_docstr(sp_met_ms, f, "pvalue_adj_method") 45 | arg_from_docstr(sp_met_ms, f, "pvalue_threshold") 46 | arg_from_docstr(sp_met_ms, f, "only_tested_sites") 47 | arg_from_docstr(sp_met_ms, f, "hypothesis") 48 | arg_from_docstr(sp_met_ms, f, "do_independent_hypothesis_weighting") 49 | 50 | # Comp_Report subparser 51 | f = Comp_Report 52 | sp_cr = subparsers.add_parser("Comp_Report", description=doc_func(f)) 53 | sp_cr.set_defaults(func=f) 54 | sp_cr_io = sp_cr.add_argument_group("Input/Output options") 55 | arg_from_docstr(sp_cr_io, f, "h5_file_list", "i") 56 | arg_from_docstr(sp_cr_io, f, "ref_fasta_fn", "f") 57 | arg_from_docstr(sp_cr_io, f, "read_groups_key", "r") 58 | arg_from_docstr(sp_cr_io, f, "methcomp_fn", "c") 59 | arg_from_docstr(sp_cr_io, f, "gff3_fn", "g") 60 | arg_from_docstr(sp_cr_io, f, "outdir", "o") 61 | sp_cr_ms = sp_cr.add_argument_group("Misc options") 62 | arg_from_docstr(sp_cr_ms, f, "sample_id_list", "s") 63 | arg_from_docstr(sp_cr_ms, f, "n_top", "n") 64 | arg_from_docstr(sp_cr_ms, f, "max_tss_distance", "d") 65 | arg_from_docstr(sp_cr_ms, f, "pvalue_threshold") 66 | arg_from_docstr(sp_cr_ms, f, "min_diff_llr") 67 | arg_from_docstr(sp_cr_ms, f, "min_diff_bs") 68 | arg_from_docstr(sp_cr_ms, f, "n_len_bin") 69 | arg_from_docstr(sp_cr_ms, f, "export_static_plots") 70 | arg_from_docstr(sp_cr_ms, f, "report_non_significant") 71 | 72 | # CGI_Finder subparser 73 | f = CGI_Finder 74 | sp_cgi = subparsers.add_parser("CGI_Finder", description=doc_func(f)) 75 | sp_cgi.set_defaults(func=f) 76 | sp_cgi_io = sp_cgi.add_argument_group("Input/Output options") 77 | arg_from_docstr(sp_cgi_io, f, "ref_fasta_fn", "f") 78 | arg_from_docstr(sp_cgi_io, f, "output_bed_fn", "b") 79 | arg_from_docstr(sp_cgi_io, f, "output_tsv_fn", "t") 80 | sp_cgi_ms = sp_cgi.add_argument_group("Misc options") 81 | arg_from_docstr(sp_cgi_ms, f, "merge_gap", "m") 82 | arg_from_docstr(sp_cgi_ms, f, "min_win_len", "w") 83 | arg_from_docstr(sp_cgi_ms, f, "min_CG_freq", "c") 84 | 
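# Each arg_from_docstr call above reads one parameter's type, default and doc
# from the target function's signature and docstring (see make_arg_dict in
# common.py below) and registers it as an argparse option. A hypothetical
# sketch of the convention, not part of the real CLI:
#
#     def Example_Tool(ref_fasta_fn: str, min_len: int = 100):
#         """
#         One-line description picked up by doc_func
#         * ref_fasta_fn
#             Reference FASTA file
#         * min_len
#             Minimum length filter
#         """
#
#     # arg_from_docstr(group, Example_Tool, "ref_fasta_fn", "f") would add a
#     # required --ref_fasta_fn/-f option of type str, using the text under
#     # "* ref_fasta_fn" as help; min_len would render "(default: 100) [int]".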
arg_from_docstr(sp_cgi_ms, f, "min_obs_CG_ratio", "r")
85 | 
86 |     # Meth_Seg subparser
87 |     f = Meth_Seg
88 |     sp_seg = subparsers.add_parser("Meth_Seg", description=doc_func(f))
89 |     sp_seg.set_defaults(func=f)
90 |     sp_seg_io = sp_seg.add_argument_group("Input/Output options")
91 |     arg_from_docstr(sp_seg_io, f, "h5_file_list", "i")
92 |     arg_from_docstr(sp_seg_io, f, "chromosome", "c")
93 |     arg_from_docstr(sp_seg_io, f, "chunks", "n")
94 |     arg_from_docstr(sp_seg_io, f, "output_tsv_fn", "t")
95 |     arg_from_docstr(sp_seg_io, f, "output_bedgraph_fn", "b")
96 |     arg_from_docstr(sp_seg_io, f, "read_groups_keys", "r")
97 |     arg_from_docstr(sp_seg_io, f, "read_groups_to_include", "s")
98 |     arg_from_docstr(sp_seg_io, f, "chunk_size")
99 |     sp_seg_ms = sp_seg.add_argument_group("Misc options")
100 |     arg_from_docstr(sp_seg_ms, f, "max_segments_per_window", "m")
101 |     arg_from_docstr(sp_seg_ms, f, "workers", "p")
102 |     arg_from_docstr(sp_seg_ms, f, "reader_workers")
103 |     arg_from_docstr(sp_seg_ms, f, "window_size", "w")
104 |     arg_from_docstr(sp_seg_ms, f, "print_diff_met")
105 | 
106 |     # Add common group parsers (not to sp_seg, whose -p short option is already taken by --workers)
107 |     for sp in [sp_met, sp_cr, sp_cgi]:
108 |         sp_vb = sp.add_argument_group("Verbosity options")
109 |         sp_vb.add_argument("-v", "--verbose", action="store_true", default=False, help="Increase verbosity")
110 |         sp_vb.add_argument("-q", "--quiet", action="store_true", default=False, help="Reduce verbosity")
111 |         sp_vb.add_argument("-p", "--progress", action="store_true", default=False, help="Display a progress bar")
112 | 
113 |     # Parse args and call subfunction
114 |     args = parser.parse_args()
115 | 
116 |     args.func(**vars(args))
117 | 
118 | 
119 | if __name__ == "__main__":
120 |     main()
121 | -------------------------------------------------------------------------------- /pycoMeth/common.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | # ~~~~~~~~~~~~~~IMPORTS~~~~~~~~~~~~~~#
4 | # Standard library imports
5 | import sys
6 | import os
7 | import inspect
8 | from collections import *
9 | from glob import iglob
10 | import datetime
11 | import logging
12 | import json
13 | import gzip
14 | 
15 | # Local imports
16 | from pycoMeth import __version__ as pkg_version
17 | from pycoMeth import __name__ as pkg_name
18 | 
19 | # Third party imports
20 | import colorlog
21 | 
22 | # Optional static export deps
23 | try:
24 |     from kaleido.scopes.plotly import PlotlyScope
25 |     from IPython.core.display import SVG, display
26 | 
27 |     STATIC_EXPORT = True
28 | except (ModuleNotFoundError, ImportError) as E:
29 |     print("Cannot import dependencies required for static image export")
30 |     STATIC_EXPORT = False
31 |     pass
32 | 
33 | # ~~~~~~~~~~~~~~FUNCTIONS~~~~~~~~~~~~~~#
34 | def opt_summary(local_opt):
35 |     """Simplify option dict creation"""
36 |     d = OrderedDict()
37 |     d["Package name"] = pkg_name
38 |     d["Package version"] = pkg_version
39 |     d["Timestamp"] = str(datetime.datetime.now())
40 |     for i, j in local_opt.items():
41 |         d[i] = j
42 |     return d
43 | 
44 | 
45 | def str_join(l, sep="\t", line_end=""):
46 |     """Join a list of mixed types into a single str"""
47 |     s = sep.join(map(str, l)) + line_end
48 |     return s
49 | 
50 | 
51 | def list_to_str(l):
52 |     """Generate a string from any list"""
53 |     return str(json.dumps(l)).replace(" ", "")
54 | 
55 | 
56 | def str_to_list(s, parse_int=None, parse_float=None):
57 |     """Generate a list from a string"""
58 |     return json.loads(s, parse_int=parse_int, parse_float=parse_float)
59 | 
60 | 
61 | def all_in(l1, l2):
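# A quick sanity illustration of the str_join / list_to_str / str_to_list
# helpers above (values invented for illustration):
#
#     >>> str_join([1, "a", 2.5], sep="\t", line_end="\n")
#     '1\ta\t2.5\n'
#     >>> list_to_str(["chr1", 100, 200])
#     '["chr1",100,200]'
#     >>> str_to_list('["chr1",100,200]')
#     ['chr1', 100, 200]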
62 | """Check if all element in l1 are in l2""" 63 | s1 = set(l1) 64 | s2 = set(l2) 65 | if s1.difference(s2): 66 | return False 67 | else: 68 | return True 69 | 70 | 71 | def file_readable(fn, **kwargs): 72 | """Check if the file is readable""" 73 | return os.path.isfile(fn) and os.access(fn, os.R_OK) 74 | 75 | 76 | def dir_writable(fn, **kwargs): 77 | """Check if the file is readable""" 78 | if not os.path.isdir(fn): 79 | fn = os.path.dirname(fn) 80 | return os.path.dirname(fn) and os.access(fn, os.W_OK) 81 | 82 | 83 | def mkdir(fn, exist_ok=False): 84 | """ Create directory recursivelly. Raise IO error if path exist or if error at creation """ 85 | try: 86 | os.makedirs(fn, exist_ok=exist_ok) 87 | except: 88 | raise pycoMethError("Error creating output folder `{}`".format(fn)) 89 | 90 | 91 | def mkbasedir(fn, exist_ok=False): 92 | """ Create directory for a given file recursivelly. Raise IO error if path exist or if error at creation """ 93 | dir_fn = os.path.dirname(fn) 94 | if dir_fn: 95 | mkdir(dir_fn, exist_ok=True) 96 | 97 | 98 | def dict_to_str(d, sep="\t", nsep=0, exclude_list=[]): 99 | """ Transform a multilevel dict to a tabulated str """ 100 | m = "" 101 | 102 | if isinstance(d, Counter): 103 | for i, j in d.most_common(): 104 | if not i in exclude_list: 105 | m += "{}{}: {:,}\n".format(sep * nsep, i, j) 106 | 107 | else: 108 | for i, j in d.items(): 109 | if not i in exclude_list: 110 | if isinstance(j, dict): 111 | j = dict_to_str(j, sep=sep, nsep=nsep + 1) 112 | m += "{}{}\n{}".format(sep * nsep, i, j) 113 | else: 114 | m += "{}{}: {}\n".format(sep * nsep, i, j) 115 | if not m: 116 | return "" 117 | else: 118 | return m[:-1] 119 | 120 | 121 | def iter_idx_tuples(df): 122 | for idx, line in zip(df.index, df.itertuples(index=False, name="line")): 123 | yield (idx, line) 124 | 125 | 126 | def doc_func(func): 127 | """Parse the function description string""" 128 | 129 | if inspect.isclass(func): 130 | func = func.__init__ 131 | 132 | docstr_list = [] 133 | for l in inspect.getdoc(func).split("\n"): 134 | l = l.strip() 135 | if l: 136 | if l.startswith("*"): 137 | break 138 | else: 139 | docstr_list.append(l) 140 | 141 | return " ".join(docstr_list) 142 | 143 | 144 | def make_arg_dict(func): 145 | """Parse the arguments default value, type and doc""" 146 | 147 | # Init method for classes 148 | if inspect.isclass(func): 149 | func = func.__init__ 150 | 151 | if inspect.isfunction(func) or inspect.ismethod(func): 152 | # Parse arguments default values and annotations 153 | d = OrderedDict() 154 | for name, p in inspect.signature(func).parameters.items(): 155 | if not p.name in ["self", "cls"]: # Object stuff. 
Does not make sense to include in doc 156 | d[name] = OrderedDict() 157 | if not name in ["kwargs", "args"]: # Include but skip default required and type 158 | # Get Annotation 159 | if p.annotation != inspect._empty: 160 | d[name]["type"] = p.annotation 161 | # Get default value if available 162 | if p.default == inspect._empty: 163 | d[name]["required"] = True 164 | else: 165 | d[name]["default"] = p.default 166 | 167 | # Parse the docstring in a dict 168 | docstr_dict = OrderedDict() 169 | lab = None 170 | for l in inspect.getdoc(func).split("\n"): 171 | l = l.strip() 172 | if l: 173 | if l.startswith("*"): 174 | lab = l[1:].strip() 175 | docstr_dict[lab] = [] 176 | elif lab: 177 | docstr_dict[lab].append(l) 178 | 179 | # Concatenate and copy doc in main dict 180 | for name in d.keys(): 181 | if name in docstr_dict: 182 | d[name]["help"] = " ".join(docstr_dict[name]) 183 | return d 184 | 185 | 186 | def arg_from_docstr(parser, func, arg_name, short_name=None): 187 | """Get options corresponding to argument name from docstring and deal with special cases""" 188 | 189 | if short_name: 190 | arg_names = ["-{}".format(short_name), "--{}".format(arg_name)] 191 | else: 192 | arg_names = ["--{}".format(arg_name)] 193 | 194 | arg_dict = make_arg_dict(func)[arg_name] 195 | if "help" in arg_dict: 196 | if "default" in arg_dict: 197 | if arg_dict["default"] == "" or arg_dict["default"] == []: 198 | arg_dict["help"] += " (default: None)" 199 | else: 200 | arg_dict["help"] += " (default: %(default)s)" 201 | else: 202 | arg_dict["help"] += " (required)" 203 | 204 | if "type" in arg_dict: 205 | arg_dict["help"] += " [%(type)s]" 206 | 207 | # Special case for boolean args 208 | if arg_dict["type"] == bool: 209 | if arg_dict["default"] == False: 210 | arg_dict["action"] = "store_true" 211 | del arg_dict["type"] 212 | elif arg_dict["default"] == True: 213 | arg_dict["action"] = "store_false" 214 | del arg_dict["type"] 215 | 216 | # Special case for lists args 217 | elif isinstance(arg_dict["type"], list): 218 | arg_dict["nargs"] = "*" 219 | arg_dict["type"] = arg_dict["type"][0] 220 | 221 | parser.add_argument(*arg_names, **arg_dict) 222 | 223 | 224 | def jhelp(f: "python function or method"): 225 | """ 226 | Display a Markdown pretty help message for functions and class methods (default __init__ is a class is passed) 227 | jhelp also display default values and type annotations if available. 
228 | The docstring synthax should follow the same synthax as the one used for this function 229 | * f 230 | Function or method to display the help message for 231 | """ 232 | # Private import as this is only needed if using jupyter 233 | from IPython.core.display import display, Markdown 234 | 235 | f_doc = doc_func(f) 236 | arg_doc = make_arg_dict(f) 237 | 238 | # Signature and function documentation 239 | s = "**{}** ({})\n\n{}\n\n---\n\n".format(f.__name__, ", ".join(arg_doc.keys()), f_doc) 240 | 241 | # Args doc 242 | for arg_name, arg_val in arg_doc.items(): 243 | # Arg signature section 244 | s += "* **{}**".format(arg_name) 245 | if "default" in arg_val: 246 | if arg_val["default"] == "": 247 | s += ' (default: "")'.format(arg_val["default"]) 248 | else: 249 | s += " (default: {})".format(arg_val["default"]) 250 | if "required" in arg_val: 251 | s += " (required)" 252 | if "type" in arg_val: 253 | if isinstance(arg_val["type"], type): 254 | s += " [{}]".format(arg_val["type"].__name__) 255 | elif isinstance(arg_val["type"], list): 256 | s += " [list({})]".format(arg_val["type"][0].__name__) 257 | else: 258 | s += " [{}]".format(arg_val["type"]) 259 | s += "\n\n" 260 | # Arg doc section 261 | if "help" in arg_val: 262 | s += "{}\n\n".format(arg_val["help"]) 263 | 264 | # Display in Jupyter 265 | display(Markdown(s)) 266 | 267 | 268 | def head(fp, n=10, sep="\t", max_char_col=50, comment=None): 269 | """ 270 | Emulate linux head cmd. Handle gziped files and bam files 271 | * fp 272 | Path to the file to be parse. 273 | * n 274 | Number of lines to print starting from the begining of the file (Default 10) 275 | """ 276 | line_list = [] 277 | 278 | # Get lines 279 | try: 280 | open_fun, open_mode = (gzip.open, "rt") if fp.endswith(".gz") else (open, "r") 281 | with open_fun(fp, open_mode) as fh: 282 | line_num = 0 283 | while line_num < n: 284 | l = next(fh).strip() 285 | if comment and l.startswith(comment): 286 | continue 287 | if sep: 288 | line_list.append(l.split(sep)) 289 | else: 290 | line_list.append(l) 291 | line_num += 1 292 | 293 | except StopIteration: 294 | pass 295 | 296 | # Add padding if sep given 297 | if sep: 298 | try: 299 | # Find longest elem per col 300 | col_len_list = [0 for _ in range(len(line_list[0]))] 301 | for ls in line_list: 302 | for i in range(len(ls)): 303 | len_col = len(ls[i]) 304 | if len_col > max_char_col: 305 | col_len_list[i] = max_char_col 306 | elif len_col > col_len_list[i]: 307 | col_len_list[i] = len_col 308 | 309 | # Add padding 310 | line_list_tab = [] 311 | for ls in line_list: 312 | s = "" 313 | for i in range(len(ls)): 314 | len_col = col_len_list[i] 315 | len_cur_col = len(ls[i]) 316 | if len_cur_col <= len_col: 317 | s += ls[i] + " " * (len_col - len_cur_col) + " " 318 | else: 319 | s += ls[i][0 : len_col - 3] + "..." 320 | line_list_tab.append(s) 321 | line_list = line_list_tab 322 | 323 | # Fall back to non tabulated display 324 | except IndexError: 325 | return head(fp=fp, n=n, sep=None) 326 | 327 | for l in line_list: 328 | print(l) 329 | print() 330 | 331 | 332 | def stdout_print(*args): 333 | """ 334 | Emulate print but uses sys stdout instead. 
It could sometimes be useful in specific situations where print
335 |     is not behaving optimally (like with tqdm, for example)
336 |     """
337 |     s = " ".join([str(i) for i in args])
338 |     sys.stdout.write(s)
339 |     sys.stdout.flush()
340 | 
341 | 
342 | def get_logger(name=None, verbose=False, quiet=False):
343 |     """Multilevel colored log using colorlog"""
344 | 
345 |     # Define conditional color formatter
346 |     formatter = colorlog.LevelFormatter(
347 |         fmt={
348 |             "DEBUG": "%(log_color)s\t[DEBUG]: %(msg)s",
349 |             "INFO": "%(log_color)s\t%(msg)s",
350 |             "WARNING": "%(log_color)s## %(msg)s ##",
351 |             "ERROR": "%(log_color)sERROR: %(msg)s",
352 |             "CRITICAL": "%(log_color)sCRITICAL: %(msg)s",
353 |         },
354 |         log_colors={
355 |             "DEBUG": "white",
356 |             "INFO": "green",
357 |             "WARNING": "bold_blue",
358 |             "ERROR": "bold_red",
359 |             "CRITICAL": "bold_purple",
360 |         },
361 |         reset=True,
362 |     )
363 | 
364 |     # Define logger with custom formatter
365 |     logging.basicConfig(format="%(message)s")
366 |     logging.getLogger().handlers[0].setFormatter(formatter)
367 |     log = logging.getLogger(name)
368 | 
369 |     # Define logging level depending on verbosity
370 |     if verbose:
371 |         log.setLevel(logging.DEBUG)
372 |     elif quiet:
373 |         log.setLevel(logging.WARNING)
374 |     else:
375 |         log.setLevel(logging.INFO)
376 | 
377 |     return log
378 | 
379 | 
380 | def log_dict(d, logger, header="", indent="\t", level=1):
381 |     """Log a multilevel dict"""
382 |     if header:
383 |         logger(header)
384 |     if isinstance(d, Counter):
385 |         for i, j in d.most_common():
386 |             logger("{}{}: {:,}".format(indent * level, i, j))
387 |     else:
388 |         for i, j in d.items():
389 |             if isinstance(j, dict):
390 |                 logger("{}{}".format(indent * level, i))
391 |                 log_dict(j, logger, level=level + 1)
392 |             else:
393 |                 logger("{}{}: {}".format(indent * level, i, j))
394 | 
395 | 
396 | def log_list(l, logger, header="", indent="\t"):
397 |     """Log a list"""
398 |     if header:
399 |         logger(header)
400 |     for i in l:
401 |         logger("{}*{}".format(indent, i))
402 | 
403 | 
404 | class Kaleido:
405 |     def __init__(self):
406 |         # Init scopes
407 |         if not STATIC_EXPORT:
408 |             raise ImportError("Static export is not possible due to missing dependencies")
409 |         self.plotly_scope = PlotlyScope()
410 | 
411 |     def render_plotly_svg(self, fig, width=None, height=None):
412 |         """
413 |         Function to render a plotly figure in SVG inside jupyter
414 |         """
415 |         if STATIC_EXPORT:
416 |             svg_fig = self.plotly_scope.transform(fig, format="svg", width=width, height=height)
417 |             return SVG(svg_fig)
418 | 
419 |     def export_plotly_svg(self, fig, fn, width=None, height=None):
420 |         """
421 |         Function to export a plotly figure to SVG
422 |         """
423 |         if STATIC_EXPORT:
424 |             svg_fig = self.plotly_scope.transform(fig, format="svg", width=width, height=height)
425 |             with open(fn, mode="wb") as fp:
426 |                 fp.write(svg_fig)
427 | 
428 | 
429 | # ~~~~~~~~~~~~~~CUSTOM EXCEPTION AND WARN CLASSES~~~~~~~~~~~~~~#
430 | class pycoMethError(Exception):
431 |     """Basic exception class for pycoMeth package"""
432 | 
433 |     pass
434 | 
435 | -------------------------------------------------------------------------------- /pycoMeth/loader.py: --------------------------------------------------------------------------------
1 | import logging
2 | 
3 | from meth5 import MetH5File
4 | 
5 | from typing import List, Tuple, Dict
6 | from pycoMeth.CoordGen import Coord
7 | from pycoMeth.common import pycoMethError
8 | 
9 | 
10 | def read_sample_ids_from_read_groups(h5_file_list, read_group_key, labels=None):
11 |     rg_dict = {}
12 |     for fn in h5_file_list:
13 |         with
MetH5File(fn, "r") as f: 13 | f_rg_dict = f.get_all_read_groups(read_group_key) 14 | for k, v in f_rg_dict.items(): 15 | if k in rg_dict and rg_dict[k] != v: 16 | raise pycoMethError("Read groups in meth5 files must have the same encoding") 17 | rg_dict.update(f_rg_dict) 18 | if labels is not None: 19 | return [k for k, v in rg_dict.items() if v in labels] 20 | else: 21 | return [k for k in rg_dict] 22 | 23 | 24 | class MetH5Loader: 25 | def __init__(self, h5_read_groups_key: str, sample_id_list: List, h5_file_list: List): 26 | self.h5_read_groups_key = h5_read_groups_key 27 | self.sample_hf_files: Dict[str, MetH5File] = {} 28 | self.llr_threshold = 2.0 # TODO expose parameter 29 | 30 | if h5_read_groups_key is None: 31 | for sample_id, h5_file in zip(sample_id_list, h5_file_list): 32 | hf = MetH5File(h5_file, "r") 33 | self.sample_hf_files[sample_id] = hf 34 | else: 35 | hf = MetH5File(h5_file_list[0], "r") 36 | for sample_id in sample_id_list: 37 | self.sample_hf_files[sample_id] = hf 38 | 39 | def __del__(self): 40 | for hf in self.sample_hf_files.values(): 41 | try: 42 | hf.close() 43 | except: 44 | pass 45 | 46 | def read_raw_llrs(self, interval: Coord) -> Tuple[List, List, List, List]: 47 | sample_llrs = {} 48 | sample_pos = {} 49 | sample_reads = {} 50 | for sample_id, hf in self.sample_hf_files.items(): 51 | chrom_container = hf[interval.chr_name] 52 | 53 | if chrom_container is None: 54 | continue 55 | 56 | interval_container = chrom_container.get_values_in_range(interval.start, interval.end) 57 | 58 | if interval_container is None: 59 | continue 60 | llrs = interval_container.get_llrs()[:] 61 | pos = interval_container.get_ranges()[:, 0] 62 | read_names = interval_container.get_read_ids()[:] 63 | 64 | if self.h5_read_groups_key is not None: 65 | read_samples = interval_container.get_read_groups(self.h5_read_groups_key) 66 | mask = [rs == sample_id for rs in read_samples] 67 | llrs = llrs[mask] 68 | pos = pos[mask] 69 | read_names = read_names[mask] 70 | 71 | sample_llrs[sample_id] = llrs.tolist() 72 | sample_pos[sample_id] = pos.tolist() 73 | sample_reads[sample_id] = read_names.tolist() 74 | 75 | # Remove samples for which there is no data 76 | label_list = list([k for k in sample_llrs.keys() if len(sample_llrs[k]) > 0]) 77 | raw_llr_list = [sample_llrs[k] for k in label_list] 78 | raw_pos_list = [sample_pos[k] for k in label_list] 79 | raw_read_list = [sample_reads[k] for k in label_list] 80 | return label_list, raw_llr_list, raw_pos_list, raw_read_list 81 | 82 | @staticmethod 83 | def interpret_sample_ids_from_arguments(sample_id_list, read_groups_key, h5_file_list): 84 | if sample_id_list is None: 85 | if read_groups_key is None: 86 | # If no sample id list is provided and no read group key is set 87 | # automatically define tests and maximal missing samples depending on number of files to compare 88 | return list(range(len(h5_file_list))) 89 | else: 90 | return read_sample_ids_from_read_groups(h5_file_list, read_groups_key) 91 | elif read_groups_key is not None: 92 | # H5 file stores groups as int 93 | return read_sample_ids_from_read_groups(h5_file_list, read_groups_key, labels=sample_id_list) 94 | else: 95 | return sample_id_list 96 | -------------------------------------------------------------------------------- /pycoMeth/meth_seg/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PMBio/pycoMeth/8e81e0799a365ae65f42bafa28121218b1f8f1c3/pycoMeth/meth_seg/__init__.py 
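As a usage illustration of the loader above — a minimal sketch, assuming one MetH5 file per sample and no read-group key; the file names are placeholders, and the FASTA-based CoordGen constructor is assumed from the CLI's -f option:

    from pycoMeth.CoordGen import CoordGen
    from pycoMeth.loader import MetH5Loader

    # Hypothetical inputs: two single-sample MetH5 files
    loader = MetH5Loader(
        h5_read_groups_key=None,
        sample_id_list=["tumor", "normal"],
        h5_file_list=["tumor.h5", "normal.h5"],
    )
    coordgen = CoordGen("reference.fa")          # assumed FASTA-based constructor
    interval = coordgen("chr1", 100000, 101000)  # Coord for a 1 kb window

    labels, llr_lists, pos_lists, read_lists = loader.read_raw_llrs(interval)
    for label, llrs in zip(labels, llr_lists):
        print(label, len(llrs), "methylation calls in interval")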
-------------------------------------------------------------------------------- /pycoMeth/meth_seg/emissions.py: --------------------------------------------------------------------------------
1 | import math
2 | from abc import ABC, abstractmethod
3 | from typing import Dict
4 | 
5 | import numpy as np
6 | import scipy.stats
7 | 
8 | class EmissionLikelihoodFunction(ABC):
9 |     """
10 |     Abstract class defining the interface for emission likelihood functions.
11 |     The parameter types are not pinned down here (only that they are stored as a Dict), so that
12 |     a concrete implementation of the likelihood function may define its own
13 |     domain of parameters, as long as the update_params function takes the same structure
14 |     of parameters as the get_params and get_cluster_params methods return
15 |     """
16 | 
17 |     @abstractmethod
18 |     def update_params(self, params: Dict):
19 |         """
20 |         :param params: Dictionary where the key is the cluster id and the value contains
21 |             the parameter(s)
22 |         :type params: Dict
23 |         :return:
24 |         """
25 |         pass
26 | 
27 |     @abstractmethod
28 |     def likelihood(self, segment_index: int, observations: np.array):
29 |         pass
30 | 
31 |     @abstractmethod
32 |     def get_cluster_params(self, cluster: int):
33 |         pass
34 | 
35 |     @abstractmethod
36 |     def minimization_objective(self, observations: np.array, posterior: np.array):
37 |         pass
38 | 
39 |     @abstractmethod
40 |     def get_param_bounds(self):
41 |         pass
42 | 
43 | 
44 | class BernoulliPosterior(EmissionLikelihoodFunction):
45 |     """
46 |     This class models a Bernoulli likelihood with uncertain observations.
47 |     Observations are provided as probabilities p(a|S), and the likelihood
48 |     is parameterized with Bernoulli likelihoods p(a|mu) for each segment.
49 |     Optionally, a symmetric beta prior p(mu) can be defined as well.
50 |     The likelihood is modeled as:
51 |     L = (1-p(a|S)) * (1-mu) / (1-p(a)) + p(a|S) * mu / p(a)
52 |     """
53 | 
54 |     def __init__(
55 |         self,
56 |         number_of_clusters,
57 |         number_of_segments,
58 |         prior_a: float = None,
59 |         eps=np.exp(-512),
60 |         initial_segment_p: np.array = None,
61 |     ):
62 |         # Just for type hinting and to get rid of warnings
63 |         self.segment_p: np.array = None
64 |         self.segment_prior: np.array = None
65 |         self.prior_lognormfactor: float = 0
66 | 
67 |         self.eps: float = eps
68 |         self.prior_a: float = prior_a
69 | 
70 |         if initial_segment_p is None:
71 |             self.segment_p = np.zeros((number_of_clusters, number_of_segments))
72 |             self.segment_p[:, ::2] = np.log(1 / 5)
73 |             self.segment_p[:, 1::2] = np.log(4 / 5)
74 |         else:
75 |             if not (
76 |                 initial_segment_p.shape[0] == number_of_clusters and initial_segment_p.shape[1] == number_of_segments
77 |             ):
78 |                 raise ValueError("Initial parameters must be of shape (number_of_clusters, number_of_segments)")
79 |             self.segment_p = initial_segment_p.copy()
80 | 
81 |         if self.prior_a is not None:
82 |             # Precompute the normfactor (in log space) of the prior beta
83 |             # distribution Beta(a, a): log(gamma(2a) / gamma(a)^2)
84 |             self.prior_lognormfactor = np.log(math.gamma(2 * prior_a) / (math.gamma(prior_a) ** 2))
85 |             self.prior_pdf = scipy.stats.beta(self.prior_a, self.prior_a).pdf
86 |         else:
87 |             self.prior_pdf = None
88 | 
89 |     def update_prior(self):
90 |         """
91 |         Updates the prior beta distribution (in log space) based on
92 |         self.segment_p and self.prior_a
93 |         """
94 |         self.segment_prior = self.segment_p * (self.prior_a - 1)
95 | 
96 |         self.segment_prior += np.log(1 - np.exp(self.segment_p) + self.eps) * (self.prior_a - 1)
97 |         self.segment_prior += self.prior_lognormfactor
98 | 
99 |     def update_params(self, segment_p_list:
Dict[int, np.ndarray]): 100 | """ 101 | Updates bernoulli parameters 102 | :param segment_p_list: bernoulli parameters. A list with one numpy array 103 | per cluster 104 | :return: difference between old and new parameters to assess convergence 105 | """ 106 | maxdiff = 0 107 | for sample in segment_p_list.keys(): 108 | diff = np.max(np.abs(np.exp(self.segment_p[sample, :]) - np.exp(segment_p_list[sample]))) 109 | maxdiff = max(diff, maxdiff) 110 | self.segment_p[sample, :] = segment_p_list[sample] 111 | 112 | if self.prior_a is not None: 113 | self.update_prior() 114 | return maxdiff 115 | 116 | def likelihood(self, segment_index: int, observations: np.array, observations_cluster_assignment: np.array): 117 | p = self.segment_p[observations_cluster_assignment, :] 118 | idx = observations != -1 119 | 120 | ret_a = np.log(-np.expm1(p[idx, segment_index]) + self.eps) 121 | ret_a += np.log1p(-observations[idx]) + np.log(0.5) 122 | 123 | ret_b = p[idx, segment_index] 124 | ret_b += np.log(observations[idx] + self.eps) + np.log(0.5) 125 | 126 | ret = np.logaddexp(ret_a, ret_b) 127 | if self.segment_prior is not None: 128 | ret += self.segment_prior[observations_cluster_assignment, :][idx, segment_index] 129 | return ret.sum() 130 | 131 | def minimization_objective(self, observations: np.array, posterior_exp: np.array): 132 | """ 133 | Returns a curried function that only takes the candidate parameters 134 | mu and returns a minimization object (in this case the total 135 | likelihood p(S|mu',psi) given S and posteriors p(psi|mu)) 136 | :param observations: observations numpy array 137 | :param posterior_exp: posterior of segmentation as estimated by the hmm 138 | - must be in linear space (not log space) 139 | :return: a function that takes parameters mu and returns likelihood 140 | """ 141 | 142 | def curried_objective(x): 143 | # x is in log-space but these computations are easier in 144 | # lin-space 145 | # since there are a lot of additions going on 146 | # x = np.exp(x) 147 | ls = np.zeros(self.segment_p.shape[1]) 148 | ps = np.zeros(self.segment_p.shape[1]) 149 | for r in range(observations.shape[0]): 150 | o = observations[r, :] 151 | idx = o != -1 152 | o = o[idx] 153 | pki = posterior_exp[idx, :] 154 | 155 | l_a = np.outer((1 - o), (1 - x)) / 2 156 | l_b = np.outer(o, x) / 2 157 | l = np.log(l_a + l_b + self.eps) 158 | ls += (l * pki).sum(axis=0) 159 | ps += pki.sum(axis=0) 160 | 161 | ret = ls / (ps + self.eps) 162 | if self.prior_pdf is not None: 163 | prior = np.array([self.prior_pdf(xi) for xi in x]) 164 | ret = ret * prior 165 | 166 | ret = ret.sum() 167 | 168 | return -ret 169 | 170 | return curried_objective 171 | 172 | def get_cluster_params(self, cluster): 173 | """ 174 | Returns parameters for optimization for one cluster 175 | :param cluster: cluster index 176 | :return: parameters in linear space 177 | """ 178 | # Clipping is needed due to a scipy bug 179 | return np.clip(np.exp(self.segment_p[cluster, :]), 0.02, 0.98) 180 | 181 | def get_params(self): 182 | # Clipping is needed due to a scipy bug 183 | return np.clip(np.exp(self.segment_p), 0.02, 0.98) 184 | 185 | def get_param_bounds(self): 186 | """ 187 | :return: parameters bounds for optimization (interval [0,1]) 188 | """ 189 | return [(0.01, 0.99)] * self.segment_p.shape[1] 190 | -------------------------------------------------------------------------------- /pycoMeth/meth_seg/hmm.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Dict 2 | 3 | import 
numpy as np 4 | import scipy.optimize 5 | 6 | from pycoMeth.meth_seg.emissions import EmissionLikelihoodFunction 7 | 8 | 9 | def arraylogexpsum(x): 10 | ret = x[0] 11 | for i in range(1, len(x)): 12 | ret = logaddexp(ret, x[i]) 13 | return ret # if ret > -256 else -512 14 | 15 | 16 | def logaddexp(a, b): 17 | ret = np.logaddexp(a, b) 18 | return ret 19 | 20 | 21 | class SegmentationHMM: 22 | def __init__( 23 | self, 24 | max_segments: int, 25 | t_stay: float, 26 | t_move: float, 27 | e_fn: EmissionLikelihoodFunction, 28 | seg_penalty: float = 0, 29 | eps: np.float64 = np.exp(-512), 30 | ): 31 | self.eps = eps 32 | self.e_fn = e_fn 33 | 34 | self.num_segments = max_segments 35 | 36 | if t_stay + t_move + seg_penalty > 1: 37 | raise ValueError("t_stay + t_move + seg_penalty may not exceed 1") 38 | 39 | self.seg_penalty = np.array([(seg_penalty * i / max_segments) for i in range(max_segments)], dtype=np.float64) 40 | self.t_move = np.array([t_move - self.seg_penalty[i] for i in range(max_segments)], dtype=np.float64) 41 | self.t_stay = np.array([t_stay for i in range(max_segments)], dtype=np.float64) 42 | self.t_end = np.array([1 - self.t_move[i] - self.t_stay[i] for i in range(max_segments)], dtype=np.float64) 43 | self.t_move = np.log(self.t_move + eps) 44 | self.t_stay = np.log(self.t_stay + eps) 45 | self.t_end = np.log(self.t_end + eps) 46 | 47 | def t_fn(self, i, j): 48 | if i == j: 49 | return self.t_stay[i] 50 | if i == (j - 1): 51 | # Probability to move to the next state 52 | return self.t_move[i] # + sim_penalty 53 | if j == (self.num_segments - 1): 54 | # Probability to go the last segment 55 | return self.t_end[i] 56 | 57 | raise RuntimeError("Transition %d to %d is not a valid transition in segmentation " "HMM " % (i, j)) 58 | 59 | def forward(self, observations, obs_c): 60 | e_fn = self.e_fn.likelihood 61 | M = self.num_segments 62 | R = observations.shape[0] 63 | N = observations.shape[1] 64 | F = np.zeros((N, M), dtype=np.float) + self.eps 65 | F[0, 0] = 1 - F[0, :].sum() - self.eps 66 | F = np.log(F) 67 | start_prob = np.zeros(M) + self.eps 68 | start_prob[0] = 1 69 | start_prob = np.log(start_prob) 70 | 71 | for k in range(N): 72 | o = observations[:, k] 73 | for i in range(M): 74 | e = e_fn(i, o, obs_c) 75 | 76 | if k == 0: 77 | F[k, i] = e + start_prob[i] 78 | continue 79 | 80 | # Stay probability 81 | F[k, i] = e + F[k - 1, i] + self.t_fn(i, i) 82 | 83 | # Move probabilty 84 | if i > 0: 85 | F[k, i] = logaddexp(F[k, i], e + F[k - 1, i - 1] + self.t_fn(i - 1, i)) 86 | 87 | # End probability 88 | if i == M - 1: 89 | # if end state we could have come from anywhere to the 90 | # end state: 91 | for j in range(M - 2): # exclude last 2 because those were already 92 | # handled above 93 | F[k, i] = logaddexp(F[k, i], e + F[k - 1, j] + self.t_fn(j, i)) 94 | evidence = F[-1, -1] 95 | return F, evidence 96 | 97 | def backward(self, observations, obs_c): 98 | e_fn = self.e_fn.likelihood 99 | R = observations.shape[0] 100 | M = self.num_segments 101 | N = observations.shape[1] 102 | B = np.zeros((N, M), dtype=np.float64) + self.eps 103 | B[-1, -1] = 1 104 | B = np.log(B) 105 | 106 | for k in range(N - 1, 0, -1): 107 | o = observations[:, k] 108 | k = k - 1 109 | for i in range(M): 110 | e_stay = e_fn(i, o, obs_c) 111 | 112 | if i == M - 1: 113 | # If i is end state, we can only stay 114 | B[k, i] = e_stay + B[k + 1, i] + self.t_fn(i, i) 115 | else: 116 | e_move = e_fn(i + 1, o, obs_c) 117 | # Move and stay probability 118 | B[k, i] = logaddexp( 119 | B[k + 1, i] + self.t_fn(i, i) + 
e_stay, B[k + 1, i + 1] + self.t_fn(i, i + 1) + e_move 120 | ) 121 | if i < M - 2: 122 | # End probability only if i 0: 159 | p[i - 1] = V[k - 1, i - 1] + self.t_fn(i - 1, i) 160 | 161 | if i == M - 1: 162 | # last two have been covered by stay and move 163 | for j in range(M - 2): 164 | p[j] = V[k - 1, j] + self.t_fn(j, i) 165 | p = e + p 166 | 167 | V[k, i] = np.max(p) 168 | P[k, i] = np.argmax(p) 169 | # Rescaling prevents underflow 170 | V[k, :] = V[k, :] - arraylogexpsum(V[k, :]) 171 | V[-1, :] = np.log(self.eps) 172 | V[-1, -1] = np.max(V[-2, :]) 173 | P[-1, -1] = np.argmax(V[-2, :]) 174 | X = np.zeros(N, dtype=np.int32) 175 | Z = np.zeros(N, dtype=np.float32) 176 | X[N - 1] = M - 1 177 | Z[N - 1] = 0 178 | 179 | for k in range(N - 2, -1, -1): 180 | X[k] = P[k + 1, X[k + 1]] 181 | Z[k] = V[k + 1, X[k + 1]] 182 | 183 | return X, Z 184 | 185 | def MAP(self, posterior): 186 | M = self.num_segments 187 | N = posterior.shape[0] 188 | 189 | V = np.zeros((N, M), dtype=np.float) + self.eps 190 | V[0, 0] = 1 191 | V = np.log(V) 192 | P = np.zeros((N, M), dtype=np.int32) 193 | 194 | start_prob = np.zeros(M) + self.eps 195 | start_prob[0] = 1 196 | start_prob = np.log(start_prob) 197 | 198 | for k in range(0, N - 1): 199 | for i in range(M): 200 | e = posterior[k, i] 201 | 202 | if k == 0: 203 | V[k, i] = np.max(e + start_prob[i]) 204 | continue 205 | 206 | p = np.zeros(M) - np.inf 207 | 208 | p[i] = V[k - 1, i] + self.t_fn(i, i) 209 | 210 | if i > 0: 211 | p[i - 1] = V[k - 1, i - 1] + self.t_fn(i - 1, i) 212 | 213 | if i == M - 1: 214 | for j in range(M - 2): # last two have been covered by stay and 215 | # move 216 | p[j] = V[k - 1, j] + self.t_fn(j, i) 217 | p = e + p 218 | 219 | V[k, i] = np.max(p) 220 | P[k, i] = np.argmax(p) 221 | # Rescaling prevents underflow 222 | V[k, :] = V[k, :] - arraylogexpsum(V[k, :]) 223 | V[-1, :] = np.log(self.eps) 224 | V[-1, -1] = np.max(V[-2, :]) 225 | P[-1, -1] = np.argmax(V[-2, :]) 226 | X = np.zeros(N, dtype=np.int32) 227 | Z = np.zeros(N, dtype=np.float32) 228 | X[N - 1] = M - 1 229 | Z[N - 1] = 0 230 | 231 | for k in range(N - 2, -1, -1): 232 | X[k] = P[k + 1, X[k + 1]] 233 | Z[k] = V[k + 1, X[k + 1]] 234 | 235 | return X, Z 236 | 237 | def baum_welch( 238 | self, 239 | observations: np.ndarray, 240 | tol: float = np.exp(-4), 241 | it_hook=None, 242 | samples: np.ndarray = None, 243 | verbose: bool = False, 244 | ) -> Tuple[Dict[int, np.ndarray], np.ndarray]: 245 | """ 246 | Run the baum_welch algorithm, an expectation maximization algorithm, 247 | to find a segmentation of the methylation signal. 248 | 249 | Note that this algorithm is rather memory expensive. It will take 250 | O(CM) memory where C is the number of samples and M the maximum number 251 | of segments. If no samples are provided, C is equal to the number of 252 | reads, meaning the memory requirement grows with the read coverage. 253 | 254 | :param observations: a numpy array of shape RxN, where R is the 255 | number of reads and N is the number of gennomic positions (or CpG 256 | sites). The values need to be in the range (0,1) and are methylation 257 | predictions for the individual CpG sites. In order to speed up 258 | computation, missing predictions can be labeled with the value -1. 259 | This should lead to the same result as setting it to the value 0.5, 260 | but reduces the number of computations required significantly. 261 | :param tol: The absolute maximum difference in a parameter value that 262 | determines convergence. 
If the difference is below tol, the algorithm 263 | aborts 264 | :param it_hook: A function hook that will be called after each 265 | iteration. Takes the same parameters as the return value of this 266 | function 267 | :param samples: A 1-dimensional numpy array of length R, assigns each 268 | read to a sample id. Sample ids must be integer, and must start from 269 | 0 and have no gaps. 270 | :return: tuple with estimated parameters and posteriors. Estimated 271 | paramater type depends on the given emission probability class. 272 | Posterior is of shape NxM and gives the posterior probability of each 273 | genomic site n being in each segment m 274 | """ 275 | # Initial guess of parameters 276 | R = observations.shape[0] 277 | N = observations.shape[1] 278 | 279 | if N < 2: 280 | raise ValueError("Observations must contain at least 2 CpG-sites") 281 | if any((observations != -1).sum(axis=0) == 0): 282 | raise ValueError("Observations must not include reads with zero observations") 283 | if any((observations != -1).sum(axis=1) == 0): 284 | raise ValueError("Observations must not include sites with zero observations") 285 | 286 | if samples is None: 287 | # No samples, then we use the identity 288 | self.obs_c = np.arange(R) 289 | else: 290 | self.obs_c = samples 291 | 292 | C = len(set(self.obs_c)) 293 | 294 | for it in range(100): 295 | F, f_evidence = self.forward(observations, self.obs_c) 296 | B, b_evidence = self.backward(observations, self.obs_c) 297 | # Sanity check: fwd and bwd algorithm should return same evidence 298 | if np.abs(f_evidence - b_evidence) > 10e-6: 299 | print("WARNING: forward evidence %f does not equal backward " "evidence %f." % (f_evidence, b_evidence)) 300 | 301 | posterior = F + B - b_evidence 302 | 303 | # Maximize 304 | segment_p_new = {} 305 | 306 | for c in range(C): 307 | old_params = self.e_fn.get_cluster_params(c) 308 | to_minimize = self.e_fn.minimization_objective(observations[self.obs_c == c], np.exp(posterior)) 309 | bounds = self.e_fn.get_param_bounds() 310 | 311 | estimated_p = scipy.optimize.minimize(to_minimize, old_params, method="SLSQP", bounds=bounds).x 312 | segment_p_new[c] = np.log(estimated_p) 313 | 314 | diff = self.e_fn.update_params(segment_p_new) 315 | 316 | segment_p = segment_p_new 317 | 318 | if it_hook is not None: 319 | it_hook(segment_p, posterior) 320 | 321 | if verbose: 322 | print("Iteration %d, parameter difference: %f" % (it, diff)) 323 | if diff < tol: 324 | break 325 | 326 | return segment_p, posterior 327 | -------------------------------------------------------------------------------- /pycoMeth/meth_seg/math.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.stats import rankdata 3 | import scipy 4 | 5 | from typing import Tuple 6 | 7 | 8 | def llr_to_p(llr, prior=0.5): 9 | """ 10 | Convert log-likelihood ratios log(p(x|a)/p(x|~a)) to posterior 11 | probabilty p(a|x) given a prior p(a). 
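    With the default prior of 0.5 this is simply the logistic function, e.g.
    (values rounded):

        >>> llr_to_p(0.0)
        0.5
        >>> round(llr_to_p(2.0), 3)
        0.881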
For unbiased prediction, 12 | leave prior at 0.5 13 | """ 14 | return 1 / (1 + np.exp(-llr) * (1 / prior - 1)) 15 | 16 | 17 | def p_to_llr(p, prior=0.5): 18 | """ 19 | Converts the posterior probability p(a|x) into a log-likelihood ratio 20 | log(p(x|a)/p(x|~a)) given a prior pa(a) 21 | """ 22 | return -np.log(prior * (1 - p) / (p * (1 - prior))) 23 | 24 | 25 | def llr_to_uncertainty(llr, method="linear"): 26 | if method == "linear": 27 | p = llr_to_p(llr) 28 | return 0.5 - np.abs(0.5 - p) 29 | 30 | 31 | def fdr_from_pvals(p_vals: np.ndarray) -> np.ndarray: 32 | """ 33 | Computes FDR from p-values using the Benjamini-Hochberg method. 34 | :param p_vals: numpy array of p-values 35 | :return: numpy array of adjusted p-values 36 | """ 37 | ranked_p_values = rankdata(p_vals) 38 | fdr = p_vals * len(p_vals) / ranked_p_values 39 | fdr[fdr > 1] = 1 40 | 41 | return fdr 42 | 43 | 44 | def bs_from_llrs(llrs: np.ndarray, thres: float = 1, min_reads: int = 1) -> float: 45 | """ 46 | Computes methylation beta score from a list of log-likelihood ratios 47 | :param llrs: Log-likelihood ratio array 48 | :param thres: threshold for absolute llr - excluding all llrs with an absolute llr lower than this threshold 49 | (default: 1.0) 50 | :param min_reads: return np.nan if length of llrs after threshold filtering is less than min_reads (default: 1) 51 | :return: methylation beta score 52 | """ 53 | llrs_used = llrs[np.abs(llrs) > thres] 54 | if len(llrs_used) < min_reads: 55 | return np.nan 56 | return (llrs_used > 0).sum() / len(llrs_used) 57 | 58 | def __ensure_numpy(x) -> np.ndarray: 59 | if not isinstance(x, np.ndarray): 60 | x = np.array(x) 61 | return x 62 | 63 | def nangmean(x: np.ndarray) -> float: 64 | """ Computes geometric mean while ignoring NaNs """ 65 | x = __ensure_numpy(x) 66 | x = x[~np.isnan(x)] 67 | return scipy.stats.gmean(x) 68 | 69 | def maxabs(x: np.ndarray) -> float: 70 | x = __ensure_numpy(x) 71 | """ Returns the value with the maximum magnitude """ 72 | return x[np.unravel_index(np.argmax(np.abs(x)), x.shape)] 73 | 74 | def compute_differential_methylation( 75 | llrs_a: np.ndarray, llrs_b: np.ndarray 76 | ) -> Tuple: 77 | 78 | # Paired test 79 | a_nan = llrs_a.copy() 80 | a_nan[a_nan == 0] = np.nan 81 | b_nan = llrs_b.copy() 82 | b_nan[b_nan == 0] = np.nan 83 | 84 | # Filtering sites for which both haplotypes have at least one read, 85 | # in order to avoid warnings 86 | good_sites = ((~np.isnan(a_nan)).sum(axis=0) > 0) & ( 87 | (~np.isnan(b_nan)).sum(axis=0) > 0 88 | ) 89 | a_nan = a_nan[:, good_sites] 90 | b_nan = b_nan[:, good_sites] 91 | 92 | pp = scipy.stats.ttest_rel(np.nanmean(a_nan, axis=0), np.nanmean(b_nan, axis=0)) 93 | 94 | # Unpaired test 95 | up = scipy.stats.mannwhitneyu(llrs_a[llrs_a != 0], llrs_b[llrs_b != 0]) 96 | 97 | if np.isnan(up[0]): 98 | # Workaround because ttest_ind returns (nan, nan) if it fails 99 | return None, None 100 | 101 | return up[1], pp[1] -------------------------------------------------------------------------------- /pycoMeth/meth_seg/postprocessing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def cleanup_segmentation( 5 | segment_p: np.ndarray, segments: np.ndarray, min_length: int = 5, min_parameter_diff=0.1 6 | ) -> np.ndarray: 7 | """ 8 | Cleans up a segmentation by merging segments that are too close or too 9 | similar in their parameter space. 10 | :param segment_p: segment parameters. 
--------------------------------------------------------------------------------
/pycoMeth/meth_seg/postprocessing.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def cleanup_segmentation(
5 |     segment_p: np.ndarray, segments: np.ndarray, min_length: int = 5, min_parameter_diff=0.1
6 | ) -> np.ndarray:
7 |     """
8 |     Cleans up a segmentation by merging segments that are too short or too
9 |     similar to their neighbor in parameter space.
10 |     :param segment_p: segment parameters. Shape (C,M) where C is the number
11 |     of clusters and M is the number of segments
12 |     :param segments: segmentation as returned from the viterbi or MAP algorithm.
13 |     Shape is (N) where N is the number of genomic indices (i.e. CpG sites).
14 |     :param min_length: minimum number of genomic loci per segment
15 |     :param min_parameter_diff: minimum difference in parameter values between
16 |     neighboring segments
17 |     :return: a new segmentation of the same shape as segments
18 |     """
19 |
20 |     segment_p = np.exp(segment_p)
21 |     new_segments = segments.copy()
22 |     for segment in sorted(list(set(segments))):
23 |         if len(set(new_segments)) <= 1:
24 |             # No need to go on if it's all just one segment
25 |             break
26 |         length = (new_segments == segment).sum()
27 |         if segment == new_segments[-1]:
28 |             candidate_replace = new_segments[new_segments != segment][-1]
29 |         else:
30 |             candidate_replace = segment + 1
31 |         absdif = np.abs(segment_p[:, segment] - segment_p[:, candidate_replace]).max()
32 |         if length < min_length or absdif < min_parameter_diff:
33 |             new_segments[new_segments == segment] = candidate_replace
34 |             segment_p[:, segment] = np.mean([segment_p[:, segment], segment_p[:, candidate_replace]], axis=0)  # per-cluster mean of the merged segments
35 |             segment_p[:, candidate_replace] = segment_p[:, segment]
36 |     return np.array(new_segments)
37 |
--------------------------------------------------------------------------------
/pycoMeth/meth_seg/segment.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from meth5.sparse_matrix import SparseMethylationMatrixContainer
3 |
4 | from pycoMeth.meth_seg.math import llr_to_p
5 | from pycoMeth.meth_seg.emissions import BernoulliPosterior
6 | from pycoMeth.meth_seg.hmm import SegmentationHMM
7 | from pycoMeth.meth_seg.postprocessing import cleanup_segmentation
8 |
9 |
10 | def segment(sparse_matrix: SparseMethylationMatrixContainer, max_segments_per_window: int) -> np.ndarray:
11 |     llrs = np.array(sparse_matrix.met_matrix.todense())
12 |     obs = llr_to_p(llrs)
13 |     samples = sparse_matrix.read_samples
14 |
15 |     unique_samples = list(set(samples))
16 |
17 |     id_sample_dict = {i: s for i, s in enumerate(unique_samples)}
18 |     sample_id_dict = {v: k for k, v in id_sample_dict.items()}
19 |
20 |     sample_ids = np.array([sample_id_dict[s] for s in samples])
21 |
22 |     emission_lik = BernoulliPosterior(len(unique_samples), max_segments_per_window, prior_a=None)
23 |     hmm = SegmentationHMM(
24 |         max_segments=max_segments_per_window, t_stay=0.1, t_move=0.8, e_fn=emission_lik, eps=np.exp(-512)
25 |     )
26 |     segment_p, posterior = hmm.baum_welch(obs, tol=np.exp(-8), samples=sample_ids)
27 |
28 |     segmentation, _ = hmm.MAP(posterior)
29 |
30 |     segment_p_array = np.concatenate([v[np.newaxis, :] for v in segment_p.values()], axis=0)
31 |     segmentation = cleanup_segmentation(segment_p_array, segmentation, min_parameter_diff=0.2)
32 |
33 |     return segmentation
34 |
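A toy trace of the cleanup step (hypothetical values): with one cluster and three segments, the two-site segment 1 is shorter than min_length and gets merged into its right neighbor:

    import numpy as np
    from pycoMeth.meth_seg.postprocessing import cleanup_segmentation

    segments = np.array([0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2])
    segment_p = np.log(np.array([[0.1, 0.5, 0.9]]))  # shape (C=1, M=3), log-space
    cleanup_segmentation(segment_p, segments, min_length=5, min_parameter_diff=0.1)
    # -> array([0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2])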
--------------------------------------------------------------------------------
/pycoMeth/meth_seg/segments_csv_io.py:
--------------------------------------------------------------------------------
1 | from typing import IO
2 | from io import StringIO
3 |
4 | import numpy as np
5 | import pandas as pd
6 |
7 | from pycoMeth.meth_seg.math import bs_from_llrs, compute_differential_methylation
8 |
9 |
10 | class SegmentsWriterBED:
11 |     def __init__(self, outfile: IO, chrom: str):
12 |         self.outfile = outfile
13 |         self.chrom = chrom
14 |         self.first = True
15 |
16 |     def write_segments_llr(
17 |         self,
18 |         llrs: np.ndarray,
19 |         segments: np.ndarray,
20 |         genomic_starts: np.ndarray,
21 |         genomic_ends: np.ndarray,
22 |         samples: np.ndarray,
23 |         compute_diffmet: bool = False,
24 |         **kwargs,
25 |     ):
26 |         df_rowvals = []
27 |
28 |         unique_segments = sorted(list(set(segments)))
29 |         for seg in unique_segments:
30 |             seg_pos = np.arange(llrs.shape[1])[segments == seg]
31 |             start = genomic_starts[seg_pos[0]]
32 |             end = genomic_ends[seg_pos[-1]]
33 |
34 |             if seg == unique_segments[0]:
35 |                 segment_type = "window_start"
36 |             elif seg == unique_segments[-1]:
37 |                 segment_type = "window_end"
38 |             else:
39 |                 segment_type = "inner_segment"
40 |
41 |             rowval = {
42 |                 "chrom": self.chrom,
43 |                 "start": start,
44 |                 "end": end,
45 |                 "num_sites": (segments == seg).sum(),
46 |                 "type": segment_type,
47 |             }
48 |             if samples is not None and compute_diffmet:
49 |                 samples_unique = list(set(samples))
50 |                 seg_llrs = llrs[:, segments == seg]
51 |                 sample_llr = {sample: seg_llrs[samples == sample] for sample in samples_unique}
52 |                 for sample in samples_unique:
53 |                     rowval[f"met_rate_{sample}"] = bs_from_llrs(sample_llr[sample])
54 |
55 |                 for i, s_a in enumerate(samples_unique):
56 |                     for s_b in samples_unique[i + 1 :]:
57 |                         up, pp = compute_differential_methylation(sample_llr[s_a], sample_llr[s_b])
58 |                         rowval["unpaired_pval_%s_vs_%s" % (s_a, s_b)] = up
59 |                         rowval["paired_pval_%s_vs_%s" % (s_a, s_b)] = pp
60 |
61 |             df_rowvals.append(rowval)
62 |
63 |         df = pd.DataFrame(df_rowvals, columns=["chrom", "start", "end", "num_sites", "type"])
64 |         df = df.astype({"chrom": str, "start": int, "end": int, "num_sites": int, "type": str})
65 |
66 |         df.to_csv(self.outfile, sep="\t", header=False, index=False, mode="w" if self.first else "a")
67 |         self.first = False
68 |
69 |
70 | class SegmentsWriterBedGraph:
71 |     def __init__(self, outfile_base, chrom: str):
72 |         self.outfiles = {}
73 |         self.outfile_base = outfile_base
74 |         self.chrom = chrom
75 |
76 |     def get_outfile(self, sample):
77 |         if sample not in self.outfiles:
78 |             self.outfiles[sample] = f"{self.outfile_base}.{sample}.bedGraph"
79 |             with open(self.outfiles[sample], "w") as f:
80 |                 f.write(
81 |                     f"track type=bedGraph name={sample} description=center_label visibility=display_mode color=252,127,44 altColor=25,4,248 graphType=heatmap viewLimits=0:1 midRange=0.50:0.50 midColor=255,255,255\n"
82 |                 )
83 |         return self.outfiles[sample]
84 |
85 |     def write_segments_llr(
86 |         self,
87 |         llrs: np.ndarray,
88 |         segments: np.ndarray,
89 |         genomic_starts: np.ndarray,
90 |         genomic_ends: np.ndarray,
91 |         samples: np.ndarray,
92 |         **kwargs,
93 |     ):
94 |         samples_unique = list(set(samples))
95 |         for sample in samples_unique:
96 |             with open(self.get_outfile(sample), "a") as f:
97 |                 for seg in sorted(list(set(segments))):
98 |                     seg_pos = np.arange(llrs.shape[1])[segments == seg]
99 |                     start = genomic_starts[seg_pos[0]]
100 |                     end = genomic_ends[seg_pos[-1]]
101 |                     seg_llrs = llrs[:, segments == seg]
102 |                     sample_llr = seg_llrs[samples == sample]
103 |                     good_calls = (np.abs(sample_llr) > 2).sum()
104 |                     pos_calls = (sample_llr > 2).sum()
105 |                     if good_calls > 0:
106 |                         sample_metrate = pos_calls / good_calls
107 |                     else:
108 |                         sample_metrate = np.nan
109 |                     row = f"{self.chrom} {start} {end} {sample_metrate}\n"
110 |                     f.write(row)
111 |
112 |
113 | class SegmentsReaderCSV:
114 |     """
115 |     Reads a concatenation of CSV files (with potentially different columns) and merges
116 |     them into one dataframe, filling in null for missing values. Accepts multiple header
117 |     lines, but requires them to start with the word "chrom" in order to identify the
118 |     header.
119 |
120 |     This is used to read a CSV file that has been created in chunks by multiple
121 |     concurrent worker threads.
122 |     """
123 |
124 |     def __init__(self, *argc):
125 |         self.inputfiles = argc
126 |
127 |     def read(self):
128 |         ret = []
129 |         for inputfile in self.inputfiles:
130 |             with open(inputfile) as f:
131 |                 cur_string = None
132 |                 while True:
133 |                     line = f.readline().strip()
134 |                     if line.startswith("chrom") or not line:
135 |                         # new header line
136 |                         if cur_string is not None:
137 |                             cur_pd = pd.read_csv(StringIO("\n".join(cur_string)), sep="\t")
138 |                             ret.append(cur_pd)
139 |                         cur_string = []
140 |                         if not line:
141 |                             break
142 |                     cur_string.append(line)
143 |         ret = pd.concat(ret, sort=False)
144 |         ret = ret.sort_values(["chrom", "start", "end"]).reset_index(drop=True)
145 |         return ret
146 |
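A minimal sketch of driving the BED writer above (hypothetical toy data; segments, starts and ends are per CpG site, samples is per read):

    import numpy as np
    from pycoMeth.meth_seg.segments_csv_io import SegmentsWriterBED

    llrs = np.zeros((3, 6))                  # 3 reads x 6 CpG sites (toy values)
    segments = np.array([0, 0, 1, 1, 2, 2])  # per-site segment assignment
    starts = np.arange(0, 60, 10)            # per-site genomic start coordinates
    ends = starts + 2                        # per-site genomic end coordinates
    samples = np.array(["a", "a", "b"])      # per-read sample labels

    with open("segments.bed", "w") as out:
        writer = SegmentsWriterBED(out, chrom="chr1")
        writer.write_segments_llr(llrs, segments, starts, ends, samples)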
--------------------------------------------------------------------------------
/pycoMeth/templates/CpG_Interval.html.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | PycoMeth report
12 |
13 |
22 |
23 |
24 |
25 |
26 |
27 |
28 | 58 |
59 |
60 |
61 |
62 |
63 |
64 | 65 |
66 |

CpG interval report generated on {{ date }} with pycoMeth {{ version }}

67 |
68 |
69 |
70 |
71 |
Interval details
72 | {{ interval_html }} 73 |
74 |
75 |
76 |
Methylation log-likelihood ratio by CpG position
77 | {{ heatmap_html }} 78 |
79 |
80 |
81 |
Distribution of CpG methylation log-likelihood ratio by sample
82 | {{ ridgeplot_html }} 83 |
84 |
85 |
86 |
Closest transcript TSSs
87 | {{ transcript_html }} 88 |
89 |
90 |
91 |
92 |
93 |

Source file: {{ src_file }}

94 |

MD5 hash: {{ md5 }}

95 |
96 |
97 |
98 |
99 | 100 | 101 | 102 | -------------------------------------------------------------------------------- /pycoMeth/templates/CpG_summary.html.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | PycoMeth report 12 | 13 | 22 | 23 | 24 |
25 |
26 |
27 | 60 |
61 |
62 |
63 |
64 |
65 |
66 | 67 |
68 |

{{ title_text }} generated on {{ date }} with pycoMeth {{ version }}

69 |
71 |
72 |
73 |
74 |
Overall summary
75 | {{ summary_html }} 76 |
77 |
78 |
79 |
Methylation category counts by sample for significant CpG intervals
80 | {{ catplot_html }} 81 |
82 |
83 |
84 |
Methylation log-likelihood ratio by significant CpG interval
85 | {{ heatmap_html }} 86 |
87 |
88 |
89 |
Distribution of CpG methylation log-likelihood ratio for significant CpG intervals
90 | {{ ridgeplot_html }} 91 |
92 |
93 |
94 |
Distribution of significant CpG intervals by chromosomal intervals
95 | {{ ideogram_html }} 96 |
97 |
98 |
99 |
Distance from CpG interval to closest gene TSS
100 | {{ tss_dist_html }} 101 |
102 |
103 |
104 |
Top significant differentially methylated intervals
105 | {{ top_html }} 106 |
107 |
108 |
109 |
110 |
111 |

Source file: {{ src_file }}

112 |

MD5 hash: {{ md5 }}

113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from setuptools import setup
5 |
6 | # Define package info
7 | name = "pycoMeth"
8 | description = "DNA methylation analysis downstream to Nanopolish for Oxford Nanopore DNA sequencing datasets"
9 | with open("README.md", "r") as fh:
10 |     long_description = fh.read()
11 |
12 | # Collect info in a dictionary for setup.py
13 | setup(
14 |     name=name,
15 |     description="Differential methylation calling suite for Nanopore methylation calls PycoMeth",
16 |     version="2.2.2",
17 |     long_description=long_description,
18 |     long_description_content_type="text/markdown",
19 |     url="https://github.com/snajder-r/pycoMeth",
20 |     author="Rene Snajder",
21 |     author_email="r.snajder@dkfz-heidelberg.de",
22 |     license="GPL",
23 |     python_requires=">=3.7",
24 |     classifiers=[
25 |         "Development Status :: 4 - Beta",
26 |         "Intended Audience :: Science/Research",
27 |         "Topic :: Scientific/Engineering :: Bio-Informatics",
28 |         "License :: OSI Approved :: MIT License",
29 |         "Programming Language :: Python :: 3"
30 |     ],
31 |     install_requires=[
32 |         "numpy==1.22.2",
33 |         "scipy==1.4.1",
34 |         "statsmodels==0.13.2",
35 |         "pandas==1.4.1",
36 |         "Jinja2==3.0.3",
37 |         "plotly==5.6.0",
38 |         "pyfaidx==0.6.4",
39 |         "tqdm==4.62.3",
40 |         "colorlog==6.6.0",
41 |         "nbformat==5.1.3",
42 |         "meth5>=1.1.1"
43 |     ],
44 |     packages=["pycoMeth", "pycoMeth.meth_seg"],
45 |     package_dir={"pycoMeth": "pycoMeth", "pycoMeth.meth_seg": "pycoMeth/meth_seg"},
46 |     package_data={name: ["templates/*"]},
47 |     entry_points={"console_scripts": ["pycometh=pycoMeth.__main__:main", "pycoMeth=pycoMeth.__main__:main"]},
48 | )
49 |
--------------------------------------------------------------------------------
/versipy.yaml:
--------------------------------------------------------------------------------
1 | version:
2 |   major: 2
3 |   minor: 2
4 |   micro: 2
5 |   a: null
6 |   b: null
7 |   rc: null
8 |   post: null
9 |   dev: null
10 | managed_values:
11 |   __package_name__: pycoMeth
12 |   __package_description__: Differential methylation calling suite for Nanopore methylation
13 |     calls PycoMeth
14 |   __package_url__: https://github.com/snajder-r/pycoMeth
15 |   __author_name__: Rene Snajder
16 |   __author_email__: r.snajder@dkfz-heidelberg.de
17 |   __author_url__: https://github.com/snajder-r
18 |   __package_licence__: GPL
19 |   __package_licence_url__: https://opensource.org/licenses/MIT
20 |   __minimal_python__: '3.7'
21 |   __entry_point1__: meth5=meth5.__main__:main
22 |   __dependencies__:
23 |     - numpy==1.22.2
24 |     - scipy==1.4.1
25 |     - statsmodels==0.13.2
26 |     - pandas==1.4.1
27 |     - Jinja2==3.0.3
28 |     - plotly==5.6.0
29 |     - pyfaidx==0.6.4
30 |     - tqdm==4.62.3
31 |     - colorlog==6.6.0
32 |     - nbformat==5.1.3
33 |     - meth5>=1.1.1
34 |   __classifiers__:
35 |     - 'Development Status :: 4 - Beta'
36 |     - 'Intended Audience :: Science/Research'
37 |     - 'Topic :: Scientific/Engineering :: Bio-Informatics'
38 |     - 'License :: OSI Approved :: MIT License'
39 |     - 'Programming Language :: Python :: 3'
40 |   __citation__: Rene Snajder. (2021, May 4). snajder-r/pycometh
41 | managed_files:
42 |   versipy_templates/setup.py: setup.py
43 |   versipy_templates/README.md: README.md
44 |   versipy_templates/meta.yaml: meta.yaml
45 |   versipy_templates/pycoMeth/__init__.py: pycoMeth/__init__.py
--------------------------------------------------------------------------------
/versipy_history.txt:
--------------------------------------------------------------------------------
1 | 2021-05-14 14:21:48.949462 0.0.0 Initialise versipy history
2 | 2021-05-14 14:23:17.897133 0.2.7.dev1 Versipy auto bump-up
3 | 2021-05-14 14:24:00.890719 0.2.7.dev2 Versipy auto bump-up
4 | 2021-05-14 14:24:18.995740 0.2.7.dev3 Versipy auto bump-up
5 | 2021-05-14 14:25:07.958130 0.2.7.dev4 Versipy auto bump-up
6 | 2021-05-14 14:25:56.523476 0.2.7.dev5 Versipy auto bump-up
7 | 2021-05-14 14:26:22.495598 0.2.7.dev6 Versipy auto bump-up
8 | 2021-05-14 14:26:23.926715 0.2.7.dev7 Versipy auto bump-up
9 | 2021-05-14 15:08:35.547690 0.2.7.dev8 Versipy auto bump-up
10 | 2021-05-14 15:20:26.537592 0.2.7.dev9 Versipy auto bump-up
11 | 2021-05-14 15:20:55.493471 0.2.7.dev10 Versipy auto bump-up
12 | 2021-05-14 15:21:25.097837 0.2.7.dev11 Versipy auto bump-up
13 | 2021-05-14 15:23:59.998725 0.2.7.dev12 Versipy auto bump-up
14 | 2021-05-17 16:39:53.497454 0.2.7.dev10 Manually set version
15 | 2021-05-17 16:39:59.416404 0.2.7.dev12 Manually set version
16 | 2021-05-17 16:40:37.497052 0.2.7.dev12 Manually set version
17 | 2021-05-17 16:41:16.612462 0.2.7.dev12 Manually set version
18 | 2021-05-17 16:41:43.951956 0.2.7.dev12 Manually set version
19 | 2022-02-10 14:50:46.533254 1.0.0 Versipy auto bump-up
20 | 2022-02-10 14:50:50.342701 2.0.0 Versipy auto bump-up
21 | 2022-02-11 10:31:40.736197 2.0.0 Manually set version
22 | 2022-02-11 10:32:18.522231 2.0.0 Manually set version
23 | 2022-02-16 16:11:11.037610 2.1.0 Versipy auto bump-up
24 | 2022-02-16 16:11:36.848556 2.0.0 Manually set version
25 | 2022-02-16 16:14:38.076678 2.0.0 Manually set version
26 | 2022-02-16 16:17:05.090660 2.0.0 Manually set version
27 | 2022-02-16 16:39:30.376184 2.0.0 Manually set version
28 | 2022-02-16 16:40:39.349922 2.0.1 Versipy auto bump-up
29 | 2022-02-23 14:32:52.483419 2.0.1 Manually set version
30 | 2022-02-23 14:34:47.438478 2.0.1 Manually set version
31 | 2022-02-23 15:11:12.579705 2.0.1 Manually set version
32 | 2022-03-07 11:16:55.588412 2.1.0 Versipy auto bump-up
33 | 2022-03-07 16:31:05.233410 2.1.1 Versipy auto bump-up
34 | 2022-05-16 11:57:49.214055 2.2.0 Versipy auto bump-up
35 | 2022-05-16 12:19:25.297333 2.2.0 Manually set version
36 | 2022-06-09 11:22:05.313781 2.2.1 Versipy auto bump-up
37 | 2022-06-09 12:10:19.515064 2.2.1rc1 Versipy auto bump-up
38 | 2022-06-10 13:05:25.975689 2.2.1 Manually set version
39 | 2022-06-10 16:46:16.041962 2.2.1 Manually set version
40 | 2022-11-25 16:27:05.968686 2.3.0 Versipy auto bump-up
41 | 2022-11-25 16:28:25.419597 2.2.2 Manually set version
42 | 2022-11-25 16:28:51.416226 2.2.2 Manually set version
43 | 2022-11-25 16:29:04.058088 2.2.2rc1 Versipy auto bump-up
44 | 2022-11-25 16:32:27.075605 2.2.2rc2 Versipy auto bump-up
45 | 2022-11-25 16:32:32.364054 2.2.2rc1 Manually set version
46 | 2022-11-28 16:52:20.037965 2.2.3 Versipy auto bump-up
47 | 2022-11-28 16:53:13.131999 2.2.2 Manually set version
48 |
--------------------------------------------------------------------------------
/versipy_templates/README.md:
--------------------------------------------------------------------------------
1 | ![pycoMeth](./docs/pictures/pycoMeth_long.png)
2 |
3 | [![GitHub license](https://img.shields.io/github/license/a-slide/pycoMeth.svg)](https://github.com/a-slide/pycoMeth/blob/master/LICENSE)
4 | [![Language](https://img.shields.io/badge/Language-Python3.7+-yellow.svg)](https://www.python.org/)
5 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.6637645.svg)](https://doi.org/10.5281/zenodo.6637645)
6 |
7 | [![PyPI version](https://badge.fury.io/py/pycoMeth.svg)](https://badge.fury.io/py/pycoMeth)
8 | [![PyPI downloads](https://pepy.tech/badge/pycoMeth)](https://pepy.tech/project/pycoMeth)
9 | [![Anaconda Version](https://anaconda.org/snajder-r/pycometh/badges/version.svg)](https://anaconda.org/snajder-r/pycometh)
10 | [![Anaconda Downloads](https://anaconda.org/snajder-r/pycometh/badges/downloads.svg)](https://anaconda.org/snajder-r/pycometh)
11 |
12 | ---
13 | Version in this branch: __package_version__
14 |
15 | ---
16 |
17 | **DNA methylation analysis downstream to Nanopolish for Oxford Nanopore DNA sequencing datasets**
18 |
19 | `pycoMeth` can be used for further analyses starting from the output files generated by [`Nanopolish call-methylation`](https://github.com/jts/nanopolish). The package contains a suite of tools to **find CpG islands**, **segment the methylome**, and perform a **differential methylation analysis** across multiple samples.
20 |
21 | `pycoMeth` generates extensive tabulated reports and BED files which can be loaded in a genome browser. In addition, an interactive HTML report of differentially
22 | methylated intervals/islands can also be generated at the end of the analysis.
23 |
24 | [`Methplotlib`](https://github.com/wdecoster/methplotlib), developed by [Wouter De Coster](https://twitter.com/wouter_decoster), is an excellent complementary tool to visualise and explore the methylation status of specific loci.
25 |
26 | Please be aware that `pycoMeth` is a research package that is still under development. The API, command line interface, and implementation might change without backward compatibility.
27 |
28 | ---
29 | ### Installation
30 |
31 | Install either using conda:
32 |
33 |     conda install -c snajder-r -c bioconda -c conda-forge pycometh
34 |
35 | Or using pip:
36 |
37 |     pip install pycometh
38 |
39 | ### Documentation
40 |
41 | More detailed usage documentation can be found at https://snajder-r.github.io/pycoMeth/
42 |
43 | ### pycoMeth workflow
44 |
45 | ![Workflow](https://snajder-r.github.io/pycoMeth/images/overview.svg)
46 |
47 |
48 | ### pycoMeth example HTML report
49 |
50 | [Example HTML report 1](https://snajder-r.github.io/pycoMeth/examples/pycometh_report1.html)
51 |
52 | [Example HTML report 2](https://snajder-r.github.io/pycometh/examples/pycometh_report2.html)
53 |
54 | ---
55 |
56 | ### Citing
57 |
58 | The repository is archived at Zenodo: https://doi.org/10.5281/zenodo.6637645
59 |
60 | If you find pycoMeth useful, please cite our preprint:
61 |
62 | Snajder, Rene H., Oliver Stegle, and Marc Jan Bonder. 2022. "PycoMeth: A Toolbox for Differential Methylation Testing from Nanopore Methylation Calls." bioRxiv. https://doi.org/10.1101/2022.02.16.480699.
63 |
64 |
65 |     @article {Snajder2022.02.16.480699,
66 |         author = {Snajder, Rene and Leger, Adrien and Stegle, Oliver and Bonder, Marc Jan},
67 |         title = {pycoMeth: A toolbox for differential methylation testing from Nanopore methylation calls},
68 |         year = {2022}, doi = {10.1101/2022.02.16.480699}, publisher = {Cold Spring Harbor Laboratory},
69 |         journal = {bioRxiv}
70 |     }
71 |
72 |
73 | ### Authors
74 |
75 | * Rene Snajder (rene.snajder@gmail.com)
76 | * Adrien Leger
--------------------------------------------------------------------------------
/versipy_templates/meta.yaml:
--------------------------------------------------------------------------------
1 | {% set version = "__package_version__" %}
2 | {% set name = "pycoMeth" %}
3 |
4 | package:
5 |   name: {{ name|lower }}
6 |   version: {{ version }}
7 |
8 | source:
9 |   path: dist/{{ name }}-{{ version }}.tar.gz
10 |
11 | build:
12 |   number: 0
13 |   script: "pip install {{ name }}-{{ version }}.tar.gz --no-deps --ignore-installed -vv "
14 |   entry_points:
15 |     - pycoMeth=pycoMeth.__main__:main
16 |     - pycometh=pycoMeth.__main__:main
17 |   noarch: "python"
18 |
19 | requirements:
20 |   build:
21 |     - python>=__minimal_python__
22 |     - pip>=19.2.1
23 |     - ripgrep>=11.0.1
24 |     - cython
25 |   run:
26 |     - __@{
27 |     - ::dependencies}__
28 |
29 | test:
30 |   imports:
31 |     - pycoMeth.FileParser
32 |     - pycoMeth.CoordGen
33 |     - pycoMeth.Meth_Comp
34 |     - pycoMeth.Meth_Seg
35 |     - pycoMeth.Comp_Report
36 |   commands:
37 |     - pycoMeth Meth_Comp --help
38 |     - pycoMeth Meth_Seg --help
39 |     - pycoMeth Comp_Report --help
40 |     - pycoMeth CGI_Finder --help
41 |
42 | about:
43 |   home: "__package_url__"
44 |   license: "MIT"
45 |   summary: "DNA methylation analysis for Oxford Nanopore DNA sequencing datasets downstream to Nanopolish"
--------------------------------------------------------------------------------
/versipy_templates/pycoMeth/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define self package variable
4 | __version__ = "__package_version__"
5 | __description__ = "__package_description__"
--------------------------------------------------------------------------------
/versipy_templates/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from setuptools import setup
5 |
6 | # Define package info
7 | name = "__package_name__"
8 | description = "DNA methylation analysis downstream to Nanopolish for Oxford Nanopore DNA sequencing datasets"
9 | with open("README.md", "r") as fh:
10 |     long_description = fh.read()
11 |
12 | # Collect info in a dictionary for setup.py
13 | setup(
14 |     name=name,
15 |     description="__package_description__",
16 |     version="__package_version__",
17 |     long_description=long_description,
18 |     long_description_content_type="text/markdown",
19 |     url="__package_url__",
20 |     author="__author_name__",
21 |     author_email="__author_email__",
22 |     license="__package_licence__",
23 |     python_requires=">=__minimal_python__",
24 |     classifiers=[
25 |         __@{,
26 |         ::"classifiers"}__
27 |     ],
28 |     install_requires=[
29 |         __@{,
30 |         ::"dependencies"}__
31 |     ],
32 |     packages=["pycoMeth", "pycoMeth.meth_seg"],
33 |     package_dir={"pycoMeth": "pycoMeth", "pycoMeth.meth_seg": "pycoMeth/meth_seg"},
34 |     package_data={name: ["templates/*"]},
35 |     entry_points={"console_scripts": ["pycometh=pycoMeth.__main__:main", "pycoMeth=pycoMeth.__main__:main"]},
36 | )
37 |
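A sketch of how versipy renders the templates above: every file listed under managed_files in versipy.yaml is copied to its target path with each __placeholder__ substituted by its entry in managed_values (and __package_version__ assembled from the version block). For example, with the values from the versipy.yaml shown earlier:

    # versipy_templates/pycoMeth/__init__.py  ->  pycoMeth/__init__.py
    __version__ = "2.2.2"
    __description__ = "Differential methylation calling suite for Nanopore methylation calls PycoMeth"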
--------------------------------------------------------------------------------