├── tests ├── __init__.py ├── test_make_empty.py ├── test_import.py ├── test_make_string.py ├── test_regr.py ├── test_debug_ply.py ├── test_get_heading_by_path.py ├── test_get_all_headings.py ├── test_read_org.py ├── test_can_read_roam_node.py ├── test_save_org.py ├── test_get_headings_by_title.py ├── test_inheritance.py ├── test_roundtrip.py ├── test_alternate_todos.py └── test_parser.py ├── src ├── orgmunge.egg-info │ ├── dependency_links.txt │ ├── top_level.txt │ ├── requires.txt │ ├── SOURCES.txt │ └── PKG-INFO └── orgmunge │ ├── todos.json │ ├── lexer.py │ ├── parser.py │ ├── __init__.py │ ├── parsetab.py │ └── classes.py ├── requirements.txt ├── pyproject.toml ├── poetry.lock ├── LICENSE ├── .github └── workflows │ ├── python-publish.yml │ └── test-python-publish.yml ├── CONTRIBUTING.org ├── .gitignore └── README.org /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/orgmunge.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/orgmunge.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | orgmunge 2 | -------------------------------------------------------------------------------- /src/orgmunge.egg-info/requires.txt: -------------------------------------------------------------------------------- 1 | ply 2 | 3 | [dev] 4 | black 5 | build 6 | twine 7 | pip-tools 8 | pytest 9 | bumpver 10 | -------------------------------------------------------------------------------- /tests/test_make_empty.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | 3 | def test_make_empty_org(): 4 | test = Org("\n", from_file=False) 5 | assert test 6 | -------------------------------------------------------------------------------- /tests/test_import.py: -------------------------------------------------------------------------------- 1 | def test_import(): 2 | try: 3 | import orgmunge 4 | assert True 5 | except ImportError: 6 | assert False 7 | -------------------------------------------------------------------------------- /tests/test_make_string.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | 3 | def test_make_org(): 4 | test = Org('* TODO Something important\n', from_file=False) 5 | assert test 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.10 3 | # by the following command: 4 | # 5 | # pip-compile pyproject.toml 6 | # 7 | ply==3.11 8 | # via orgmunge (pyproject.toml) 9 | -------------------------------------------------------------------------------- /tests/test_regr.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from orgmunge import Org 3 | 4 | def test_regr(file_regression): 5 | agenda = Org('./tests/files/regr.org') 6 | file_regression.check(str(agenda), extension='.txt',) 7 | 8 | -------------------------------------------------------------------------------- /src/orgmunge/todos.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "todo_states": 3 | { 4 | "todo": "", 5 | "next": "", 6 | "wait": "" 7 | }, 8 | "done_states": 9 | { 10 | "cncl": "", 11 | "done": "" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /tests/test_debug_ply.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | 3 | def test_can_read_org(): 4 | test = Org("./README.org", debug=True, todos={'todo_states': {'fake_todo': 'TDO'}, 5 | 'done_states': {'fake_done': 'DNE'},}) 6 | assert test 7 | -------------------------------------------------------------------------------- /tests/test_get_heading_by_path.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | 3 | def test_get_heading_by_path(): 4 | parsed = Org('./tests/files/regr.org') 5 | heading = next(parsed.filter_headings(lambda h: h.level == 3)) 6 | heading_path = [x.title for x in [heading.parent.parent, heading.parent, heading]] 7 | assert heading is parsed.get_heading_by_path(heading_path, exact=True) 8 | -------------------------------------------------------------------------------- /tests/test_get_all_headings.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | import re 3 | 4 | def test_get_all_headings(): 5 | test_file = '''* Node 1 6 | ** Subnode 1 7 | ** Subnode 2 8 | * Node 2 9 | ** Subnode 1 10 | * Node 3 11 | ** Subnode 1 12 | *** Subnode 1 13 | *** Subnode 2 14 | ''' 15 | node_titles = re.sub(r'\*+\s+', '', test_file.strip()).split('\n') 16 | 17 | parsed = Org(test_file, from_file=False) 18 | assert [n.title for n in parsed.get_all_headings()] == node_titles 19 | -------------------------------------------------------------------------------- /tests/test_read_org.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | 3 | CONTENT = "* You can write orgfiles!!\n" 4 | 5 | def test_save_org(tmp_path): 6 | p = tmp_path / "testtmp.org" 7 | assert len(list(tmp_path.iterdir())) == 0 # Nothing exists here 8 | test = Org(CONTENT, from_file=False) 9 | test.write(p) 10 | assert len(list(tmp_path.iterdir())) == 1 # testtmp. org exists 11 | assert p.read_text().strip(" \n") == CONTENT.strip(" \n") # Since org is plaintext these should be identical? 
12 | -------------------------------------------------------------------------------- /src/orgmunge.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | LICENSE 2 | pyproject.toml 3 | src/orgmunge/__init__.py 4 | src/orgmunge/classes.py 5 | src/orgmunge/lexer.py 6 | src/orgmunge/parser.py 7 | src/orgmunge/parsetab.py 8 | src/orgmunge.egg-info/PKG-INFO 9 | src/orgmunge.egg-info/SOURCES.txt 10 | src/orgmunge.egg-info/dependency_links.txt 11 | src/orgmunge.egg-info/requires.txt 12 | src/orgmunge.egg-info/top_level.txt 13 | tests/test_debug_ply.py 14 | tests/test_import.py 15 | tests/test_make_empty.py 16 | tests/test_make_string.py 17 | tests/test_parser.py 18 | tests/test_read_org.py 19 | tests/test_save_org.py -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "orgmunge" 3 | version = "0.3.1" 4 | description = "Programmatically modify Orgmode documents" 5 | authors = ["durableOne "] 6 | license = "MIT" 7 | readme = "README.org" 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.10" 11 | ply = "^3.11" 12 | 13 | [tool.poetry.group.dev.dependencies] 14 | build = "^1.0.3" 15 | twine = "^4.0.2" 16 | black = "^23.9.1" 17 | pip-tools = "^7.3.0" 18 | pytest = "^7.4.2" 19 | bumpver = "^2023.1126" 20 | 21 | [build-system] 22 | requires = ["poetry-core"] 23 | build-backend = "poetry.core.masonry.api" 24 | -------------------------------------------------------------------------------- /poetry.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 2 | 3 | [[package]] 4 | name = "ply" 5 | version = "3.11" 6 | description = "Python Lex & Yacc" 7 | optional = false 8 | python-versions = "*" 9 | files = [ 10 | {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, 11 | {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"}, 12 | ] 13 | 14 | [metadata] 15 | lock-version = "2.0" 16 | python-versions = "^3.11" 17 | content-hash = "f9a6b0a312efa0841d9c4b29ac8760db4608e7dce52d338f0f5673bad9984f05" 18 | -------------------------------------------------------------------------------- /tests/test_can_read_roam_node.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | import re 3 | 4 | CONTENT = """:PROPERTIES: 5 | :ID: some-bogus-id 6 | :END: 7 | #+title: Test Roam Node 8 | 9 | * You can write orgfiles!! 10 | """ 11 | 12 | def test_read_org_roam_node(tmp_path): 13 | p = tmp_path / "testtmp_roam.org" 14 | assert len(list(tmp_path.iterdir())) == 0 # Nothing exists here 15 | test = Org(CONTENT, from_file=False) 16 | assert test.properties['ID'] == 'some-bogus-id' 17 | test.write(p) 18 | assert len(list(tmp_path.iterdir())) == 1 # testtmp. 
org exists 19 | assert p.read_text().strip(" \n") == re.sub(r'^\s*$', '', CONTENT.strip(" \n"), flags=re.MULTILINE) 20 | -------------------------------------------------------------------------------- /tests/test_save_org.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | 3 | def test_can_read_org(): 4 | test = Org("./README.org", todos={'todo_states': {'fake_todo': 'TDO'}, 5 | 'done_states': {'fake_done': 'DNE'},}) 6 | assert test 7 | 8 | from orgmunge import Org 9 | 10 | CONTENT = "* You can write orgfiles!!\n" 11 | 12 | def test_save_org(tmp_path): 13 | p = tmp_path / "testtmp.org" 14 | assert len(list(tmp_path.iterdir())) == 0 # Nothing exists here 15 | test = Org(CONTENT, from_file=False) 16 | test.write(p) 17 | assert len(list(tmp_path.iterdir())) == 1 # testtmp. org exists 18 | assert p.read_text().strip(" \n") == CONTENT.strip(" \n") # Since org is plaintext these should be identical? 19 | -------------------------------------------------------------------------------- /tests/test_get_headings_by_title.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | import re 3 | 4 | def test_get_headings_by_title(): 5 | test_file = '''* My first node 6 | ** My first sub node 7 | * My first Node 8 | ** My second sub node 9 | ''' 10 | parsed = Org(test_file, from_file=False) 11 | search_1 = parsed.get_headings_by_title('node') 12 | search_2 = parsed.get_headings_by_title('node', re_flags=re.IGNORECASE) 13 | search_3 = parsed.get_headings_by_title('node', exact=True) 14 | search_4 = parsed.get_headings_by_title('My first node', exact=True) 15 | assert [n.title for n in search_1] == ['My first node', 'My first sub node', 'My second sub node'] 16 | assert [n.title for n in search_2] == ['My first node', 'My first sub node', 'My first Node', 'My second sub node'] 17 | assert list(search_3) == [] 18 | assert [n.title for n in search_4] == ['My first node'] 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 durableOne, nalisarc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.x' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install build 33 | - name: Build package 34 | run: python -m build 35 | - name: Publish package 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /.github/workflows/test-python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.x' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install build 33 | - name: Build package 34 | run: python -m build 35 | - name: Publish package 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.TESTPYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /tests/test_inheritance.py: -------------------------------------------------------------------------------- 1 | 2 | from orgmunge import Org 3 | import re 4 | 5 | def test_tag_inheritance(): 6 | test_file = '''* Node 1 :parent: 7 | ** Subnode 1 8 | ** Subnode 2 :child: 9 | * Node 2 10 | ** Subnode 1 11 | * Node 3 :parent: 12 | ** Subnode 1 13 | *** Subnode 1 14 | *** Subnode 2 15 | ''' 16 | parsed = Org(test_file, from_file=False) 17 | subnodes = [parsed.get_heading_by_path(p) for p in 18 | [['Node 1', 'Subnode 2'], 19 | ['Node 3', 'Subnode 1', 'Subnode 2']]] 20 | for subnode in subnodes: 21 | assert 'parent' in subnode.all_tags 22 | 23 | def test_property_inheritance(): 24 | test_file = '''* Node 1 25 | :PROPERTIES: 26 | :parent: me 27 | :END: 28 | ** Subnode 1 29 | ** Subnode 2 30 | * Node 2 31 | ** Subnode 1 32 | * Node 3 33 | :PROPERTIES: 34 | :parent: me 35 | :END: 36 | ** Subnode 1 37 | *** Subnode 1 38 | *** Subnode 2 39 | ''' 40 | parsed = Org(test_file, from_file=False) 41 | subnodes = [parsed.get_heading_by_path(p) for p in 42 | [['Node 1', 'Subnode 2'], 43 | ['Node 3', 'Subnode 1', 'Subnode 2']]] 44 | for subnode in subnodes: 45 | assert 'parent' in subnode.inherited_properties 46 | assert 'parent' in subnode.get_all_properties() 47 | -------------------------------------------------------------------------------- /CONTRIBUTING.org: -------------------------------------------------------------------------------- 1 | #+Title: Contributing 2 | 3 | Thanks for considering contributing! Please read this document to learn the various ways you can contribute to this project and how to go about doing it. 4 | 5 | * Bug reports and feature requests 6 | 7 | ** Did you find a bug? 8 | First, do a quick search to see whether your issue has already been reported. 9 | If your issue has already been reported, please comment on the existing issue. 10 | 11 | Otherwise, open a new GitHub issue. Be sure to include a clear title and description. 12 | The description should include as much relevant information as possible. 13 | It should explain how to reproduce the erroneous behavior as well as the behavior you expect to see. 14 | Ideally you would include a code sample or an executable test case demonstrating the expected behavior. 15 | 16 | ** Do you have a suggestion for an enhancement or new feature? 17 | We use GitHub issues to track feature requests. Before you create a feature request: 18 | * Make sure you have a clear idea of the enhancement you would like. 19 | - If you have a vague idea, consider discussing it first on a GitHub issue. 20 | * Check the documentation to make sure your feature does not already exist. 21 | * Do a quick search to see whether your feature has already been suggested. 
22 | 23 | When creating your request, please: 24 | * Provide a clear title and description. 25 | * Explain why the enhancement would be useful. 26 | - It may be helpful to highlight the feature in other libraries. 27 | * Include code examples to demonstrate how the enhancement would be used. 28 | 29 | ** TODO Making a pull request 30 | -------------------------------------------------------------------------------- /tests/test_roundtrip.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example snippets which should roundtrip and produce 3 | an identical result. 4 | """ 5 | 6 | from orgmunge import Org 7 | import pytest 8 | 9 | 10 | @pytest.fixture 11 | def todo_and_done_states(): 12 | return { 13 | "todo_states": {"todo": "TODO"}, 14 | "done_states": {"done": "DONE"}, 15 | } 16 | 17 | 18 | EXAMPLES = ( 19 | """\ 20 | * TODO Calculation 21 | ** Input 22 | 3+4+5+6 23 | ** Evaluation 24 | """, 25 | """\ 26 | * Parse{}weird characters 27 | There is a weird {} character {} between 2 of these words. 28 | """.format(chr(160), chr(8239), chr(160)), 29 | """\ 30 | * A normal title 31 | * TODO A title with the -- word TODO in the title which triggers a syntax error 32 | * TODO Another normal title. 33 | """, 34 | """\ 35 | * :atitlewithjustatag: 36 | """ 37 | ) 38 | 39 | 40 | @pytest.mark.parametrize("text", EXAMPLES) 41 | def test_roundtrip(text, todo_and_done_states): 42 | # Shouldn't produce errors when parsed 43 | parsed = Org(text, from_file=False, todos=todo_and_done_states) 44 | 45 | # Shouldn't produce errors when roundtripped 46 | Org(str(parsed), from_file=False, todos=todo_and_done_states) 47 | 48 | # Should produce identical output roundtripped 49 | assert str(parsed) == text 50 | 51 | 52 | def test_roundtrip_after_adding_child(todo_and_done_states): 53 | parent_note = Org("* N1\n", from_file=False, todos=todo_and_done_states) 54 | child_note = Org("* N2\n", from_file=False, todos=todo_and_done_states) 55 | parent_note.root.add_child(child_note.root.children[0], new=True) 56 | assert "* N1\n* N2\n" == str(parent_note) 57 | -------------------------------------------------------------------------------- /src/orgmunge.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: orgmunge 3 | Version: 1.0.0 4 | Summary: Use org files in python. 5 | Author-email: D Sharp 6 | License: MIT License 7 | 8 | Copyright (c) 2023 durableOne 9 | 10 | Permission is hereby granted, free of charge, to any person obtaining a copy 11 | of this software and associated documentation files (the "Software"), to deal 12 | in the Software without restriction, including without limitation the rights 13 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 | copies of the Software, and to permit persons to whom the Software is 15 | furnished to do so, subject to the following conditions: 16 | 17 | The above copyright notice and this permission notice shall be included in all 18 | copies or substantial portions of the Software. 19 | 20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 23 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 26 | SOFTWARE. 27 | 28 | Project-URL: Homepage, https://github.com/durableOne/orgmunge 29 | Keywords: org,emacs,literate programming 30 | Classifier: License :: OSI Approved :: MIT License 31 | Classifier: Programming Language :: Python 32 | Classifier: Programming Language :: Python :: 3 33 | Requires-Python: >=3.9 34 | Description-Content-Type: text/markdown 35 | License-File: LICENSE 36 | Requires-Dist: ply 37 | Provides-Extra: dev 38 | Requires-Dist: black; extra == "dev" 39 | Requires-Dist: build; extra == "dev" 40 | Requires-Dist: twine; extra == "dev" 41 | Requires-Dist: pip-tools; extra == "dev" 42 | Requires-Dist: pytest; extra == "dev" 43 | Requires-Dist: bumpver; extra == "dev" 44 | -------------------------------------------------------------------------------- /tests/test_alternate_todos.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | from orgmunge.classes import Headline, Cookie 3 | 4 | def test_alternate_todos(): 5 | parsed = Org('* NEXT Some task [1/2]\n', from_file=False, 6 | todos = {'todo_states': {'todo': 'TODO', 7 | 'next': 'NEXT'}, 8 | 'done_states': {'done': 'DONE'}}) 9 | assert parsed.root.children[0].headline == Headline(parsed.todos, 10 | level='* ', 11 | comment=False, 12 | todo='NEXT', 13 | priority=None, 14 | title='Some task', 15 | cookie='[1/2]', 16 | tags=None) 17 | 18 | def test_todos_from_file(): 19 | parsed = Org('''#+TODO: FOO | BAR 20 | * FOO Some heading 21 | * BAR Some other heading 22 | ''', from_file=False) 23 | assert parsed.root.children[0].headline == Headline(parsed.todos, 24 | level='* ', 25 | comment=False, 26 | todo='FOO', 27 | priority=None, 28 | title='Some heading', 29 | cookie=None, 30 | tags=None,) 31 | assert parsed.root.children[1].headline == Headline(parsed.todos, 32 | level='* ', 33 | comment=False, 34 | todo='BAR', 35 | priority=None, 36 | title='Some other heading', 37 | cookie=None, 38 | tags=None,) 39 | assert parsed.root.children[0].done == False 40 | assert parsed.root.children[1].done == True 41 | -------------------------------------------------------------------------------- /tests/test_parser.py: -------------------------------------------------------------------------------- 1 | from orgmunge import Org 2 | from orgmunge.classes import * 3 | from itertools import product 4 | 5 | todos = Org('', from_file=False).todos 6 | all_todo_keywords = {**todos['todo_states'], **todos['done_states']} 7 | TODOS = [f'{todo}' for todo in all_todo_keywords.values()] + [''] 8 | def test_parsing_headlines(): 9 | def ctor_arg(arg): 10 | return arg if arg != '' else None 11 | comments = ['COMMENT ', ''] 12 | priorities = ['[#A] ', '[#B] ', '[#C] ', ''] 13 | titles = ['My heading'] 14 | cookies = ['[1/2]', '[50%]', ''] 15 | tags = [':tag1:', ':tag1:tag2:', ''] 16 | for comment, todo, priority, title, cookie, tag in product(comments, TODOS, priorities, titles, cookies, tags): 17 | if cookie: 18 | cookie = ' ' + cookie 19 | headline_string = f'* {comment}{todo}{" " if todo != "" else ""}{priority}{title}{cookie}{10 * " " if tag != "" else ""}{tag}\n' 20 | parsed = Org(headline_string, from_file=False, todos=todos, debug=True) 21 | parsed_headline = parsed.root.children[0].headline 22 | assert parsed_headline == 
Headline(todos=todos, 23 | level='* ', 24 | comment=(len(comment) > 0), 25 | todo=ctor_arg(todo), 26 | priority=ctor_arg(priority), 27 | title=title, 28 | cookie=ctor_arg(cookie), 29 | tags=[t for t in tag.split(':') if t != '']) 30 | 31 | def test_parsing_headings(): 32 | heading = f'''* This is a sample heading 33 | SCHEDULED: <2023-07-23 Sun 14:00> 34 | :PROPERTIES: 35 | :ID: my_sample_heading 36 | :END: 37 | Let's put some crazy content in here. For instance, all the todo keywords: {' '.join(TODOS)}. 38 | #+something: that looks like metadata. And some priority [#A], and cookies [5%] [1/2] as well as :tags: 39 | Now what about some formatting: *bold* /italic/ _underlined_ and =code=. 40 | ** COMMENT Child heading 41 | :LOGBOOK: 42 | CLOCK: [2023-07-23 Sun 10:00]--[2023-07-23 Sun 12:00] => 2:00 43 | :END: 44 | ''' 45 | parsed = Org(heading, from_file=False) 46 | assert parsed 47 | main_heading = parsed.root.children[0] 48 | assert main_heading.children[0].headline.comment 49 | assert main_heading.clocking(include_children=True) == [Clocking('2023-07-23 Sun 10:00', '2023-07-23 Sun 12:00')] 50 | assert main_heading.properties['ID'] == 'my_sample_heading' 51 | assert main_heading.done == False 52 | assert main_heading.todo is None 53 | assert main_heading.title == main_heading.headline.title 54 | 55 | def test_preface_only(): 56 | input_string = 'Just a preface\n\n' 57 | parsed = Org(input_string, from_file=False, todos=todos) 58 | assert parsed.initial_body.strip() == input_string.strip() 59 | -------------------------------------------------------------------------------- /src/orgmunge/lexer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import ply.lex as lex 4 | import re 5 | 6 | class Lexer: 7 | tokens = ('ATIMESTAMP', 8 | 'ITIMESTAMP', 9 | 'DRAWER', 10 | 'SCHEDULING', 11 | 'COOKIE', 12 | 'PRIORITY', 13 | 'TODO', 14 | 'STARS', 15 | 'COMMENT', 16 | 'SPACE', 17 | 'NEWLINE', 18 | 'TAGS', 19 | 'TEXT', 20 | 'SEPARATOR', 21 | 'METADATA',) 22 | DATE = r'[1-9][0-9]{3}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[12][0-9]|3[01])' 23 | hours_regex = r'(?:0[0-9]|1[0-9]|2[0-3])' 24 | minutes_regex = r'[0-5][0-9]' 25 | TIME = fr'{hours_regex}:{minutes_regex}' 26 | REPEATER = r'[.+]?\+[0-9]+[hdwmy]' 27 | DEADWARN = r'-[0-9]+[hdwmy]' 28 | DAYOFWEEK = r'(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)' 29 | TIMESTAMP = fr'({DATE})\s({DAYOFWEEK})(\s{TIME})?(-{TIME})?(\s{REPEATER})?(\s{DEADWARN})?' 
30 | ATIMESTAMP = fr'<{TIMESTAMP}>' 31 | ITIMESTAMP = fr'\[{TIMESTAMP}\]' 32 | 33 | def __init__(self, todos): 34 | 35 | def t_error(t): 36 | print(f"Illegal character encountered: {t.value[0]}") 37 | raise ValueError("Lexer error!") 38 | 39 | self.todos = todos 40 | all_todo_keywords = {**todos['todo_states'], **todos['done_states']} 41 | TODO = fr'(?:{"|".join(list(all_todo_keywords.values()))})' 42 | 43 | def t_METADATA(t): 44 | r'(?:^\#\+[^:\n]+:[^\n]*\n)*^\#\+[^:\n]+:[^\n]*' 45 | return t 46 | 47 | @lex.TOKEN(self.ATIMESTAMP) 48 | def t_ATIMESTAMP(t): 49 | return t 50 | 51 | @lex.TOKEN(self.ITIMESTAMP) 52 | def t_ITIMESTAMP(t): 53 | return t 54 | 55 | def t_DRAWER(t): 56 | r'^\s*:[^:]+:.+?:(?:end|END):' 57 | return t 58 | 59 | def t_SCHEDULING(t): 60 | r'(?:CLOSED|SCHEDULED|DEADLINE):' 61 | return t 62 | 63 | def t_COOKIE(t): 64 | r'\[(?:[0-9]*/[0-9]*|[0-9]*%)\]' 65 | return t 66 | 67 | def t_PRIORITY(t): 68 | r'\[\#(?:A|B|C)\]' 69 | return t 70 | 71 | @lex.TOKEN(TODO) 72 | def t_TODO(t): 73 | return t 74 | 75 | def t_STARS(t): 76 | r'^\*+(?=\s)' 77 | return t 78 | 79 | def t_COMMENT(t): 80 | r'COMMENT' 81 | return t 82 | 83 | # Needed to distinguish a regular newline from one that starts a new heading or ends the file 84 | def t_SEPARATOR(t): 85 | r'(?:\r?\n)+(?=\*+\s|\Z)' 86 | return t 87 | 88 | def t_NEWLINE(t): 89 | r'\r?\n+' 90 | return t 91 | 92 | def t_TAGS(t): 93 | r'(?::\S+)+:' 94 | return t 95 | 96 | def t_SPACE(t): 97 | r'[^\S\r\n]+' 98 | return t 99 | 100 | def t_TEXT(t): 101 | r'\S+' 102 | return t 103 | 104 | token_funcs = [func for func in locals() if func.startswith('t_')] 105 | for func in token_funcs: 106 | setattr(self, func, locals()[func]) 107 | 108 | self.lexer = lex.lex(module=self, reflags=re.DOTALL|re.MULTILINE) 109 | 110 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | <<<<<<< HEAD 2 | __pycache__ 3 | *.pyc 4 | ======= 5 | # Taken from Github's defaults: https://github.com/github/gitignore/blob/main/Python.gitignore 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | share/python-wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .nox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | *.py,cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | cover/ 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | local_settings.py 66 | db.sqlite3 67 | db.sqlite3-journal 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | .pybuilder/ 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | # For a library or package, you might want to ignore these files since the code is 92 | # intended to run in multiple environments; otherwise, check them in: 93 | # .python-version 94 | 95 | # pipenv 96 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 97 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 98 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 99 | # install all needed dependencies. 100 | #Pipfile.lock 101 | 102 | # poetry 103 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 104 | # This is especially recommended for binary packages to ensure reproducibility, and is more 105 | # commonly ignored for libraries. 106 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 107 | #poetry.lock 108 | 109 | # pdm 110 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 111 | #pdm.lock 112 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 113 | # in version control. 114 | # https://pdm.fming.dev/#use-with-ide 115 | .pdm.toml 116 | 117 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 118 | __pypackages__/ 119 | 120 | # Celery stuff 121 | celerybeat-schedule 122 | celerybeat.pid 123 | 124 | # SageMath parsed files 125 | *.sage.py 126 | 127 | # Environments 128 | .env 129 | .venv 130 | env/ 131 | venv/ 132 | ENV/ 133 | env.bak/ 134 | venv.bak/ 135 | 136 | # Spyder project settings 137 | .spyderproject 138 | .spyproject 139 | 140 | # Rope project settings 141 | .ropeproject 142 | 143 | # mkdocs documentation 144 | /site 145 | 146 | # mypy 147 | .mypy_cache/ 148 | .dmypy.json 149 | dmypy.json 150 | 151 | # Pyre type checker 152 | .pyre/ 153 | 154 | # pytype static type analyzer 155 | .pytype/ 156 | 157 | # Cython debug symbols 158 | cython_debug/ 159 | 160 | # PyCharm 161 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 162 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 163 | # and can be added to the global gitignore or merged into this file. For a more nuclear 164 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
165 | #.idea/ 166 | 167 | # MacOS 168 | .DS_Store/ 169 | 170 | # Emacs 171 | *~ 172 | \#*\# 173 | .\#* 174 | >>>>>>> c2d7d7a31e5c00b998101bfb82b4a116e3af5c60 175 | -------------------------------------------------------------------------------- /src/orgmunge/parser.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from .classes import * 4 | from functools import reduce 5 | from operator import add 6 | import ply.yacc as yacc 7 | 8 | class Parser: 9 | def __init__(self, lexer): 10 | self.tokens = lexer.tokens 11 | self.lexer = lexer 12 | self.parser = yacc.yacc(module=self) 13 | def p_org_file(self, p): 14 | '''org_file : metadata org_tree 15 | | initial_body_text SEPARATOR org_tree 16 | | initial_body_text SEPARATOR 17 | | metadata initial_body_text SEPARATOR org_tree 18 | | metadata initial_body_text SEPARATOR 19 | | metadata 20 | | org_tree 21 | | SEPARATOR 22 | | empty''' 23 | if len(p) == 5: 24 | p[0] = (p[1], p[2], p[4]) 25 | elif len(p) == 4: 26 | if type(p[3]) is not str: 27 | p[0] = ('', p[1], p[3]) 28 | else: 29 | p[0] = (p[1], p[2], None) 30 | elif len(p) == 3: 31 | if p[1].startswith('#+') or p[1].startswith(':'): 32 | p[0] = (p[1], '', p[2]) 33 | else: 34 | p[0] = ('', p[1], None) 35 | else: 36 | if type(p[1]) is str: 37 | p[0] = (p[1], '', None) 38 | else: 39 | p[0] = ('', '', p[1]) 40 | 41 | def p_metadata(self, p): 42 | '''metadata : METADATA SEPARATOR 43 | | METADATA NEWLINE 44 | | DRAWER NEWLINE metadata''' 45 | p[0] = reduce(add, [str(x) for x in p[1:]]) 46 | 47 | def p_org_tree(self, p): 48 | '''org_tree : heading 49 | | heading SEPARATOR 50 | | org_tree heading SEPARATOR 51 | | org_tree heading''' 52 | if len(p) == 2: 53 | p[0] = [p[1]] 54 | elif len(p) == 3: 55 | if isinstance(p[1], Heading): 56 | p[0] = [p[1]] 57 | else: 58 | p[0] = p[1] + [p[2]] 59 | elif len(p) == 4: 60 | p[0] = p[1] + [p[2]] 61 | 62 | def p_heading(self, p): 63 | '''heading : headline NEWLINE contents 64 | | headline SEPARATOR''' 65 | if len(p) == 4: 66 | p[0] = Heading(p[1], p[3]) 67 | else: 68 | p[0] = Heading(p[1], contents=(None, None, None)) 69 | 70 | def p_headline(self, p): 71 | '''headline : STARS SPACE comment todo priority title cookie tags''' 72 | p[0] = Headline(level=p[1], comment=bool(p[3]), todo=p[4], priority=p[5], title=p[6], cookie=p[7], tags=p[8], 73 | todos=self.lexer.todos) 74 | 75 | def p_comment(self, p): 76 | '''comment : COMMENT SPACE 77 | | empty''' 78 | p[0] = p[1] if len(p) > 2 else None 79 | 80 | def p_todo(self, p): 81 | '''todo : TODO SPACE 82 | | empty''' 83 | p[0] = p[1] if len(p) > 2 else None 84 | 85 | def p_priority(self, p): 86 | '''priority : PRIORITY SPACE 87 | | empty''' 88 | p[0] = p[1] if len(p) > 2 else None 89 | 90 | def p_title(self, p): 91 | """title : TEXT 92 | | TODO 93 | | any_timestamp 94 | | title TODO 95 | | title TEXT 96 | | title SPACE TEXT 97 | | title SPACE TODO 98 | | title SPACE 99 | | empty""" 100 | none_to_empty = [str(x) if x else "" for x in p[1:]] 101 | p[0] = reduce(add, none_to_empty, "").strip() 102 | 103 | def p_cookie(self, p): 104 | '''cookie : COOKIE SPACE 105 | | COOKIE 106 | | empty''' 107 | p[0] = p[1] if len(p) > 1 else None 108 | 109 | def p_tags(self, p): 110 | '''tags : TAGS 111 | | empty''' 112 | if p[1] is not None: 113 | p[0] = [x for x in p[1].split(':') if x != ''] 114 | else: 115 | p[0] = None 116 | 117 | def p_contents(self, p): 118 | '''contents : scheduling drawers body''' 119 | p[0] = (p[1], p[2], p[3]) 120 | 121 | def p_scheduling_data(self, 
p): 122 | '''scheduling_data : SCHEDULING SPACE any_timestamp NEWLINE 123 | | SCHEDULING SPACE any_timestamp SEPARATOR 124 | | SCHEDULING SPACE any_timestamp SPACE 125 | | scheduling_data SCHEDULING SPACE any_timestamp NEWLINE 126 | | scheduling_data SCHEDULING SPACE any_timestamp SEPARATOR 127 | | scheduling_data SCHEDULING SPACE any_timestamp SPACE''' 128 | if len(p) > 5: 129 | p[0] = reduce(add, [p[1], Scheduling(p[2], timestamp=p[4])]) 130 | elif len(p) > 2: 131 | p[0] = Scheduling(p[1], timestamp=p[3]) 132 | else: 133 | p[0] = None 134 | 135 | def p_scheduling(self, p): 136 | '''scheduling : scheduling_data 137 | | empty''' 138 | p[0] = p[1] 139 | 140 | def p_any_timestamp(self, p): 141 | '''any_timestamp : ATIMESTAMP 142 | | ITIMESTAMP''' 143 | p[0] = TimeStamp(p[1]) 144 | 145 | def p_drawer_data(self, p): 146 | '''drawer_data : DRAWER NEWLINE 147 | | DRAWER SEPARATOR 148 | | drawer_data DRAWER NEWLINE 149 | | drawer_data DRAWER SEPARATOR''' 150 | if len(p) == 4: 151 | p[0] = ([d for d in p[1] if d is not None]) + ([Drawer(p[2])]) 152 | elif len(p) == 3: 153 | if type(p[1]) is list: 154 | p[0] = ([d for d in p[1] if d is not None]) + ([Drawer(p[2])]) 155 | else: 156 | p[0] = ([Drawer(p[1])]) 157 | else: 158 | if type(p[1]) is str: 159 | p[0] = ([Drawer(p[1])]) 160 | else: 161 | p[0] = ([]) 162 | 163 | def p_drawers(self, p): 164 | '''drawers : drawer_data 165 | | empty''' 166 | p[0] = p[1] 167 | 168 | def p_body(self, p): 169 | '''body : body_text 170 | | empty''' 171 | p[0] = p[1] 172 | 173 | def p_initial_body_text(self, p): 174 | '''initial_body_text : TEXT 175 | | SPACE 176 | | any_timestamp 177 | | METADATA 178 | | initial_body_text TEXT 179 | | initial_body_text SPACE 180 | | initial_body_text METADATA 181 | | initial_body_text special_token 182 | | initial_body_text NEWLINE''' 183 | p[0] = reduce(add, map(str, p[1:])) 184 | 185 | def p_special_token(self, p): 186 | '''special_token : SCHEDULING 187 | | COOKIE 188 | | PRIORITY 189 | | TODO 190 | | any_timestamp 191 | | DRAWER 192 | | COMMENT 193 | | TAGS''' 194 | p[0] = p[1] 195 | 196 | def p_body_text(self, p): 197 | '''body_text : TEXT 198 | | SPACE 199 | | METADATA 200 | | special_token 201 | | body_text TEXT 202 | | body_text SPACE 203 | | body_text special_token 204 | | body_text METADATA 205 | | body_text NEWLINE''' 206 | p[0] = reduce(add, map(str, p[1:])) 207 | 208 | def p_empty(self, p): 209 | 'empty :' 210 | pass 211 | 212 | def p_error(self, p): 213 | if p is not None: 214 | print(f'Syntax error: {p}') 215 | raise ValueError("Parser error!") 216 | -------------------------------------------------------------------------------- /src/orgmunge/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import re 4 | import json 5 | import os 6 | import platform 7 | from itertools import takewhile 8 | from .parser import Parser 9 | from .lexer import Lexer 10 | from .classes import * 11 | from typing import List, Dict, Optional, Generator, Callable, Iterable 12 | 13 | class Org: 14 | def _parse_todos(self, inp: str) -> Optional[Dict[str, Dict[str, str]]]: 15 | payload = [l.strip() for l in inp.split('\n') 16 | if l.startswith('#+TODO:') \ 17 | or l.startswith('#+SEQ_TODO:')\ 18 | or l.startswith('#+TYP_TODO:')] 19 | if not payload: 20 | return None 21 | todo_states = [] 22 | done_states = [] 23 | bindkey_pattern = re.compile(r'\([^)]+\)') 24 | for line in payload: 25 | if re.search(r'\|', line): 26 | todo_, done_ = line.split('|') 27 | todo = 
todo_.split() if todo_ else [] 28 | done = done_.split() if done_ else [] 29 | else: 30 | line_lst = line.split() 31 | todo, done = line_lst[:-1], [line_lst[-1]] 32 | todo_states.extend([re.sub(bindkey_pattern, '', x) for x in todo]) 33 | done_states.extend([re.sub(bindkey_pattern, '', x) for x in done]) 34 | return {'todo_states': {t: t for t in todo_states}, 35 | 'done_states': {t: t for t in done_states},} 36 | 37 | @classmethod 38 | def get_todos(cls) -> Dict[str, Dict[str, str]]: 39 | 40 | # First try the current directory, then the user's home directory 41 | # to find the todos.json file. 42 | base_file_name = 'todos.json' 43 | current_dir_file = os.path.join(os.getcwd(), base_file_name) 44 | home_dir = os.environ['HOMEPATH'] if platform.system() == 'Windows' else os.environ['HOME'] 45 | home_dir_file = os.path.join(home_dir, base_file_name) 46 | if os.path.isfile(current_dir_file): 47 | input_file_name = current_dir_file 48 | else: 49 | input_file_name = home_dir_file 50 | if os.path.isfile(input_file_name): 51 | with open(input_file_name, 'rb') as JSON: 52 | return json.load(JSON) 53 | else: 54 | return {'todo_states': 55 | {'todo': 'TODO', 56 | 'next': 'NEXT', 57 | 'block': 'BLOCK', 58 | 'wait': 'WAIT',}, 59 | 'done_states': 60 | {'cncl': 'CNCL', 61 | 'done': 'DONE',}} 62 | def __init__(self, input_string: str, from_file: bool = True, debug: bool = False, 63 | todos: Optional[Dict[str, Dict[str, str]]] = None,): 64 | if from_file: 65 | with open(input_string, 'rb') as IN: 66 | string = IN.read().decode('utf-8') 67 | else: 68 | string = input_string 69 | input_todos = self._parse_todos(string) 70 | if input_todos: 71 | self.todos = input_todos 72 | elif todos: 73 | self.todos = todos 74 | else: 75 | self.todos = self.get_todos() 76 | lexer = Lexer(self.todos) 77 | parser = Parser(lexer) 78 | metadata, initial_body, headings = parser.parser.parse(string, debug=debug) 79 | self.top_drawer, self.metadata = self._read_metadata(metadata) 80 | if self.top_drawer.name == 'PROPERTIES': 81 | self.properties = Heading._get_properties_dict(self.top_drawer.contents) 82 | else: 83 | self.properties = dict() 84 | self.initial_body = initial_body 85 | self.root = self._classify_headings(headings) 86 | 87 | def _classify_headings(self, lst: Optional[List[Heading]]) -> Heading: 88 | """Takes a list of headings and classifies them according to their 89 | parent and sibling relationships. It creates an empty top-level heading 90 | named ROOT and returns it, with all the other level-1 headings in the 91 | tree as its children. 
92 | """ 93 | ROOT = Heading(Headline(self.todos, ' ', title='ROOT'), (None, None, None)) 94 | if lst is not None: 95 | ROOT.add_child(lst[0], new=True) 96 | if lst[0].level !=1: 97 | raise ValueError("Org tree can't start with a heading of level > 1") 98 | if len(lst) > 1: 99 | for elem1, elem2 in zip(lst[:-1], lst[1:]): 100 | if elem2.level == 1: 101 | if ROOT.children: 102 | elem2.sibling = ROOT.children[-1] 103 | ROOT.add_child(elem2, new=True) 104 | elif elem2.level > elem1.level: 105 | if elem1.children: 106 | elem2.sibling = elem1.children[-1] 107 | elem1.add_child(elem2, new=True) 108 | elif elem2.level < elem1.level: 109 | levels_to_climb = elem1.level - elem2.level 110 | sibling = elem1.parent 111 | for _ in range(levels_to_climb-1): 112 | sibling = sibling.parent 113 | elem2.sibling = sibling 114 | elem2.sibling.parent.add_child(elem2, new=True) 115 | else: 116 | elem2.sibling = elem1 117 | elem1.parent.add_child(elem2, new=True) 118 | return ROOT 119 | 120 | def _read_metadata(self, metadata: str) -> Tuple[Drawer, Dict[str, List[str]]]: 121 | """Reads the metadata string into a dictionary mapping 122 | each metadata keyword to a list of values assigned to it. 123 | This allows for cumulative metadata assignments (e.g. multiple 124 | #+options lines). The metadata keywords are all converted to 125 | lower case and that's how they will be written out to file. 126 | """ 127 | metadata_lines = [l for l in metadata.split('\n') if l != ''] 128 | if metadata_lines and ':PROPERTIES:' in metadata_lines[0]: 129 | drawer_lines = list(takewhile(lambda l: not re.search(r'^:END:', l), 130 | metadata_lines)) + [':END:'] 131 | metadata_lines = metadata_lines[len(drawer_lines):] 132 | drawer = Drawer('\n'.join(drawer_lines)) 133 | else: 134 | drawer = Drawer('') 135 | result = dict() 136 | for line in metadata_lines: 137 | keyword, value = re.search(r'^\#\+([^:]+):\s*(.*)', line).groups() 138 | if keyword.lower() in result: 139 | result[keyword.lower()].append(value) 140 | else: 141 | result[keyword.lower()] = [value] 142 | return (drawer, result) 143 | 144 | def _metadata_values_to_string(self, keyword: str) -> str: 145 | return "\n".join([f"#+{keyword}: {v}" for v in self.metadata[keyword]]) + '\n' 146 | 147 | def write(self, out_file: str): 148 | "Writes out the org tree into a file." 149 | with open(out_file, 'w') as OUT: 150 | OUT.write(str(self)) 151 | 152 | def get_all_headings(self) -> Generator[Heading, None, None]: 153 | """Generator function to recursively return all headings in the Org tree. 154 | The headings are returned in the order they're encountered in the file 155 | (so the tree is searched depth-first).""" 156 | def _helper(tree: List[Heading]) -> Generator[Heading, None, None]: 157 | for heading in tree: 158 | yield heading 159 | yield from _helper(heading.children) 160 | yield from _helper(self.root.children) 161 | 162 | def filter_headings(self, func: Callable[..., bool]) -> Generator[Heading, None, None]: 163 | """Takes a predicate function and returns all headings in the tree 164 | that return True when passed through that function.""" 165 | return (heading for heading in self.get_all_headings() if func(heading)) 166 | 167 | def get_headings_by_title(self, search_string: str, exact: bool = False, 168 | re_flags: int = 0) -> Generator[Heading, None, None]: 169 | """Return a heading whose headline matches the given string. 170 | If exact is True, get the heading whose headline is exactly 171 | the given string. 
If not, the given string is interpreted 172 | as a regex (so any special characters must be quoted). 173 | Matching is only done on headline title, no cookies, 174 | todo keywords or tags are considered.""" 175 | if exact: 176 | condition = lambda h: h.title == search_string 177 | else: 178 | condition = lambda h: bool(re.search(fr'{search_string}', h.title, flags=re_flags)) 179 | return self.filter_headings(condition) 180 | 181 | def get_heading_by_path(self, path: List[str], exact: bool = False, re_flags: int = 0) -> Optional[Heading]: 182 | """Return a heading by its given path. If exact is True, interpret the given path 183 | as strings to match node titles exactly; otherwise, interpret them as regexes. If 184 | no heading matches the given path, return None""" 185 | if exact: 186 | condition = lambda heading: heading.title == path[-1] 187 | else: 188 | condition = lambda heading: bool(re.search(fr'{path[-1]}', heading.title, flags=re_flags)) 189 | if not path: 190 | return self.root 191 | else: 192 | headings_at_this_level = self.filter_headings(lambda h: h.level == len(path) and condition(h)) 193 | candidates = (h for h in headings_at_this_level 194 | if h.parent is self.get_heading_by_path(path[:-1], exact=exact, re_flags=re_flags)) 195 | try: 196 | return next(candidates) 197 | except StopIteration: 198 | return None 199 | 200 | def __repr__(self): 201 | result = '' 202 | if self.top_drawer.name != '': 203 | result += str(self.top_drawer) 204 | for keyword in self.metadata: 205 | result += self._metadata_values_to_string(keyword) 206 | if self.metadata: 207 | result += '\n' 208 | if self.initial_body: 209 | result += self.initial_body + '\n' 210 | result += ''.join([c.__str__() for c in self.root.children]) 211 | if result[-1] != '\n': 212 | result += '\n' 213 | return result 214 | 215 | def __eq__(self, other): 216 | if not isinstance(other, self.__class__): 217 | return False 218 | else: 219 | return str(self) == str(other) 220 | 221 | 222 | -------------------------------------------------------------------------------- /src/orgmunge/parsetab.py: -------------------------------------------------------------------------------- 1 | 2 | # parsetab.py 3 | # This file is automatically generated. Do not edit. 
4 | # pylint: disable=W,C,R 5 | _tabversion = '3.10' 6 | 7 | _lr_method = 'LALR' 8 | 9 | _lr_signature = 'ATIMESTAMP COMMENT COOKIE DRAWER ITIMESTAMP METADATA NEWLINE PRIORITY SCHEDULING SEPARATOR SPACE STARS TAGS TEXT TODOorg_file : metadata org_tree\n| initial_body_text SEPARATOR org_tree\n| initial_body_text SEPARATOR\n| metadata initial_body_text SEPARATOR org_tree\n| metadata initial_body_text SEPARATOR \n| metadata\n| org_tree\n| SEPARATOR\n| emptymetadata : METADATA SEPARATOR\n| METADATA NEWLINE\n| DRAWER NEWLINE metadataorg_tree : heading\n| heading SEPARATOR\n| org_tree heading SEPARATOR\n| org_tree headingheading : headline NEWLINE contents\n| headline SEPARATORheadline : STARS SPACE comment todo priority title cookie tagscomment : COMMENT SPACE\n| emptytodo : TODO SPACE\n| emptypriority : PRIORITY SPACE\n| emptytitle : TEXT \n| TODO\n| any_timestamp\n| title TODO\n| title TEXT\n| title SPACE TEXT\n| title SPACE TODO\n| title SPACE\n| emptycookie : COOKIE SPACE\n| COOKIE\n| emptytags : TAGS\n| emptycontents : scheduling drawers bodyscheduling_data : SCHEDULING SPACE any_timestamp NEWLINE\n| SCHEDULING SPACE any_timestamp SEPARATOR\n| SCHEDULING SPACE any_timestamp SPACE\n| scheduling_data SCHEDULING SPACE any_timestamp NEWLINE\n| scheduling_data SCHEDULING SPACE any_timestamp SEPARATOR\n| scheduling_data SCHEDULING SPACE any_timestamp SPACEscheduling : scheduling_data\n| emptyany_timestamp : ATIMESTAMP\n| ITIMESTAMPdrawer_data : DRAWER NEWLINE\n| DRAWER SEPARATOR\n| drawer_data DRAWER NEWLINE\n| drawer_data DRAWER SEPARATORdrawers : drawer_data\n| emptybody : body_text\n| emptyinitial_body_text : TEXT\n| SPACE\n| any_timestamp\n| METADATA\n| initial_body_text TEXT\n| initial_body_text SPACE\n| initial_body_text METADATA\n| initial_body_text special_token\n| initial_body_text NEWLINEspecial_token : SCHEDULING\n| COOKIE\n| PRIORITY\n| TODO\n| any_timestamp\n| DRAWER\n| COMMENT\n| TAGSbody_text : TEXT\n| SPACE\n| METADATA\n| special_token\n| body_text TEXT\n| body_text SPACE\n| body_text special_token\n| body_text METADATA\n| body_text NEWLINEempty :' 10 | 11 | _lr_action_items = 
{'SEPARATOR':([0,4,7,9,10,11,12,13,14,15,18,19,20,22,23,24,25,26,27,28,29,30,31,32,33,34,39,40,41,46,47,48,49,50,52,54,56,57,58,59,62,64,65,66,67,68,69,70,71,72,73,74,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,],[5,21,35,-59,-60,-61,38,-49,-50,40,42,-62,43,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-85,-18,-85,35,-17,-85,-47,-48,-85,-21,-85,-55,-56,75,-85,-23,-20,-40,-57,-58,-76,-77,-78,-79,88,-51,-52,92,-85,-25,-22,-80,-81,-82,-83,-84,-53,-54,101,-43,-41,-42,-85,-26,-27,-28,-34,-24,-46,-44,-45,-33,-85,-29,-30,-36,-37,-31,-32,-19,-38,-39,-35,]),'METADATA':([0,2,4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,39,45,48,49,50,56,57,58,67,69,70,71,72,74,75,82,83,84,85,86,87,88,90,91,92,99,100,101,],[7,19,24,-62,-59,-60,-61,-49,-50,24,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-10,-11,46,-85,-12,-85,-47,-48,71,-55,-56,85,-76,-77,-78,-79,-51,-52,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-46,-44,-45,]),'DRAWER':([0,4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,37,39,48,49,50,56,57,58,67,69,70,71,72,74,75,82,83,84,85,86,87,88,90,91,92,99,100,101,],[8,32,-62,-59,-60,-61,-49,-50,32,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,8,-85,59,-47,-48,32,73,-56,32,-76,-77,-78,-79,-51,-52,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-46,-44,-45,]),'TEXT':([0,2,4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,39,41,45,48,49,50,52,54,56,57,58,62,64,65,67,69,70,71,72,74,75,78,80,81,82,83,84,85,86,87,88,90,91,92,93,94,95,96,97,98,99,100,101,102,104,105,108,109,],[9,9,22,-62,-59,-60,-61,-49,-50,22,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-10,-11,-85,-85,-12,-85,-47,-48,-85,-21,69,-55,-56,-85,-23,-20,82,-76,-77,-78,-79,-51,-52,94,-25,-22,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,105,-26,-27,-28,-34,-24,-46,-44,-45,108,-29,-30,-31,-32,]),'SPACE':([0,2,4,7,9,10,11,13,14,16,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,39,41,45,48,49,50,51,52,53,54,56,57,58,60,62,63,64,65,67,69,70,71,72,74,75,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,104,105,106,108,109,],[10,10,23,-62,-59,-60,-61,-49,-50,41,23,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-10,-11,-85,-85,-12,-85,-47,-48,61,-85,65,-21,70,-55,-56,76,-85,81,-23,-20,83,-76,-77,-78,-79,-51,-52,90,-85,98,-25,-22,-80,-81,-82,-83,-84,-53,-54,99,-43,-41,-42,102,-26,-27,-28,-34,-24,-46,-44,-45,-33,-29,-30,113,-31,-32,]),'$end':([0,1,2,3,5,6,12,13,14,17,20,21,27,28,29,30,31,32,33,34,35,36,38,39,40,42,43,44,45,47,48,49,50,55,56,57,58,66,67,68,69,70,71,72,74,75,82,83,84,85,86,87,88,90,91,92,99,100,101,],[-85,0,-6,-7,-8,-9,-13,-49,-50,-1,-16,-3,-68,-69,-70,-71,-72,-73,-74,-75,-10,-11,-14,-85,-18,-5,-15,-2,-12,-17,-85,-47,-48,-4,-85,-55,-56,-40,-57,-58,-76,-77,-78,-79,-51,-52,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-46,-44,-45,]),'ATIMESTAMP':([0,2,4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,39,41,45,48,49,50,52,54,56,57,58,61,62,64,65,67,69,70,71,72,74,75,76,78,80,81,82,83,84,85,86,87,88,90,91,92,98,99,100,101,],[13,13,13,-62,-59,-60,-61,-49,-50,13,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-10,-11,-85,-85,-12,-85,-47,-48,-85,-21,13,-55,-56,13,-85,-23,-20,13,-76,-77,-78,-79,-51,-52,13,13,-25,-22,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-24,-46,-44,-45,]),'ITIMESTAMP':([0,2,4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,39,41,45,48,49,50,52,54,56,57,58,61,62,64,65,67,69,70,71,72,74,75,76,78,80
,81,82,83,84,85,86,87,88,90,91,92,98,99,100,101,],[14,14,14,-62,-59,-60,-61,-49,-50,14,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-10,-11,-85,-85,-12,-85,-47,-48,-85,-21,14,-55,-56,14,-85,-23,-20,14,-76,-77,-78,-79,-51,-52,14,14,-25,-22,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-24,-46,-44,-45,]),'STARS':([0,2,3,12,13,14,17,20,21,27,28,29,30,31,32,33,34,35,36,38,39,40,42,43,44,45,47,48,49,50,55,56,57,58,66,67,68,69,70,71,72,74,75,82,83,84,85,86,87,88,90,91,92,99,100,101,],[16,16,16,-13,-49,-50,16,-16,16,-68,-69,-70,-71,-72,-73,-74,-75,-10,-11,-14,-85,-18,16,-15,16,-12,-17,-85,-47,-48,16,-85,-55,-56,-40,-57,-58,-76,-77,-78,-79,-51,-52,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-46,-44,-45,]),'NEWLINE':([4,7,8,9,10,11,13,14,15,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,41,46,52,54,59,62,64,65,67,69,70,71,72,73,77,78,80,81,82,83,84,85,86,89,93,94,95,96,97,98,102,103,104,105,106,107,108,109,110,111,112,113,],[26,36,37,-59,-60,-61,-49,-50,39,26,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-85,36,-85,-21,74,-85,-23,-20,86,-76,-77,-78,-79,87,91,-85,-25,-22,-80,-81,-82,-83,-84,100,-85,-26,-27,-28,-34,-24,-33,-85,-29,-30,-36,-37,-31,-32,-19,-38,-39,-35,]),'SCHEDULING':([4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,39,48,49,50,56,57,58,67,69,70,71,72,74,75,82,83,84,85,86,87,88,90,91,92,99,100,101,],[27,-62,-59,-60,-61,-49,-50,27,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,51,-85,60,-48,27,-55,-56,27,-76,-77,-78,-79,-51,-52,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-46,-44,-45,]),'COOKIE':([4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,39,41,48,49,50,52,54,56,57,58,62,64,65,67,69,70,71,72,74,75,78,80,81,82,83,84,85,86,87,88,90,91,92,93,94,95,96,97,98,99,100,101,102,104,105,108,109,],[28,-62,-59,-60,-61,-49,-50,28,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-85,-85,-85,-47,-48,-85,-21,28,-55,-56,-85,-23,-20,28,-76,-77,-78,-79,-51,-52,-85,-25,-22,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,106,-26,-27,-28,-34,-24,-46,-44,-45,-33,-29,-30,-31,-32,]),'PRIORITY':([4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,39,41,48,49,50,52,54,56,57,58,62,64,65,67,69,70,71,72,74,75,81,82,83,84,85,86,87,88,90,91,92,99,100,101,],[29,-62,-59,-60,-61,-49,-50,29,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-85,-85,-85,-47,-48,-85,-21,29,-55,-56,79,-23,-20,29,-76,-77,-78,-79,-51,-52,-22,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-46,-44,-45,]),'TODO':([4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,39,41,48,49,50,52,54,56,57,58,62,64,65,67,69,70,71,72,74,75,78,80,81,82,83,84,85,86,87,88,90,91,92,93,94,95,96,97,98,99,100,101,102,104,105,108,109,],[30,-62,-59,-60,-61,-49,-50,30,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-85,-85,-85,-47,-48,63,-21,30,-55,-56,-85,-23,-20,30,-76,-77,-78,-79,-51,-52,95,-25,-22,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,104,-26,-27,-28,-34,-24,-46,-44,-45,109,-29,-30,-31,-32,]),'COMMENT':([4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,39,41,48,49,50,56,57,58,67,69,70,71,72,74,75,82,83,84,85,86,87,88,90,91,92,99,100,101,],[33,-62,-59,-60,-61,-49,-50,33,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-85,53,-85,-47,-48,33,-55,-56,33,-76,-77,-78,-79,-51,-52,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-46,-44,-45,]),'TAGS':([4,7,9,10,11,13,14,18,19,22,23,24,25,26,27,28,29,30,31,32,33,34,39,41,48,49,50,52,54,56,57,58,62,64,65,67,69,70,71,72,74,75,78,80,81,82,83,84,85,86,87,88,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,113,],[34,-62,-59
,-60,-61,-49,-50,34,-62,-63,-64,-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-85,-85,-85,-47,-48,-85,-21,34,-55,-56,-85,-23,-20,34,-76,-77,-78,-79,-51,-52,-85,-25,-22,-80,-81,-82,-83,-84,-53,-54,-43,-41,-42,-85,-26,-27,-28,-34,-24,-46,-44,-45,-33,111,-29,-30,-36,-37,-31,-32,-35,]),} 12 | 13 | _lr_action = {} 14 | for _k, _v in _lr_action_items.items(): 15 | for _x,_y in zip(_v[0],_v[1]): 16 | if not _x in _lr_action: _lr_action[_x] = {} 17 | _lr_action[_x][_k] = _y 18 | del _lr_action_items 19 | 20 | _lr_goto_items = {'org_file':([0,],[1,]),'metadata':([0,37,],[2,45,]),'org_tree':([0,2,21,42,],[3,17,44,55,]),'initial_body_text':([0,2,],[4,18,]),'empty':([0,39,41,48,52,56,62,78,93,103,],[6,50,54,58,64,68,80,97,107,112,]),'any_timestamp':([0,2,4,18,56,61,67,76,78,],[11,11,31,31,31,77,31,89,96,]),'heading':([0,2,3,17,21,42,44,55,],[12,12,20,20,12,12,20,20,]),'headline':([0,2,3,17,21,42,44,55,],[15,15,15,15,15,15,15,15,]),'special_token':([4,18,56,67,],[25,25,72,84,]),'contents':([39,],[47,]),'scheduling':([39,],[48,]),'scheduling_data':([39,],[49,]),'comment':([41,],[52,]),'drawers':([48,],[56,]),'drawer_data':([48,],[57,]),'todo':([52,],[62,]),'body':([56,],[66,]),'body_text':([56,],[67,]),'priority':([62,],[78,]),'title':([78,],[93,]),'cookie':([93,],[103,]),'tags':([103,],[110,]),} 21 | 22 | _lr_goto = {} 23 | for _k, _v in _lr_goto_items.items(): 24 | for _x, _y in zip(_v[0], _v[1]): 25 | if not _x in _lr_goto: _lr_goto[_x] = {} 26 | _lr_goto[_x][_k] = _y 27 | del _lr_goto_items 28 | _lr_productions = [ 29 | ("S' -> org_file","S'",1,None,None,None), 30 | ('org_file -> metadata org_tree','org_file',2,'p_org_file','parser.py',14), 31 | ('org_file -> initial_body_text SEPARATOR org_tree','org_file',3,'p_org_file','parser.py',15), 32 | ('org_file -> initial_body_text SEPARATOR','org_file',2,'p_org_file','parser.py',16), 33 | ('org_file -> metadata initial_body_text SEPARATOR org_tree','org_file',4,'p_org_file','parser.py',17), 34 | ('org_file -> metadata initial_body_text SEPARATOR','org_file',3,'p_org_file','parser.py',18), 35 | ('org_file -> metadata','org_file',1,'p_org_file','parser.py',19), 36 | ('org_file -> org_tree','org_file',1,'p_org_file','parser.py',20), 37 | ('org_file -> SEPARATOR','org_file',1,'p_org_file','parser.py',21), 38 | ('org_file -> empty','org_file',1,'p_org_file','parser.py',22), 39 | ('metadata -> METADATA SEPARATOR','metadata',2,'p_metadata','parser.py',42), 40 | ('metadata -> METADATA NEWLINE','metadata',2,'p_metadata','parser.py',43), 41 | ('metadata -> DRAWER NEWLINE metadata','metadata',3,'p_metadata','parser.py',44), 42 | ('org_tree -> heading','org_tree',1,'p_org_tree','parser.py',48), 43 | ('org_tree -> heading SEPARATOR','org_tree',2,'p_org_tree','parser.py',49), 44 | ('org_tree -> org_tree heading SEPARATOR','org_tree',3,'p_org_tree','parser.py',50), 45 | ('org_tree -> org_tree heading','org_tree',2,'p_org_tree','parser.py',51), 46 | ('heading -> headline NEWLINE contents','heading',3,'p_heading','parser.py',63), 47 | ('heading -> headline SEPARATOR','heading',2,'p_heading','parser.py',64), 48 | ('headline -> STARS SPACE comment todo priority title cookie tags','headline',8,'p_headline','parser.py',71), 49 | ('comment -> COMMENT SPACE','comment',2,'p_comment','parser.py',76), 50 | ('comment -> empty','comment',1,'p_comment','parser.py',77), 51 | ('todo -> TODO SPACE','todo',2,'p_todo','parser.py',81), 52 | ('todo -> empty','todo',1,'p_todo','parser.py',82), 53 | ('priority -> PRIORITY SPACE','priority',2,'p_priority','parser.py',86), 54 | ('priority -> 
empty','priority',1,'p_priority','parser.py',87), 55 | ('title -> TEXT','title',1,'p_title','parser.py',91), 56 | ('title -> TODO','title',1,'p_title','parser.py',92), 57 | ('title -> any_timestamp','title',1,'p_title','parser.py',93), 58 | ('title -> title TODO','title',2,'p_title','parser.py',94), 59 | ('title -> title TEXT','title',2,'p_title','parser.py',95), 60 | ('title -> title SPACE TEXT','title',3,'p_title','parser.py',96), 61 | ('title -> title SPACE TODO','title',3,'p_title','parser.py',97), 62 | ('title -> title SPACE','title',2,'p_title','parser.py',98), 63 | ('title -> empty','title',1,'p_title','parser.py',99), 64 | ('cookie -> COOKIE SPACE','cookie',2,'p_cookie','parser.py',104), 65 | ('cookie -> COOKIE','cookie',1,'p_cookie','parser.py',105), 66 | ('cookie -> empty','cookie',1,'p_cookie','parser.py',106), 67 | ('tags -> TAGS','tags',1,'p_tags','parser.py',110), 68 | ('tags -> empty','tags',1,'p_tags','parser.py',111), 69 | ('contents -> scheduling drawers body','contents',3,'p_contents','parser.py',118), 70 | ('scheduling_data -> SCHEDULING SPACE any_timestamp NEWLINE','scheduling_data',4,'p_scheduling_data','parser.py',122), 71 | ('scheduling_data -> SCHEDULING SPACE any_timestamp SEPARATOR','scheduling_data',4,'p_scheduling_data','parser.py',123), 72 | ('scheduling_data -> SCHEDULING SPACE any_timestamp SPACE','scheduling_data',4,'p_scheduling_data','parser.py',124), 73 | ('scheduling_data -> scheduling_data SCHEDULING SPACE any_timestamp NEWLINE','scheduling_data',5,'p_scheduling_data','parser.py',125), 74 | ('scheduling_data -> scheduling_data SCHEDULING SPACE any_timestamp SEPARATOR','scheduling_data',5,'p_scheduling_data','parser.py',126), 75 | ('scheduling_data -> scheduling_data SCHEDULING SPACE any_timestamp SPACE','scheduling_data',5,'p_scheduling_data','parser.py',127), 76 | ('scheduling -> scheduling_data','scheduling',1,'p_scheduling','parser.py',136), 77 | ('scheduling -> empty','scheduling',1,'p_scheduling','parser.py',137), 78 | ('any_timestamp -> ATIMESTAMP','any_timestamp',1,'p_any_timestamp','parser.py',141), 79 | ('any_timestamp -> ITIMESTAMP','any_timestamp',1,'p_any_timestamp','parser.py',142), 80 | ('drawer_data -> DRAWER NEWLINE','drawer_data',2,'p_drawer_data','parser.py',146), 81 | ('drawer_data -> DRAWER SEPARATOR','drawer_data',2,'p_drawer_data','parser.py',147), 82 | ('drawer_data -> drawer_data DRAWER NEWLINE','drawer_data',3,'p_drawer_data','parser.py',148), 83 | ('drawer_data -> drawer_data DRAWER SEPARATOR','drawer_data',3,'p_drawer_data','parser.py',149), 84 | ('drawers -> drawer_data','drawers',1,'p_drawers','parser.py',164), 85 | ('drawers -> empty','drawers',1,'p_drawers','parser.py',165), 86 | ('body -> body_text','body',1,'p_body','parser.py',169), 87 | ('body -> empty','body',1,'p_body','parser.py',170), 88 | ('initial_body_text -> TEXT','initial_body_text',1,'p_initial_body_text','parser.py',174), 89 | ('initial_body_text -> SPACE','initial_body_text',1,'p_initial_body_text','parser.py',175), 90 | ('initial_body_text -> any_timestamp','initial_body_text',1,'p_initial_body_text','parser.py',176), 91 | ('initial_body_text -> METADATA','initial_body_text',1,'p_initial_body_text','parser.py',177), 92 | ('initial_body_text -> initial_body_text TEXT','initial_body_text',2,'p_initial_body_text','parser.py',178), 93 | ('initial_body_text -> initial_body_text SPACE','initial_body_text',2,'p_initial_body_text','parser.py',179), 94 | ('initial_body_text -> initial_body_text 
METADATA','initial_body_text',2,'p_initial_body_text','parser.py',180), 95 | ('initial_body_text -> initial_body_text special_token','initial_body_text',2,'p_initial_body_text','parser.py',181), 96 | ('initial_body_text -> initial_body_text NEWLINE','initial_body_text',2,'p_initial_body_text','parser.py',182), 97 | ('special_token -> SCHEDULING','special_token',1,'p_special_token','parser.py',186), 98 | ('special_token -> COOKIE','special_token',1,'p_special_token','parser.py',187), 99 | ('special_token -> PRIORITY','special_token',1,'p_special_token','parser.py',188), 100 | ('special_token -> TODO','special_token',1,'p_special_token','parser.py',189), 101 | ('special_token -> any_timestamp','special_token',1,'p_special_token','parser.py',190), 102 | ('special_token -> DRAWER','special_token',1,'p_special_token','parser.py',191), 103 | ('special_token -> COMMENT','special_token',1,'p_special_token','parser.py',192), 104 | ('special_token -> TAGS','special_token',1,'p_special_token','parser.py',193), 105 | ('body_text -> TEXT','body_text',1,'p_body_text','parser.py',197), 106 | ('body_text -> SPACE','body_text',1,'p_body_text','parser.py',198), 107 | ('body_text -> METADATA','body_text',1,'p_body_text','parser.py',199), 108 | ('body_text -> special_token','body_text',1,'p_body_text','parser.py',200), 109 | ('body_text -> body_text TEXT','body_text',2,'p_body_text','parser.py',201), 110 | ('body_text -> body_text SPACE','body_text',2,'p_body_text','parser.py',202), 111 | ('body_text -> body_text special_token','body_text',2,'p_body_text','parser.py',203), 112 | ('body_text -> body_text METADATA','body_text',2,'p_body_text','parser.py',204), 113 | ('body_text -> body_text NEWLINE','body_text',2,'p_body_text','parser.py',205), 114 | ('empty -> ','empty',0,'p_empty','parser.py',209), 115 | ] 116 | -------------------------------------------------------------------------------- /README.org: -------------------------------------------------------------------------------- 1 | #+title:Orgmunge 2 | * Motivation and scope 3 | Orgmunge was born out of the desire to modify Org documents 4 | programmatically from within Python. The wonderful [[https://github.com/karlicoss/orgparse][orgparse]] can read 5 | an Org document into a tree object but doesn't offer an interface to 6 | modify the tree and write it back to file. 7 | 8 | The original use case was trying to sync Outlook calendar items with 9 | Org: whenever someone rescheduled a meeting, my Python script was 10 | unable to reschedule the Org heading it had originally 11 | created. Instead of forking =orgparse=, I decided to write an actual 12 | grammar for an Org document and use [[https://github.com/dabeaz/ply][PLY]] to generate a parser for it. 13 | 14 | Now Org syntax is too sophisticated for me to claim that this first 15 | attempt can parse everything. In fact, some folks way smarter than I 16 | am (and with more formal training), have hinted that Org 17 | syntax can't be properly parsed with a context-free grammar. For such 18 | reasons (and for my own lack of experience with writing grammars), I 19 | have restricted the scope of this module to the features I care about: 20 | for each heading, the headline components (the =COMMENT= keyword, the 21 | todo state, priority, cookies, and tags) are all parsed, as well as 22 | any scheduling timestamps and all the drawers. The heading contents 23 | are treated as a blob of text and the only thing the parser extracts 24 | from the contents are the timestamps. 
No attempts are made at parsing
25 | things like tables or source code blocks further. =orgmunge= can also
26 | parse out the document's metadata and export options but the major
27 | assumption it makes is that the document starts out with some optional
28 | metadata and export options, followed by some optional initial body
29 | text (not falling under any heading), and then a tree of headings. Any
30 | export options or metadata that come later within the document are
31 | treated as text (some heading's content).
32 | * Use Cases
33 | If you have built something on top of =orgmunge=, please open an issue
34 | here and I'll be happy to add your project to the use cases.
35 | ** Update an Org file with external data
36 | Arguably, this is the main motivation behind creating =orgmunge=: using
37 | the power of Python to fetch external data and "smartly" update an Org
38 | file with it. I have created three base classes to help with this at
39 | [[https://github.com/durableOne/orgfetcher][orgfetcher]]. The repo also contains an example file that inherits from
40 | the base classes to fetch github issues and track them in an Org file.
41 | ** Redact an Org file
42 | Replace important information in an Org file with random words in
43 | order to share the structure of the file with someone without
44 | compromising your information. See [[https://github.com/durableOne/redactOrg][redactOrg]].
45 | * Installation
46 | ** From PyPI
47 | - =orgmunge= is now on PyPI
48 | - You can install =orgmunge= using =pip=:
49 | #+begin_src shell
50 | python3 -m pip install orgmunge
51 | #+end_src
52 | ** From the repo
53 | - The only dependency of =orgmunge= is =PLY=, so you need =PLY= installed.
54 | - Clone this repo
55 | - Add the directory where you cloned this repo to your =PYTHONPATH=
56 | * Usage
57 | ** Specifying TODO keywords
58 | - The parser needs to know the set of valid keywords before it starts
59 | parsing your input. To do this, it uses the following steps:
60 | 1. If your input string/file contains [[https://orgmode.org/manual/Per_002dfile-keywords.html][per-file keywords]], these will
61 | take precedence over anything else.
62 | 2. Failing to find any such keywords, it checks whether you passed
63 | it the keywords using the =todos= argument.
64 | 3. If no todo keywords were passed, the parser looks for a file named
65 | =todos.json= in one of two places (again in order of preference):
66 | 1) The current directory
67 | 2) The user's home directory
68 | 4. Failing all the above, the keywords are assumed to be defined by:
69 | #+begin_src javascript
70 | {
71 | "todo_states":
72 | {
73 | "todo": "TODO",
74 | "next": "NEXT",
75 | "wait": "WAIT"
76 | },
77 | "done_states":
78 | {
79 | "cncl": "CNCL",
80 | "done": "DONE"
81 | }
82 | }
83 | #+end_src
84 | - If you choose to supply your own keywords as an argument to the
85 | parser, you must follow the above structure: separate =todo_states=
86 | and =done_states= dicts, with =keyword_nickname: keyword= pairs
87 | specifying each set of states, as in the sketch below.
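- As a minimal sketch (the keyword nicknames and keywords below are made
  up for illustration), you can pass such a structure to the =Org= class
  described in the next section via its =todos= argument:
#+begin_src python
from orgmunge import Org

my_todos = {
    'todo_states': {'todo': 'TODO', 'blocked': 'BLCK'},
    'done_states': {'done': 'DONE', 'wont_do': 'WONT'},
}

# The parser now recognizes BLCK and WONT as valid headline keywords:
org = Org('* BLCK Waiting on upstream\n', from_file=False, todos=my_todos)
#+end_src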
88 | ** Reading an Org tree
89 | 
90 | - The =Org= class in =__init__.py= is the main entry point to =orgmunge=.
91 | It can be used to read an Org tree either from a string or from a
92 | file:
93 | #+begin_src python
94 | from orgmunge import Org
95 | 
96 | org_1 = Org('* TODO Something important\n', from_file=False) # \n needed to signify end of document
97 | org_2 = Org('/path/to/my/file.org')
98 | org_3 = Org('/path/to/my/file.org', debug=True) # Print PLY debugging info
99 | #+end_src
100 | - The =Org= object has 3 main attributes you should care about:
101 | 1. =Org.metadata= stores the metadata and export options found at the
102 | beginning of the file. This is a dict mapping the option/keyword
103 | name to a list of its values (to allow for cumulative keywords
104 | such as =#+OPTION=). Example:
105 | #+begin_src python
106 | org_1 = Org('#+title: Test\n', from_file=False)
107 | assert(org_1.metadata['title'] == ['Test'])
108 | #+end_src
109 | 2. =Org.initial_body= stores any text between the metadata and the
110 | first heading.
111 | 3. =Org.root= stores the root of the Org tree. This is a [[*Heading Objects][heading]] with
112 | the [[*Headline Objects][headline]] =ROOT= whose only useful attribute is =children=, which is a
113 | list of the top-level [[*Heading Objects][headings]] in the given document.
114 | - The Org tree is a list of [[file:__init__.py::def _classify_headings(self, lst):][headings]] with parent, child and sibling relationships.
115 | 
116 | 
117 | *** Heading Objects
118 | - A heading object consists of:
119 | 1. A [[*Headline Objects][headline]]
120 | 2. Contents:
121 | 1) [[*Scheduling Objects][Scheduling]], if any
122 | 2) A list of [[*Drawer Objects][Drawers]], if any
123 | 3) Body text, if any
124 | - Important attributes:
125 | 1. =properties=. This is a dict mapping property names to their
126 | values. The properties are parsed from the =PROPERTIES= drawer if
127 | it exists. This attribute can also be set by the user (the value
128 | supplied must be a dict).
129 | 2. =inherited_properties=. Same format as the =properties= dict but
130 | contains only properties inherited from ancestors.
131 | 3. =tags= returns a list of the tags explicitly defined for this
132 | heading, while =all_tags= also includes tags inherited from ancestors.
133 | 4. =headline= returns the heading's headline. This attribute can also
134 | be set by the user (the value must be a [[*Headline Objects][Headline]] instance).
135 | 5. =scheduling= is a [[*Scheduling Objects][Scheduling]] object containing information about
136 | =SCHEDULED/DEADLINE/CLOSED= timestamps of the heading, if any. Can
137 | also be set by the user (the value must be a Scheduling instance).
138 | 6. =drawers= is a list of [[*Drawer Objects][Drawer]] objects containing the drawers
139 | associated with this heading. When you update the heading's
140 | =properties= attribute, the =PROPERTIES= drawer is updated the next
141 | time you access it.
142 | 7. =children= returns a list of Heading objects that are the direct
143 | children of this heading.
144 | 8. =parent= returns the parent heading of the current one. If the
145 | current heading is a top-level heading, the root heading will be
146 | returned.
147 | 9. =sibling= returns the sibling heading of the current one that comes
148 | before it in the tree, if any. The reason this sibling is the one
149 | that is formally tracked is that it's the one that
150 | would adopt the current heading whenever the current heading is
151 | demoted. If you want a list of all siblings of the current
152 | heading, you can do this:
153 | #+begin_src python
154 | siblings = [c for c in current_heading.parent.children if c is not current_heading]
155 | #+end_src
156 | 10. =level= is the heading's level, with 1 being the top level and each
157 | sub-level after that being incremented by 1 (the heading's level
158 | is the number of "stars" before its headline).
159 | - Important methods:
160 | 1. =clocking=. This returns a list of [[*Clocking Objects][Clocking]] objects, parsed
161 | from the heading's =LOGBOOK= drawer, if any. You can also pass the
162 | optional boolean parameter =include_children=, which, when True,
163 | includes clocking information of this heading's children as well.
164 | 2. =get_all_properties=. This returns a dict of all properties of the
165 | heading, whether directly defined or inherited from the heading's
166 | ancestors. The latest-defined value of a property wins.
167 | 3. =add_child= accepts a Heading object to add as a child to the
168 | current heading. The optional boolean parameter =new= should be set
169 | to =True= when this is a new heading that was created and needs to
170 | be assigned a parent. It should be set to =False= (default) when
171 | the addition of a child is due to a promotion/demotion operation.
172 | 4. =remove_child= accepts a heading object and deletes it from the
173 | current heading's children if it's a child of the current
174 | heading.
175 | 5. =promote= promotes the current heading one level. If the heading has
176 | children, they would be orphaned, so this raises a
177 | =ValueError=. Technically, Org allows you to have, say, level 3
178 | headings under a level 1 heading, but =orgmunge= does not allow
179 | this, to make parsing the tree easier.
180 | 6. =promote_tree= promotes the current heading and all its
181 | descendants. Use this if the heading you want to promote has
182 | children.
183 | 7. =demote= demotes the current heading one level. If the current
184 | heading has no sibling to adopt it, the demotion attempt fails
185 | and raises a =ValueError=.
186 | 8. =demote_tree= is the equivalent of =promote_tree= for demotion.
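- Here is a small sketch of the attributes above, using a hypothetical
  document built in memory (the property name and heading titles are
  made up, and it assumes the =PROPERTIES= drawer is parsed as described):
#+begin_src python
from orgmunge import Org

org = Org('''* Project
:PROPERTIES:
:OWNER:       me
:END:
** Task 1
** Task 2
''', from_file=False)

project = org.root.children[0]    # the level-1 heading
task_1 = project.children[0]      # its first child

print(project.properties)            # {'OWNER': 'me'}
print(task_1.get_all_properties())   # OWNER is inherited from the parent
print([child.title for child in project.children])
print(task_1.parent is project)      # True
#+end_src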
187 | *** Headline Objects
188 | - Important attributes:
189 | 1. =done= is a boolean attribute that determines whether the headline
190 | is in one of the done states. You can't set this attribute directly.
191 | 2. =level= is the headline's level (the number of "stars" before the
192 | title).
193 | 3. =comment= is a boolean attribute that determines whether a headline
194 | is commented out (by having the keyword =COMMENT= inserted before
195 | the title).
196 | 4. =todo= returns/sets the headline's todo state. You can set it
197 | yourself but it has to be one of the values of =self._todo_states=
198 | or =self._done_states=.
199 | 5. =cookie= returns/sets the headline's cookie. See [[*Cookie Objects][Cookie Objects]].
200 | 6. =priority= returns/sets the headline's priority.
201 | - Important methods:
202 | 1. =promote= decreases the level by the number given by the parameter
203 | =n= (default 1).
204 | 2. =demote= acts like =promote= but increases the level by =n= instead.
205 | 3. =toggle_comment= toggles whether or not a headline is
206 | commented out using the =COMMENT= keyword.
207 | 1) =comment_out= ensures the headline is commented out using
208 | =COMMENT=.
209 | 2) =uncomment= ensures the headline is not commented out using the
210 | =COMMENT= keyword.
211 | 4. =raise_priority= increases the headline's priority by 1.
212 | 5. =lower_priority= decreases the headline's priority by 1.
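- The following sketch shows a =Headline= being manipulated through its
  =Heading= (it assumes the default TODO keyword set shown earlier; the
  heading text is made up):
#+begin_src python
from orgmunge import Org

org = Org('* TODO [#B] Write the report\n', from_file=False)
headline = org.root.children[0].headline

print(headline.todo, headline.done)   # TODO False
headline.todo = 'DONE'                # must be one of the configured keywords
print(headline.done)                  # True

headline.toggle_comment()             # insert the COMMENT keyword
headline.lower_priority()             # adjust the [#B] priority cookie
print(str(headline))
#+end_src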
213 | *** Scheduling Objects
214 | - Has 6 attributes for the 3 possible scheduling keywords (3 are aliases of the other 3):
215 | 1. CLOSED, closed
216 | 2. SCHEDULED, scheduled
217 | 3. DEADLINE, deadline
218 | - Each attribute, when queried, will return either =None= or a =TimeStamp=
219 | object representing the timestamp associated with this particular
220 | scheduling keyword. You can set the attributes directly but they
221 | have to be set to a =TimeStamp= object.
222 | *** Drawer Objects
223 | - A =Drawer= object has only 2 attributes: =name= and =contents=. The
224 | =contents= attribute is simply a list of lines making up the drawer
225 | contents. When you modify a heading's =properties= attribute, its
226 | =PROPERTIES= drawer gets updated accordingly.
227 | *** Clocking Objects
228 | - The =Clocking= objects have 3 attributes: =start_time=, =end_time= and
229 | =duration=. Only the first 2 can be set. When setting either, you
230 | should pass a string following the Org time format; namely,
231 | '%Y-%m-%d %a %H:%M' (see the [[https://man7.org/linux/man-pages/man3/strftime.3.html][strftime(3)]] man page for an explanation
232 | of the format codes).
233 | - If =end_time= is =None=, the duration is calculated from the =start_time=
234 | up to the current moment.
235 | *** Priority Objects
236 | - The only attribute, =priority=, can be set directly by the user and can
237 | be one of only 3 strings: 'A', 'B' or 'C'. Set it to =None= to remove it
238 | from the =Heading=.
239 | - The methods =_raise= and =_lower= will raise or lower the priority.
240 | - If the priority is =None=, raising it sets it to 'A' and lowering it
241 | sets it to 'C'.
242 | *** TimeStamp Objects
243 | - Important attributes:
244 | 1. =start_time= and =end_time= can be queried and set by the user. You
245 | can set them by supplying a string, a =datetime= object or =None=.
246 | 2. =repeater= returns a timestamp repeater string such as '+1w'. Can
247 | also be set by the user.
248 | 3. =deadline_warn= acts similarly to =repeater= and represents the
249 | number of days before a deadline to warn the user of an upcoming
250 | deadline.
251 | 4. =active= is a boolean property and decides whether the time stamp
252 | will be printed with =[]= or =<>= delimiters. Can be set directly by
253 | the user.
254 | *** Cookie Objects
255 | - =Cookie= objects represent progress on the current =Heading=.
256 | - They can be of type 'percent' (e.g. [50%]) or of type 'progress' (e.g. [2/4]).
257 | 
258 | - Important attributes:
259 | 1. =cookie_type=: can only be one of 'percent' or 'progress'. Can be
260 | set directly by the user.
261 | 2. =m= and =n= represent the progress as the ratio =m/n=. If the cookie
262 | type is 'percent', =n= is 100. When changing =cookie_type=, =m= and =n=
263 | are converted accordingly.
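- As an illustration of the scheduling and timestamp objects described
  above, here is a minimal sketch (the heading text and dates are
  arbitrary):
#+begin_src python
from orgmunge import Org
from orgmunge.classes import TimeStamp

org = Org('''* TODO Prepare slides
SCHEDULED: <2024-01-15 Mon>
''', from_file=False)
heading = org.root.children[0]

print(heading.scheduling.scheduled)   # the parsed <2024-01-15 Mon> timestamp

# Replace it with a new active timestamp carrying a weekly repeater:
stamp = TimeStamp('<2024-01-22 Mon 09:00>')
stamp.repeater = '+1w'
heading.scheduling.scheduled = stamp
print(heading.scheduling)             # SCHEDULED: <2024-01-22 Mon 09:00+1w>
#+end_src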
264 | ** Modifying an Org tree
265 | - The ability to modify the tree was the main reason I wrote this
266 | package. Most of the attributes of the tree objects can be modified
267 | directly by the user.
268 | - Use the =promote*= and =demote*= methods of the =Heading= objects to
269 | change =Heading= levels.
270 | - To rearrange headings, note that a =Heading='s =children=
271 | attribute is a list whose ordering is important: the tree will be
272 | written back to file with each =Heading='s children in the order
273 | they appear in that list. So the user can rearrange headings of the same level
274 | by assigning their parent's =children= attribute to a reordered
275 | list of child headings. It's up to the user to update the child
276 | headings' =sibling= attributes appropriately.
277 | ** Writing an Org tree
278 | - You can use the =Org= object's =write= method to write out the tree to a
279 | file whose name you supply to the method:
280 | #+begin_src python
281 | from orgmunge import Org
282 | 
283 | agenda = Org('/path/to/agenda.org')
284 | 
285 | # Do something with agenda...
286 | 
287 | agenda.write('/path/to/modified_agenda.org')
288 | #+end_src
289 | 
290 | ** Convenience Methods
291 | *** Getting All Headings
292 | The convenience method =Org.get_all_headings= walks the Org tree
293 | depth-first and returns a generator of all the headings in the tree in
294 | the order in which they occur.
295 | *** Filtering Headings
296 | You can use =Org.filter_headings(func)= where =func= is any arbitrary
297 | [[https://en.wikipedia.org/wiki/Boolean-valued_function][predicate]] and get a generator of all headings satisfying the predicate.
298 | *** Search for Headings by Title
299 | Use =Org.get_headings_by_title= to search for a heading with the given title:
300 | #+begin_src python
301 | Org.get_headings_by_title(search_string, exact=False, re_flags=0)
302 | #+end_src
303 | =search_string= is what's searched for in the title. It's interpreted as a
304 | regex unless =exact= is set to =True=, in which case the function will
305 | return headings whose title matches the search string
306 | exactly. =re_flags= are flags passed to =re.search=. This argument is
307 | ignored if =exact= is =True=.
308 | It uses =filter_headings= under the hood, so it returns a generator of
309 | matching headings.
310 | 
311 | *** Search for Headings by Path
312 | Use =Org.get_heading_by_path= to search for a heading with the given path:
313 | #+begin_src python
314 | Org.get_heading_by_path(path, exact=False, re_flags=0)
315 | #+end_src
316 | =path= is a list of heading titles. Each member is interpreted the same
317 | way the =search_string= argument of =get_headings_by_title= is
318 | interpreted. This function returns the first heading of the tree that matches the given path or =None= if no such heading is found.
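- Here is a short sketch putting the convenience methods above together
  (the heading titles are made up):
#+begin_src python
from orgmunge import Org

org = Org('''* Projects
** Website
*** Fix navigation bar
** Backlog
* Notes
''', from_file=False)

# Depth-first list of every heading title:
print([h.title for h in org.get_all_headings()])

# All level-2 headings:
level_2 = list(org.filter_headings(lambda h: h.level == 2))

# Regex search on titles vs. an exact path lookup:
backlog = next(org.get_headings_by_title('^Back'))
fix_nav = org.get_heading_by_path(['Projects', 'Website', 'Fix navigation bar'], exact=True)
print(backlog.title, fix_nav.title)
#+end_src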
320 | 
321 | * License
322 | #+INCLUDE: ./LICENSE
323 | 
324 | 
325 | 
326 | * Contributors
327 | :PROPERTIES:
328 | :ID: d27f8cd9-4be9-4fa3-b54a-40b9d3807e90
329 | :END:
330 | :LOGBOOK:
331 | CLOCK: [2023-07-08 Sat 15:54]--[2023-07-08 Sat 15:54] => 0:00
332 | :END:
333 | Thanks to these wonderful people for contributing time and code:
334 | 
335 | - [[https://github.com/Nalisarc][Nalisarc]]
336 | - [[https://github.com/ispringle][ispringle]]
337 | - [[https://github.com/crdoconnor][crdoconnor]]
338 | - [[https://github.com/allrob23][allrob23]]
339 | 
--------------------------------------------------------------------------------
/src/orgmunge/classes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | 
3 | import re
4 | from typing import Optional, List, Union, Tuple, Dict
5 | from datetime import datetime as dt
6 | from datetime import timedelta
7 | from functools import reduce
8 | from operator import add
9 | from math import floor
10 | from .lexer import Lexer
11 | 
12 | ORG_TIME_FORMAT_NO_TIME = '%Y-%m-%d %a'
13 | ORG_TIME_FORMAT = ORG_TIME_FORMAT_NO_TIME + ' %H:%M'
14 | 
15 | class Cookie:
16 |     def __init__(self, text: str):
17 |         """Cookies can be of type 'percent' (e.g. [5%])
18 |         or 'progress' (e.g. [2/3]). Both types store
19 |         their state as 2 numbers: m is the number of completed items
20 |         and n is the total number. In the case of a percent-type Cookie,
21 |         n is set to 100.
22 |         """
23 |         if re.search(r'%', text):
24 |             self._cookie_type = 'percent'
25 |             match = re.search(r'\[(.+)%\]', text)
26 |             if match:
27 |                 self._m = int(match.group(1))
28 |                 if self._m > 100:
29 |                     raise ValueError(f'Meaningless cookie value: {self._m}%')
30 |             else:
31 |                 self._m = 0
32 |             self._n = 100
33 |         elif re.search(r'/', text):
34 |             self._cookie_type = 'progress'
35 |             match = re.search(r'\[(.*)/(.*)\]', text)
36 |             m = int(match.group(1)) if match.group(1) != '' else 0
37 |             n = int(match.group(2)) if match.group(2) != '' else 0
38 |             self._m, self._n = m, n
39 |         else:
40 |             self._cookie_type = None
41 |             self._m = 0
42 |             self._n = 0
43 |         if self._m > self._n:
44 |             raise ValueError(f'Meaningless cookie value: {m}/{n}')
45 | 
46 |     @property
47 |     def cookie_type(self):
48 |         return self._cookie_type
49 | 
50 |     @cookie_type.setter
51 |     def cookie_type(self, new_type: str):
52 |         if new_type != self.cookie_type:
53 |             if new_type == 'percent':
54 |                 self._m = int(self.m/self.n * 100)
55 |                 self._n = 100
56 |             elif new_type == 'progress':
57 |                 # m and n are already defined.
Nothing needs to be done 58 | pass 59 | else: 60 | raise ValueError(f'Unknown cookie type {new_type}') 61 | self._cookie_type = new_type 62 | 63 | @property 64 | def m(self): 65 | return self._m 66 | 67 | @m.setter 68 | def m(self, value: int): 69 | if not isinstance(value, int): 70 | raise ValueError(f'Cookie progress can only be an integer, {value} passed.') 71 | elif value > self.n: 72 | raise ValueError(f"Can't have cookie progress set to {value} > {self.n}") 73 | else: 74 | self._m = value 75 | 76 | @property 77 | def n(self): 78 | return self._n 79 | 80 | @n.setter 81 | def n(self, value: int): 82 | if not isinstance(value, int): 83 | raise ValueError(f'Cookie final value can only be an integer, {value} passed.') 84 | elif value < self.m: 85 | raise ValueError(f"Can't have cookie final value set to {value} < {self.m}") 86 | else: 87 | self._n = value 88 | 89 | def __repr__(self): 90 | return f'[{self.m}/{self.n}]' if self.cookie_type == 'progress' else f'[{self.m}%]' 91 | 92 | def __eq__(self, other): 93 | if not isinstance(other, self.__class__): 94 | return False 95 | else: 96 | return str(self) == str(other) 97 | class Priority: 98 | allowed_values = ['A', 'B', 'C'] 99 | def _parse_priority(self, p: str): 100 | match = re.search(r"^\[#(.)\]", p) 101 | return match.group(1) if match else p 102 | 103 | def __init__(self, priority_text: Optional[str]): 104 | p = self._parse_priority(priority_text) if priority_text is not None else priority_text 105 | self.priority = p 106 | 107 | @property 108 | def priority(self): 109 | return self._priority 110 | 111 | @priority.setter 112 | def priority(self, value: Optional[str]): 113 | if value in self.allowed_values or value is None: # Allow None to remove priority 114 | self._priority = value 115 | else: 116 | raise ValueError(f"Priority must be one of {self.allowed_values}; {value} passed") 117 | 118 | def _raise(self): 119 | if self.priority is None: 120 | self.priority = self.allowed_values[0] 121 | idx = self.allowed_values.index(self.priority) 122 | self.priority = self.allowed_values[(idx + 1) % len(self.allowed_values)] 123 | 124 | def _lower(self): 125 | if self.priority is None: 126 | self.priority = self.allowed_values[-1] 127 | idx = self.allowed_values.index(self.priority) 128 | self.priority = self.allowed_values[(idx - 1) % len(self.allowed_values)] 129 | 130 | def __repr__(self): 131 | return f'[#{self.priority}]' if self.priority is not None else '' 132 | 133 | def __eq__(self, other): 134 | if not isinstance(other, self.__class__): 135 | return False 136 | else: 137 | return str(self) == str(other) 138 | class Headline: 139 | def __init__(self, todos, level: str, comment: bool = False, 140 | todo: Optional[str] = None, priority: Optional[str] = None, 141 | title: str = "", cookie: Optional[str] = None, tags: Optional[List[str]] = None): 142 | self._level = len(re.sub(r'\s+', '', level)) # Number of leading asterisks 143 | self._comment = comment 144 | self._todo = todo 145 | self._priority = Priority(priority) 146 | self.title = title 147 | self._cookie = cookie if cookie is None else Cookie(cookie) 148 | self.tags = tags 149 | self._todo_states = set(todos['todo_states'].values()) 150 | self._done_states = set(todos['done_states'].values()) 151 | self._todo_keywords = {**todos['todo_states'], **todos['done_states']} 152 | @property 153 | def done(self): 154 | return self._is_done() 155 | 156 | @done.setter 157 | def done(self, _): 158 | raise AttributeError("Can't set the 'done' attribute") 159 | 160 | def _is_done(self): 161 
| if self.todo is None or self.todo in self._todo_states: 162 | return False 163 | elif self.todo in self._done_states: 164 | return True 165 | else: 166 | raise ValueError(f"Uncategorized todo state {self.todo}") 167 | 168 | @property 169 | def level(self): 170 | return self._level 171 | 172 | @level.setter 173 | def level(self, value: int): 174 | if not isinstance(value, int): 175 | raise ValueError(f"Can only set headline level to an integer value, {value} passed.") 176 | self._level = value 177 | 178 | def promote(self, n: int = 1): 179 | level = self.level - n 180 | self.level = max(level, 1) 181 | 182 | def demote(self, n: int = 1): 183 | self.level += n 184 | 185 | @property 186 | def comment(self): 187 | return self._comment 188 | 189 | @comment.setter 190 | def comment(self, value: bool): 191 | if not isinstance(value, bool): 192 | raise ValueError('The "comment" property must be a boolean!') 193 | self._comment = value 194 | 195 | def toggle_comment(self): 196 | self.comment = not(self.comment) 197 | 198 | def comment_out(self): 199 | self.comment = True 200 | 201 | def uncomment(self): 202 | self.comment = False 203 | 204 | @property 205 | def todo(self): 206 | return self._todo 207 | 208 | @todo.setter 209 | def todo(self, value: Optional[str]): 210 | if value not in self._todo_states and value not in self._done_states and value is not None: 211 | possible_states = set.union(self._todo_states, self._done_states) 212 | raise ValueError(f"Todo keyword has to be one of {','.join(possible_states)} or None, {value} passed.") 213 | else: 214 | self._todo = value 215 | 216 | @property 217 | def cookie(self): 218 | return self._cookie 219 | 220 | @cookie.setter 221 | def cookie(self, value: Cookie): 222 | if not isinstance(value, Cookie): 223 | raise ValueError("Can only set cookie to an instance of the Cookie class.") 224 | else: 225 | self._cookie = value 226 | 227 | def raise_priority(self): 228 | self.priority._raise() 229 | 230 | def lower_priority(self): 231 | self.priority._lower() 232 | 233 | @property 234 | def priority(self): 235 | return self._priority 236 | 237 | @priority.setter 238 | def priority(self, value: Optional[str]): 239 | self._priority = Priority(value) 240 | 241 | def __repr__(self): 242 | priority = f'{self.priority}' + (' ' if str(self.priority) != '' else '') 243 | comment = "COMMENT " if self.comment else "" 244 | todo = f"{self.todo} " if self.todo else "" 245 | cookie = ' ' + str(self.cookie) if self.cookie else "" 246 | tags = f" :{':'.join(self.tags)}:" if self.tags else "" 247 | return f"{'*' * self.level} {todo}{comment}{priority}{self.title}{cookie}{tags}" 248 | 249 | def __eq__(self, other): 250 | if not isinstance(other, self.__class__): 251 | return False 252 | else: 253 | return str(self) == str(other) 254 | class TimeStamp: 255 | def __init__(self, timestamp_str: str): 256 | is_active = re.search(Lexer.ATIMESTAMP, timestamp_str) 257 | is_inactive = re.search(Lexer.ITIMESTAMP, timestamp_str) 258 | self._active = True if is_active else False 259 | match = is_active if self._active else is_inactive 260 | date, day_of_week, start_time, end_time, repeater, deadline_warn = match.groups() 261 | self._start_time = self._to_datetime([date, day_of_week, start_time]) 262 | if end_time: 263 | end_time = re.sub(r'^-', '', end_time) 264 | self._end_time = self._to_datetime([date, day_of_week, end_time]) 265 | else: 266 | self._end_time = None 267 | self._repeater = repeater 268 | self._deadline_warn = deadline_warn 269 | 270 | def _to_datetime(self, 
date_components: List[str]) -> dt: 271 | if date_components[-1] is None: 272 | dt_format = ORG_TIME_FORMAT_NO_TIME 273 | date_components = date_components[:-1] 274 | else: 275 | dt_format = ORG_TIME_FORMAT 276 | self._dt_format = dt_format 277 | return dt.strptime(' '.join(date_components), dt_format) 278 | @property 279 | def start_time(self): 280 | return self._start_time 281 | 282 | @start_time.setter 283 | def start_time(self, value: Union[str, dt, None]): 284 | if value is None: 285 | self._start_time = self.start_time.strftime(ORG_TIME_FORMAT_NO_TIME) 286 | t = None 287 | elif isinstance(value, str): 288 | t = dt.strptime(' '.join([self.start_time.strftime(ORG_TIME_FORMAT_NO_TIME), value]), ORG_TIME_FORMAT) 289 | elif isinstance(value, dt): 290 | if self.end_time and (value.year != self.end_time.year or value.month != self.end_time.month or value.day != self.end_time.day): 291 | raise ValueError('The start time for a timestamp must have the same date as the end time') 292 | else: 293 | t = value 294 | else: 295 | raise TypeError(f"Can't set timestamp start time from value of type {type(value)}!") 296 | if t and self.end_time and t > self.end_time: 297 | raise ValueError("Start time must be before end time") 298 | elif t: 299 | self._start_time = t 300 | 301 | @property 302 | def active(self): 303 | return self._active 304 | 305 | @active.setter 306 | def active(self, value: bool): 307 | if isinstance(value, bool): 308 | self._active = value 309 | else: 310 | raise TypeError("The active property of timestamps needs to be a Boolean.") 311 | 312 | @property 313 | def end_time(self): 314 | return self._end_time 315 | 316 | @end_time.setter 317 | def end_time(self, value: Union[str, dt, None]): 318 | if value is None: 319 | self._end_time = None 320 | t = None 321 | elif isinstance(value, str): 322 | t = dt.strptime(' '.join([self.end_time.strftime(ORG_TIME_FORMAT_NO_TIME), value]), ORG_TIME_FORMAT) 323 | elif isinstance(value, dt): 324 | if value.year != self.start_time.year or value.month != self.start_time.month or value.day != self.start_time.day: 325 | raise ValueError('The end time for a timestamp must have the same date as the start time') 326 | else: 327 | t = value 328 | else: 329 | raise TypeError(f"Can't set timestamp end time from value of type {type(value)}!") 330 | if t and t < self.start_time: 331 | raise ValueError("End time must be after start time.") 332 | elif t: 333 | self._end_time = t 334 | 335 | @property 336 | def repeater(self): 337 | return self._repeater 338 | 339 | @repeater.setter 340 | def repeater(self, value: Optional[str]): 341 | if value is None: 342 | self._repeater = None 343 | elif re.search(r'^[.+]?\+[0-9]+[hdwmy]', value): 344 | self._repeater = value 345 | else: 346 | raise ValueError(f"Repeaters must start with .+, ++ or +, followed by an integer and one of h, d, w, m or y. Can't work with {value}.") 347 | 348 | @property 349 | def deadline_warn(self): 350 | return self._deadline_warn 351 | 352 | @deadline_warn.setter 353 | def deadline_warn(self, value: Optional[str]): 354 | if value is None: 355 | self._deadline_warn = None 356 | elif re.search(r'^-[0-9]+[hdwmy]', value): 357 | self._deadline_warn = value 358 | else: 359 | raise ValueError(f"Special deadline warnings must start with -, followed by an integer and one of h, d, w, m or y. 
Can't work with {value}.") 360 | def __repr__(self): 361 | ldelim = '<' if self.active else '[' 362 | rdelim = '>' if self.active else ']' 363 | timestamp = self.start_time.strftime(self._dt_format) 364 | if self.end_time: 365 | timestamp += f'-{self.end_time.strftime("%H:%M")}' 366 | if self.repeater: 367 | timestamp += f'{self.repeater}' 368 | if self.deadline_warn: 369 | timestamp += f'{self.deadline_warn}' 370 | return ldelim + timestamp + rdelim 371 | 372 | def __eq__(self, other): 373 | if not isinstance(other, self.__class__): 374 | return False 375 | else: 376 | return str(self) == str(other) 377 | class Scheduling: 378 | _closed = None 379 | _scheduled = None 380 | _deadline = None 381 | 382 | # Helper class to group together common getter and setter code for all keywords 383 | class Keyword: 384 | def __init__(self, attr: str): 385 | self.attr = '_' + attr 386 | def __get__(self, obj, _): 387 | # The third argument doesn't matter. The caller needs to pass 388 | # in a keyword name to getattr and that gets passed on to this function 389 | # but it's not needed since each Keyword instance stores its keyword in 390 | # the attr attribute. 391 | return getattr(obj, self.attr) 392 | def __set__(self, obj, value): 393 | if value is None: 394 | setattr(obj, self.attr, None) 395 | elif isinstance(value, TimeStamp): 396 | if self.attr == '_closed': 397 | value.active = False 398 | value.end_time = value.repeater = value.deadline_warn = None 399 | if self.attr == '_scheduled' or self.attr == '_deadline': 400 | value.active = True 401 | if self.attr != '_deadline': 402 | value.deadline_warn = None 403 | setattr(obj, self.attr, value) 404 | else: 405 | raise TypeError(f"The timestamp value for a Scheduling keyword must be an instance of the TimeStamp class.") 406 | valid_keywords = ['closed', 'scheduled', 'deadline'] 407 | def __init__(self, keyword: Optional[str] = None, timestamp: Optional[TimeStamp] = None): 408 | if keyword is not None and timestamp is not None: 409 | canonical_keyword = re.sub(r':\s*$', '', keyword).lower() 410 | if canonical_keyword not in self.valid_keywords: 411 | raise ValueError(f'Scheduling keyword must be one of {self.valid_keywords}, got {canonical_keyword}') 412 | else: 413 | if isinstance(timestamp, TimeStamp): 414 | setattr(self, canonical_keyword, timestamp) 415 | else: 416 | raise TypeError("The timestamp value for a Scheduling keyword must be an instance of the TimeStamp class") 417 | 418 | # Define this so the parser can add together multiple scheduling keywords: 419 | def __add__(self, other): 420 | for keyword in self.valid_keywords: 421 | if getattr(self, keyword) and getattr(other, keyword): 422 | raise ValueError(f"Can't merge two Scheduling types when both of them have the {keyword} property set.") 423 | else: 424 | result = Scheduling() 425 | for keyword in self.valid_keywords: 426 | setattr(result, keyword, getattr(self, keyword) or getattr(other, keyword) or None) 427 | return result 428 | 429 | CLOSED = closed = Keyword('closed') 430 | SCHEDULED = scheduled = Keyword('scheduled') 431 | DEADLINE = deadline = Keyword('deadline') 432 | 433 | def __repr__(self): 434 | data = [f'{keyword.upper()}: {getattr(self, keyword)}' for keyword in self.valid_keywords if getattr(self, keyword) is not None] 435 | return ' '.join(data) 436 | 437 | def __eq__(self, other): 438 | if not isinstance(other, self.__class__): 439 | return False 440 | else: 441 | return str(self) == str(other) 442 | class Drawer: 443 | def __init__(self, drawer_string: str): 444 | 
self.name = re.sub(r':', '', drawer_string.split('\n')[0]) 445 | self.contents = drawer_string.strip().split('\n')[1:-1] 446 | def __repr__(self): 447 | contents = "\n".join(self.contents) 448 | return f''':{self.name}: 449 | {contents} 450 | :END: 451 | ''' 452 | def __eq__(self, other): 453 | if not isinstance(other, self.__class__): 454 | return False 455 | else: 456 | return str(self) == str(other) 457 | class Clocking: 458 | def __init__(self, start_time: str, end_time: Optional[str] = None): 459 | self._start_time = dt.strptime(start_time, ORG_TIME_FORMAT) 460 | if end_time is not None: 461 | self._end_time = dt.strptime(end_time, ORG_TIME_FORMAT) 462 | else: 463 | self._end_time = None 464 | self._duration = None 465 | 466 | def _set_time(self, property: str, value: str): 467 | try: 468 | datetime_obj = dt.strptime(value, ORG_TIME_FORMAT) 469 | setattr(self, property, datetime_obj) 470 | except ValueError: 471 | raise ValueError(f"Time string {value} doesn't match expected org time format {ORG_TIME_FORMAT}") 472 | 473 | @property 474 | def start_time(self): 475 | return self._start_time 476 | 477 | @start_time.setter 478 | def start_time(self, value: str): 479 | self._set_time('_start_time', value) 480 | 481 | @property 482 | def end_time(self): 483 | return self._end_time 484 | 485 | @end_time.setter 486 | def end_time(self, value: Optional[str]): 487 | if value is None: 488 | self._end_time = value 489 | else: 490 | self._set_time('_end_time', value) 491 | 492 | def _display_delta(self, time_delta: timedelta) -> str: 493 | total_seconds = time_delta.total_seconds() 494 | if total_seconds < 0: 495 | return f'-{self._display_delta(timedelta(seconds=-total_seconds))}' 496 | m, s = total_seconds/60, total_seconds%60 497 | if s > 30: m += 1 # Round minutes up 498 | h, m = m/60, m%60 499 | hours, minutes = [floor(x) for x in (h, m)] 500 | return f'{hours}:{minutes:02d}' 501 | 502 | @property 503 | def duration(self): 504 | if self.end_time is None: 505 | return self._display_delta(dt.now() - self.start_time) 506 | else: 507 | return self._display_delta(self.end_time - self.start_time) 508 | 509 | @duration.setter 510 | def duration(self, _): 511 | raise TypeError("Can't set the duration for a clocking object! Set the start and/or end time instead.") 512 | 513 | @property 514 | def duration_seconds(self): 515 | if self.end_time is None: 516 | return (dt.now() - self.start_time).seconds 517 | else: 518 | return (self.end_time - self.start_time).seconds 519 | 520 | @duration_seconds.setter 521 | def duration_seconds(self, _): 522 | raise TypeError("Can't set the duration for a clocking object! 
Set the start and/or end time instead.") 523 | 524 | def __repr__(self): 525 | if self.end_time is None: 526 | return f'[{self.start_time.strftime(ORG_TIME_FORMAT)}]' 527 | else: 528 | return f'[{self.start_time.strftime(ORG_TIME_FORMAT)}]--[{self.end_time.strftime(ORG_TIME_FORMAT)}] => {self.duration}' 529 | 530 | def __eq__(self, other): 531 | if not isinstance(other, self.__class__): 532 | return False 533 | else: 534 | return str(self) == str(other) 535 | 536 | def __lt__(self, other): 537 | return 538 | class Heading: 539 | def __init__(self, headline: Headline, contents: Tuple[Scheduling, List[Drawer], str]): 540 | self._headline = headline 541 | self._scheduling, self._drawers, self.body = contents 542 | self._children = [] 543 | self._parent = None 544 | self._sibling = None 545 | self._inherited_properties = dict() 546 | if self.body: 547 | self.timestamps = [TimeStamp(t[0]) for t in re.findall(fr'({Lexer.ATIMESTAMP}|{Lexer.ITIMESTAMP})', self.body)] 548 | if self._drawers: 549 | properties_drawer = [d for d in self._drawers if d.name == 'PROPERTIES'] 550 | if properties_drawer: 551 | self._properties = self._get_properties_dict(properties_drawer[0].contents) 552 | else: 553 | self._properties = dict() 554 | else: 555 | self._properties = dict() 556 | 557 | def __getattr__(self, attr): 558 | # So that things like self.todo and self.title, etc... will work 559 | if self.headline: 560 | return getattr(self.headline, attr) 561 | else: 562 | raise AttributeError(f'Heading class has no attribute {attr}') 563 | 564 | def _parse_clock_line(self, line: str) -> Clocking: 565 | m = re.search(fr'CLOCK:\s*(?P{Lexer.ITIMESTAMP})(?:--(?P{Lexer.ITIMESTAMP}))?', line) 566 | if m is not None: 567 | start_time = re.sub(r'[\[\]]', '', m.group("start")) 568 | if m.group("end"): 569 | end_time = re.sub(r'[\[\]]', '', m.group("end")) 570 | else: 571 | end_time = None 572 | else: 573 | raise ValueError(f"Unable to parse {line} as clocking information") 574 | return Clocking(start_time, end_time) 575 | 576 | def _get_clocking_info(self) -> List[Clocking]: 577 | if not self.drawers: 578 | return [] 579 | logbook = self.get_drawer_by_name('LOGBOOK') 580 | if logbook: 581 | return [self._parse_clock_line(l) for l in logbook.contents if re.search(r'^\s*CLOCK:', l)] 582 | else: 583 | return [] 584 | 585 | @classmethod 586 | def _get_properties_dict(cls, contents: List[str]) -> Dict[str, str]: 587 | return {k: v for (k, v) in [re.search(r':([^:]+):\s+(.*)', line).groups() 588 | for line in contents]} 589 | 590 | def _get_properties_string(self) -> str: 591 | return "\n".join([f":{k}:{' '*7}{v}" for k, v in self.properties.items()]) 592 | 593 | @property 594 | def inherited_properties(self): 595 | if not self._inherited_properties: 596 | if self.parent: 597 | self._inherited_properties = {**self.parent.inherited_properties, **self.parent.properties} 598 | return self._inherited_properties 599 | 600 | @inherited_properties.setter 601 | def inherited_properties(self, _): 602 | raise AttributeError("Can't set the inherited properties of a heading") 603 | 604 | @property 605 | def tags(self): 606 | self_tags = self.headline.tags or [] 607 | return list(set(self_tags)) 608 | 609 | @tags.setter 610 | def tags(self, _): 611 | raise AttributeError("Can't set the tags of a heading, use headline instead.") 612 | 613 | @property 614 | def all_tags(self): 615 | parent_tags = (self.parent.all_tags if self.parent else None) or [] 616 | return list(set(self.tags + parent_tags)) 617 | 618 | @all_tags.setter 619 | def 
all_tags(self, _): 620 | raise AttributeError("Can't set the tags of a heading, use headline instead.") 621 | 622 | @property 623 | def properties(self): 624 | return self._properties 625 | 626 | @properties.setter 627 | def properties(self, val: Dict[str, str]): 628 | if type(val) is not dict: 629 | raise TypeError("Heading properties must be given in the form of a dict") 630 | else: 631 | self._properties = dict() 632 | for key in val: 633 | self._properties[key] = val[key] 634 | 635 | def get_all_properties(self) -> Dict[str, str]: 636 | return {**self.inherited_properties, **self.properties} 637 | 638 | def clocking(self, include_children: bool = False) -> List[Clocking]: 639 | "Return the clocking information of the given headline and possibly its children." 640 | own_clocking = self._get_clocking_info() 641 | if include_children and self.children != []: 642 | return own_clocking + reduce(add, [c.clocking(include_children=True) for c in self.children]) 643 | else: 644 | return own_clocking 645 | 646 | def get_drawer_by_name(self, name: str) -> Optional[Drawer]: 647 | "Return the named drawer if it exists, or None if it doesn't" 648 | try: 649 | return next(d for d in (self.drawers or []) if d.name == name) 650 | except StopIteration: 651 | return None 652 | 653 | @property 654 | def headline(self): 655 | return self._headline 656 | 657 | @headline.setter 658 | def headline(self, value: Headline): 659 | if not isinstance(value, Headline): 660 | raise TypeError(f"Org headline must be of type {Headline}. Can't work with {type(value)}.") 661 | else: 662 | self._headline = value 663 | 664 | @property 665 | def scheduling(self): 666 | return self._scheduling 667 | 668 | @scheduling.setter 669 | def scheduling(self, value: Optional[Scheduling]): 670 | if value is None: 671 | self._scheduling = None 672 | elif not isinstance(value, Scheduling): 673 | raise TypeError(f"Scheduling information must be of type {Scheduling}. Can't work with {type(value)}.") 674 | else: 675 | self._scheduling = value 676 | 677 | @property 678 | def drawers(self): 679 | updated_properties_drawer = Drawer(f""":PROPERTIES: 680 | {self._get_properties_string()} 681 | :END:""") 682 | if self._drawers: 683 | if self._drawers[0].name == 'PROPERTIES': 684 | self._drawers = [updated_properties_drawer] + self._drawers[1:] 685 | elif self.properties: 686 | self._drawers = [updated_properties_drawer] 687 | return self._drawers 688 | 689 | @drawers.setter 690 | def drawers(self, value: Optional[List[Drawer]]): 691 | if value is None: 692 | self._drawers = None 693 | else: 694 | types = {type(d) for d in value if type(d) is not Drawer} 695 | if types: 696 | raise TypeError(f"Drawer information must be of type {Drawer}. Found these value types instead: {' '.join(types)}.") 697 | else: 698 | self._drawers = value 699 | 700 | @property 701 | def children(self): 702 | return self._children 703 | 704 | @children.setter 705 | def children(self, value): 706 | if value is None: 707 | self._children = None 708 | else: 709 | types = {type(d) for d in value if type(d) is not Heading} 710 | if types: 711 | raise TypeError(f"Child headings must all be of type {Heading}. Found these value types instead: {' '.join(types)}.") 712 | else: 713 | self._children = value 714 | 715 | def add_child(self, heading, new: bool = False): 716 | heading.parent = self 717 | if not isinstance(heading, Heading): 718 | raise TypeError(f"Child heading must be of type {Heading}. 
Can't work with {type(heading)}!") 719 | if new: 720 | if self.children: 721 | self.children.append(heading) 722 | else: 723 | self.children = [heading] 724 | else: 725 | # This is the case where a heading that's been promoted needs to be adopted by another heading 726 | if self.children: 727 | if heading.sibling: 728 | try: 729 | idx = self.children.index(heading.sibling) 730 | self.children = self.children[:idx + 1] + [heading] + self.children[idx+1:] 731 | except ValueError: 732 | raise ValueError("Incorrect promotion: grandparent doesn't have original parent in children!") 733 | else: # This is the case where a heading that's been demoted needs to be adopted by its sibling 734 | self.children = [heading] + self.children 735 | else: 736 | self.children = [heading] 737 | 738 | def remove_child(self, child): 739 | if self.children: 740 | self.children = [c for c in self.children if c is not child] 741 | 742 | @property 743 | def parent(self): 744 | return self._parent 745 | 746 | @parent.setter 747 | def parent(self, value): 748 | if value is None: 749 | self._parent = None 750 | elif not isinstance(value, Heading): 751 | raise TypeError(f"Parent heading must be of type {Heading}. Can't work with {type(value)}.") 752 | else: 753 | self._parent = value 754 | 755 | @property 756 | def sibling(self): 757 | return self._sibling 758 | 759 | @sibling.setter 760 | def sibling(self, value): 761 | if value is None: 762 | self._sibling = None 763 | elif not isinstance(value, Heading): 764 | raise TypeError(f"Sibling heading must be of type {Heading}. Can't work with {type(value)}.") 765 | else: 766 | self._sibling = value 767 | 768 | @property 769 | def level(self): 770 | return self.headline.level 771 | 772 | @level.setter 773 | def level(self, value: int): 774 | self.headline.level = value 775 | 776 | def promote(self): 777 | if self.children: 778 | raise ValueError('Incorrect promotion: heading has children that would be orphaned. 
Did you mean promote_tree?') 779 | self.headline.promote() 780 | self.sibling = self.parent 781 | idx = self.sibling.children.index(self) 782 | next_siblings = self.sibling.children[idx + 1:] 783 | if next_siblings: 784 | next_siblings[0].sibling = None 785 | for s in next_siblings: 786 | s.parent = self 787 | self.children = next_siblings 788 | self.sibling.remove_child(self) 789 | for s in next_siblings: 790 | self.sibling.remove_child(s) 791 | self.sibling.parent.add_child(self) 792 | 793 | def promote_tree(self): 794 | children = self.children 795 | self.promote() 796 | if children: 797 | for child in children: 798 | child.promote_tree() 799 | 800 | def demote(self): 801 | if not self.sibling: 802 | raise ValueError('Incorrect demotion: heading has no sibling to adopt it.') 803 | self.headline.demote() 804 | idx = self.parent.children.index(self) 805 | try: 806 | next_sibling = self.parent.children[idx + 1] 807 | next_sibling.sibling = self.sibling 808 | except IndexError: 809 | pass 810 | self.parent.remove_child(self) 811 | self.parent = self.sibling 812 | if self.parent.children: 813 | self.sibling = self.parent.children[-1] 814 | else: 815 | self.sibling = None 816 | self.parent.add_child(self) 817 | if self.children: 818 | self.children[0].sibling = self 819 | for child in self.children: 820 | self.parent.add_child(child) 821 | self.children = None 822 | 823 | def demote_tree(self): 824 | children = self.children 825 | self.demote() 826 | if children: 827 | for child in children: 828 | child.demote_tree() 829 | 830 | def get_path(self): 831 | "Returns the full path of the current heading as a list of headings" 832 | if self.parent.title == 'ROOT': 833 | return [self] 834 | else: 835 | return self.parent.get_path() + [self] 836 | 837 | def __repr__(self): 838 | scheduling = str(self.scheduling) + "\n" if self.scheduling else "" 839 | drawers = "".join(d.__str__() for d in self.drawers) if self.drawers else "" 840 | body = str(self.body) + "\n" if self.body else "" 841 | if len(body) > 80: 842 | body = body[:77].strip() + "...\n" 843 | children = ''.join([c.__str__() for c in self.children]) if self.children else '' 844 | return f'{self.headline}\n{scheduling}{drawers}{body}{children}' 845 | 846 | def __str__(self): 847 | scheduling = str(self.scheduling) + "\n" if self.scheduling else "" 848 | drawers = "".join(d.__str__() for d in self.drawers) if self.drawers else "" 849 | body = str(self.body) + "\n" if self.body else "" 850 | children = ''.join([c.__str__() for c in self.children]) if self.children else '' 851 | return f'{self.headline}\n{scheduling}{drawers}{body}{children}' 852 | 853 | def __eq__(self, other): 854 | if not isinstance(other, self.__class__): 855 | return False 856 | else: 857 | return str(self) == str(other) 858 | --------------------------------------------------------------------------------