├── .github ├── FUNDING.yml └── workflows │ └── integrity-check.yml ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── Pipfile ├── Pipfile.lock ├── README.md ├── TODO.md ├── brain_brew ├── __init__.py ├── build_tasks │ ├── __init__.py │ ├── crowd_anki │ │ ├── __init__.py │ │ ├── crowd_anki_generate.py │ │ ├── headers_from_crowdanki.py │ │ ├── headers_to_crowd_anki.py │ │ ├── media_group_from_crowd_anki.py │ │ ├── media_group_to_crowd_anki.py │ │ ├── note_model_single_from_crowd_anki.py │ │ ├── note_models_all_from_crowd_anki.py │ │ ├── note_models_to_crowd_anki.py │ │ ├── notes_from_crowd_anki.py │ │ ├── notes_to_crowd_anki.py │ │ └── shared_base_notes.py │ ├── csvs │ │ ├── __init__.py │ │ ├── csvs_generate.py │ │ ├── generate_guids_in_csvs.py │ │ ├── notes_from_csvs.py │ │ └── shared_base_csvs.py │ ├── deck_parts │ │ ├── __init__.py │ │ ├── from_yaml_part.py │ │ ├── headers_from_yaml_part.py │ │ ├── media_group_from_folder.py │ │ ├── note_model_from_html_parts.py │ │ ├── note_model_from_yaml_part.py │ │ ├── save_media_group_to_folder.py │ │ └── save_note_models_to_folder.py │ └── overrides │ │ ├── __init__.py │ │ ├── headers_override.py │ │ └── notes_override.py ├── commands │ ├── __init__.py │ ├── argument_reader.py │ ├── init_repo │ │ ├── __init__.py │ │ └── init_repo.py │ └── run_recipe │ │ ├── __init__.py │ │ ├── build_task.py │ │ ├── parts_builder.py │ │ ├── recipe_builder.py │ │ ├── run_recipe.py │ │ └── top_level_builder.py ├── configuration │ ├── __init__.py │ ├── anki_field.py │ ├── file_manager.py │ ├── part_holder.py │ ├── representation_base.py │ └── yaml_verifier.py ├── front_matter.py ├── interfaces │ ├── __init__.py │ ├── command.py │ ├── media_container.py │ └── yamale_verifyable.py ├── main.py ├── representation │ ├── __init__.py │ ├── generic │ │ ├── __init__.py │ │ ├── csv_file.py │ │ ├── html_file.py │ │ ├── media_file.py │ │ └── source_file.py │ ├── json │ │ ├── __init__.py │ │ ├── crowd_anki_export.py │ │ ├── json_file.py │ │ 
└── wrappers_for_crowd_anki.py │ └── yaml │ │ ├── __init__.py │ │ ├── headers.py │ │ ├── media_group.py │ │ ├── note_model.py │ │ ├── note_model_field.py │ │ ├── note_model_template.py │ │ ├── notes.py │ │ └── yaml_object.py ├── schemas │ ├── __init__.py │ └── recipe.yaml ├── transformers │ ├── __init__.py │ ├── create_media_group_from_location.py │ ├── file_mapping.py │ ├── note_model_mapping.py │ ├── save_media_group_to_location.py │ └── save_note_model_to_location.py └── utils.py ├── scripts ├── __init__.py ├── build.bash ├── dist.bash └── yamale_build.py ├── setup.py └── tests ├── __init__.py ├── build_tasks ├── __init__.py ├── test_source_crowd_anki_json.py └── test_source_csv.py ├── representation ├── __init__.py ├── configuration │ ├── __init__.py │ ├── test_csv_file_mapping.py │ └── test_note_model_mapping.py ├── generic │ ├── __init__.py │ ├── test_csv_file.py │ └── test_media_file.py ├── json │ ├── __init__.py │ └── test_crowd_anki_export.py └── yaml │ ├── __init__.py │ ├── test_note_model_repr.py │ └── test_note_repr.py ├── test_argument_reader.py ├── test_builder.py ├── test_file_manager.py ├── test_files.py ├── test_files ├── build_files │ └── builder1.yaml ├── crowd_anki │ └── crowdanki_example_1 │ │ └── deck.json ├── csv │ ├── test1.csv │ ├── test1_split1.csv │ ├── test1_split2.csv │ ├── test2.csv │ ├── test2_missing_guids.csv │ └── test3.csv ├── deck_parts │ ├── note_models │ │ ├── LL Word No Defaults.json │ │ ├── LL Word Only Required.json │ │ ├── LL Word.json │ │ └── Test-Model.json │ └── yaml │ │ ├── note_models │ │ ├── LL-Word-No-Defaults.yaml │ │ └── LL-Word-Only-Required.yaml │ │ └── notes │ │ └── note1.yaml ├── media_files │ ├── buried │ │ └── even_more │ │ │ └── signals2.png │ └── signals.png └── tsv │ └── test1.tsv ├── test_helpers.py └── test_utils.py /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: ohare93 # Replace with 
up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: jmohare 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: brainbrew 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 13 | -------------------------------------------------------------------------------- /.github/workflows/integrity-check.yml: -------------------------------------------------------------------------------- 1 | name: Python application 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v1 11 | 12 | - name: Set up Python 3.7 13 | uses: actions/setup-python@v2 14 | with: 15 | python-version: 3.7 16 | 17 | - name: Install dependencies 18 | run: | 19 | python3 -m pip install --upgrade pipenv 20 | pipenv install --dev 21 | 22 | - name: Run tests 23 | run: | 24 | pipenv run unit_tests 25 | 26 | - name: Build Yamale Recipe 27 | run: | 28 | pipenv run build_yamale 29 | 30 | - name: Check Yamale Recipe for changes 31 | run: git diff --quiet -- || (echo "::error file=yamale,line=0,col=0::You need to run 'python scripts/yamale_build.py'" && exit 1) 32 | 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | 
parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
Fork/Clone this repo onto your computer. Then, in a different repository where you wish to run Brain Brew, you can point it to this version in one of 2 ways:
Install the generated wheel by running: 32 | 33 | ``` 34 | pip install ../brain-brew/dist/Brain_Brew-0.3.11-py3-none-any.whl 35 | ``` 36 | 37 | This should result in your Pipfile updating to: 38 | 39 | ``` 40 | [packages] 41 | brain-brew = {file = "../brain-brew/dist/Brain_Brew-0.3.11-py3-none-any.whl"} 42 | ``` 43 | 44 | Change to match the wheel version number, which is set in `brain_brew/front_matter.py` if you wish to change it. 45 | 46 | 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 
For more information, please refer to <https://unlicense.org>
@dataclass
class CrowdAnkiGenerate(TopLevelBuildTask):
    """Top-level build task: combine Headers, Notes, Note Models, and an
    optional Media group into a single CrowdAnki export folder on disk."""

    @classmethod
    def task_name(cls) -> str:
        return r'generate_crowd_anki'

    @classmethod
    def yamale_schema(cls) -> str:
        # Recipe-yaml schema for this task's entry.
        return f'''\
folder: str()
headers: str()
notes: include('{NotesToCrowdAnki.task_name()}')
note_models: include('{NoteModelsToCrowdAnki.task_name()}')
media: include('{MediaGroupToCrowdAnki.task_name()}', required=False)
'''

    @classmethod
    def yamale_dependencies(cls) -> set:
        return {NotesToCrowdAnki, NoteModelsToCrowdAnki, MediaGroupToCrowdAnki}

    @dataclass
    class Representation(RepresentationBase):
        # Raw recipe-yaml values for this task.
        folder: str
        notes: dict
        note_models: dict
        headers: dict
        media: Optional[dict] = field(default_factory=lambda: dict())

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task from either a parsed Representation or a raw dict."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)
        media_transform = MediaGroupToCrowdAnki.from_repr(rep.media) if rep.media else None
        return cls(
            rep=rep,
            crowd_anki_export=CrowdAnkiExport.create_or_get(rep.folder),
            notes_transform=NotesToCrowdAnki.from_repr(rep.notes),
            note_model_transform=NoteModelsToCrowdAnki.from_repr(rep.note_models),
            headers_transform=HeadersToCrowdAnki.from_repr(rep.headers),
            media_transform=media_transform
        )

    rep: Representation
    crowd_anki_export: CrowdAnkiExport
    notes_transform: NotesToCrowdAnki
    note_model_transform: NoteModelsToCrowdAnki
    headers_transform: HeadersToCrowdAnki
    media_transform: Optional[MediaGroupToCrowdAnki]

    def execute(self):
        """Run every sub-transform and write the combined deck json + media."""
        wrapper = CrowdAnkiJsonWrapper(self.headers_transform.execute())

        models_as_json: List[dict] = self.note_model_transform.execute()

        # Notes reference their model by CrowdAnki id, keyed off the part name.
        name_to_id: dict = {holder.part_id: holder.part.id
                            for holder in self.note_model_transform.note_models}
        notes_as_json = self.notes_transform.execute(name_to_id)

        found_media: Set[MediaFile] = set()
        if self.media_transform:
            found_media = self.media_transform.execute(self.crowd_anki_export.media_loc)

        wrapper.media_files = sorted([m.filename for m in found_media])
        wrapper.name = self.headers_transform.headers.name
        wrapper.note_models = models_as_json
        wrapper.notes = notes_as_json

        # Persist everything through the export representation.
        self.crowd_anki_export.write_to_files(wrapper.data)
# Keys that never belong in a Headers part, and keys Brain Brew re-supplies
# with fixed defaults on the way back out.
headers_skip_keys = [CA_NOTE_MODELS, CA_NOTES, CA_MEDIA_FILES]
headers_default_values = {
    CA_TYPE: "Deck",
    CA_CHILDREN: [],
}


@dataclass
class HeadersFromCrowdAnki(BuildPartTask):
    """Build-part task: extract the deck-level header fields out of a
    CrowdAnki export and register them as a Headers part."""

    @classmethod
    def task_name(cls) -> str:
        return r'headers_from_crowd_anki'

    @classmethod
    def task_regex(cls) -> str:
        # Accepts both "header_from_crowd_anki" and "headers_from_crowd_anki".
        return r'headers?_from_crowd_anki'

    @classmethod
    def yamale_schema(cls) -> str:
        return f'''\
part_id: str()
source: str()
save_to_file: str(required=False)
'''

    @dataclass
    class Representation(RepresentationBase):
        part_id: str
        source: str
        save_to_file: Optional[str] = field(default=None)

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task from either a parsed Representation or a raw dict."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)
        return cls(
            rep=rep,
            ca_export=CrowdAnkiExport.create_or_get(rep.source),
            part_id=rep.part_id,
            save_to_file=rep.save_to_file
        )

    rep: Representation
    part_id: str
    ca_export: CrowdAnkiExport
    save_to_file: Optional[str]

    def execute(self):
        """Read the export's json and register the filtered headers part."""
        wrapper: CrowdAnkiJsonWrapper = self.ca_export.json_data
        part = Headers(self.crowd_anki_to_headers(wrapper.data))
        return PartHolder.override_or_create(self.part_id, self.save_to_file, part)

    @staticmethod
    def crowd_anki_to_headers(ca_data: dict):
        """Keep only the keys that are neither skipped nor defaulted."""
        kept = {}
        for key, value in ca_data.items():
            if key in headers_skip_keys or key in headers_default_values:
                continue
            kept[key] = value
        return kept
@dataclass
class HeadersToCrowdAnki:
    """Resolve a Headers part and render it as the top-level key/value pairs
    of a CrowdAnki deck json."""

    @dataclass
    class Representation(RepresentationBase):
        part_id: str

    @classmethod
    def from_repr(cls, data: Union[Representation, dict, str]):
        """Accept a Representation, a raw dict, or a bare part-id string."""
        if isinstance(data, cls.Representation):
            rep = data
        elif isinstance(data, dict):
            rep = cls.Representation.from_dict(data)
        else:
            # A plain string is shorthand for {part_id: <string>}.
            rep = cls.Representation(part_id=data)

        return cls(
            rep=rep,
            headers=PartHolder.from_file_manager(rep.part_id).part
        )

    rep: Representation
    headers: Headers

    def execute(self) -> dict:
        """Return the header data merged over the CrowdAnki defaults."""
        return self.headers_to_crowd_anki(self.headers.data_without_name)

    @staticmethod
    def headers_to_crowd_anki(headers_data: dict):
        # Explicit header values win over the defaults.
        return {**headers_default_values, **headers_data}
@dataclass
class MediaGroupFromCrowdAnki(MediaGroupFromFolder):
    """Build-part task: create a MediaGroup from the media folder of a
    CrowdAnki export. Reuses MediaGroupFromFolder's Representation/options."""

    @classmethod
    def task_name(cls) -> str:
        return r"media_group_from_crowd_anki"

    @classmethod
    def from_repr(cls, data: Union[MediaGroupFromFolder.Representation, dict]):
        """Build the task from either a parsed Representation or a raw dict."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)

        cae: CrowdAnkiExport = CrowdAnkiExport.create_or_get(rep.source)
        # Resolve the black/whitelist part-ids into their MediaGroup parts.
        blacklist = [PartHolder.from_file_manager(part_id).part
                     for part_id in rep.filter_blacklist_from_parts]
        whitelist = [PartHolder.from_file_manager(part_id).part
                     for part_id in rep.filter_whitelist_from_parts]
        return cls(
            rep=rep,
            part=create_media_group_from_location(
                part_id=rep.part_id,
                save_to_file=rep.save_to_file,
                media_group=MediaGroup.from_directory(cae.media_loc, rep.recursive),
                groups_to_blacklist=blacklist,
                groups_to_whitelist=whitelist
            )
        )

    rep: MediaGroupFromFolder.Representation
    part: MediaGroup

    def execute(self):
        # The part is fully built in from_repr; nothing further to do here.
        pass
@dataclass
class MediaGroupToCrowdAnki(YamlRepr):
    """Copy the media files of the listed MediaGroup parts into a CrowdAnki
    export's media folder."""

    @classmethod
    def task_name(cls) -> str:
        return r'media_group_to_crowd_anki'

    @classmethod
    def yamale_schema(cls) -> str:
        return f'''\
parts: list(str())
'''

    @dataclass
    class Representation(RepresentationBase):
        parts: List[str]

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task from either a parsed Representation or a raw dict."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)
        return cls(
            rep=rep,
            parts=[PartHolder.from_file_manager(part_id).part for part_id in rep.parts]
        )

    rep: Representation
    parts: List[MediaGroup]

    def execute(self, ca_media_folder: str) -> Set[MediaFile]:
        """Save every group into ca_media_folder; returns the files written."""
        return save_media_groups_to_location(self.parts, ca_media_folder, True, False)
@dataclass
class NoteModelSingleFromCrowdAnki(BuildPartTask):
    """Build-part task: pull one named note model out of a CrowdAnki export
    and register it as a NoteModel part."""

    @classmethod
    def task_name(cls) -> str:
        return r'note_model_from_crowd_anki'

    @classmethod
    def yamale_schema(cls) -> str:
        return f'''\
part_id: str()
source: str()
model_name: str(required=False)
save_to_file: str(required=False)
'''

    @dataclass
    class Representation(RepresentationBase):
        part_id: str
        source: str
        model_name: Optional[str] = field(default=None)
        save_to_file: Optional[str] = field(default=None)
        # TODO: fields: Optional[List[str]]
        # TODO: templates: Optional[List[str]]

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task from either a parsed Representation or a raw dict."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)
        return cls(
            rep=rep,
            ca_export=CrowdAnkiExport.create_or_get(rep.source),
            part_id=rep.part_id,
            # Fall back to the part_id as the model name when none is given.
            model_name=rep.model_name or rep.part_id,
            save_to_file=rep.save_to_file
        )

    rep: Representation
    part_id: str
    ca_export: CrowdAnkiExport
    model_name: str
    save_to_file: Optional[str]

    def execute(self):
        """Locate the model by name in the export and register it as a part.

        Raises ReferenceError when the named model is absent.
        """
        wrapper: CrowdAnkiJsonWrapper = self.ca_export.json_data

        by_name = {model.get('name'): model for model in wrapper.note_models}
        if self.model_name not in by_name:
            raise ReferenceError(f"Missing Note Model '{self.model_name}' in CrowdAnki file")

        part = NoteModel.from_crowdanki(by_name[self.model_name])
        return PartHolder.override_or_create(self.part_id, self.save_to_file, part)
@dataclass
class NoteModelsAllFromCrowdAnki(BuildPartTask):
    """Build-part task: register every note model found in a CrowdAnki export
    as its own NoteModel part, named after the model."""

    @classmethod
    def task_name(cls) -> str:
        return r'note_models_all_from_crowd_anki'

    @classmethod
    def yamale_schema(cls) -> str:
        return f'''\
source: str()
'''

    @dataclass
    class Representation(RepresentationBase):
        source: str

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task from either a parsed Representation or a raw dict."""
        rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data)
        return cls(
            rep=rep,
            ca_export=CrowdAnkiExport.create_or_get(rep.source)
        )

    rep: Representation
    ca_export: CrowdAnkiExport

    def execute(self) -> List[PartHolder[NoteModel]]:
        """Create one part per model found; returns the holders in export order."""
        ca_wrapper: CrowdAnkiJsonWrapper = self.ca_export.json_data

        note_models_dict = {model.get('name'): model for model in ca_wrapper.note_models}

        parts = []
        for name, model in note_models_dict.items():
            parts.append(PartHolder.override_or_create(name, None, NoteModel.from_crowdanki(model)))

        # Fix: pluralise for any count other than exactly 1 — the previous
        # `> 1` check logged "Found 0 note model" for an empty export.
        logging.info(f"Found {len(parts)} note model{'s' if len(parts) != 1 else ''} in CrowdAnki Export: '"
                     + "', '".join(note_models_dict.keys()) + "'")

        return parts
@dataclass
class NoteModelsToCrowdAnki(YamlRepr):
    """Resolve a list of NoteModel parts and encode them into CrowdAnki's
    json note-model format."""

    @classmethod
    def task_name(cls) -> str:
        return r'note_models_to_crowd_anki'

    @classmethod
    def yamale_schema(cls) -> str:
        return f'''\
parts: list(include('{cls.NoteModelListItem.task_name()}'))
'''

    @classmethod
    def yamale_dependencies(cls) -> set:
        return {cls.NoteModelListItem}

    @dataclass
    class NoteModelListItem(YamlRepr):
        """A single entry in the parts list; resolves one NoteModel part."""

        @classmethod
        def task_name(cls) -> str:
            return r'note_models_to_crowd_anki_item'

        @classmethod
        def yamale_schema(cls) -> str:
            return f'''\
part_id: str()
'''

        @dataclass
        class Representation(RepresentationBase):
            part_id: str
            # TODO: fields: Optional[List[str]]
            # TODO: templates: Optional[List[str]]

        @classmethod
        def from_repr(cls, data: Union[Representation, dict, str]):
            """Accept a Representation, a raw dict, or a bare part-id string."""
            if isinstance(data, cls.Representation):
                rep = data
            elif isinstance(data, dict):
                rep = cls.Representation.from_dict(data)
            else:
                rep = cls.Representation(part_id=data)  # Support string

            return cls(
                rep=rep,
                part_to_read=rep.part_id
            )

        def get_note_model(self) -> PartHolder[NoteModel]:
            """Look the part up from the file manager and cache it."""
            self.part = PartHolder.from_file_manager(self.part_to_read)
            return self.part  # Todo: add filters in here

        rep: Representation
        part: PartHolder[NoteModel] = field(init=False)
        part_to_read: str

    @dataclass
    class Representation(RepresentationBase):
        parts: List[Union[dict, str]]

    @classmethod
    def from_repr(cls, data: Union[Representation, dict, List[str]]):
        """Accept a Representation, a raw dict, or a plain list of part-ids."""
        if isinstance(data, cls.Representation):
            rep = data
        elif isinstance(data, dict):
            rep = cls.Representation.from_dict(data)
        else:
            rep = cls.Representation(parts=data)  # Support list of Note Models

        items = [cls.NoteModelListItem.from_repr(entry) for entry in rep.parts]
        return cls(
            rep=rep,
            note_models=[item.get_note_model() for item in items]
        )

    rep: Representation
    note_models: List[PartHolder[NoteModel]]

    def execute(self) -> List[dict]:
        """Return each resolved model encoded as a CrowdAnki json dict."""
        return [holder.part.encode_as_crowdanki() for holder in self.note_models]
@dataclass
class NotesFromCrowdAnki(SharedBaseNotes, BuildPartTask):
    """Build-part task that reads the notes out of a CrowdAnki export.

    Produces a ``Notes`` part registered under ``part_id``, optionally
    persisting it to ``save_to_file``.
    """

    @classmethod
    def task_name(cls) -> str:
        return r'notes_from_crowd_anki'

    @classmethod
    def yamale_schema(cls) -> str:
        # reverse_sort is consumed as a bool (see Representation below, and
        # NotesToCrowdAnki's schema); the previous `str(required=False)`
        # accepted values like "false", which are truthy and would silently
        # enable reverse sorting.
        return f'''\
part_id: str()
source: str()
sort_order: list(str(), required=False)
save_to_file: str(required=False)
reverse_sort: bool(required=False)
'''

    @dataclass
    class Representation(RepresentationBase):
        # Raw yaml values, before resolution into project objects.
        part_id: str
        source: str
        sort_order: Optional[List[str]] = field(default=None)
        reverse_sort: Optional[bool] = field(default=None)
        save_to_file: Optional[str] = field(default=None)

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Create the task from a Representation or its raw dict form."""
        rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data)
        return cls(
            rep=rep,
            ca_export=CrowdAnkiExport.create_or_get(rep.source),
            part_id=rep.part_id,
            sort_order=SharedBaseNotes._get_sort_order(rep.sort_order),
            reverse_sort=SharedBaseNotes._get_reverse_sort(rep.reverse_sort),
            save_to_file=rep.save_to_file
        )

    rep: Representation
    part_id: str
    ca_export: CrowdAnkiExport
    sort_order: Optional[List[str]]
    reverse_sort: Optional[bool]
    save_to_file: Optional[str]

    def execute(self) -> PartHolder[Notes]:
        """Convert the export's notes and register them as a Notes part."""
        ca_wrapper: CrowdAnkiJsonWrapper = self.ca_export.json_data
        if ca_wrapper.children:
            logging.warning("Child Decks / Sub-decks are not currently supported.")

        ca_models = self.ca_export.note_models
        ca_notes = ca_wrapper.notes

        # CrowdAnki notes reference their model by id; Notes parts use names.
        nm_id_to_name: dict = {model.id: model.name for model in ca_models}
        note_list = [self.ca_note_to_note(note, nm_id_to_name) for note in ca_notes]

        notes = Notes.from_list_of_notes(note_list)  # TODO: pass in sort method
        return PartHolder.override_or_create(self.part_id, self.save_to_file, notes)

    @staticmethod
    def ca_note_to_note(note: dict, nm_id_to_name: dict) -> Note:
        """Map a single CrowdAnki note dict onto the project's Note type."""
        wrapper = CrowdAnkiNoteWrapper(note)

        return Note(
            note_model=nm_id_to_name[wrapper.note_model],
            tags=wrapper.tags,
            guid=wrapper.guid,
            fields=wrapper.fields,
            flags=wrapper.flags
        )
additional_items_to_add: map(str(), key=str(), required=False) 27 | override: include('{NotesOverride.task_name()}', required=False) 28 | case_insensitive_sort: bool(required=False) 29 | ''' 30 | 31 | @classmethod 32 | def yamale_dependencies(cls) -> set: 33 | return {NotesOverride} 34 | 35 | @dataclass 36 | class Representation(RepresentationBase): 37 | part_id: str 38 | additional_items_to_add: Optional[dict] = field(default_factory=lambda: None) 39 | sort_order: Optional[List[str]] = field(default_factory=lambda: None) 40 | reverse_sort: Optional[bool] = field(default_factory=lambda: None) 41 | override: Optional[dict] = field(default_factory=lambda: None) 42 | case_insensitive_sort: Optional[bool] = field(default_factory=lambda: None) 43 | 44 | @classmethod 45 | def from_repr(cls, data: Union[Representation, dict]): 46 | rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) 47 | return cls( 48 | rep=rep, 49 | notes=PartHolder.from_file_manager(rep.part_id).part, 50 | sort_order=SharedBaseNotes._get_sort_order(rep.sort_order), 51 | reverse_sort=SharedBaseNotes._get_reverse_sort(rep.reverse_sort), 52 | additional_items_to_add=rep.additional_items_to_add or {}, 53 | override=NotesOverride.from_repr(rep.override) if rep.override else None, 54 | case_insensitive_sort=rep.case_insensitive_sort or True 55 | ) 56 | 57 | rep: Representation 58 | notes: Notes 59 | additional_items_to_add: dict 60 | sort_order: Optional[List[str]] = field(default_factory=lambda: None) 61 | reverse_sort: Optional[bool] = field(default_factory=lambda: None) 62 | override: Optional[NotesOverride] = field(default_factory=lambda: None) 63 | case_insensitive_sort: bool = field(default=True) 64 | 65 | def execute(self, nm_name_to_id: dict) -> List[dict]: 66 | 67 | notes = self.notes.get_sorted_notes_copy( 68 | sort_by_keys=self.sort_order, 69 | reverse_sort=self.reverse_sort, 70 | case_insensitive_sort=self.case_insensitive_sort 71 | ) 72 
@dataclass
class SharedBaseNotes:
    """Mixin providing the normalisation helpers shared by the notes tasks."""

    @staticmethod
    def _get_sort_order(sort_order: Optional[Union[str, List[str]]]):
        """Normalise the sort-order option to a list of key names."""
        if isinstance(sort_order, str):
            return [sort_order]
        if isinstance(sort_order, list):
            return sort_order
        # None (option unset) and anything unexpected mean "no sorting".
        return []

    @staticmethod
    def _get_reverse_sort(reverse_sort: Optional[bool]):
        """Treat an unset reverse-sort option as False."""
        return reverse_sort if reverse_sort else False

    # Subclasses store the normalised values on themselves:
    # sort_order: Optional[List[str]]
    # reverse_sort: Optional[bool]
/brain_brew/build_tasks/csvs/csvs_generate.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | import logging 3 | from typing import List, Dict, Union 4 | 5 | from brain_brew.build_tasks.csvs.shared_base_csvs import SharedBaseCsvs 6 | from brain_brew.commands.run_recipe.build_task import TopLevelBuildTask 7 | from brain_brew.configuration.part_holder import PartHolder 8 | from brain_brew.representation.yaml.notes import Notes, Note 9 | from brain_brew.transformers.file_mapping import FileMapping 10 | from brain_brew.transformers.note_model_mapping import NoteModelMapping 11 | from brain_brew.utils import join_tags 12 | 13 | 14 | @dataclass 15 | class CsvsGenerate(SharedBaseCsvs, TopLevelBuildTask): 16 | @classmethod 17 | def task_name(cls) -> str: 18 | return r'generate_csvs' 19 | 20 | @classmethod 21 | def task_regex(cls) -> str: 22 | return r'generate_csvs?' 23 | 24 | @classmethod 25 | def yamale_schema(cls) -> str: # TODO: Use NotesOverride here, just as in NotesToCrowdAnki 26 | return f'''\ 27 | notes: str() 28 | note_model_mappings: list(include('{NoteModelMapping.task_name()}')) 29 | file_mappings: list(include('{FileMapping.task_name()}')) 30 | ''' 31 | 32 | @classmethod 33 | def yamale_dependencies(cls) -> set: 34 | return {NoteModelMapping, FileMapping} 35 | 36 | @dataclass 37 | class Representation(SharedBaseCsvs.Representation): 38 | notes: str 39 | 40 | def encode(self): 41 | return { 42 | "notes": self.notes, 43 | "file_mappings": [fm.encode() for fm in self.file_mappings], 44 | "note_model_mappings": [nmm.encode() for nmm in self.note_model_mappings] 45 | } 46 | 47 | @classmethod 48 | def from_repr(cls, data: Union[Representation, dict]): 49 | rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) 50 | return cls( 51 | rep=rep, 52 | notes=PartHolder.from_file_manager(rep.notes), 53 | file_mappings=rep.get_file_mappings(), 
54 | note_model_mappings={k: v for nm in rep.note_model_mappings for k, v in cls.map_nmm(nm).items()} 55 | ) 56 | 57 | rep: Representation 58 | notes: PartHolder[Notes] # TODO: Accept Multiple Note Parts 59 | 60 | def execute(self): 61 | self.verify_contents() 62 | 63 | notes: List[Note] = self.notes.part.get_sorted_notes_copy( 64 | sort_by_keys=[], 65 | reverse_sort=False, 66 | case_insensitive_sort=True 67 | ) 68 | self.verify_notes_match_note_model_mappings(notes) 69 | 70 | if not self.file_mappings[0].csv_file.column_headers: 71 | logging.warning("Empty top level csv found. Populating headers automatically.") 72 | model_name = self.file_mappings[0].note_model 73 | self.file_mappings[0].csv_file.set_data_from_superset({}, column_header_override=list(f.value for f in self.note_model_mappings[model_name].columns_manually_mapped)) 74 | 75 | for fm in self.file_mappings: 76 | csv_data: List[dict] = [self.note_to_csv_row(note, self.note_model_mappings) for note in notes 77 | if note.note_model in fm.get_used_note_model_names()] 78 | rows_by_guid = {row["guid"]: row for row in csv_data} 79 | 80 | fm.compile_data() 81 | fm.set_relevant_data(rows_by_guid) 82 | fm.write_file_on_close() 83 | 84 | def verify_notes_match_note_model_mappings(self, notes: List[Note]): 85 | note_models_used = {note.note_model for note in notes} 86 | errors = [TypeError(f"Unknown note model type '{model}' in deck part '{self.notes.part_id}'. " 87 | f"Add mapping for that model.") 88 | for model in note_models_used if model not in self.note_model_mappings.keys()] 89 | 90 | if errors: 91 | raise Exception(errors) 92 | 93 | @staticmethod 94 | def note_to_csv_row(note: Note, note_model_mappings: Dict[str, NoteModelMapping]) -> dict: 95 | nm_name = note.note_model 96 | row = note_model_mappings[nm_name].note_models[nm_name].part.zip_field_to_data(note.fields) 97 | row["guid"] = note.guid 98 | row["tags"] = join_tags(note.tags) 99 | # TODO: Flags? 
    @classmethod
    def task_regex(cls) -> str:
        # Recipe-key matcher: the trailing '?' accepts both the singular
        # 'generate_guids_in_csv' and the plural 'generate_guids_in_csvs'.
        return r'generate_guids_in_csvs?'
22 | 23 | @classmethod 24 | def yamale_schema(cls) -> str: 25 | return f'''\ 26 | source: any(str(), list(str())) 27 | columns: any(str(), list(str())) 28 | delimiter: str(required=False) 29 | ''' 30 | 31 | @dataclass 32 | class Representation(RepresentationBase): 33 | source: Union[str, List[str]] 34 | columns: Union[str, List[str]] 35 | delimiter: Optional[str] = field(default=None) 36 | 37 | def encode_filter(self, key, value): 38 | if not super().encode_filter(key, value): 39 | return False 40 | if key == 'delimiter' and all(CsvFile.delimiter_matches_file_type(value, f) for f in single_item_to_list(self.source)): 41 | return False 42 | return True 43 | 44 | @classmethod 45 | def from_repr(cls, data: Union[Representation, dict]): 46 | rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) 47 | csv_files = [CsvFile.create_or_get(csv) for csv in single_item_to_list(rep.source)] 48 | for c in csv_files: 49 | c.set_delimiter(rep.delimiter) 50 | c.read_file() 51 | return cls( 52 | rep=rep, 53 | sources=csv_files, 54 | columns=rep.columns 55 | ) 56 | 57 | rep: Representation 58 | sources: List[CsvFile] 59 | columns: List[str] 60 | 61 | def execute(self): 62 | logging.info("Attempting to generate Guids") 63 | 64 | errors = [] 65 | 66 | # Make sure the columns exist on all 67 | for source in self.sources: 68 | missing = [c for c in self.columns if c not in source.column_headers] 69 | if any(missing): 70 | errors.append(f"Csv '{source.file_location}' does not contain all the specified columns: {missing}") 71 | 72 | if errors: 73 | raise KeyError(errors) 74 | 75 | for source in self.sources: 76 | guids_generated = 0 77 | data = source.get_data() 78 | for row in data: 79 | for column_name in row.keys(): 80 | if column_name in self.columns and not row[column_name]: 81 | row[column_name] = generate_anki_guid() 82 | guids_generated += 1 83 | if guids_generated > 0: 84 | logging.info(f"Generated {guids_generated} guids in 
csv '{source.file_location}'") 85 | source.set_data(data) 86 | source.write_file() 87 | 88 | logging.info("Generate guids complete") 89 | -------------------------------------------------------------------------------- /brain_brew/build_tasks/csvs/notes_from_csvs.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from typing import Dict, List, Union, Optional 3 | 4 | from brain_brew.build_tasks.csvs.shared_base_csvs import SharedBaseCsvs 5 | from brain_brew.commands.run_recipe.build_task import BuildPartTask 6 | from brain_brew.configuration.part_holder import PartHolder 7 | from brain_brew.representation.yaml.notes import Note, Notes 8 | from brain_brew.transformers.file_mapping import FileMapping 9 | from brain_brew.transformers.note_model_mapping import NoteModelMapping 10 | from brain_brew.utils import split_tags 11 | 12 | 13 | @dataclass 14 | class NotesFromCsvs(SharedBaseCsvs, BuildPartTask): 15 | @classmethod 16 | def task_name(cls) -> str: 17 | return r'notes_from_csvs' 18 | 19 | @classmethod 20 | def task_regex(cls) -> str: 21 | return r'notes_from_csvs?' 
22 | 23 | @classmethod 24 | def yamale_schema(cls) -> str: 25 | return f'''\ 26 | part_id: str() 27 | save_to_file: str(required=False) 28 | note_model_mappings: list(include('{NoteModelMapping.task_name()}')) 29 | file_mappings: list(include('{FileMapping.task_name()}')) 30 | ''' 31 | 32 | @classmethod 33 | def yamale_dependencies(cls) -> set: 34 | return {NoteModelMapping, FileMapping} 35 | 36 | @dataclass 37 | class Representation(SharedBaseCsvs.Representation): 38 | part_id: str 39 | save_to_file: Optional[str] = field(default=None) 40 | 41 | def encode(self): 42 | return { 43 | "part_id": self.part_id, 44 | "save_to_file": self.save_to_file, 45 | "file_mappings": [fm.encode() for fm in self.file_mappings], 46 | "note_model_mappings": [nmm.encode() for nmm in self.note_model_mappings] 47 | } 48 | 49 | @classmethod 50 | def from_repr(cls, data: Union[Representation, dict]): 51 | rep: cls.Representation = data if isinstance(data, cls.Representation) else cls.Representation.from_dict(data) 52 | return cls( 53 | rep=rep, 54 | part_id=rep.part_id, 55 | save_to_file=rep.save_to_file, 56 | file_mappings=rep.get_file_mappings(), 57 | note_model_mappings={k: v for nm in rep.note_model_mappings for k, v in cls.map_nmm(nm).items()} 58 | ) 59 | 60 | rep: Representation 61 | part_id: str 62 | save_to_file: Optional[str] 63 | 64 | def execute(self): 65 | self.verify_contents() 66 | 67 | csv_data_by_guid: Dict[str, dict] = {} 68 | for csv_map in self.file_mappings: 69 | csv_map.compile_data() 70 | csv_data_by_guid = {**csv_data_by_guid, **csv_map.compiled_data} 71 | csv_map.write_file_on_close() 72 | csv_rows: List[dict] = list(csv_data_by_guid.values()) 73 | 74 | notes_part: List[Note] = [self.csv_row_to_note(row, self.note_model_mappings) for row in csv_rows] 75 | 76 | notes = Notes.from_list_of_notes(notes_part) 77 | PartHolder.override_or_create(self.part_id, self.save_to_file, notes) 78 | 79 | @staticmethod 80 | def csv_row_to_note(row: dict, note_model_mappings: 
Dict[str, NoteModelMapping]) -> Note: 81 | note_model_name = row["note_model"] # TODO: Use object 82 | row_nm: NoteModelMapping = note_model_mappings[note_model_name] 83 | 84 | filtered_fields = row_nm.csv_row_map_to_note_fields(row) 85 | 86 | guid = filtered_fields.pop("guid") 87 | tags = split_tags(filtered_fields.pop("tags")) 88 | flags = filtered_fields.pop("flags") if "flags" in filtered_fields else 0 89 | 90 | fields = row_nm.field_values_in_note_model_order(note_model_name, filtered_fields) 91 | 92 | return Note(guid=guid, tags=tags, note_model=note_model_name, fields=fields, flags=flags) 93 | -------------------------------------------------------------------------------- /brain_brew/build_tasks/csvs/shared_base_csvs.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from dataclasses import dataclass 3 | from typing import List, Dict 4 | 5 | from brain_brew.configuration.representation_base import RepresentationBase 6 | from brain_brew.transformers.file_mapping import FileMapping 7 | from brain_brew.transformers.note_model_mapping import NoteModelMapping 8 | 9 | 10 | @dataclass 11 | class SharedBaseCsvs: 12 | @dataclass(init=False) 13 | class Representation(RepresentationBase): 14 | file_mappings: List[FileMapping.Representation] 15 | note_model_mappings: List[NoteModelMapping.Representation] 16 | 17 | def __init__(self, file_mappings, note_model_mappings): 18 | self.file_mappings = list(map(FileMapping.Representation.from_dict, file_mappings)) 19 | self.note_model_mappings = list(map(NoteModelMapping.Representation.from_dict, note_model_mappings)) 20 | 21 | def get_file_mappings(self) -> List[FileMapping]: 22 | return list(map(FileMapping.from_repr, self.file_mappings)) 23 | 24 | file_mappings: List[FileMapping] 25 | note_model_mappings: Dict[str, NoteModelMapping] 26 | 27 | @classmethod 28 | def map_nmm(cls, nmm_to_map): 29 | nmm = NoteModelMapping.from_repr(nmm_to_map) 30 | return 
nmm.get_note_model_mapping_dict() 31 | 32 | def verify_contents(self): 33 | errors = [] 34 | 35 | for nm in self.note_model_mappings.values(): 36 | try: 37 | nm.verify_contents() 38 | except KeyError as e: 39 | errors.append(e) 40 | 41 | # Check all referenced note models have a mapping 42 | for csv_map in self.file_mappings: 43 | for nm in csv_map.get_used_note_model_names(): 44 | if nm not in self.note_model_mappings.keys(): 45 | errors.append(f"Missing Note Model Map for {nm}") 46 | 47 | # Check each of the Csvs (or their derivatives) contain all the necessary columns for their stated note model 48 | for cfm in self.file_mappings: 49 | note_model_names = cfm.get_used_note_model_names() 50 | available_columns = cfm.get_available_columns() 51 | 52 | referenced_note_models_maps = [value for key, value in self.note_model_mappings.items() 53 | if key in note_model_names] 54 | for nm_map in referenced_note_models_maps: 55 | for holder in nm_map.note_models.values(): 56 | if holder.part.name in note_model_names: 57 | missing_columns = [col for col in holder.part.field_names_lowercase if 58 | col not in nm_map.csv_headers_map_to_note_fields(available_columns)] 59 | if missing_columns: 60 | logging.warning(f"Csvs are missing columns from {holder.part_id} {missing_columns}") 61 | 62 | if errors: 63 | raise Exception(errors) 64 | -------------------------------------------------------------------------------- /brain_brew/build_tasks/deck_parts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/brain_brew/build_tasks/deck_parts/__init__.py -------------------------------------------------------------------------------- /brain_brew/build_tasks/deck_parts/from_yaml_part.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta 2 | from dataclasses import dataclass 3 | from typing import 
@dataclass
class FromYamlPartBase(BuildPartTask, metaclass=ABCMeta):
    """Shared base for tasks that load a single deck part from a yaml file.

    Concrete subclasses set ``part_type`` to the YamlObject subclass that
    should be loaded from the configured file.
    """

    part_type = None  # overridden by concrete subclasses

    @classmethod
    def yamale_schema(cls) -> str:
        return f'''\
part_id: str()
file: str()
'''

    @dataclass
    class Representation(RepresentationBase):
        part_id: str
        file: str

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task, loading and registering the part immediately."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)

        loaded = cls.part_type.from_yaml_file(rep.file)
        return cls(
            rep=rep,
            part=PartHolder.override_or_create(part_id=rep.part_id, save_to_file=None, part=loaded)
        )

    def execute(self):
        # Loading already happened in from_repr; nothing left to do here.
        pass

    rep: Representation
    part: YamlObject
@dataclass
class HeadersFromYamlPart(BuildPartTask):
    """Build-part task that loads deck Headers from a yaml file, applying an
    optional HeadersOverride when the task executes."""

    @classmethod
    def yamale_schema(cls) -> str:
        return f'''\
part_id: str()
file: str()
override: include('{HeadersOverride.task_name()}', required=False)
'''

    @classmethod
    def yamale_dependencies(cls) -> set:
        return {HeadersOverride}

    @classmethod
    def task_name(cls) -> str:
        return r'headers_from_yaml_part'

    @classmethod
    def task_regex(cls) -> str:
        return r'headers?_from_yaml_part'

    @dataclass
    class Representation(RepresentationBase):
        part_id: str
        file: str
        override: Optional[dict] = field(default=None)

        def encode(self):
            """Serialise back to yaml-ready data, omitting an unset override."""
            encoded = {
                "part_id": self.part_id,
                "file": self.file
            }
            if self.override:
                encoded["override"] = self.override
            return encoded

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)

        holder = PartHolder.override_or_create(
            part_id=rep.part_id,
            save_to_file=None,
            part=Headers.from_yaml_file(rep.file)
        )
        return cls(
            rep=rep,
            headers=holder.part,
            override=HeadersOverride.from_repr(rep.override) if rep.override else None
        )

    rep: Representation
    headers: Headers
    override: Optional[HeadersOverride]

    def execute(self):
        # The override is applied at execution time, not at load time.
        if self.override:
            self.headers = self.override.override(self.headers)
@dataclass
class MediaGroupFromFolder(BuildPartTask):
    """Build-part task that collects a folder's media files into a MediaGroup,
    optionally filtered against media referenced by other parts."""

    @classmethod
    def task_name(cls) -> str:
        return r"media_group_from_folder"

    @classmethod
    def yamale_schema(cls) -> str:
        return f'''\
part_id: str()
source: str()
save_to_file: str(required=False)
recursive: bool(required=False)
filter_whitelist_from_parts: list(str(), required=False)
filter_blacklist_from_parts: list(str(), required=False)
'''

    @dataclass
    class Representation(RepresentationBase):
        part_id: str
        source: str
        filter_blacklist_from_parts: List[str] = field(default_factory=list)
        filter_whitelist_from_parts: List[str] = field(default_factory=list)
        recursive: Optional[bool] = field(default=True)
        save_to_file: Optional[str] = field(default=None)

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)

        # Resolve the referenced parts into their MediaGroups up front.
        blacklist_groups = [PartHolder.from_file_manager(pid).part
                            for pid in rep.filter_blacklist_from_parts]
        whitelist_groups = [PartHolder.from_file_manager(pid).part
                            for pid in rep.filter_whitelist_from_parts]

        part = create_media_group_from_location(
            part_id=rep.part_id,
            save_to_file=rep.save_to_file,
            media_group=MediaGroup.from_directory(rep.source, rep.recursive),
            groups_to_blacklist=blacklist_groups,
            groups_to_whitelist=whitelist_groups
            # match criteria
        )
        return cls(rep=rep, part=part)

    rep: Representation
    part: MediaGroup

    def execute(self):
        # All work already happened in from_repr.
        pass
@dataclass
class NoteModelsFromYamlPart(FromYamlPartBase, BuildPartTask):
    """Build task that loads a NoteModel deck part directly from a yaml part file."""

    @classmethod
    def task_name(cls) -> str:
        return 'note_models_from_yaml_part'

    @classmethod
    def task_regex(cls) -> str:
        # Accept both the singular and plural spelling of the task name.
        return r'note_models?_from_yaml_part'

    @classmethod
    def yamale_schema(cls) -> str:
        return '''\
part_id: str()
file: str()
'''

    @dataclass
    class Representation(RepresentationBase):
        part_id: str
        file: str
        # TODO: Overrides

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Normalise *data* into a Representation, load the model, register it."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)

        loaded_model = NoteModel.from_yaml_file(rep.file)
        holder = PartHolder.override_or_create(
            part_id=rep.part_id, save_to_file=None, part=loaded_model)
        return cls(rep=rep, part=holder)

    def execute(self):
        # The part is registered during from_repr; nothing further to do here.
        pass
@dataclass
class SaveMediaGroupsToFolder(TopLevelBuildTask):
    """Top-level task that writes one or more MediaGroup parts out to a folder."""

    @classmethod
    def task_name(cls) -> str:
        return 'save_media_groups_to_folder'

    @classmethod
    def task_regex(cls) -> str:
        # Singular or plural "group(s)" both match.
        return r"save_media_groups?_to_folder"

    @classmethod
    def yamale_schema(cls) -> str:
        return '''\
parts: list(str())
folder: str()
clear_folder: bool(required=False)
recursive: bool(required=False)
'''

    @dataclass
    class Representation(RepresentationBase):
        parts: List[str]
        folder: str
        clear_folder: Optional[bool] = field(default=None)
        recursive: Optional[bool] = field(default=None)

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task from a Representation (or raw recipe dict)."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)

        resolved = [PartHolder.from_file_manager(part_id).part for part_id in rep.parts]
        return cls(
            rep=rep,
            parts=resolved,
            folder=rep.folder,
            # Unset optionals default to False.
            clear_folder=bool(rep.clear_folder),
            recursive=bool(rep.recursive),
        )

    rep: Representation
    parts: List[MediaGroup]
    folder: str
    clear_folder: bool
    recursive: bool

    def execute(self):
        # Delegate the actual writing to the shared transformer.
        save_media_groups_to_location(self.parts, self.folder, self.clear_folder, self.recursive)
@dataclass
class SaveNoteModelsToFolder(TopLevelBuildTask):
    """Top-level task that writes NoteModel parts out to a folder as yaml files."""

    @classmethod
    def task_name(cls) -> str:
        return 'save_note_models_to_folder'

    @classmethod
    def task_regex(cls) -> str:
        # Singular or plural "model(s)" both match.
        return r"save_note_models?_to_folder"

    @classmethod
    def yamale_schema(cls) -> str:
        return '''\
parts: list(str())
folder: str()
clear_existing: bool(required=False)
'''

    @dataclass
    class Representation(RepresentationBase):
        parts: List[str]
        folder: str
        clear_existing: Optional[bool] = field(default=None)

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the task from a Representation (or raw recipe dict)."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)

        resolved = [PartHolder.from_file_manager(part_id).part for part_id in rep.parts]
        return cls(
            rep=rep,
            parts=resolved,
            folder=rep.folder,
            # Unset optional defaults to False.
            clear_existing=bool(rep.clear_existing),
        )

    rep: Representation
    parts: List[NoteModel]
    folder: str
    clear_existing: bool

    def execute(self) -> Dict[str, str]:
        """Save every model; return {model name: saved yaml path}.

        Note: the save is performed for every part even when two parts share a
        name; only the first saved path is kept for that name.
        """
        model_yaml_files: Dict[str, str] = {}
        for model in self.parts:
            saved_path = save_note_model_to_location(model, self.folder, self.clear_existing)
            model_yaml_files.setdefault(model.name, saved_path)
        return model_yaml_files
@dataclass
class HeadersOverride(YamlRepr):
    """Override selected values on a Headers part: uuid, description html, name."""

    @classmethod
    def task_name(cls) -> str:
        return r"headers_override"

    @classmethod
    def yamale_schema(cls) -> str:
        return '''\
crowdanki_uuid: str(required=False)
deck_description_html_file: str(required=False)
name: str(required=False)
'''

    @dataclass
    class Representation(RepresentationBase):
        crowdanki_uuid: Optional[str] = field(default=None)
        deck_description_html_file: Optional[str] = field(default=None)
        name: Optional[str] = field(default=None)

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the override from a Representation (or raw recipe dict)."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)

        return cls(
            rep=rep,
            crowdanki_uuid=rep.crowdanki_uuid,
            deck_desc_html_file=HTMLFile.create_or_get(rep.deck_description_html_file),
            name=rep.name,
        )

    rep: Representation
    crowdanki_uuid: Optional[str]
    deck_desc_html_file: Optional[HTMLFile]
    name: Optional[str]

    def override(self, header: Headers):
        """Apply each configured value onto *header* and return it."""
        if self.deck_desc_html_file:
            header.description = self.deck_desc_html_file.get_data(deep_copy=True)
        if self.crowdanki_uuid:
            header.crowdanki_uuid = self.crowdanki_uuid
        if self.name:
            header.name = self.name
        return header
@dataclass
class NotesOverride(YamlRepr):
    """Override applied to every Note; currently only the note model name."""

    @classmethod
    def task_name(cls) -> str:
        return r"notes_override"

    @classmethod
    def yamale_schema(cls) -> str:
        return '''\
note_model: str(required=False)
'''

    @dataclass
    class Representation(RepresentationBase):
        note_model: Optional[str]

    @classmethod
    def from_repr(cls, data: Union[Representation, dict]):
        """Build the override from a Representation (or raw recipe dict)."""
        if isinstance(data, cls.Representation):
            rep = data
        else:
            rep = cls.Representation.from_dict(data)
        return cls(rep=rep, note_model=rep.note_model)

    rep: Representation
    note_model: Optional[str]

    def override(self, note: Note):
        """Return *note* with the configured note model applied (when set)."""
        if self.note_model:
            note.note_model = self.note_model
        return note
class BBArgumentReader(ArgumentParser):
    """Argument parser for the brainbrew CLI: turns argv into a concrete Command."""

    def __init__(self, test_mode=False):
        super().__init__(
            prog="brainbrew",
            description='Manage Flashcards by transforming them to various types.'
        )

        self._set_parser_arguments()

        # With no arguments at all, print usage to stderr and exit non-zero.
        # Skipped under test_mode so tests can call get_parsed with explicit args.
        if not test_mode and len(sys.argv) == 1:
            self.print_help(sys.stderr)
            sys.exit(1)

    def _set_parser_arguments(self):
        # One subcommand per entry in the Commands enum; the chosen name lands
        # in parsed_args.command.
        subparsers = self.add_subparsers(parser_class=ArgumentParser, help='Commands that can be run', dest="command")

        parser_run = subparsers.add_parser(
            Commands.RUN_RECIPE.value,
            help="Run a recipe file. This will convert some data to another format, based on the instructions in the recipe file."
        )
        parser_run.add_argument(
            "recipe",
            metavar="recipe",
            type=str,
            help="Yaml file to use as the recipe"
        )
        parser_run.add_argument(
            "--verify", "-v",
            action="store_true",
            dest="verify_only",
            default=False,
            help="Only verify the recipe contents, without running it."
        )

        parser_init = subparsers.add_parser(
            Commands.INIT_REPO.value,
            help="Initialise a Brain Brew repository, using a CrowdAnki export as the base data."
        )
        parser_init.add_argument(
            "crowdanki_folder",
            metavar="crowdanki_folder",
            type=str,
            help="The folder that stores the CrowdAnki files to build this repo from"
        )
        parser_init.add_argument(
            '--delimiter',
            dest='delimiter',
            action='store',
            help="Set the delimiter for Csv files to specific character",
            type=str
        )
        parser_init.add_argument(
            "--delimitertab", "--tab",
            action="store_true",
            dest="delimiter_tab",
            default=False,
            help="Use tabs as the delimiter for Csv files"
        )

    def get_parsed(self, override_args=None) -> Command:
        """Parse argv (or *override_args*) and build the matching Command.

        Raises KeyError when no known subcommand was given.
        """
        parsed_args = self.parse_args(args=override_args)

        if parsed_args.command == Commands.RUN_RECIPE.value:
            # Required
            recipe = self.error_if_blank(parsed_args.recipe)

            # Optional
            verify_only = parsed_args.verify_only

            return RunRecipe(
                recipe_file_name=recipe,
                verify_only=verify_only
            )

        if parsed_args.command == Commands.INIT_REPO.value:
            # Required
            crowdanki_folder = parsed_args.crowdanki_folder
            delimiter = parsed_args.delimiter
            delimiter_tab = parsed_args.delimiter_tab

            return InitRepo(
                crowdanki_folder=crowdanki_folder,
                # --tab takes precedence over any --delimiter value.
                delimiter="\t" if delimiter_tab else delimiter
            )

        raise KeyError("Unknown Command")

    def error_if_blank(self, arg):
        # Empty string is treated the same as a missing argument.
        if arg == "" or arg is None:
            self.error("Required argument missing")
        return arg

    def error(self, message):
        # Override argparse's error handler to always show full help before exiting.
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)

    def print_help(self, message=None):
        # Prefix the version banner, then defer to argparse.
        # NOTE: despite its name, *message* is forwarded as ArgumentParser.print_help's
        # `file` argument (the output stream) — callers pass sys.stderr here.
        print(f"Brain Brew v{latest_version_number()}")
        super().print_help(message)
class BuildTask(YamlRepr, object, metaclass=ABCMeta):
    """Base class for every recipe task.

    Subclasses declare how they are named in a recipe (task_name / task_regex)
    and what to do when run (execute).
    """

    # When True the task runs as soon as it is read from the recipe,
    # rather than waiting for the builder's execute pass.
    execute_immediately: bool = False
    # When True a recipe entry may map this task's name to a list of bodies.
    accepts_list_of_self: bool = True
    rep: Optional[RepresentationBase]

    def encode_rep(self) -> Dict[str, any]:
        """Re-encode this task's representation back into recipe-dict form."""
        return self.rep.encode()

    @abstractmethod
    def execute(self):
        pass

    @classmethod
    def task_regex(cls) -> str:
        # By default a task only matches its exact name.
        return cls.task_name()

    @classmethod
    def get_all_task_regex(cls, subclasses: Set[Type['BuildTask']]) -> Dict[str, Type['BuildTask']]:
        """Map each subclass's task regex to the subclass, rejecting duplicates.

        Bug fix: the regex was previously compared as a bound method object
        (`sc.task_regex`) rather than its value, so the duplicate and empty
        checks could never fire and duplicates were silently dropped by
        setdefault.  Now the regex string itself is checked.

        Raises KeyError for an empty regex or a regex used by two subclasses.
        """
        task_regex_matches: Dict[str, Type[BuildTask]] = {}

        for sc in subclasses:
            regex = sc.task_regex()
            if not regex:
                raise KeyError(f"Unknown task regex in {sc.__name__}")
            if regex in task_regex_matches:
                raise KeyError(f"Multiple instances of task regex '{regex}'")
            task_regex_matches[regex] = sc

        # logging.debug(f"Known build tasks: {known_build_tasks}")
        return task_regex_matches
@dataclass
class PartsBuilder(RecipeBuilder, TopLevelBuildTask):
    """Top-level task that groups all of a recipe's 'build part' tasks."""

    tasks: List[BuildPartTask]
    accepts_list_of_self: bool = False

    @classmethod
    def task_name(cls) -> str:
        return 'build_parts'

    @classmethod
    def task_regex(cls) -> str:
        # Singular or plural spelling.
        return r'build_parts?'

    @classmethod
    def known_task_dict(cls) -> Dict[str, Type[BuildTask]]:
        return BuildPartTask.get_all_task_regex(cls.yamale_dependencies())

    @classmethod
    def from_repr(cls, data: List[dict]):
        """Recipe data for this builder must be a list of task entries."""
        if isinstance(data, list):
            return cls.from_list(data)
        raise TypeError("PartsBuilder needs a list")

    def encode(self) -> dict:
        pass

    def encode_rep(self) -> list:
        return self.tasks_to_encoded()

    @classmethod
    def from_yaml_file(cls, filename: str):
        pass

    @classmethod
    def yamale_schema(cls) -> str:
        return cls.build_yamale_root_node(cls.yamale_dependencies())

    @classmethod
    def yamale_dependencies(cls) -> Set[Type[BuildPartTask]]:
        # Every part task this builder understands.
        return {
            NotesFromCsvs,
            NotesFromYamlPart, HeadersFromYamlPart, NoteModelsFromYamlPart, MediaGroupFromYamlPart,
            MediaGroupFromFolder,
            NoteModelFromHTMLParts, NoteModelSingleFromCrowdAnki, NoteModelsAllFromCrowdAnki,
            HeadersFromCrowdAnki, MediaGroupFromCrowdAnki, NotesFromCrowdAnki,
        }
@dataclass
class RecipeBuilder(YamlObject, metaclass=ABCMeta):
    """Base for builders that read an ordered list of named tasks from recipe yaml."""

    tasks: List[BuildTask]

    def tasks_to_encoded(self) -> list:
        # Inverse of read_tasks: [{task_name: representation-dict}, ...].
        return [{task.task_name(): task.encode_rep()} for task in self.tasks]

    @classmethod
    def from_list(cls, data: List[dict]):
        tasks = cls.read_tasks(data)
        return cls(
            tasks=tasks
        )

    @classmethod
    @abstractmethod
    def known_task_dict(cls) -> Dict[str, Type[BuildTask]]:
        # Mapping of task-name regex -> task class that this builder accepts.
        pass

    @classmethod
    def build_yamale_root_node(cls, subclasses: Set[Type['BuildTask']]) -> str:
        """Build the yamale `list(...)` root node validating entries for *subclasses*."""
        task_list = []
        for c in sorted(subclasses, key=lambda x: x.task_name()):
            # Tasks that accept a list of themselves validate as single-or-list.
            task_command = f"any(include('{c.task_name()}'), list(include('{c.task_name()}')))"\
                if c.accepts_list_of_self else f"include('{c.task_name()}')"
            task_list.append(f"map({task_command}, key=regex('{c.task_regex()}', ignore_case=True))")

        final_tasks: str = "list(\n" + indent(",\n".join(task_list), '    ') + "\n)\n"

        return final_tasks

    @classmethod
    def read_tasks(cls, tasks: List[dict]) -> list:
        """Instantiate each {name: arguments} recipe entry into a task object.

        Tasks flagged execute_immediately run during this read pass, so later
        tasks can rely on the parts they register.
        """
        task_regex_matches = cls.known_task_dict()
        build_tasks = []

        def find_matching_task(task_n):
            # NOTE(review): re.match anchors only at the start, so a pattern
            # like 'build_parts?' also matches longer names such as
            # 'build_parts_x' — confirm whether fullmatch semantics were intended.
            for regex, task_to_run in task_regex_matches.items():
                if re.match(regex, task_n, re.RegexFlag.IGNORECASE):
                    return task_to_run
            return None

        # Tasks
        for task in tasks:
            task_keys = list(task.keys())
            if len(task_keys) != 1:
                raise KeyError(f"Task should only contain 1 entry, but contains {task_keys} instead. "
                               f"Missing list separator '-'?", task)

            task_name = task_keys[0]
            task_arguments = task[task_keys[0]]

            matching_task = find_matching_task(task_name)
            if matching_task is not None:
                # A list argument to a list-accepting task expands into one
                # task object per element.
                if matching_task.accepts_list_of_self and isinstance(task_arguments, list):
                    task_or_tasks = [matching_task.from_repr(t_arg) for t_arg in task_arguments]
                else:
                    task_or_tasks = [matching_task.from_repr(task_arguments)]

                for inner_task in task_or_tasks:
                    build_tasks.append(inner_task)
                    if inner_task.execute_immediately:
                        inner_task.execute()
            else:
                raise KeyError(f"Unknown task '{task_name}'")  # TODO: check this first on all and return all errors

        return build_tasks

    def execute(self):
        # Immediate tasks already ran inside read_tasks; run the rest in order.
        for task in self.tasks:
            if not task.execute_immediately:
                task.execute()
" 58 | f"Missing list separator '-'?", task) 59 | 60 | task_name = task_keys[0] 61 | task_arguments = task[task_keys[0]] 62 | 63 | matching_task = find_matching_task(task_name) 64 | if matching_task is not None: 65 | if matching_task.accepts_list_of_self and isinstance(task_arguments, list): 66 | task_or_tasks = [matching_task.from_repr(t_arg) for t_arg in task_arguments] 67 | else: 68 | task_or_tasks = [matching_task.from_repr(task_arguments)] 69 | 70 | for inner_task in task_or_tasks: 71 | build_tasks.append(inner_task) 72 | if inner_task.execute_immediately: 73 | inner_task.execute() 74 | else: 75 | raise KeyError(f"Unknown task '{task_name}'") # TODO: check this first on all and return all errors 76 | 77 | return build_tasks 78 | 79 | def execute(self): 80 | for task in self.tasks: 81 | if not task.execute_immediately: 82 | task.execute() 83 | -------------------------------------------------------------------------------- /brain_brew/commands/run_recipe/run_recipe.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from brain_brew.interfaces.command import Command 3 | from brain_brew.commands.run_recipe.top_level_builder import TopLevelBuilder 4 | from brain_brew.configuration.yaml_verifier import YamlVerifier 5 | 6 | 7 | @dataclass 8 | class RunRecipe(Command): 9 | recipe_file_name: str 10 | verify_only: bool 11 | 12 | def execute(self): 13 | # Parse Build Config File 14 | YamlVerifier() 15 | recipe = TopLevelBuilder.parse_and_read(self.recipe_file_name, self.verify_only) 16 | 17 | if not self.verify_only: 18 | recipe.execute() 19 | -------------------------------------------------------------------------------- /brain_brew/commands/run_recipe/top_level_builder.py: -------------------------------------------------------------------------------- 1 | from textwrap import indent, dedent 2 | from typing import Dict, Type, List, Set 3 | 4 | from 
class TopLevelBuilder(YamlRepr, RecipeBuilder):
    """Root recipe builder: knows every top-level task and can generate the
    complete yamale schema document used to validate recipe files."""

    @classmethod
    def known_task_dict(cls) -> Dict[str, Type[BuildTask]]:
        values = TopLevelBuildTask.get_all_task_regex(cls.yamale_dependencies())
        return values

    @classmethod
    def build_yamale(cls):
        """Generate the full yamale schema text: root node, top-level task
        schemas, then all transitively-reachable dependency schemas."""
        separator = '\n---\n'
        top_level = cls.yamale_dependencies()

        builder: List[str] = [cls.build_yamale_root_node(top_level), separator]

        def to_sorted_yamale_string(lines: Set[Type[BuildTask]]):
            # One named schema section per task, sorted for deterministic output.
            return [f'''{line.task_name()}:\n{indent(dedent(line.yamale_schema()), '    ')}'''
                    for line in sorted(lines, key=lambda x: x.task_name())]

        # Schema
        builder += to_sorted_yamale_string(top_level)

        builder.append(separator)

        # Dependencies
        def resolve_dependencies(deps: Set[Type[BuildTask]]) -> Set[Type[BuildTask]]:
            # Recursively collect every task reachable from *deps*.
            result = set()
            for d in deps:
                result.add(d)
                result = result.union(resolve_dependencies(d.yamale_dependencies()))
            return result

        children = resolve_dependencies(cls.yamale_dependencies())
        builder += to_sorted_yamale_string({dep for dep in children if dep not in top_level})

        return '\n'.join(builder)

    @classmethod
    def parse_and_read(cls, filename, verify_only: bool) -> 'TopLevelBuilder':
        # NOTE(review): returns None when verify_only is True, despite the
        # annotation — callers must handle that case.
        recipe_data = cls.read_to_dict(filename)

        # Imported here to avoid a circular import at module load time.
        from brain_brew.configuration.yaml_verifier import YamlVerifier
        YamlVerifier.get_instance().verify_recipe(filename)

        if verify_only:
            return None

        return cls.from_list(recipe_data)

    # The top-level builder has no task name or schema of its own;
    # these stubs satisfy the YamlRepr interface.
    @classmethod
    def task_name(cls) -> str:
        pass

    @classmethod
    def yamale_schema(cls) -> str:
        pass

    @classmethod
    def from_repr(cls, data: dict):
        pass

    def encode(self) -> list:
        return self.tasks_to_encoded()

    @classmethod
    def from_yaml_file(cls, filename: str):
        pass

    @classmethod
    def yamale_dependencies(cls) -> Set[Type[TopLevelBuildTask]]:
        # Every task allowed at the top level of a recipe.
        return {
            PartsBuilder,
            CrowdAnkiGenerate, CsvsGenerate,
            GenerateGuidsInCsvs, SaveMediaGroupsToFolder, SaveNoteModelsToFolder
        }
class AnkiField:
    """One field of an Anki data structure: our name, Anki's name, and a default."""

    name: str
    anki_name: str
    default_value: any

    def __init__(self, anki_name, name=None, default_value=None):
        self.anki_name = anki_name
        self.default_value = default_value
        # Fall back to Anki's own name when no friendly name is given.
        self.name = anki_name if name is None else name

    def append_name_if_differs(self, dict_to_add_to: dict, value):
        """Store *value* under our name in the dict, but only when it is not the default."""
        if self.does_differ(value):
            dict_to_add_to.setdefault(self.name, value)

    def does_differ(self, value):
        """True when *value* differs from this field's default."""
        return value != self.default_value
class FileManager:
    """Singleton registry of every known source file and deck part."""

    __instance = None

    known_files_dict: Dict[str, SourceFile]
    known_parts: Dict[str, PartHolder[YamlObject]]

    def __init__(self):
        # Enforce the singleton: only the first construction succeeds.
        if FileManager.__instance is not None:
            raise Exception("Multiple FileManagers created")
        FileManager.__instance = self

        self.known_files_dict = {}
        self.known_parts = {}

    @staticmethod
    def get_instance() -> 'FileManager':
        return FileManager.__instance

    @staticmethod
    def clear_instance():
        if FileManager.__instance:
            FileManager.__instance = None

    # Source Files

    def register_file(self, full_path, file):
        """Register *file* under *full_path*; each path may only be registered once."""
        if full_path in self.known_files_dict:
            raise FileExistsError(f"File already known to FileManager, cannot be registered twice: {full_path}")
        self.known_files_dict[full_path] = file

    def file_if_exists(self, file_location) -> Union[SourceFile, None]:
        """Return the registered file for *file_location*, or None when unknown."""
        return self.known_files_dict.get(file_location)

    # Deck Parts

    def register_part(self, dp: PartHolder) -> PartHolder:
        """Register a part holder; part ids must be unique."""
        if dp.part_id in self.known_parts:
            raise KeyError(f"Cannot use same name '{dp.part_id}' for multiple Deck Parts")
        self.known_parts[dp.part_id] = dp
        return dp

    def get_part_if_exists(self, dp_name) -> Union[PartHolder[YamlObject], None]:
        return self.known_parts.get(dp_name)

    def get_part(self, name: str):
        """Return the part named *name*; raise KeyError when it is unknown."""
        if name in self.known_parts:
            return self.known_parts[name]
        raise KeyError(f"Cannot find Deck Part '{name}'")
@dataclass
class PartHolder(Generic[T]):
    """Named wrapper around a deck part, optionally persisted to a yaml file."""

    part_id: str
    save_to_file: Optional[str]
    part: T

    file_manager = None

    @classmethod
    def get_file_manager(cls):
        # Lazily resolve the FileManager singleton; import deferred to avoid
        # a circular import at module load time.
        if not cls.file_manager:
            from brain_brew.configuration.file_manager import FileManager
            cls.file_manager = FileManager.get_instance()
        return cls.file_manager

    @classmethod
    def from_file_manager(cls, part_id: str) -> T:
        """Look up an already-registered part holder by id."""
        return cls.get_file_manager().get_part(part_id)

    @classmethod
    def override_or_create(cls, part_id: str, save_to_file: Optional[str], part: T):
        """Register a new part under *part_id*, or overwrite an existing one,
        then persist it when a save file is configured."""
        fm = cls.get_file_manager()

        holder = fm.get_part_if_exists(part_id)
        if holder is None:
            holder = fm.register_part(PartHolder(part_id, save_to_file, part))
        else:
            logging.warning(f"Overwriting existing Deck Part '{part_id}'")
            holder.part = part
            holder.save_to_file = save_to_file

        holder.write_to_file()
        return holder

    def write_to_file(self):
        # No-op unless a destination file was configured.
        if self.save_to_file is not None:
            self.part.dump_to_yaml(self.save_to_file)
class RepresentationBase:
    """Base for recipe representations: dict round-tripping with unknown-key warnings."""

    @classmethod
    def from_dict(cls, data: dict):
        """Construct from *data*, keeping only keys the constructor accepts.

        Unknown keys are dropped with a warning rather than raising.
        """
        accepted = inspect.signature(cls).parameters
        expected_values = {k: v for k, v in data.items() if k in accepted}

        if len(expected_values) != len(data):
            unexpected = [k for k in data if k not in expected_values]
            logging.warning(f"Unexpected values found when creating '{cls.__name__}': "
                            f"{unexpected}"
                            "\n!!! Please report this error if it seems strange")

        return cls(**expected_values)

    def encode(self):
        """Encode to a dict, omitting entries rejected by encode_filter."""
        return {k: v for k, v in self.__dict__.items() if self.encode_filter(k, v)}

    def encode_filter(self, key, value):
        # Falsy values (None, "", [], 0, False) are omitted from the encoding.
        return bool(value)
good") 41 | -------------------------------------------------------------------------------- /brain_brew/front_matter.py: -------------------------------------------------------------------------------- 1 | def latest_version_number(): 2 | return "0.3.11" 3 | -------------------------------------------------------------------------------- /brain_brew/interfaces/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/brain_brew/interfaces/__init__.py -------------------------------------------------------------------------------- /brain_brew/interfaces/command.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class Command(ABC): 5 | @abstractmethod 6 | def execute(self): 7 | pass 8 | -------------------------------------------------------------------------------- /brain_brew/interfaces/media_container.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Set 3 | 4 | 5 | class MediaContainer(ABC): 6 | @abstractmethod 7 | def get_all_media_references(self) -> Set[str]: 8 | pass 9 | -------------------------------------------------------------------------------- /brain_brew/interfaces/yamale_verifyable.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class YamlRepr(ABC): 5 | @classmethod 6 | @abstractmethod 7 | def task_name(cls) -> str: 8 | pass 9 | 10 | @classmethod 11 | @abstractmethod 12 | def yamale_schema(cls) -> str: 13 | pass 14 | 15 | @classmethod 16 | def yamale_dependencies(cls) -> set: 17 | return set() 18 | 19 | @classmethod 20 | @abstractmethod 21 | def from_repr(cls, data: dict): 22 | pass 23 | 
def main():
    """Entry point: configure logging, parse the CLI arguments, and run the chosen command."""
    logging.basicConfig(level=logging.DEBUG)

    # Parse the command line straight into an executable Command object
    command = BBArgumentReader().get_parsed()

    # Instantiate the FileManager singleton so later file lookups share one cache
    FileManager()

    command.execute()
class CsvFile(SourceFile):
    """A delimited text file (CSV or TSV) holding note rows as dicts keyed by lower-cased headers."""
    file_location: str = ""
    delimiter: str = ','

    def __init__(self, file, delimiter=None):
        self.file_location = file
        # Bug fix: _data and column_headers were mutable class attributes,
        # silently shared between every CsvFile instance; they are now
        # initialised as per-instance state.
        self._data: List[dict] = []
        self.column_headers: list = []
        self.set_delimiter(delimiter)

    def set_delimiter(self, delimiter: str):
        """Use the explicit delimiter when given, otherwise infer tab from a .tsv extension."""
        if delimiter:
            self.delimiter = delimiter
        elif re.match(r'.*\.tsv', self.file_location, re.RegexFlag.IGNORECASE):
            self.delimiter = '\t'

    @classmethod
    def from_file_loc(cls, file_loc) -> 'CsvFile':
        return cls(file_loc)

    def read_file(self, create_if_not_exists: Optional[bool] = True):
        """Load every row into memory, lower-casing all column headers and row keys.

        When `create_if_not_exists` is set, the file (and its parent folders)
        is created first so reading a brand-new mapping does not fail.
        """
        self._data = []

        if create_if_not_exists:
            create_path_if_not_exists(self.file_location)
            Path(self.file_location).touch()

        with open(self.file_location, mode='r', newline='', encoding=_encoding) as csv_file:
            csv_reader = csv.DictReader(csv_file, delimiter=self.delimiter)

            # Robustness fix: a freshly-touched empty file has no header row,
            # in which case DictReader reports fieldnames of None.
            self.column_headers = list_of_str_to_lowercase(csv_reader.fieldnames or [])

            for row in csv_reader:
                self._data.append({key.lower(): row[key] for key in row})

    def write_file(self):
        """Write the in-memory rows back to disk under the current column headers."""
        logging.info(f"Writing to Csv '{self.file_location}'")
        with open(self.file_location, mode='w+', newline='', encoding=_encoding) as csv_file:
            csv_writer = csv.DictWriter(csv_file, fieldnames=self.column_headers, lineterminator='\n', delimiter=self.delimiter)

            csv_writer.writeheader()

            for row in self._data:
                csv_writer.writerow(row)

    def set_data(self, data_override):
        """Replace all rows; column headers are taken from the first row's keys."""
        self._data = data_override
        self.column_headers = list(data_override[0].keys()) if data_override else []

    def set_data_from_superset(self, superset: List[dict], column_header_override=None):
        """Keep only this file's columns from `superset`, skipping rows missing any column."""
        if column_header_override:
            self.column_headers = column_header_override

        data_to_set: List[dict] = []
        for row in superset:
            if not all(column in row for column in self.column_headers):
                continue
            new_row = {}
            for column in self.column_headers:
                new_row[column] = row[column]
            data_to_set.append(new_row)

        self._data = data_to_set

    def get_data(self, deep_copy=False) -> List[dict]:
        return self.get_deep_copy(self._data) if deep_copy else self._data

    @staticmethod
    def to_filename_csv(filename: str, delimiter: str = None) -> str:
        """Append .csv (or .tsv when tab-delimited) unless an extension is already present."""
        if not re.match(r'.*\.(csv|tsv)', filename, re.RegexFlag.IGNORECASE):
            if delimiter == '\t':
                return filename + '.tsv'
            else:
                return filename + ".csv"
        return filename

    @classmethod
    def formatted_file_location(cls, location):
        return cls.to_filename_csv(location)

    def sort_data(self, sort_by_keys, reverse_sort, case_insensitive_sort):
        self._data = sort_dict(self._data, sort_by_keys, reverse_sort, case_insensitive_sort)

    @classmethod
    def create_file_with_headers(cls, filepath: str, headers: List[str], delimiter: str = None):
        """Create (or truncate) `filepath` containing only a header row."""
        with open(filepath, mode='w+', newline='', encoding=_encoding) as csv_file:
            csv_writer = csv.DictWriter(csv_file, fieldnames=headers, lineterminator='\n', delimiter=delimiter or ",")

            csv_writer.writeheader()

    @staticmethod
    def delimiter_matches_file_type(delimiter: str, filename: str) -> bool:
        """True when the delimiter agrees with the file extension (tab↔.tsv, comma↔.csv)."""
        if delimiter == '\t' and re.match(r'.*\.tsv', filename, re.RegexFlag.IGNORECASE):
            return True
        if delimiter == ',' and re.match(r'.*\.csv', filename, re.RegexFlag.IGNORECASE):
            return True
        return False
@dataclass
class HTMLFile(SourceFile):
    """An HTML source file read fully into memory on construction."""
    file_location: str
    _data: str

    def __init__(self, file):
        self.file_location = file
        self.read_file()

    @classmethod
    def from_file_loc(cls, file_loc) -> 'HTMLFile':
        return cls(file_loc)

    def read_file(self):
        """(Re)load the file's contents into memory."""
        # Bug fix: the handle was previously opened and never closed; a context
        # manager guarantees the file is released even if read() raises.
        with open(self.file_location, 'r', encoding=_encoding) as file:
            self._data = file.read()

    def get_data(self, deep_copy=False) -> str:
        return self.get_deep_copy(self._data) if deep_copy else self._data

    @staticmethod
    def write_file(file_location, data):
        """Write `data` to `file_location`, creating or truncating it."""
        with open(file_location, "w+", encoding=_encoding) as file:
            file.write(data)

    @staticmethod
    def to_filename_html(filename: str) -> str:
        """Append .html unless the name already ends with it."""
        return filename + ".html" if not filename.endswith(".html") else filename

    @classmethod
    def formatted_file_location(cls, location):
        return cls.to_filename_html(location)
class SourceFile(object):
    """Base class for every file type Brain Brew reads, with per-location caching via FileManager."""

    @classmethod
    def from_file_loc(cls, file_loc) -> 'SourceFile':
        """Construct from a file location; subclasses provide the real implementation."""
        pass

    @classmethod
    def is_file(cls, filename: str):
        return Path(filename).is_file()

    @classmethod
    def is_dir(cls, folder_name: str):
        return Path(folder_name).is_dir()

    @classmethod
    def get_deep_copy(cls, data):
        return copy.deepcopy(data)

    @classmethod
    def create_or_get(cls, location):
        """Return the cached instance for `location`, creating and registering one if absent."""
        # Imported locally to avoid a circular import with the configuration package
        from brain_brew.configuration.file_manager import FileManager
        manager = FileManager.get_instance()

        cache_key = cls.formatted_file_location(location)
        cached = manager.file_if_exists(cache_key)
        if cached is not None:
            return cached

        created = cls.from_file_loc(location)
        manager.register_file(cache_key, created)
        return created

    @classmethod
    def formatted_file_location(cls, location):
        """Canonical cache key for `location`; subclasses normalise file extensions here."""
        return location
class CrowdAnkiExport(SourceFile):
    """A CrowdAnki export folder on disk: one deck json file plus a media/ subfolder.

    Constructing the object ensures the folder, the json file and the media
    folder all exist, then parses the json into a CrowdAnkiJsonWrapper.
    """
    folder_location: str
    json_file_location: str
    # import_config: CrowdAnkiImportConfig  # TODO: Make this
    json_data: CrowdAnkiJsonWrapper
    note_models: List[NoteModel]

    media_loc: str

    def __init__(self, folder_location):
        self.folder_location = folder_location
        # Normalise to a trailing slash so paths below can be built by concatenation
        if self.folder_location[-1] != "/":
            self.folder_location = self.folder_location + "/"

        create_path_if_not_exists(self.folder_location)

        self.json_file_location = self.find_json_file_in_folder()
        self._read_json_file()

        self.media_loc = self.folder_location + "media/"

        if not self.is_dir(self.media_loc):
            create_path_if_not_exists(self.media_loc)
            return

    @classmethod
    def from_file_loc(cls, file_loc) -> 'CrowdAnkiExport':
        return cls(file_loc)

    def find_json_file_in_folder(self):
        """Locate the single deck json file in the folder.

        Exactly one json file: return it. None: return a default 'deck.json'
        path (created later) with a warning. More than one: raise, since the
        export is ambiguous.
        """
        # glob.escape guards against glob metacharacters in the folder path itself
        files = glob.glob(f"{glob.escape(self.folder_location)}*.json")

        if len(files) == 1:
            return files[0]
        elif not files:
            file_loc = self.folder_location + "deck.json"
            logging.warning(f"Creating missing json file '{file_loc}'")
            return file_loc
        else:
            logging.error(f"Multiple json files found in '{self.folder_location}': {files}")
            raise FileExistsError()

    def write_to_files(self, json_data):  # import_config_data
        """Persist `json_data` to the deck json file."""
        JsonFile.write_file(self.json_file_location, json_data)

    def _read_json_file(self):
        """Load the deck json if present; otherwise create an empty one.

        NOTE(review): in the missing-file branch `note_models` is never
        assigned, so reading it on a fresh export would raise AttributeError —
        confirm callers only use it after a real deck has been loaded.
        """
        if SourceFile.is_file(self.json_file_location):
            self.json_data = CrowdAnkiJsonWrapper(JsonFile.read_file(self.json_file_location))
            self.note_models = list(map(NoteModel.from_crowdanki, self.json_data.note_models))
        else:
            self.write_to_files({})
            self.json_data = CrowdAnkiJsonWrapper({})
class JsonFile:
    """Thin helpers for reading and writing pretty-printed UTF-8 json files."""

    @staticmethod
    def pretty_print(data):
        """Render `data` as a 4-space-indented json string."""
        return json.dumps(data, indent=4)

    @staticmethod
    def to_filename_json(filename: str):
        """Ensure the filename carries a .json extension."""
        return filename if filename[-5:] == ".json" else filename + ".json"

    @staticmethod
    def read_file(file_location):
        """Parse and return the json at `file_location` (extension added if missing)."""
        path = JsonFile.to_filename_json(file_location)
        with open(path, "r", encoding=_encoding) as read_file:
            return json.load(read_file)

    @staticmethod
    def write_file(file_location, data):
        """Write `data` as indented json, preserving key order and non-ascii characters."""
        path = JsonFile.to_filename_json(file_location)
        with open(path, "w+", encoding=_encoding) as write_file:
            json.dump(data, write_file, indent=4, sort_keys=False, ensure_ascii=False)
class CrowdAnkiJsonWrapper:
    """Read/write access to a CrowdAnki deck's raw json dict.

    The notes / note_models / media_files getters aggregate over the deck AND
    all of its child decks recursively; the matching setters write only to the
    top-level deck.
    """
    data: dict

    def __init__(self, data: dict = None):
        self.data = data

    @property
    def children(self) -> list:
        return self.data.get(CA_CHILDREN, [])

    @property
    def note_models(self) -> list:
        # Collected from this deck plus every child deck, depth-first
        return CrowdAnkiJsonWrapper.get_from_self_and_children_recursively(self.data, [], CA_NOTE_MODELS)

    @note_models.setter
    def note_models(self, value: list):
        self.data[CA_NOTE_MODELS] = value

    @property
    def notes(self) -> list:
        return CrowdAnkiJsonWrapper.get_from_self_and_children_recursively(self.data, [], CA_NOTES)

    @notes.setter
    def notes(self, value: list):
        self.data[CA_NOTES] = value

    @property
    def media_files(self) -> list:
        return CrowdAnkiJsonWrapper.get_from_self_and_children_recursively(self.data, [], CA_MEDIA_FILES)

    @media_files.setter
    def media_files(self, value: list):
        self.data[CA_MEDIA_FILES] = value

    @property
    def name(self) -> list:
        # NOTE(review): the fallback here is [] even though a deck name in
        # CrowdAnki json is a string — looks unintentional; confirm before changing.
        return self.data.get(CA_NAME, [])

    @name.setter
    def name(self, value: list):
        self.data[CA_NAME] = value

    @staticmethod
    def get_from_self_and_children_recursively(data: dict, running_data: list, key_name: str):
        """Append `data[key_name]` entries and, depth-first, those of all child decks.

        Mutates and returns `running_data`; callers seed it with a fresh list.
        """
        running_data += data.get(key_name, [])
        children = data.get(CA_CHILDREN, [])
        if isinstance(children, list):
            for child in children:
                running_data = CrowdAnkiJsonWrapper.get_from_self_and_children_recursively(child, running_data, key_name)
        return running_data
@dataclass
class Headers(YamlObject):
    """Deck-level header values (name, description, uuid, ...) kept as a raw dict."""
    data: dict

    @classmethod
    def from_yaml_file(cls, filename: str):
        """Load the headers dict straight from a yaml file."""
        return cls(data=cls.read_to_dict(filename))

    def encode(self) -> dict:
        return self.data

    @property
    def name(self) -> str:
        # Deliberately no default: a headers part without a deck name is invalid
        return self.data[CA_NAME]

    @name.setter
    def name(self, value: str):
        self.data[CA_NAME] = value

    @property
    def description(self) -> str:
        return self.data.get(CA_DESCRIPTION, "")

    @description.setter
    def description(self, value: str):
        self.data[CA_DESCRIPTION] = value

    @property
    def crowdanki_uuid(self) -> str:
        return self.data.get(CA_UUID, "")

    @crowdanki_uuid.setter
    def crowdanki_uuid(self, value: str):
        self.data[CA_UUID] = value

    @property
    def data_without_name(self) -> dict:
        """All headers except the deck name, sorted by key for stable output."""
        return {key: val for key, val in sorted(self.data.items()) if key != CA_NAME}
@dataclass
class MediaGroup(YamlObject):
    """A named collection of media files, keyed by bare filename."""
    media_files: Dict[str, MediaFile]

    def encode(self) -> list:
        return list(m.file_path for m in self.media_files.values())  # TODO: Use relative path for directory?

    @classmethod
    def from_yaml_file(cls, filename: str) -> 'MediaGroup':
        return cls(media_files=cls.from_full_path_list(cls.read_to_dict(filename)))

    @classmethod
    def from_directory(cls, directory: str, recursive: bool) -> 'MediaGroup':
        return cls(media_files=cls.from_full_path_list(find_all_files_in_directory(directory, recursive=recursive)))

    @classmethod
    def from_many(cls, groups: List['MediaGroup']) -> 'MediaGroup':
        """Union of several groups; duplicate paths are collapsed before re-grouping."""
        files = list(set(file.file_path for group in groups for file in group.media_files.values()))
        return cls(media_files=cls.from_full_path_list(files))

    @staticmethod
    def from_full_path_list(known_files: list):
        """Map filenames to MediaFile instances, rejecting duplicate filenames."""
        files: Dict[str, MediaFile] = dict()

        for full_path in known_files:
            file = MediaFile.create_or_get(full_path)
            if file.filename not in files.keys():
                files[file.filename] = file
            else:
                raise NameError(f"Duplicate files with same filename '{file.filename}' in group")

        return files

    def remove_by_filename(self, filename: str):
        self.media_files.pop(filename, None)

    def filter_by_filenames(self, filenames: List[str], should_match: bool):
        """Keep only files whose presence in `filenames` equals `should_match`.

        Bug fix: iterate over a snapshot of the keys — the original iterated
        the live dict view while popping from it, which raises
        'RuntimeError: dictionary changed size during iteration' as soon as a
        file is filtered out.
        """
        for media_filename in list(self.media_files.keys()):
            is_match = media_filename in filenames
            if is_match != should_match:
                self.remove_by_filename(media_filename)
        # TODO: Find all missing files

    def compare(self, other: 'MediaGroup') -> Tuple[Set[str], Set[str], Set[str]]:
        """Return filename sets: (in both, only in self, only in other)."""
        self_set = set(self.media_files)
        other_set = set(other.media_files)

        return self_set.intersection(other_set), self_set - other_set, other_set - self_set
@dataclass
class Field(RepresentationBase, YamlRepr):
    """A single note model field, convertible to and from CrowdAnki's json shape."""

    @classmethod
    def task_name(cls) -> str:
        return r"note_model_field"

    @classmethod
    def yamale_schema(cls) -> str:
        return f"""\
name: str()
font: str(required=False)
font_size: int(required=False)
is_sticky: bool(required=False)
is_right_to_left: bool(required=False)
"""

    @classmethod
    def from_repr(cls, data: dict):
        return cls.from_dict(data)

    @dataclass
    class CrowdAnki(RepresentationBase):
        # Mirrors the raw CrowdAnki json field object, using Anki's short key names
        name: str
        ord: int = field(default=None)
        font: str = field(default=FONT.default_value)
        media: List[str] = field(default_factory=lambda: MEDIA.default_value)
        rtl: bool = field(default=IS_RIGHT_TO_LEFT.default_value)
        size: int = field(default=FONT_SIZE.default_value)
        sticky: bool = field(default=IS_STICKY.default_value)

    # Brain Brew's own (long-named) representation of the same field
    name: str
    font: str = field(default=FONT.default_value)
    is_right_to_left: bool = field(default=IS_RIGHT_TO_LEFT.default_value)
    font_size: int = field(default=FONT_SIZE.default_value)
    is_sticky: bool = field(default=IS_STICKY.default_value)
    media: List[str] = field(default_factory=lambda: MEDIA.default_value)  # Unused in Anki

    @classmethod
    def from_crowd_anki(cls, data: Union[CrowdAnki, dict]):
        """Build a Field from either a parsed CrowdAnki object or its raw dict."""
        ca: cls.CrowdAnki = data if isinstance(data, cls.CrowdAnki) else cls.CrowdAnki.from_dict(data)
        return cls(
            name=ca.name, font=ca.font, media=ca.media,
            is_right_to_left=ca.rtl, font_size=ca.size, is_sticky=ca.sticky
        )

    def encode_as_crowdanki(self, ordinal: int) -> dict:
        """Encode using CrowdAnki's key names; the field ordinal is assigned by the caller."""
        data_dict = {
            FONT.anki_name: self.font,
            MEDIA.anki_name: self.media,
            NAME.anki_name: self.name,
            ORDINAL.anki_name: ordinal,
            IS_RIGHT_TO_LEFT.anki_name: self.is_right_to_left,
            FONT_SIZE.anki_name: self.font_size,
            IS_STICKY.anki_name: self.is_sticky
        }

        return data_dict

    def encode_as_part(self) -> dict:
        """Encode for yaml parts, omitting values that still equal their defaults."""
        data_dict = {
            NAME.name: self.name
        }

        FONT.append_name_if_differs(data_dict, self.font)
        MEDIA.append_name_if_differs(data_dict, self.media)
        IS_RIGHT_TO_LEFT.append_name_if_differs(data_dict, self.is_right_to_left)
        FONT_SIZE.append_name_if_differs(data_dict, self.font_size)
        IS_STICKY.append_name_if_differs(data_dict, self.is_sticky)

        return data_dict
class YamlObject(ABC):
    """Base class for parts that round-trip through yaml files."""

    @staticmethod
    def read_to_dict(filename: str):
        """Parse a yaml file into plain Python data; raise FileNotFoundError if absent."""
        filename = YamlObject.to_filename_yaml(filename)

        if not Path(filename).is_file():
            raise FileNotFoundError(filename)

        with open(filename) as file:
            return yaml_load.load(file)

    @staticmethod
    def to_filename_yaml(filename: str):
        """Append .yaml unless a .yaml/.yml extension is already present."""
        has_extension = filename[-5:] == ".yaml" or filename[-4:] == ".yml"
        return filename if has_extension else filename + ".yaml"

    @abstractmethod
    def encode(self) -> dict:
        """Yaml-serialisable representation of this object."""

    @classmethod
    @abstractmethod
    def from_yaml_file(cls, filename: str) -> 'YamlObject':
        """Construct an instance from the yaml file at `filename`."""

    def dump_to_yaml(self, filepath):
        """Serialise this object and write it to `filepath`."""
        self.dump_to_yaml_file(filepath, self.encode())

    @classmethod
    def dump_to_yaml_file(cls, filepath, data):
        """Write already-encoded `data` as yaml, creating parent folders as needed."""
        filepath = YamlObject.to_filename_yaml(filepath)

        create_path_if_not_exists(filepath)

        with open(filepath, 'w') as fp:
            yaml_dump.dump(data, fp)
key=regex('save_note_models?_to_folder', ignore_case=True)) 8 | ) 9 | 10 | 11 | --- 12 | 13 | build_parts: 14 | list( 15 | map(any(include('headers_from_crowd_anki'), list(include('headers_from_crowd_anki'))), key=regex('headers?_from_crowd_anki', ignore_case=True)), 16 | map(any(include('headers_from_yaml_part'), list(include('headers_from_yaml_part'))), key=regex('headers?_from_yaml_part', ignore_case=True)), 17 | map(any(include('media_group_from_crowd_anki'), list(include('media_group_from_crowd_anki'))), key=regex('media_group_from_crowd_anki', ignore_case=True)), 18 | map(any(include('media_group_from_folder'), list(include('media_group_from_folder'))), key=regex('media_group_from_folder', ignore_case=True)), 19 | map(any(include('media_group_from_yaml_part'), list(include('media_group_from_yaml_part'))), key=regex('media_group_from_yaml_part', ignore_case=True)), 20 | map(any(include('note_model_from_crowd_anki'), list(include('note_model_from_crowd_anki'))), key=regex('note_model_from_crowd_anki', ignore_case=True)), 21 | map(any(include('note_model_from_html_parts'), list(include('note_model_from_html_parts'))), key=regex('note_model_from_html_parts', ignore_case=True)), 22 | map(any(include('note_models_all_from_crowd_anki'), list(include('note_models_all_from_crowd_anki'))), key=regex('note_models_all_from_crowd_anki', ignore_case=True)), 23 | map(any(include('note_models_from_yaml_part'), list(include('note_models_from_yaml_part'))), key=regex('note_models?_from_yaml_part', ignore_case=True)), 24 | map(any(include('notes_from_crowd_anki'), list(include('notes_from_crowd_anki'))), key=regex('notes_from_crowd_anki', ignore_case=True)), 25 | map(any(include('notes_from_csvs'), list(include('notes_from_csvs'))), key=regex('notes_from_csvs?', ignore_case=True)), 26 | map(any(include('notes_from_yaml_part'), list(include('notes_from_yaml_part'))), key=regex('notes_from_yaml_part', ignore_case=True)) 27 | ) 28 | 29 | generate_crowd_anki: 30 | folder: str() 31 
| headers: str() 32 | notes: include('notes_to_crowd_anki') 33 | note_models: include('note_models_to_crowd_anki') 34 | media: include('media_group_to_crowd_anki', required=False) 35 | 36 | generate_csvs: 37 | notes: str() 38 | note_model_mappings: list(include('note_model_mapping')) 39 | file_mappings: list(include('file_mapping')) 40 | 41 | generate_guids_in_csvs: 42 | source: any(str(), list(str())) 43 | columns: any(str(), list(str())) 44 | delimiter: str(required=False) 45 | 46 | save_media_groups_to_folder: 47 | parts: list(str()) 48 | folder: str() 49 | clear_folder: bool(required=False) 50 | recursive: bool(required=False) 51 | 52 | save_note_models_to_folder: 53 | parts: list(str()) 54 | folder: str() 55 | clear_existing: bool(required=False) 56 | 57 | 58 | --- 59 | 60 | file_mapping: 61 | file: str() 62 | note_model: str(required=False) 63 | sort_by_columns: list(str(), required=False) 64 | reverse_sort: bool(required=False) 65 | case_insensitive_sort: bool(required=False) 66 | derivatives: list(include('file_mapping'), required=False) 67 | delimiter: str(required=False) 68 | 69 | headers_from_crowd_anki: 70 | part_id: str() 71 | source: str() 72 | save_to_file: str(required=False) 73 | 74 | headers_from_yaml_part: 75 | part_id: str() 76 | file: str() 77 | override: include('headers_override', required=False) 78 | 79 | headers_override: 80 | crowdanki_uuid: str(required=False) 81 | deck_description_html_file: str(required=False) 82 | name: str(required=False) 83 | 84 | media_group_from_crowd_anki: 85 | part_id: str() 86 | source: str() 87 | save_to_file: str(required=False) 88 | recursive: bool(required=False) 89 | filter_whitelist_from_parts: list(str(), required=False) 90 | filter_blacklist_from_parts: list(str(), required=False) 91 | 92 | media_group_from_folder: 93 | part_id: str() 94 | source: str() 95 | save_to_file: str(required=False) 96 | recursive: bool(required=False) 97 | filter_whitelist_from_parts: list(str(), required=False) 98 | 
filter_blacklist_from_parts: list(str(), required=False) 99 | 100 | media_group_from_yaml_part: 101 | part_id: str() 102 | file: str() 103 | 104 | media_group_to_crowd_anki: 105 | parts: list(str()) 106 | 107 | note_model_field: 108 | name: str() 109 | font: str(required=False) 110 | font_size: int(required=False) 111 | is_sticky: bool(required=False) 112 | is_right_to_left: bool(required=False) 113 | 114 | note_model_from_crowd_anki: 115 | part_id: str() 116 | source: str() 117 | model_name: str(required=False) 118 | save_to_file: str(required=False) 119 | 120 | note_model_from_html_parts: 121 | part_id: str() 122 | model_id: str() 123 | css_file: str() 124 | fields: list(include('note_model_field')) 125 | templates: list(str()) 126 | model_name: str(required=False) 127 | save_to_file: str(required=False) 128 | 129 | note_model_mapping: 130 | note_models: any(list(str()), str()) 131 | columns_to_fields: map(str(), key=str(), required=False) 132 | personal_fields: list(str(), required=False) 133 | 134 | note_models_all_from_crowd_anki: 135 | source: str() 136 | 137 | note_models_from_yaml_part: 138 | part_id: str() 139 | file: str() 140 | 141 | note_models_to_crowd_anki: 142 | parts: list(include('note_models_to_crowd_anki_item')) 143 | 144 | note_models_to_crowd_anki_item: 145 | part_id: str() 146 | 147 | notes_from_crowd_anki: 148 | part_id: str() 149 | source: str() 150 | sort_order: list(str(), required=False) 151 | save_to_file: str(required=False) 152 | reverse_sort: str(required=False) 153 | 154 | notes_from_csvs: 155 | part_id: str() 156 | save_to_file: str(required=False) 157 | note_model_mappings: list(include('note_model_mapping')) 158 | file_mappings: list(include('file_mapping')) 159 | 160 | notes_from_yaml_part: 161 | part_id: str() 162 | file: str() 163 | 164 | notes_override: 165 | note_model: str(required=False) 166 | 167 | notes_to_crowd_anki: 168 | part_id: str() 169 | sort_order: list(str(), required=False) 170 | reverse_sort: 
def create_media_group_from_location(
        part_id: str,
        save_to_file: str,
        media_group: MediaGroup,
        groups_to_blacklist: List[MediaContainer],
        groups_to_whitelist: List[MediaContainer]
) -> MediaGroup:
    """Filter *media_group* against white/blacklist containers and register it as a part.

    When a whitelist is given, only files referenced by those containers are kept;
    when a blacklist is given, files referenced by those containers are removed.
    The (possibly filtered) group is then stored via PartHolder under *part_id*.
    """

    def merged_references(containers: List[MediaContainer]) -> List[str]:
        # Union of every media reference across all of the given containers.
        merged = set()
        for container in containers:
            merged |= container.get_all_media_references()
        return list(merged)

    if groups_to_whitelist:
        media_group.filter_by_filenames(merged_references(groups_to_whitelist), should_match=True)

    if groups_to_blacklist:
        media_group.filter_by_filenames(merged_references(groups_to_blacklist), should_match=False)

    return PartHolder.override_or_create(part_id, save_to_file, media_group).part
def save_media_groups_to_location(
        parts: List[MediaGroup],
        folder: str,
        clear_folder: bool,
        recursive: bool
) -> Set[MediaFile]:
    """Copy every media file from *parts* into *folder*, returning the set written.

    Files already present in the folder are overwritten in place; files not yet
    present are copied in. When *clear_folder* is set, files found on disk that
    belong to none of the parts are deleted (with a warning listing them).
    """

    create_path_if_not_exists(folder, is_path_override=True)

    on_disk = MediaGroup.from_directory(folder, recursive)
    combined = MediaGroup.from_many(parts)

    shared, missing, extra = combined.compare(on_disk)

    for name, source_file in combined.media_files.items():
        if name in shared:
            # Overwrite the copy already on disk, at its existing location.
            # TODO: Check if copying is needed?
            source_file.copy_self_to_target(on_disk.media_files[name].file_path)
        elif name in missing:
            source_file.copy_self_to_target(folder)

    if clear_folder and extra:
        deleted = '\n'.join(extra)
        logging.warning(f"Deleting extra files in media folder '{folder}':\n{'-'*20}\n{deleted}\n{'-'*20}")
        for name in extra:
            on_disk.media_files[name].delete_self()

    return set(combined.media_files.values())
def save_note_model_to_location(
        model: NoteModel,
        folder: str,
        clear_folder: bool
) -> str:
    """Write *model* out as a folder of parts under *folder* and return the yaml path.

    Creates ``<folder>/<model name>/`` containing ``style.css``, one html file per
    template (plus an optional ``*_browser`` html file), and a yaml file holding the
    model data with file references pointing at the extracted files.
    """

    nm_folder = os.path.join(folder, model.name + '/')
    create_path_if_not_exists(nm_folder)

    if clear_folder:
        clear_contents_of_folder(nm_folder)

    model_encoded = model.encode_as_part_with_empty_file_references()

    model_encoded[CSS_FILE.name] = os.path.join(nm_folder, "style.css")
    HTMLFile.write_file(model_encoded[CSS_FILE.name], model.css)

    templates_dict = {t.name: t for t in model.templates}

    for template_data in model_encoded[TEMPLATES.name]:
        name = template_data[TEMPLATE_NAME.name]
        template = templates_dict[name]
        t_data, b_t_data = template.get_template_files_data()

        template_data[TEMPLATE_HTML_FILE.name] = os.path.join(nm_folder, HTMLFile.to_filename_html(name))
        HTMLFile.write_file(template_data[TEMPLATE_HTML_FILE.name], t_data)

        if TEMPLATE_BROWSER_HTML_FILE.name in template_data and b_t_data is not None:
            template_data[TEMPLATE_BROWSER_HTML_FILE.name] = os.path.join(nm_folder, HTMLFile.to_filename_html(name + "_browser"))
            # Bug fix: the browser data was previously written to the regular
            # template file path (TEMPLATE_HTML_FILE), overwriting the file
            # written just above and leaving the browser file missing.
            HTMLFile.write_file(template_data[TEMPLATE_BROWSER_HTML_FILE.name], b_t_data)

    model_yaml_file_name = YamlObject.to_filename_yaml(os.path.join(nm_folder, model.name))
    YamlObject.dump_to_yaml_file(model_yaml_file_name, model_encoded)

    return model_yaml_file_name
def list_of_str_to_lowercase(list_of_strings):
    """Return a list with every entry lowercased; None or empty input yields []."""
    if not list_of_strings:
        return []
    return [entry.lower() for entry in list_of_strings]


def single_item_to_list(item):
    """Normalise a value to a list: lists pass through, None becomes [], else [item]."""
    if isinstance(item, list):
        return item
    if item is None:
        return []
    return [item]


def str_to_lowercase_no_separators(str_to_tidy: str):
    """Lowercase *str_to_tidy* and strip whitespace, '+', '_' and '-' characters.

    NOTE(review): the '+' inside the character class is a literal plus sign, not
    a quantifier — confirm plus signs are intended to count as separators.
    """
    return re.sub(r'[\s+_-]+', '', str_to_tidy.lower())


def filename_from_full_path(full_path):
    """Return the final path component (everything after the last separator)."""
    return re.findall(r'[^\\/:*?"<>|\r\n]+$', full_path)[0]


def folder_name_from_full_path(full_path):
    """Return the final path component, keeping a trailing '/' if present."""
    return re.findall(r'[^\\/:*?"<>|\r\n]+[/]?$', full_path)[0]


def split_by_regex(str_to_split: str, pattern: str) -> List[str]:
    """Split *str_to_split* on every occurrence of the regex *pattern*."""
    return re.split(pattern, str_to_split)


def find_media_in_field(field_value: str) -> List[str]:
    """Extract media filenames from an Anki field: <img src="..."> then [sound:...]."""
    if not field_value:
        return []

    images = re.findall(r'<\s*?img.*?src="(.*?)"[^>]*?>', field_value)
    audio = re.findall(r'\[sound:(.*?)]', field_value)

    return images + audio


def find_all_files_in_directory(directory, recursive=False):
    """Return full paths of all files in *directory*; descend only when *recursive*."""
    found_files = []
    for path, dirs, files in os.walk(directory):
        for file in files:
            found_files.append(os.path.join(path, file))
        if not recursive:
            # os.walk yields the top directory first, so stopping here
            # limits the listing to the directory's immediate files.
            break
    return found_files


def create_path_if_not_exists(path, is_path_override=False):
    """Create the directory for *path* if missing.

    By default *path* is treated as a file path and its parent directory is
    created; with *is_path_override* the path itself is the directory.
    """
    dir_name = os.path.dirname(path) if not is_path_override else path
    if not Path(dir_name).is_dir():
        logging.warning(f"Creating missing filepath '{dir_name}'")
        os.makedirs(dir_name, exist_ok=True)


def clear_contents_of_folder(path):
    """Delete every file, link and subdirectory inside *path* (not *path* itself)."""
    for filename in os.listdir(path):
        file_path = os.path.join(path, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except OSError as e:
            # Use the logging module (not print) for consistency with the rest
            # of this module; %-style args keep formatting lazy.
            logging.error('Failed to delete %s. Reason: %s', file_path, e)


def split_tags(tags_value: str) -> list:
    """Split a tag string on ';', ',' or whitespace, dropping empty entries."""
    split = [entry.strip() for entry in re.split(r';\s*|,\s*|\s+', tags_value)]
    return [entry for entry in split if entry != ""]


def join_tags(tags_list: list) -> str:
    """Join tags back into a single comma-separated string."""
    return ", ".join(tags_list)  # TODO: Make configurable


def generate_anki_guid() -> str:
    """Return a base91-encoded 64bit random number."""

    def base62(num: int, extra: str = "") -> str:
        s = string
        table = s.ascii_letters + s.digits + extra
        buf = ""
        while num:
            num, i = divmod(num, len(table))
            buf = table[i] + buf
        return buf

    _base91_extra_chars = "!#$%&()*+,-./:;<=>?@[]^_`{|}~"

    def base91(num: int) -> str:
        # all printable characters minus quotes, backslash and separators
        return base62(num, _base91_extra_chars)

    return base91(random.randint(0, 2 ** 64 - 1))


def sort_dict(data, sort_by_keys, reverse_sort, case_insensitive_sort):
    """Sort a list of row-dicts by *sort_by_keys*; rows with empty values sort last.

    With no sort keys, *reverse_sort* alone reverses the input order.
    """
    if sort_by_keys:
        if case_insensitive_sort:
            def sort_method(i):
                # (is_empty, value) tuple pushes "" rows to the end of each key.
                return tuple((i[column] == "", i[column].lower()) for column in sort_by_keys)
        else:
            def sort_method(i):
                return tuple((i[column] == "", i[column]) for column in sort_by_keys)

        return sorted(data, key=sort_method, reverse=reverse_sort)
    elif reverse_sort:
        return list(reversed(data))

    return data
import os
import sys

# Make the project root importable when this is run as a standalone script.
sys.path.append(os.path.abspath(''))

from brain_brew.commands.run_recipe.top_level_builder import TopLevelBuilder

# Regenerate the yamale recipe schema from the build-task definitions.
build: str = TopLevelBuilder.build_yamale()
filepath = "brain_brew/schemas/recipe.yaml"

# The with-statement closes the file; the previous explicit fp.close()
# inside the block was redundant.
with open(filepath, 'w') as fp:
    fp.write(build)
brain_brew.main:main', 23 | ] 24 | }, 25 | classifiers=[ 26 | "Programming Language :: Python :: 3", 27 | "License :: Public Domain", 28 | "Operating System :: OS Independent", 29 | ], 30 | python_requires='>=3.7', 31 | install_requires=[ 32 | 'ruamel.yaml.clib>=0.2.2', 33 | 'ruamel.yaml>=0.16.10', 34 | 'yamale>=3.0.4' 35 | ] 36 | ) 37 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/__init__.py -------------------------------------------------------------------------------- /tests/build_tasks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/build_tasks/__init__.py -------------------------------------------------------------------------------- /tests/build_tasks/test_source_crowd_anki_json.py: -------------------------------------------------------------------------------- 1 | # from unittest.mock import patch 2 | # 3 | # import pytest 4 | # 5 | # from brain_brew.constants.build_config_keys import BuildConfigKeys 6 | # from brain_brew.build_tasks.source_crowd_anki import SourceCrowdAnki, CrowdAnkiKeys 7 | # from brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport 8 | # from brain_brew.representation.json.part_header import DeckPartHeader 9 | # from brain_brew.representation.yaml.note_model_repr import DeckPartNoteModel 10 | # from brain_brew.representation.json.part_notes import DeckPartNotes 11 | # 12 | # 13 | # def setup_ca_config(file, media, useless_note_keys, notes, headers): 14 | # return { 15 | # CrowdAnkiKeys.FILE.value: file, 16 | # CrowdAnkiKeys.MEDIA.value: media, 17 | # CrowdAnkiKeys.USELESS_NOTE_KEYS.value: useless_note_keys, 18 | # 
BuildConfigKeys.NOTES.value: notes, 19 | # BuildConfigKeys.HEADERS.value: headers 20 | # } 21 | # 22 | # 23 | # class TestConstructor: 24 | # @pytest.mark.parametrize("file, media, useless_note_keys, notes, headers, read_file_now", [ 25 | # ("test", False, {}, "test.json", "header.json", False), 26 | # ("export1", True, {}, "test.json", "header.json", False), 27 | # ("json.json", False, {}, "test.json", "", True), 28 | # ("", False, {"__type__": "Note", "data": None, "flags": 0}, "test.json", "header.json", False) 29 | # ]) 30 | # def test_runs(self, file, media, useless_note_keys, notes, headers, read_file_now, global_config): 31 | # config = setup_ca_config(file, media, useless_note_keys, notes, headers) 32 | # 33 | # def assert_dp_header(passed_file, read_now): 34 | # assert passed_file == headers 35 | # assert read_now == read_file_now 36 | # 37 | # def assert_dp_notes(passed_file, read_now): 38 | # assert passed_file == notes 39 | # assert read_now == read_file_now 40 | # 41 | # def assert_ca_export(passed_file, read_now): 42 | # assert passed_file == file 43 | # assert read_now == read_file_now 44 | # 45 | # with patch.object(DeckPartHeader, "create", side_effect=assert_dp_header) as mock_header, \ 46 | # patch.object(DeckPartNotes, "create", side_effect=assert_dp_notes) as mock_notes, \ 47 | # patch.object(CrowdAnkiExport, "create", side_effect=assert_ca_export) as ca_export: 48 | # 49 | # source = SourceCrowdAnki(config, read_now=read_file_now) 50 | # 51 | # assert isinstance(source, SourceCrowdAnki) 52 | # assert source.should_handle_media == media 53 | # assert source.useless_note_keys == useless_note_keys 54 | # 55 | # assert mock_header.call_count == 1 56 | # assert mock_notes.call_count == 1 57 | # assert ca_export.call_count == 1 58 | # 59 | # 60 | # @pytest.fixture() 61 | # def source_crowd_anki_test1(global_config) -> SourceCrowdAnki: 62 | # with patch.object(DeckPartHeader, "create", return_value=None) as mock_header, \ 63 | # 
patch.object(DeckPartNotes, "create", return_value=None) as mock_notes, \ 64 | # patch.object(CrowdAnkiExport, "create", return_value=None) as mock_ca_export: 65 | # 66 | # source = SourceCrowdAnki( 67 | # setup_ca_config("", False, {"__type__": "Note", "data": None, "flags": 0}, "", "") 68 | # ) 69 | # 70 | # # source.notes = dp_notes_test1 71 | # # source.headers = dp_headers_test1 72 | # # source.crowd_anki_export = ca_export_test1 73 | # 74 | # return source 75 | # 76 | # 77 | # class TestSourceToDeckParts: 78 | # def test_runs(self, source_crowd_anki_test1: SourceCrowdAnki, ca_export_test1, 79 | # temp_dp_note_model_file, temp_dp_headers_file, temp_dp_notes_file, 80 | # dp_note_model_test1, dp_headers_test1, dp_notes_test1): 81 | # 82 | # # CrowdAnki Export it will use to write to the DeckParts 83 | # source_crowd_anki_test1.crowd_anki_export = ca_export_test1 84 | # 85 | # # DeckParts to be written to (+ the NoteModel below) 86 | # source_crowd_anki_test1.headers = temp_dp_headers_file 87 | # source_crowd_anki_test1.notes = temp_dp_notes_file 88 | # 89 | # def assert_note_model(name, data_override): 90 | # assert data_override == dp_note_model_test1.get_data() 91 | # return dp_note_model_test1 92 | # 93 | # with patch.object(DeckPartNoteModel, "create", side_effect=assert_note_model) as mock_nm: 94 | # source_crowd_anki_test1.source_to_parts() 95 | # 96 | # assert source_crowd_anki_test1.headers.get_data() == dp_headers_test1.get_data() 97 | # assert source_crowd_anki_test1.notes.get_data() == dp_notes_test1.get_data() 98 | # 99 | # assert mock_nm.call_count == 1 100 | # 101 | # 102 | # class TestDeckPartsToSource: 103 | # def test_runs(self, source_crowd_anki_test1: SourceCrowdAnki, temp_ca_export_file, 104 | # ca_export_test1, dp_notes_test1, dp_headers_test1): 105 | # source_crowd_anki_test1.crowd_anki_export = temp_ca_export_file # File to write result to 106 | # 107 | # # DeckParts it will use (+ dp_note_model_test1, but it reads that in as a file) 108 | 
# source_crowd_anki_test1.headers = dp_headers_test1 109 | # source_crowd_anki_test1.notes = dp_notes_test1 110 | # 111 | # source_crowd_anki_test1.parts_to_source() # Where the magic happens 112 | # 113 | # assert temp_ca_export_file.get_data() == ca_export_test1.get_data() 114 | -------------------------------------------------------------------------------- /tests/build_tasks/test_source_csv.py: -------------------------------------------------------------------------------- 1 | # from typing import List 2 | # from unittest.mock import patch 3 | # 4 | # import pytest 5 | # 6 | # from brain_brew.build_tasks.source_csv import SourceCsv, SourceCsvKeys 7 | # from brain_brew.constants.deckpart_keys import DeckPartNoteKeys 8 | # from brain_brew.representation.configuration.csv_file_mapping import FileMapping 9 | # from brain_brew.representation.configuration.note_model_mapping import NoteModelMapping 10 | # from brain_brew.representation.generic.csv_file import CsvFile 11 | # from brain_brew.representation.generic.source_file import SourceFile 12 | # from brain_brew.representation.json.part_notes import DeckPartNotes 13 | # from tests.representation.json.test_part_notes import dp_notes_test1 14 | # from tests.representation.configuration.test_note_model_mapping import setup_nmm_config 15 | # from tests.representation.configuration.test_csv_file_mapping import setup_csv_fm_config 16 | # 17 | # 18 | # def setup_source_csv_config(notes: str, nmm: list, csv_mappings: list): 19 | # return { 20 | # SourceCsvKeys.NOTES.value: notes, 21 | # SourceCsvKeys.NOTE_MODEL_MAPPINGS.value: nmm, 22 | # SourceCsvKeys.CSV_MAPPINGS.value: csv_mappings 23 | # } 24 | # 25 | # 26 | # def get_csv_default(notes: DeckPartNotes, nmm: List[NoteModelMapping], csv_maps: List[FileMapping]) -> SourceCsv: 27 | # csv_source = SourceCsv(setup_source_csv_config("", [], []), read_now=False) 28 | # 29 | # csv_source.notes = notes 30 | # csv_source.note_model_mappings_dict = {nm_map.note_model.name: nm_map 
for nm_map in nmm} 31 | # csv_source.csv_file_mappings = csv_maps 32 | # 33 | # return csv_source 34 | # 35 | # 36 | # @pytest.fixture() 37 | # def csv_source_test1(dp_notes_test1, nmm_test1, csv_file_mapping1) -> SourceCsv: 38 | # return get_csv_default(dp_notes_test1, [nmm_test1], [csv_file_mapping1]) 39 | # 40 | # 41 | # @pytest.fixture() 42 | # def csv_source_test1_split1(csv_source_default, csv_test1_split1, dp_notes_test1) -> SourceCsv: 43 | # csv_source_default.csv_file = csv_test1_split1 44 | # csv_source_default.notes = dp_notes_test1 45 | # return csv_source_default 46 | # 47 | # 48 | # @pytest.fixture() 49 | # def csv_source_test1_split2(csv_source_default2, csv_test1_split2, dp_notes_test2) -> SourceCsv: 50 | # csv_source_default2.csv_file = csv_test1_split2 51 | # csv_source_default2.notes = dp_notes_test1 52 | # return csv_source_default2 53 | # 54 | # 55 | # @pytest.fixture() 56 | # def csv_source_test2(dp_notes_test2, nmm_test1, csv_file_mapping2) -> SourceCsv: 57 | # return get_csv_default(dp_notes_test2, [nmm_test1], [csv_file_mapping2]) 58 | # 59 | # 60 | # # @pytest.fixture() 61 | # # def temp_csv_source(global_config, tmpdir) -> SourceCsv: 62 | # # file = tmpdir.mkdir("notes").join("file.csv") 63 | # # file.write("test,1,2,3") 64 | # 65 | # 66 | # class TestConstructor: 67 | # def test_runs(self): 68 | # source_csv = get_csv_default(None, [], []) 69 | # assert isinstance(source_csv, SourceCsv) 70 | # 71 | # @pytest.mark.parametrize("notes, model, columns, personal_fields, csv_file", [ 72 | # ("notes.json", "Test Model", {"a": "b"}, ["extra"], "file.csv") 73 | # ]) 74 | # def test_calls_correctly(self, notes, model, columns, personal_fields, csv_file, nmm_test1): 75 | # nmm_config = [setup_nmm_config(model, columns, personal_fields)] 76 | # csv_config = [setup_csv_fm_config(csv_file, note_model_name=model)] 77 | # 78 | # def assert_csv(config, read_now): 79 | # assert config in csv_config 80 | # assert read_now is False 81 | # 82 | # def 
assert_nmm(config, read_now): 83 | # assert config in nmm_config 84 | # assert read_now is False 85 | # 86 | # def assert_dpn(config, read_now): 87 | # assert config == notes 88 | # assert read_now is False 89 | # 90 | # with patch.object(FileMapping, "__init__", side_effect=assert_csv), \ 91 | # patch.object(NoteModelMapping, "__init__", side_effect=assert_nmm), \ 92 | # patch.object(NoteModelMapping, "note_model"), \ 93 | # patch.object(DeckPartNotes, "create", side_effect=assert_dpn): 94 | # 95 | # #nmm_mock.return_value = False 96 | # 97 | # source_csv = SourceCsv(setup_source_csv_config( 98 | # notes, 99 | # nmm_config, 100 | # csv_config 101 | # ), read_now=False) 102 | # 103 | # 104 | # # def test_missing_non_required_columns 105 | # 106 | # 107 | # class TestSourceToDeckParts: 108 | # def test_runs_first(self, csv_source_test1, dp_notes_test1, csv_source_test2, dp_notes_test2): 109 | # self.run_s2dp(csv_source_test1, dp_notes_test1) 110 | # self.run_s2dp(csv_source_test2, dp_notes_test2) 111 | # 112 | # @staticmethod 113 | # def run_s2dp(csv_source: SourceCsv, dp_notes: DeckPartNotes): 114 | # def assert_format(notes_data): 115 | # assert notes_data == dp_notes.get_data()[DeckPartNoteKeys.NOTES.value] 116 | # 117 | # with patch.object(DeckPartNotes, "set_data", side_effect=assert_format) as mock_set_data: 118 | # csv_source.source_to_parts() 119 | # assert mock_set_data.call_count == 1 120 | # 121 | # 122 | # class TestDeckPartsToSource: 123 | # def test_runs_with_no_change(self, csv_source_test1, csv_test1, csv_source_test2, csv_test2): 124 | # 125 | # self.run_dpts(csv_source_test1, csv_test1) 126 | # self.run_dpts(csv_source_test2, csv_test2) 127 | # 128 | # @staticmethod 129 | # def run_dpts(csv_source: SourceCsv, csv_file: CsvFile): 130 | # def assert_format(source_data): 131 | # assert source_data == csv_file.get_data() 132 | # 133 | # with patch.object(SourceFile, "set_data", side_effect=assert_format) as mock_set_data: 134 | # 
csv_source.parts_to_source() 135 | # assert csv_source.csv_file_mappings[0].data_set_has_changed is False 136 | # 137 | # csv_source.csv_file_mappings[0].data_set_has_changed = True 138 | # csv_source.csv_file_mappings[0].write_file_on_close() 139 | # assert mock_set_data.call_count == 1 140 | # 141 | -------------------------------------------------------------------------------- /tests/representation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/representation/__init__.py -------------------------------------------------------------------------------- /tests/representation/configuration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/representation/configuration/__init__.py -------------------------------------------------------------------------------- /tests/representation/configuration/test_csv_file_mapping.py: -------------------------------------------------------------------------------- 1 | # def setup_csv_fm_config(csv: str, sort_by_columns: List[str] = None, reverse_sort: bool = None, 2 | # note_model_name: str = None, derivatives: List[dict] = None): 3 | # cfm: dict = { 4 | # FILE: csv 5 | # } 6 | # if sort_by_columns is not None: 7 | # cfm.setdefault(SORT_BY_COLUMNS, sort_by_columns) 8 | # if reverse_sort is not None: 9 | # cfm.setdefault(REVERSE_SORT, reverse_sort) 10 | # if note_model_name is not None: 11 | # cfm.setdefault(NOTE_MODEL, note_model_name) 12 | # if derivatives is not None: 13 | # cfm.setdefault(DERIVATIVES, derivatives) 14 | # 15 | # return cfm 16 | # 17 | 18 | # class TestConstructor: 19 | # @pytest.mark.parametrize("read_file_now, note_model_name, csv, sort_by_columns, reverse_sort", [ 20 | # (False, "note_model.json", "first.csv", ["guid"], 
False), 21 | # (True, "model_model.json", "second.csv", ["guid", "note_model_name"], True), 22 | # (False, "note_model-json", "first.csv", ["guid"], False,) 23 | # ]) 24 | # def test_runs_without_derivatives(self, read_file_now, note_model_name, csv, 25 | # sort_by_columns, reverse_sort): 26 | # get_new_file_manager() 27 | # config = setup_csv_fm_config(csv, sort_by_columns, reverse_sort, note_model_name=note_model_name) 28 | # 29 | # def assert_csv(passed_file, read_now): 30 | # assert passed_file == csv 31 | # assert read_now == read_file_now 32 | # 33 | # with patch.object(FileMappingDerivative, "create_derivative", return_value=None) as mock_derivatives, \ 34 | # patch.object(CsvFile, "create", side_effect=assert_csv) as mock_csv: 35 | # 36 | # csv_fm = FileMapping(config, read_now=read_file_now) 37 | # 38 | # assert isinstance(csv_fm, FileMapping) 39 | # assert csv_fm.reverse_sort == reverse_sort 40 | # assert csv_fm.sort_by_columns == sort_by_columns 41 | # assert csv_fm.note_model_name == note_model_name 42 | # 43 | # assert mock_csv.call_count == 1 44 | # assert mock_derivatives.call_count == 0 45 | # 46 | # @pytest.mark.parametrize("derivatives", [ 47 | # [setup_csv_fm_config("test_csv.csv")], 48 | # [setup_csv_fm_config("test_csv.csv"), setup_csv_fm_config("second.csv")], 49 | # [setup_csv_fm_config("a.csv"), setup_csv_fm_config("b.csv"), setup_csv_fm_config("c.csv")], 50 | # [setup_csv_fm_config("a.csv", sort_by_columns=["word", "guid"], reverse_sort=True, note_model_name="d")], 51 | # [setup_csv_fm_config("test_csv.csv", derivatives=[setup_csv_fm_config("der_der.csv")])], 52 | # ]) 53 | # def test_runs_with_derivatives(self, derivatives: list): 54 | # get_new_file_manager() 55 | # config = setup_csv_fm_config("test", [], False, note_model_name="nm", derivatives=derivatives.copy()) 56 | # expected_call_count = len(derivatives) 57 | # 58 | # def assert_der(passed_file, read_now): 59 | # der = derivatives.pop(0) 60 | # assert passed_file == der 61 | # 
assert read_now is False 62 | # 63 | # with patch.object(FileMappingDerivative, "create_derivative", side_effect=assert_der) as mock_derivatives, \ 64 | # patch.object(CsvFile, "create", return_value=None): 65 | # 66 | # csv_fm = FileMapping(config, read_now=False) 67 | # 68 | # assert mock_derivatives.call_count == len(csv_fm.derivatives) == expected_call_count 69 | 70 | 71 | # def csv_fixture_gen(csv_fix): 72 | # with patch.object(CsvFile, "create_or_get", return_value=csv_fix): 73 | # csv = FileMapping(**setup_csv_fm_config("", note_model_name="Test Model")) 74 | # csv.compile_data() 75 | # return csv 76 | # 77 | # 78 | # @pytest.fixture() 79 | # def csv_file_mapping1(csv_test1): 80 | # return csv_fixture_gen(csv_test1) 81 | # 82 | # 83 | # @pytest.fixture() 84 | # def csv_file_mapping2(csv_test2): 85 | # return csv_fixture_gen(csv_test2) 86 | # 87 | # 88 | # @pytest.fixture() 89 | # def csv_file_mapping3(csv_test3): 90 | # return csv_fixture_gen(csv_test3) 91 | # 92 | # 93 | # @pytest.fixture() 94 | # def csv_file_mapping_split1(csv_test1_split1): 95 | # return csv_fixture_gen(csv_test1_split1) 96 | # 97 | # 98 | # @pytest.fixture() 99 | # def csv_file_mapping_split1(csv_test1_split2): 100 | # return csv_fixture_gen(csv_test1_split2) 101 | # 102 | # 103 | # @pytest.fixture() 104 | # def csv_file_mapping2_missing_guids(csv_test2_missing_guids): 105 | # return csv_fixture_gen(csv_test2_missing_guids) 106 | # 107 | # 108 | # class TestSetRelevantData: 109 | # def test_no_change(self, csv_file_mapping1: FileMapping, csv_file_mapping_split1: FileMapping): 110 | # assert csv_file_mapping1.data_set_has_changed is False 111 | # 112 | # previous_data = csv_file_mapping1.compiled_data.copy() 113 | # csv_file_mapping1.set_relevant_data(csv_file_mapping_split1.compiled_data) 114 | # 115 | # assert previous_data == csv_file_mapping1.compiled_data 116 | # assert csv_file_mapping1.data_set_has_changed is False 117 | # 118 | # def test_change_but_no_extra(self, 
csv_file_mapping1: FileMapping, csv_file_mapping2: FileMapping): 119 | # assert csv_file_mapping1.data_set_has_changed is False 120 | # assert len(csv_file_mapping1.compiled_data) == 15 121 | # 122 | # previous_data = copy.deepcopy(csv_file_mapping1.compiled_data) 123 | # csv_file_mapping1.set_relevant_data(csv_file_mapping2.compiled_data) 124 | # 125 | # assert previous_data != csv_file_mapping1.compiled_data 126 | # assert csv_file_mapping1.data_set_has_changed is True 127 | # assert len(csv_file_mapping1.compiled_data) == 15 128 | # 129 | # def test_change_extra_row(self, csv_file_mapping1: FileMapping, csv_file_mapping3: FileMapping): 130 | # assert csv_file_mapping1.data_set_has_changed is False 131 | # assert len(csv_file_mapping1.compiled_data) == 15 132 | # 133 | # previous_data = copy.deepcopy(csv_file_mapping1.compiled_data.copy()) 134 | # csv_file_mapping1.set_relevant_data(csv_file_mapping3.compiled_data) 135 | # 136 | # assert previous_data != csv_file_mapping1.compiled_data 137 | # assert csv_file_mapping1.data_set_has_changed is True 138 | # assert len(csv_file_mapping1.compiled_data) == 16 139 | # 140 | # 141 | # class TestCompileData: 142 | # num = 0 143 | # 144 | # def get_num(self): 145 | # self.num += 1 146 | # return self.num 147 | # 148 | # def test_when_missing_guids(self, csv_file_mapping2_missing_guids: FileMapping): 149 | # with patch("brain_brew.representation.configuration.csv_file_mapping.generate_anki_guid", wraps=self.get_num) as mock_guid: 150 | # 151 | # csv_file_mapping2_missing_guids.compile_data() 152 | # 153 | # assert csv_file_mapping2_missing_guids.data_set_has_changed is True 154 | # assert mock_guid.call_count == 9 155 | # assert list(csv_file_mapping2_missing_guids.compiled_data.keys()) == list(range(1, 10)) 156 | 157 | # Tests still to do: 158 | # 159 | # Top level needs a NoteModel, others do not 160 | 161 | -------------------------------------------------------------------------------- 
/tests/representation/configuration/test_note_model_mapping.py: -------------------------------------------------------------------------------- 1 | # from unittest.mock import patch 2 | # 3 | # import pytest 4 | # 5 | # from brain_brew.representation.configuration.note_model_mapping import NoteModelMapping, FieldMapping 6 | # from brain_brew.representation.generic.csv_file import CsvFile 7 | # from tests.test_file_manager import get_new_file_manager 8 | # 9 | # 10 | # @pytest.fixture(autouse=True) 11 | # def run_around_tests(): 12 | # get_new_file_manager() 13 | # yield 14 | # 15 | # 16 | # @pytest.fixture() 17 | # def nmm_test1_repr() -> NoteModelMapping.Representation: 18 | # return NoteModelMapping.Representation( 19 | # "Test Model", 20 | # { 21 | # "guid": "guid", 22 | # "tags": "tags", 23 | # 24 | # "english": "word", 25 | # "danish": "otherword" 26 | # }, 27 | # [] 28 | # ) 29 | # 30 | # 31 | # @pytest.fixture() 32 | # def nmm_test2_repr() -> NoteModelMapping.Representation: 33 | # return NoteModelMapping.Representation( 34 | # "Test Model", 35 | # { 36 | # "guid": "guid", 37 | # "tags": "tags", 38 | # 39 | # "english": "word", 40 | # "danish": "otherword" 41 | # }, 42 | # ["extra", "morph_focus"] 43 | # ) 44 | # 45 | # 46 | # @pytest.fixture() 47 | # def nmm_test1(nmm_test1_repr) -> NoteModelMapping: 48 | # return NoteModelMapping.from_repr(nmm_test1_repr) 49 | # 50 | # 51 | # @pytest.fixture() 52 | # def nmm_test2(nmm_test2_repr) -> NoteModelMapping: 53 | # return NoteModelMapping.from_repr(nmm_test2_repr) 54 | # 55 | # 56 | # class TestInit: 57 | # def test_runs(self): 58 | # nmm = NoteModelMapping.from_repr(NoteModelMapping.Representation("test", {}, [])) 59 | # assert isinstance(nmm, NoteModelMapping) 60 | # 61 | # @pytest.mark.parametrize("read_file_now, note_model, personal_fields, columns", [ 62 | # (False, "note_model.json", ["x"], {"guid": "guid", "tags": "tags", "english": "word", "danish": "otherword"}), 63 | # (True, "model_model", [], {"guid": 
"guid", "tags": "tags"}), 64 | # (False, "note_model-json", ["x", "y", "z"], {"guid": "guid", "tags": "tags", "english": "word", "danish": "otherword"}) 65 | # ]) 66 | # def test_values(self, read_file_now, note_model, personal_fields, columns): 67 | # config = setup_nmm_config(note_model, columns, personal_fields) 68 | # 69 | # def assert_dp_note_model(passed_file, read_now): 70 | # assert passed_file == note_model 71 | # assert read_now == read_file_now 72 | # 73 | # with patch.object(DeckPartNoteModel, "create", side_effect=assert_dp_note_model) as mock_nm: 74 | # 75 | # nmm = NoteModelMapping(config, read_now=read_file_now) 76 | # 77 | # assert isinstance(nmm, NoteModelMapping) 78 | # assert len(nmm.columns) == len(columns) 79 | # assert len(nmm.personal_fields) == len(personal_fields) 80 | # 81 | # assert mock_nm.call_count == 1 82 | # 83 | # 84 | # class TestVerifyContents: 85 | # pass # TODO 86 | # 87 | # 88 | # class TestCsvRowNoteFieldConversion: 89 | # @staticmethod 90 | # def get_csv_row(): return { 91 | # "guid": "AAAA", 92 | # "tags": "nice card", 93 | # 94 | # "english": "what", 95 | # "danish": "hvad" 96 | # } 97 | # 98 | # @staticmethod 99 | # def get_note_field(): return{ 100 | # "guid": "AAAA", 101 | # "tags": "nice card", 102 | # 103 | # "word": "what", 104 | # "otherword": "hvad", 105 | # "extra": False, 106 | # "morph_focus": False 107 | # } 108 | # 109 | # def test_csv_row_map_to_note_fields(self, nmm_test_with_personal_fields1): 110 | # assert nmm_test_with_personal_fields1.csv_row_map_to_note_fields(self.get_csv_row()) == self.get_note_field() 111 | # 112 | # def test_note_fields_map_to_csv_row(self, nmm_test_with_personal_fields1): 113 | # assert nmm_test_with_personal_fields1.note_fields_map_to_csv_row(self.get_note_field()) == self.get_csv_row() 114 | # 115 | # 116 | # class TestGetRelevantData: 117 | # def test_data_correct(self, nmm_test_with_personal_fields1: NoteModelMapping, csv_test1: CsvFile): 118 | # expected_relevant_columns = 
["guid", "english", "danish", "tags"] 119 | # data = csv_test1.get_data() 120 | # 121 | # for row in data: 122 | # relevant_data = nmm_test_with_personal_fields1.get_relevant_data(row) 123 | # assert len(relevant_data) == 4 124 | # assert list(relevant_data.keys()) == expected_relevant_columns 125 | # 126 | # def test_data_missing_columns(self, nmm_test_with_personal_fields1: NoteModelMapping, csv_test1: CsvFile): 127 | # row_missing = { 128 | # "guid": "test", 129 | # "english": "test" 130 | # } 131 | # with pytest.raises(Exception) as e: 132 | # relevant_data = nmm_test_with_personal_fields1.get_relevant_data(row_missing) 133 | # 134 | # errors = e.value.args[0] 135 | # assert len(errors) == 2 136 | # assert isinstance(errors[0], KeyError) 137 | # assert isinstance(errors[1], KeyError) 138 | # assert errors[0].args[0] == "Missing column tags" 139 | # assert errors[1].args[0] == "Missing column danish" 140 | # 141 | # 142 | # class TestFieldMapping: 143 | # def test_init(self): 144 | # fm = FieldMapping(FieldMapping.FieldMappingType.COLUMN, "Csv_Row", "note_model_field") 145 | # assert isinstance(fm, FieldMapping) 146 | # assert (fm.field_name, fm.value) == ("csv_row", "note_model_field") 147 | -------------------------------------------------------------------------------- /tests/representation/generic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/representation/generic/__init__.py -------------------------------------------------------------------------------- /tests/representation/generic/test_csv_file.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from brain_brew.representation.generic.csv_file import CsvFile 4 | from tests.test_file_manager import get_new_file_manager 5 | from tests.test_files import TestFiles 6 | 7 | get_new_file_manager() 8 | 9 | 10 
| @pytest.fixture() 11 | def csv_test1(): 12 | csv = CsvFile(TestFiles.CsvFiles.TEST1) 13 | csv.read_file() 14 | return csv 15 | 16 | 17 | @pytest.fixture() 18 | def tsv_test1(): 19 | tsv = CsvFile(TestFiles.TsvFiles.TEST1, delimiter='\t') 20 | tsv.read_file() 21 | return tsv 22 | 23 | 24 | @pytest.fixture() 25 | def csv_test1_split1(): 26 | csv = CsvFile(TestFiles.CsvFiles.TEST1_SPLIT1) 27 | csv.read_file() 28 | return csv 29 | 30 | 31 | @pytest.fixture() 32 | def csv_test1_split2(): 33 | csv = CsvFile(TestFiles.CsvFiles.TEST1_SPLIT2) 34 | csv.read_file() 35 | return csv 36 | 37 | 38 | @pytest.fixture() 39 | def csv_test2(): 40 | csv = CsvFile(TestFiles.CsvFiles.TEST2) 41 | csv.read_file() 42 | return csv 43 | 44 | 45 | @pytest.fixture() 46 | def csv_test3(): 47 | csv = CsvFile(TestFiles.CsvFiles.TEST3) 48 | csv.read_file() 49 | return csv 50 | 51 | 52 | @pytest.fixture() 53 | def csv_test2_missing_guids(): 54 | csv = CsvFile(TestFiles.CsvFiles.TEST2_MISSING_GUIDS) 55 | csv.read_file() 56 | return csv 57 | 58 | 59 | @pytest.fixture() 60 | def temp_csv_test1(tmpdir, csv_test1) -> CsvFile: 61 | file = tmpdir.mkdir("json").join("file.csv") 62 | file.write("blank") 63 | 64 | csv = CsvFile.create_or_get(file.strpath) 65 | csv.read_file() 66 | return csv 67 | 68 | 69 | class TestConstructor: 70 | def test_runs(self, csv_test1): 71 | assert isinstance(csv_test1, CsvFile) 72 | assert csv_test1.file_location == TestFiles.CsvFiles.TEST1 73 | assert "guid" in csv_test1.column_headers 74 | 75 | 76 | def test_to_filename_csv(): 77 | assert "read-this-file.csv" == CsvFile.to_filename_csv("read-this-file") 78 | assert "read-this-file.csv" == CsvFile.to_filename_csv("read-this-file.csv") 79 | assert "read-this-file.tsv" == CsvFile.to_filename_csv("read-this-file.tsv") 80 | 81 | 82 | class TestWriteFile: 83 | def test_runs(self, temp_csv_test1: CsvFile, csv_test1: CsvFile): 84 | temp_csv_test1.set_data(csv_test1.get_data()) 85 | temp_csv_test1.write_file() 86 | 
temp_csv_test1.read_file() 87 | 88 | assert temp_csv_test1.get_data() == csv_test1.get_data() 89 | 90 | def test_tsv_same_data(self, temp_csv_test1: CsvFile, tsv_test1: CsvFile): 91 | temp_csv_test1.set_data(tsv_test1.get_data()) 92 | temp_csv_test1.write_file() 93 | temp_csv_test1.read_file() 94 | 95 | assert temp_csv_test1.get_data() == tsv_test1.get_data() 96 | 97 | 98 | class TestSortData: 99 | @pytest.mark.parametrize("columns, reverse, result_column, expected_results", [ 100 | (["guid"], False, "guid", [(0, "AAAA"), (1, "BBBB"), (2, "CCCC"), (14, "OOOO")]), 101 | (["guid"], True, "guid", [(14, "AAAA"), (13, "BBBB"), (12, "CCCC"), (0, "OOOO")]), 102 | (["english"], False, "english", [(0, "banana"), (1, "bird"), (2, "cat"), (14, "you")]), 103 | (["english"], True, "english", [(14, "banana"), (13, "bird"), (12, "cat"), (0, "you")]), 104 | (["tags"], False, "tags", [(0, "besttag"), (1, "funny"), (2, "tag2 tag3"), (13, ""), (14, "")]), 105 | (["esperanto", "english"], False, "esperanto", [(0, "banano"), (1, "birdo"), (6, "vi"), (14, "")]), 106 | (["esperanto", "guid"], False, "guid", [(7, "BBBB"), (14, "LLLL")]), 107 | ]) 108 | def test_sort(self, csv_test1: CsvFile, columns, reverse, result_column, expected_results): 109 | csv_test1.sort_data(columns, reverse, case_insensitive_sort=True) 110 | 111 | sorted_data = csv_test1.get_data() 112 | 113 | for result in expected_results: 114 | assert sorted_data[result[0]][result_column] == result[1] 115 | 116 | def test_insensitive(self): 117 | pass 118 | -------------------------------------------------------------------------------- /tests/representation/generic/test_media_file.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from brain_brew.representation.generic.media_file import MediaFile 4 | 5 | 6 | @pytest.fixture() 7 | def media_file_test1() -> MediaFile: 8 | return MediaFile("place/loc/file.txt") 9 | 10 | 11 | class TestConstructor: 12 | def 
test_without_override(self): 13 | loc = "place/loc/file.txt" 14 | 15 | media_file = MediaFile(loc) 16 | 17 | assert isinstance(media_file, MediaFile) 18 | assert media_file.file_path == loc 19 | assert media_file.filename == "file.txt" 20 | 21 | 22 | class TestCopy: 23 | def test_copies_file(self, tmpdir): 24 | source_dir = tmpdir.mkdir("source") 25 | source = source_dir.join("test.txt") 26 | source.write("test content") 27 | assert len(source_dir.listdir()) == 1 28 | 29 | target_dir = tmpdir.mkdir("target") 30 | target = target_dir.join("test.txt") 31 | assert len(target_dir.listdir()) == 0 32 | 33 | media_file = MediaFile(str(source)) 34 | media_file.copy_self_to_target(str(target)) 35 | 36 | assert len(target_dir.listdir()) == len(source_dir.listdir()) == 1 37 | assert target.read() == "test content" 38 | -------------------------------------------------------------------------------- /tests/representation/json/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/representation/json/__init__.py -------------------------------------------------------------------------------- /tests/representation/json/test_crowd_anki_export.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from brain_brew.representation.json.crowd_anki_export import CrowdAnkiExport 4 | from tests.test_files import TestFiles 5 | 6 | 7 | class TestConstructor: 8 | @pytest.mark.parametrize("export_name", [ 9 | TestFiles.CrowdAnkiExport.TEST1_FOLDER, 10 | TestFiles.CrowdAnkiExport.TEST1_FOLDER_WITHOUT_SLASH 11 | ]) 12 | def test_runs(self, export_name): 13 | file = CrowdAnkiExport(export_name) 14 | 15 | assert isinstance(file, CrowdAnkiExport) 16 | assert file.folder_location == TestFiles.CrowdAnkiExport.TEST1_FOLDER 17 | assert file.json_file_location == TestFiles.CrowdAnkiExport.TEST1_JSON 18 | 
assert len(file.json_data.data.keys()) == 13 19 | 20 | 21 | class TestFindJsonFileInFolder: 22 | # def test_no_json_file(self, tmpdir): 23 | # directory = tmpdir.mkdir("test") 24 | # 25 | # with pytest.raises(FileNotFoundError): 26 | # file = CrowdAnkiExport(directory.strpath) 27 | 28 | def test_too_many_json_files(self, tmpdir): 29 | directory = tmpdir.mkdir("test") 30 | file1, file2 = directory.join("file1.json"), directory.join("file2.json") 31 | file1.write("{}") 32 | file2.write("{}") 33 | 34 | with pytest.raises(FileExistsError): 35 | file = CrowdAnkiExport(directory.strpath) 36 | 37 | 38 | @pytest.fixture() 39 | def ca_export_test1() -> CrowdAnkiExport: 40 | return CrowdAnkiExport.create_or_get(TestFiles.CrowdAnkiExport.TEST1_FOLDER) 41 | 42 | 43 | @pytest.fixture() 44 | def temp_ca_export_file(tmpdir) -> CrowdAnkiExport: 45 | folder = tmpdir.mkdir("ca_export") 46 | file = folder.join("file.json") 47 | file.write("{}") 48 | 49 | return CrowdAnkiExport(folder.strpath) 50 | -------------------------------------------------------------------------------- /tests/representation/yaml/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/representation/yaml/__init__.py -------------------------------------------------------------------------------- /tests/representation/yaml/test_note_model_repr.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from brain_brew.representation.json.json_file import JsonFile 4 | from brain_brew.representation.yaml.note_model import NoteModel 5 | from brain_brew.representation.yaml.note_model_field import Field 6 | from brain_brew.representation.yaml.note_model_template import Template 7 | from brain_brew.representation.yaml.yaml_object import YamlObject 8 | from tests.test_files import TestFiles 9 | 10 | 11 | # CrowdAnki Files 
-------------------------------------------------------------------------- 12 | from tests.test_helpers import debug_write_part_to_file 13 | 14 | 15 | @pytest.fixture 16 | def ca_nm_data_word(): 17 | return JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD) 18 | 19 | 20 | @pytest.fixture 21 | def ca_nm_word(ca_nm_data_word) -> NoteModel: 22 | return NoteModel.from_crowdanki(ca_nm_data_word) 23 | 24 | 25 | @pytest.fixture 26 | def ca_nm_data_word_required_only(): 27 | return JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD_ONLY_REQUIRED) 28 | 29 | 30 | @pytest.fixture 31 | def ca_nm_word_required_only(ca_nm_data_word_required_only) -> NoteModel: 32 | return NoteModel.from_crowdanki(ca_nm_data_word_required_only) 33 | 34 | 35 | @pytest.fixture 36 | def ca_nm_data_word_no_defaults(): 37 | return JsonFile.read_file(TestFiles.CrowdAnkiNoteModels.LL_WORD_NO_DEFAULTS) 38 | 39 | 40 | @pytest.fixture 41 | def ca_nm_word_no_defaults(ca_nm_data_word_no_defaults) -> NoteModel: 42 | return NoteModel.from_crowdanki(ca_nm_data_word_no_defaults) 43 | 44 | 45 | # Yaml Files -------------------------------------------------------------------------- 46 | @pytest.fixture 47 | def nm_data_word_required_only(): 48 | return YamlObject.read_to_dict(TestFiles.NoteModels.LL_WORD_ONLY_REQUIRED) 49 | 50 | 51 | @pytest.fixture 52 | def nm_data_word_no_defaults(): 53 | return YamlObject.read_to_dict(TestFiles.NoteModels.LL_WORD_NO_DEFAULTS) 54 | 55 | 56 | class TestCrowdAnkiNoteModel: 57 | class TestConstructor: 58 | def test_normal(self, ca_nm_word): 59 | model = ca_nm_word 60 | assert isinstance(model, NoteModel) 61 | 62 | assert model.name == "LL Word" 63 | assert isinstance(model.fields, list) 64 | assert len(model.fields) == 7 65 | assert all([isinstance(field, Field) for field in model.fields]) 66 | 67 | assert isinstance(model.templates, list) 68 | assert len(model.templates) == 7 69 | assert all([isinstance(template, Template) for template in model.templates]) 70 | 71 | 
def test_only_required(self, ca_nm_word_required_only): 72 | model = ca_nm_word_required_only 73 | assert isinstance(model, NoteModel) 74 | 75 | def test_manual_construction(self): 76 | model = NoteModel( 77 | "name", 78 | "23094149+8124+91284+12984", 79 | "css is garbage", 80 | [], 81 | [Field( 82 | "field1" 83 | )], 84 | [Template( 85 | "template1", 86 | "{{Question}}", 87 | "{{Answer}}" 88 | )] 89 | ) 90 | 91 | assert isinstance(model, NoteModel) 92 | 93 | class TestEncodeAsCrowdAnki: 94 | def test_normal(self, ca_nm_word, ca_nm_data_word): 95 | model = ca_nm_word 96 | 97 | encoded = model.encode_as_crowdanki() 98 | # JsonFile.write_file(TestFiles.CrowdAnkiNoteModels.LL_WORD, encoded) 99 | 100 | assert encoded == ca_nm_data_word 101 | 102 | def test_only_required_uses_defaults(self, ca_nm_word_required_only, 103 | ca_nm_data_word, ca_nm_data_word_required_only): 104 | model = ca_nm_word_required_only 105 | 106 | encoded = model.encode_as_crowdanki() 107 | 108 | assert encoded != ca_nm_data_word_required_only 109 | assert encoded == ca_nm_data_word 110 | 111 | class TestEncodeAsDeckPart: 112 | def test_normal(self, ca_nm_word, ca_nm_data_word, ca_nm_data_word_required_only, nm_data_word_required_only): 113 | model = ca_nm_word 114 | 115 | encoded = model.encode() 116 | 117 | assert encoded != ca_nm_data_word 118 | assert encoded != ca_nm_data_word_required_only 119 | assert encoded == nm_data_word_required_only 120 | 121 | def test_only_required_uses_defaults(self, ca_nm_word_no_defaults, ca_nm_data_word_no_defaults, nm_data_word_no_defaults): 122 | model = ca_nm_word_no_defaults 123 | 124 | encoded = model.encode() 125 | 126 | 127 | assert encoded != ca_nm_data_word_no_defaults 128 | assert encoded == nm_data_word_no_defaults 129 | -------------------------------------------------------------------------------- /tests/test_argument_reader.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser, 
ArgumentError 2 | from unittest.mock import patch 3 | 4 | import pytest 5 | 6 | from brain_brew.commands.argument_reader import BBArgumentReader, Commands 7 | 8 | 9 | @pytest.fixture() 10 | def arg_reader_test1(): 11 | return BBArgumentReader(test_mode=True) 12 | 13 | 14 | def test_constructor(arg_reader_test1): 15 | assert isinstance(arg_reader_test1, BBArgumentReader) 16 | assert isinstance(arg_reader_test1, ArgumentParser) 17 | 18 | 19 | class TestArguments: 20 | class CommandRun: 21 | @pytest.mark.parametrize("arguments", [ 22 | ([Commands.RUN_RECIPE.value]), 23 | ([Commands.RUN_RECIPE.value, ""]), 24 | ]) 25 | def test_broken_arguments(self, arg_reader_test1, arguments): 26 | def raise_exit(message): 27 | raise SystemExit 28 | 29 | with pytest.raises(SystemExit): 30 | with patch.object(BBArgumentReader, "error", side_effect=raise_exit): 31 | arg_reader_test1.get_parsed(arguments) 32 | 33 | @pytest.mark.parametrize("arguments, recipe, verify_only", [ 34 | ([Commands.RUN_RECIPE.value, "test_recipe.yaml"], "test_recipe.yaml", False), 35 | ([Commands.RUN_RECIPE.value, "test_recipe.yaml", "--verify"], "test_recipe.yaml", True), 36 | ([Commands.RUN_RECIPE.value, "test_recipe.yaml", "-v"], "test_recipe.yaml", True), 37 | ]) 38 | def test_correct_arguments(self, arg_reader_test1, arguments, recipe, verify_only): 39 | parsed_args = arg_reader_test1.parse_args(arguments) 40 | 41 | assert parsed_args.recipe == recipe 42 | assert parsed_args.verify_only == verify_only 43 | 44 | class CommandInit: 45 | @pytest.mark.parametrize("arguments, location", [ 46 | (["init", "crowdankifolder72"], "crowdankifolder72"), 47 | ]) 48 | def test_correct_arguments(self, arg_reader_test1, arguments, location): 49 | parsed_args = arg_reader_test1.parse_args(arguments) 50 | 51 | assert parsed_args.crowdanki_folder == location 52 | -------------------------------------------------------------------------------- /tests/test_builder.py: 
-------------------------------------------------------------------------------- 1 | # class TestConstructor: 2 | # def test_runs(self): 3 | # with patch.object(CsvsGenerate, "__init__", return_value=None) as mock_csv_tr, \ 4 | # patch.object(DeckPartHolder, "from_part_pool", return_value=Mock()), \ 5 | # patch.object(CsvFile, "create_or_get", return_value=Mock()): 6 | # 7 | # data = YamlObject.read_to_dict(TestFiles.BuildConfig.ONE_OF_EACH_TYPE) 8 | # builder = TopLevelRecipeBuilder.from_list(data) 9 | # builder.execute() 10 | # 11 | # assert len(builder.tasks) == 1 12 | # assert mock_csv_tr.call_count == 1 13 | -------------------------------------------------------------------------------- /tests/test_file_manager.py: -------------------------------------------------------------------------------- 1 | from brain_brew.configuration.file_manager import FileManager 2 | 3 | 4 | def get_new_file_manager(): 5 | FileManager.clear_instance() 6 | return FileManager() 7 | 8 | 9 | # class TestSingletonConstructor: 10 | # def test_runs(self, global_config): 11 | # fm = get_new_file_manager() 12 | # assert isinstance(fm, FileManager) 13 | # 14 | # def test_returns_existing_singleton(self): 15 | # fm = get_new_file_manager() 16 | # fm.known_files_dict = {'test': None} 17 | # fm2 = FileManager.get_instance() 18 | # 19 | # assert fm2.known_files_dict == {'test': None} 20 | # assert fm2 == fm 21 | # 22 | # def test_raises_error(self): 23 | # with pytest.raises(Exception): 24 | # FileManager() 25 | # FileManager() 26 | # 27 | # 28 | # class TestFindMediaFiles: 29 | # def test_finds(self): 30 | # fm = get_new_file_manager() 31 | # 32 | # assert len(fm.known_media_files_dict) == 2 33 | -------------------------------------------------------------------------------- /tests/test_files.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class TestFiles: 4 | class Headers: 5 | LOC = "tests/test_files/deck_parts/headers/" 6 | 7 | FIRST = 
"default header" 8 | FIRST_COMPLETE = "default-header.json" 9 | 10 | class NoteFiles: 11 | LOC = "tests/test_files/deck_parts/" 12 | 13 | TEST1_NO_GROUPING_OR_SHARED_TAGS = "csvtonotes1_withnogroupingorsharedtags.json" 14 | TEST1_WITH_GROUPING = "csvtonotes1_withgrouping.json" 15 | TEST1_WITH_SHARED_TAGS = "csvtonotes1_withsharedtags.json" 16 | TEST1_WITH_SHARED_TAGS_EMPTY_AND_GROUPING = "csvtonotes1_withsharedtagsandgrouping_butnothingtogroup.json" 17 | TEST2_WITH_SHARED_TAGS_AND_GROUPING = "csvtonotes2_withsharedtagsandgrouping.json" 18 | 19 | class CrowdAnkiNoteModels: 20 | LOC = "tests/test_files/deck_parts/note_models/" 21 | 22 | TEST = "Test Model" 23 | 24 | LL_WORD = LOC + "LL Word" 25 | 26 | LL_WORD_ONLY_REQUIRED = LOC + "LL Word Only Required" 27 | 28 | LL_WORD_NO_DEFAULTS = LOC + "LL Word No Defaults" 29 | 30 | class NoteModels: 31 | LOC = "tests/test_files/deck_parts/yaml/note_models/" 32 | 33 | LL_WORD = LOC + "LL-Word.yaml" 34 | 35 | LL_WORD_ONLY_REQUIRED = LOC + "LL-Word-Only-Required.yaml" 36 | 37 | LL_WORD_NO_DEFAULTS = LOC + "LL-Word-No-Defaults.yaml" 38 | 39 | class CsvFiles: 40 | LOC = "tests/test_files/csv/" 41 | 42 | TEST1 = LOC + "test1.csv" 43 | TEST1_SPLIT1 = LOC + "test1_split1.csv" 44 | TEST1_SPLIT2 = LOC + "test1_split2.csv" 45 | TEST2 = LOC + "test2.csv" 46 | TEST2_MISSING_GUIDS = LOC + "test2_missing_guids.csv" 47 | TEST3 = LOC + "test3.csv" 48 | 49 | class TsvFiles: 50 | LOC = "tests/test_files/tsv/" 51 | 52 | TEST1 = LOC + "test1.tsv" 53 | 54 | class CrowdAnkiExport: 55 | LOC = "tests/test_files/crowd_anki/" 56 | 57 | TEST1_FOLDER = LOC + "crowdanki_example_1/" 58 | TEST1_FOLDER_WITHOUT_SLASH = LOC + "crowdanki_example_1" 59 | TEST1_JSON = TEST1_FOLDER + "deck.json" 60 | 61 | class BuildConfig: 62 | LOC = "tests/test_files/build_files/" 63 | 64 | ONE_OF_EACH_TYPE = LOC + "builder1.yaml" 65 | 66 | class MediaFiles: 67 | LOC = "tests/test_files/media_files/" 68 | 69 | class YamlNotes: 70 | LOC = "tests/test_files/yaml/notes/" 71 | 72 | 
TEST1 = LOC + "note1.yaml" 73 | -------------------------------------------------------------------------------- /tests/test_files/build_files/builder1.yaml: -------------------------------------------------------------------------------- 1 | 2 | - generate_csv_collection: 3 | notes: test_from_CA 4 | 5 | note_model_mappings: 6 | - note_models: 7 | - LL Word 8 | - LL Verb 9 | - LL Noun 10 | columns_to_fields: 11 | guid: guid 12 | tags: tags 13 | 14 | english: Word 15 | danish: X Word 16 | danish audio: X Pronunciation (Recording and/or IPA) 17 | esperanto: Y Word 18 | esperanto audio: Y Pronunciation (Recording and/or IPA) 19 | 20 | present: Form Present 21 | past: Form Past 22 | present perfect: Form Perfect Present 23 | 24 | plural: Plural 25 | indefinite plural: Indefinite Plural 26 | definite plural: Definite Plural 27 | personal_fields: 28 | - picture 29 | - extra 30 | - morphman_focusmorph 31 | 32 | file_mappings: 33 | - file: source/vocab/main.csv 34 | note_model: LL Word 35 | sort_by_columns: [english] 36 | reverse_sort: false 37 | 38 | derivatives: 39 | - file: source/vocab/derivatives/danish/danish_verbs.csv 40 | note_model: LL Verb 41 | - file: source/vocab/derivatives/danish/danish_nouns.csv 42 | note_model: LL Noun -------------------------------------------------------------------------------- /tests/test_files/csv/test1.csv: -------------------------------------------------------------------------------- 1 | guid,English,Danish,Esperanto,Danish Audio,Esperanto Audio,Japanese,Tags 2 | AAAA,you,du,vi,[sound:pronunciation_da_du.mp3],,,funny 3 | BBBB,healthy,rask,,,,test,tag2 tag3 4 | CCCC,tired,træt,,,,,besttag 5 | DDDD,banana,en banan,banano,[sound:pronunciation_da_banan.mp3],,, 6 | EEEE,cat,en kat,,,,, 7 | FFFF,dog,en hund,hundo,,,, 8 | GGGG,fish,en fisk,,,,, 9 | HHHH,bird,en fugl,birdo,,,, 10 | IIII,cow,en ko,,,,, 11 | JJJJ,pig,et svin,,,,, 12 | KKKK,mouse,en mus,,,,, 13 | LLLL,horse,en hest,,,,, 14 | MMMM,to learn,at 
lære,lerni,[sound:pronunciation_da_lære.mp3],,, 15 | NNNN,to eat,at spise,manĝi,,,, 16 | OOOO,to drink,at drikke,drinki,,,, -------------------------------------------------------------------------------- /tests/test_files/csv/test1_split1.csv: -------------------------------------------------------------------------------- 1 | guid,English,Danish,Esperanto,Danish Audio,Esperanto Audio,Japanese,Tags 2 | AAAA,you,du,vi,[sound:pronunciation_da_du.mp3],,,funny 3 | BBBB,healthy,rask,,,,test,tag2 tag3 4 | CCCC,tired,træt,,,,,besttag 5 | DDDD,banana,en banan,banano,[sound:pronunciation_da_banan.mp3],,, 6 | EEEE,cat,en kat,,,,, 7 | FFFF,dog,en hund,hundo,,,, -------------------------------------------------------------------------------- /tests/test_files/csv/test1_split2.csv: -------------------------------------------------------------------------------- 1 | guid,English,Danish,Esperanto,Danish Audio,Esperanto Audio,Japanese,Tags 2 | GGGG,fish,en fisk,,,,, 3 | HHHH,bird,en fugl,birdo,,,, 4 | IIII,cow,en ko,,,,, 5 | JJJJ,pig,et svin,,,,, 6 | KKKK,mouse,en mus,,,,, 7 | LLLL,horse,en hest,,,,, 8 | MMMM,to learn,at lære,lerni,[sound:pronunciation_da_lære.mp3],,, 9 | NNNN,to eat,at spise,manĝi,,,, 10 | OOOO,to drink,at drikke,drinki,,,, -------------------------------------------------------------------------------- /tests/test_files/csv/test2.csv: -------------------------------------------------------------------------------- 1 | guid,English,Danish,Esperanto,Danish Audio,Esperanto Audio,Japanese,Tags 2 | DDDD,banana,en banan,banano,[sound:pronunciation_da_banan.mp3],,,LL::Noun 3 | EEEE,cat,en kat,,,,,Animal LL::Noun 4 | FFFF,dog,en hund,hundo,,,,Animal LL::Noun 5 | GGGG,fish,en fisk,,,,,Animal LL::Noun 6 | HHHH,bird,en fugl,birdo,,,,Animal LL::Noun 7 | IIII,cow,en ko,,,,,Animal LL::Noun 8 | JJJJ,pig,et svin,,,,,Animal LL::Noun 9 | KKKK,mouse,en mus,,,,,Animal LL::Noun 10 | LLLL,horse,en hest,,,,,Animal LL::Noun 
-------------------------------------------------------------------------------- /tests/test_files/csv/test2_missing_guids.csv: -------------------------------------------------------------------------------- 1 | guid,English,Danish,Esperanto,Danish Audio,Esperanto Audio,Japanese,Tags 2 | ,banana,en banan,banano,[sound:pronunciation_da_banan.mp3],,,LL::Noun 3 | ,cat,en kat,,,,,Animal LL::Noun 4 | ,dog,en hund,hundo,,,,Animal LL::Noun 5 | ,fish,en fisk,,,,,Animal LL::Noun 6 | ,bird,en fugl,birdo,,,,Animal LL::Noun 7 | ,cow,en ko,,,,,Animal LL::Noun 8 | ,pig,et svin,,,,,Animal LL::Noun 9 | ,mouse,en mus,,,,,Animal LL::Noun 10 | ,horse,en hest,,,,,Animal LL::Noun -------------------------------------------------------------------------------- /tests/test_files/csv/test3.csv: -------------------------------------------------------------------------------- 1 | guid,English,Danish,Esperanto,Danish Audio,Esperanto Audio,Japanese,Tags 2 | 1111,New,Ny,nova,,,atarashi, -------------------------------------------------------------------------------- /tests/test_files/deck_parts/note_models/LL Word Only Required.json: -------------------------------------------------------------------------------- 1 | { 2 | "crowdanki_uuid": "057a8d66-bc4e-11e9-9822-d8cb8ac9abf0", 3 | "css": ".card {\n font-family: arial;\n font-size: 20px;\n text-align: center;\n color: black;\n background-color: white;\n}\n\n.card1,.card3, .card5 { background-color: #B60F2D; }\n.card2,.card4, .card6 { background-color: #2E9017; }\n.card7 { background: linear-gradient(90deg, #B60F2D 49.9%, #2E9017 50.1%); }\n\n.word {\n font-size:1.5em;\n}\n\n.pronunciation{\n color:blue;\n}\n\n.extrainfo{\n color:lightgrey;\n}", 4 | "flds": [ 5 | { 6 | "name": "Word", 7 | "ord": 0, 8 | "size": 12 9 | }, 10 | { 11 | "font": "Arial", 12 | "name": "X Word", 13 | "ord": 1 14 | }, 15 | { 16 | "font": "Arial", 17 | "name": "Y Word", 18 | "ord": 2 19 | }, 20 | { 21 | "font": "Arial", 22 | "name": "Picture", 23 | "ord": 3, 24 | 
"size": 6 25 | }, 26 | { 27 | "font": "Arial", 28 | "name": "Extra", 29 | "ord": 4 30 | }, 31 | { 32 | "font": "Arial", 33 | "name": "X Pronunciation (Recording and/or IPA)", 34 | "ord": 5 35 | }, 36 | { 37 | "font": "Arial", 38 | "name": "Y Pronunciation (Recording and/or IPA)", 39 | "ord": 6 40 | } 41 | ], 42 | "name": "LL Word", 43 | "tmpls": [ 44 | { 45 | "afmt": "{{#X Word}}\n\t{{X Word}}\n{{/X Word}}\n\n
\n\n{{Picture}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 46 | "name": "X Comprehension", 47 | "ord": 0, 48 | "qfmt": "{{#X Word}}\n\t{{text:X Word}}\n{{/X Word}}" 49 | }, 50 | { 51 | "afmt": "{{#Y Word}}\n\t{{Y Word}}\n{{/Y Word}}\n\n
\n\n{{Picture}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 52 | "name": "Y Comprehension", 53 | "ord": 1, 54 | "qfmt": "{{#Y Word}}\n\t{{text:Y Word}}\n{{/Y Word}}" 55 | }, 56 | { 57 | "afmt": "{{FrontSide}}\n\n
\n\n{{X Word}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 58 | "name": "X Production", 59 | "ord": 2, 60 | "qfmt": "{{#X Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/X Word}}" 61 | }, 62 | { 63 | "afmt": "{{FrontSide}}\n\n
\n\n{{Y Word}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 64 | "name": "Y Production", 65 | "ord": 3, 66 | "qfmt": "{{#Y Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/Y Word}}" 67 | }, 68 | { 69 | "afmt": "{{FrontSide}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 70 | "name": "X Spelling", 71 | "ord": 4, 72 | "qfmt": "{{#X Word}}\n\t
Spell this word:
\n\n\t
{{type:X Word}}
\n\n\t
{{Picture}}\n{{/X Word}}" 73 | }, 74 | { 75 | "afmt": "{{FrontSide}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 76 | "name": "Y Spelling", 77 | "ord": 5, 78 | "qfmt": "{{#Y Word}}\n\t
Spell this word:
\n\n\t
{{type:Y Word}}
\n\n\t
{{Picture}}\n{{/Y Word}}" 79 | }, 80 | { 81 | "afmt": "{{FrontSide}}\n\n
\n\n
{{text:X Word}}
\n
{{text:Y Word}}
\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 82 | "name": "X and Y Production", 83 | "ord": 6, 84 | "qfmt": "{{#X Word}}\n{{#Y Word}}\n\t{{Picture}}\n{{/Y Word}}\n{{/X Word}}" 85 | } 86 | ] 87 | } -------------------------------------------------------------------------------- /tests/test_files/deck_parts/note_models/Test-Model.json: -------------------------------------------------------------------------------- 1 | { 2 | "__type__": "NoteModel", 3 | "crowdanki_uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 4 | "css": ".card {\n font-family: arial;\n font-size: 20px;\n text-align: center;\n color: black;\n background-color: white;\n}\n\n.card1,.card3, .card5 { background-color: #B60F2D; }\n.card2,.card4, .card6 { background-color: #2E9017; }\n.card7 { background: linear-gradient(90deg, #B60F2D 49.9%, #2E9017 50.1%); }\n\n.word {\n font-size:1.5em;\n}\n\n.pronunciation{\n color:blue;\n}\n\n.extrainfo{\n color:lightgrey;\n}", 5 | "flds": [ 6 | { 7 | "name": "Word" 8 | }, 9 | { 10 | "name": "OtherWord" 11 | } 12 | ], 13 | "latexPost": "\\end{document}", 14 | "latexPre": "\\documentclass[12pt]{article}\n\\special{papersize=3in,5in}\n\\usepackage{amssymb,amsmath}\n\\pagestyle{empty}\n\\setlength{\\parindent}{0in}\n\\begin{document}\n", 15 | "name": "Test Model", 16 | "req": [ 17 | [ 18 | 0, 19 | "all", 20 | [ 21 | 1 22 | ] 23 | ], 24 | [ 25 | 1, 26 | "all", 27 | [ 28 | 2 29 | ] 30 | ], 31 | [ 32 | 2, 33 | "all", 34 | [ 35 | 1, 36 | 3 37 | ] 38 | ], 39 | [ 40 | 3, 41 | "all", 42 | [ 43 | 2, 44 | 3 45 | ] 46 | ], 47 | [ 48 | 4, 49 | "all", 50 | [ 51 | 1, 52 | 3 53 | ] 54 | ], 55 | [ 56 | 5, 57 | "all", 58 | [ 59 | 2, 60 | 3 61 | ] 62 | ], 63 | [ 64 | 6, 65 | "all", 66 | [ 67 | 1, 68 | 2, 69 | 3 70 | ] 71 | ] 72 | ], 73 | "sortf": 0, 74 | "tags": [ 75 | "Meta::InProgress" 76 | ], 77 | "tmpls": [ 78 | { 79 | "afmt": "{{#X Word}}\n\t{{X Word}}\n{{/X Word}}\n\n
\n\n{{Picture}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 80 | "bafmt": "", 81 | "bqfmt": "", 82 | "did": null, 83 | "name": "X Comprehension", 84 | "ord": 0, 85 | "qfmt": "{{#X Word}}\n\t{{text:X Word}}\n{{/X Word}}" 86 | }, 87 | { 88 | "afmt": "{{#Y Word}}\n\t {{Y Word}}\n{{/Y Word}}\n\n
\n\n{{Picture}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 89 | "bafmt": "", 90 | "bqfmt": "", 91 | "did": null, 92 | "name": "Y Comprehension", 93 | "ord": 1, 94 | "qfmt": "{{#Y Word}}\n\t{{text:Y Word}}\n{{/Y Word}}" 95 | }, 96 | { 97 | "afmt": "{{FrontSide}}\n\n
\n\n{{X Word}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 98 | "bafmt": "", 99 | "bqfmt": "", 100 | "did": null, 101 | "name": "X Production", 102 | "ord": 2, 103 | "qfmt": "{{#X Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/X Word}}" 104 | }, 105 | { 106 | "afmt": "{{FrontSide}}\n\n
\n\n {{Y Word}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 107 | "bafmt": "", 108 | "bqfmt": "", 109 | "did": null, 110 | "name": "Y Production", 111 | "ord": 3, 112 | "qfmt": "{{#Y Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/Y Word}}" 113 | }, 114 | { 115 | "afmt": "{{FrontSide}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 116 | "bafmt": "", 117 | "bqfmt": "", 118 | "did": null, 119 | "name": "X Spelling", 120 | "ord": 4, 121 | "qfmt": "{{#X Word}}\n\t
Spell this word:
\n\n\t{{type:X Word}}\n\n\t
{{Picture}}\n{{/X Word}}" 122 | }, 123 | { 124 | "afmt": "{{FrontSide}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 125 | "bafmt": "", 126 | "bqfmt": "", 127 | "did": null, 128 | "name": "Y Spelling", 129 | "ord": 5, 130 | "qfmt": "{{#Y Word}}\n\t
Spell this word:
\n\n\t{{type:Y Word}}\n\n\t
{{Picture}}\n{{/Y Word}}" 131 | }, 132 | { 133 | "afmt": "{{FrontSide}}\n\n
\n\n
{{text:X Word}}
\n
{{text:Y Word}}
\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}", 134 | "bafmt": "", 135 | "bqfmt": "", 136 | "did": null, 137 | "name": "X and Y Production", 138 | "ord": 6, 139 | "qfmt": "{{#X Word}}\n{{#Y Word}}\n\t{{Picture}}\n{{/Y Word}}\n{{/X Word}}\n" 140 | } 141 | ], 142 | "type": 0, 143 | "vers": [] 144 | } -------------------------------------------------------------------------------- /tests/test_files/deck_parts/yaml/note_models/LL-Word-No-Defaults.yaml: -------------------------------------------------------------------------------- 1 | name: LL Word 2 | id: 057a8d66-bc4e-11e9-9822-d8cb8ac9abf0 3 | css: ".card {\n font-family: arial;\n font-size: 20px;\n text-align: center;\n color:\ 4 | \ black;\n background-color: white;\n}\n\n.card1,.card3, .card5 { background-color:\ 5 | \ #B60F2D; }\n.card2,.card4, .card6 { background-color: #2E9017; }\n.card7 { background:\ 6 | \ linear-gradient(90deg, #B60F2D 49.9%, #2E9017 50.1%); }\n\n.word {\n font-size:1.5em;\n\ 7 | }\n\n.pronunciation{\n color:blue;\n}\n\n.extrainfo{\n color:lightgrey;\n}" 8 | sort_field_num: 1 9 | is_cloze: true 10 | latex_pre: "\\documentclass[12pt]{article}\n\\special{papersize=3in,5in}\n\\usepackage{amssymb,amsmath}\n\ 11 | \\pagestyle{empty}\n\\setlength{\\parindent}{0in}\n\\begin{document}\nTEST" 12 | latex_post: \end{document}TEST 13 | fields: 14 | - name: Word 15 | font: Liberation SansTEST 16 | media: 17 | - TEST 18 | is_right_to_left: true 19 | font_size: 10 20 | is_sticky: true 21 | - name: X Word 22 | font: Arial 23 | media: 24 | - TEST 25 | is_right_to_left: true 26 | font_size: 10 27 | is_sticky: true 28 | - name: Y Word 29 | font: Arial 30 | media: 31 | - TEST 32 | is_right_to_left: true 33 | font_size: 10 34 | is_sticky: true 35 | - name: Picture 36 | font: Arial 37 | media: 38 | - TEST 39 | is_right_to_left: true 40 | font_size: 10 41 | is_sticky: true 42 | - name: Extra 43 | font: Arial 44 | media: 45 | - TEST 46 | is_right_to_left: true 47 | font_size: 10 48 | is_sticky: true 49 | - name: X Pronunciation 
(Recording and/or IPA) 50 | font: Arial 51 | media: 52 | - TEST 53 | is_right_to_left: true 54 | font_size: 10 55 | is_sticky: true 56 | - name: Y Pronunciation (Recording and/or IPA) 57 | font: Arial 58 | media: 59 | - TEST 60 | is_right_to_left: true 61 | font_size: 10 62 | is_sticky: true 63 | templates: 64 | - name: X Comprehension 65 | question_format: "{{#X Word}}\n\t{{text:X Word}}\n{{/X\ 66 | \ Word}}" 67 | answer_format: "{{#X Word}}\n\t{{X Word}}\n{{/X Word}}\n\ 68 | \n
\n\n{{Picture}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\ 69 | \t
{{X Pronunciation (Recording and/or IPA)}}\n\ 70 | {{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}" 72 | browser_question_format: TEST 73 | browser_answer_format: TEST 74 | deck_override_id: 1 75 | - name: Y Comprehension 76 | question_format: "{{#Y Word}}\n\t{{text:Y Word}}\n{{/Y\ 77 | \ Word}}" 78 | answer_format: "{{#Y Word}}\n\t{{Y Word}}\n{{/Y Word}}\n\ 79 | \n
\n\n{{Picture}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\ 80 | \t
{{Y Pronunciation (Recording and/or IPA)}}\n\ 81 | {{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}" 83 | browser_question_format: TEST 84 | browser_answer_format: TEST 85 | deck_override_id: 1 86 | - name: X Production 87 | question_format: "{{#X Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/X Word}}" 88 | answer_format: "{{FrontSide}}\n\n
\n\n{{X Word}}\n\ 89 | \n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording\ 91 | \ and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n\ 92 | {{/Extra}}" 93 | browser_question_format: TEST 94 | browser_answer_format: TEST 95 | deck_override_id: 1 96 | - name: Y Production 97 | question_format: "{{#Y Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/Y Word}}" 98 | answer_format: "{{FrontSide}}\n\n
\n\n{{Y Word}}\n\ 99 | \n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording\ 101 | \ and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n\ 102 | {{/Extra}}" 103 | browser_question_format: TEST 104 | browser_answer_format: TEST 105 | deck_override_id: 1 106 | - name: X Spelling 107 | question_format: "{{#X Word}}\n\t
Spell this word:
\n\n\t
{{type:X Word}}
\n\n\t
{{Picture}}\n{{/X Word}}" 109 | answer_format: "{{FrontSide}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t\ 110 |
{{X Pronunciation (Recording and/or IPA)}}\n\ 111 | {{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}" 113 | browser_question_format: TEST 114 | browser_answer_format: TEST 115 | deck_override_id: 1 116 | - name: Y Spelling 117 | question_format: "{{#Y Word}}\n\t
Spell this word:
\n\n\t
{{type:Y Word}}
\n\n\t
{{Picture}}\n{{/Y Word}}" 119 | answer_format: "{{FrontSide}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t\ 120 |
{{Y Pronunciation (Recording and/or IPA)}}\n\ 121 | {{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}" 123 | browser_question_format: TEST 124 | browser_answer_format: TEST 125 | deck_override_id: 2 126 | - name: X and Y Production 127 | question_format: "{{#X Word}}\n{{#Y Word}}\n\t{{Picture}}\n{{/Y Word}}\n{{/X Word}}" 128 | answer_format: "{{FrontSide}}\n\n
\n\n
{{text:X\ 129 | \ Word}}
\n
{{text:Y Word}}
\n\n{{#X Pronunciation\ 130 | \ (Recording and/or IPA)}}\n\t
{{X Pronunciation\ 131 | \ (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\ 132 | \n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording\ 134 | \ and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n\ 135 | {{/Extra}}" 136 | browser_question_format: TEST 137 | browser_answer_format: TEST 138 | deck_override_id: 1 139 | tags: 140 | - TEST 141 | version: 142 | - TEST 143 | __type__: NoteModelTEST 144 | required_fields_per_template: [] -------------------------------------------------------------------------------- /tests/test_files/deck_parts/yaml/note_models/LL-Word-Only-Required.yaml: -------------------------------------------------------------------------------- 1 | name: LL Word 2 | id: 057a8d66-bc4e-11e9-9822-d8cb8ac9abf0 3 | css: ".card {\n font-family: arial;\n font-size: 20px;\n text-align: center;\n color:\ 4 | \ black;\n background-color: white;\n}\n\n.card1,.card3, .card5 { background-color:\ 5 | \ #B60F2D; }\n.card2,.card4, .card6 { background-color: #2E9017; }\n.card7 { background:\ 6 | \ linear-gradient(90deg, #B60F2D 49.9%, #2E9017 50.1%); }\n\n.word {\n font-size:1.5em;\n\ 7 | }\n\n.pronunciation{\n color:blue;\n}\n\n.extrainfo{\n color:lightgrey;\n}" 8 | fields: 9 | - name: Word 10 | font_size: 12 11 | - name: X Word 12 | font: Arial 13 | - name: Y Word 14 | font: Arial 15 | - name: Picture 16 | font: Arial 17 | font_size: 6 18 | - name: Extra 19 | font: Arial 20 | - name: X Pronunciation (Recording and/or IPA) 21 | font: Arial 22 | - name: Y Pronunciation (Recording and/or IPA) 23 | font: Arial 24 | templates: 25 | - name: X Comprehension 26 | question_format: "{{#X Word}}\n\t{{text:X Word}}\n{{/X\ 27 | \ Word}}" 28 | answer_format: "{{#X Word}}\n\t{{X Word}}\n{{/X Word}}\n\ 29 | \n
\n\n{{Picture}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\ 30 | \t
{{X Pronunciation (Recording and/or IPA)}}\n\ 31 | {{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}" 33 | - name: Y Comprehension 34 | question_format: "{{#Y Word}}\n\t{{text:Y Word}}\n{{/Y\ 35 | \ Word}}" 36 | answer_format: "{{#Y Word}}\n\t{{Y Word}}\n{{/Y Word}}\n\ 37 | \n
\n\n{{Picture}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\ 38 | \t
{{Y Pronunciation (Recording and/or IPA)}}\n\ 39 | {{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}" 41 | - name: X Production 42 | question_format: "{{#X Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/X Word}}" 43 | answer_format: "{{FrontSide}}\n\n
\n\n{{X Word}}\n\ 44 | \n{{#X Pronunciation (Recording and/or IPA)}}\n\t
{{X Pronunciation (Recording and/or IPA)}}\n{{/X Pronunciation (Recording\ 46 | \ and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n\ 47 | {{/Extra}}" 48 | - name: Y Production 49 | question_format: "{{#Y Word}}{{#Picture}}\n\t{{Picture}}\n{{/Picture}}{{/Y Word}}" 50 | answer_format: "{{FrontSide}}\n\n
\n\n{{Y Word}}\n\ 51 | \n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording\ 53 | \ and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n\ 54 | {{/Extra}}" 55 | - name: X Spelling 56 | question_format: "{{#X Word}}\n\t
Spell this word:
\n\n\t
{{type:X Word}}
\n\n\t
{{Picture}}\n{{/X Word}}" 58 | answer_format: "{{FrontSide}}\n\n{{#X Pronunciation (Recording and/or IPA)}}\n\t\ 59 |
{{X Pronunciation (Recording and/or IPA)}}\n\ 60 | {{/X Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}" 62 | - name: Y Spelling 63 | question_format: "{{#Y Word}}\n\t
Spell this word:
\n\n\t
{{type:Y Word}}
\n\n\t
{{Picture}}\n{{/Y Word}}" 65 | answer_format: "{{FrontSide}}\n\n{{#Y Pronunciation (Recording and/or IPA)}}\n\t\ 66 |
{{Y Pronunciation (Recording and/or IPA)}}\n\ 67 | {{/Y Pronunciation (Recording and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n{{/Extra}}" 69 | - name: X and Y Production 70 | question_format: "{{#X Word}}\n{{#Y Word}}\n\t{{Picture}}\n{{/Y Word}}\n{{/X Word}}" 71 | answer_format: "{{FrontSide}}\n\n
\n\n
{{text:X\ 72 | \ Word}}
\n
{{text:Y Word}}
\n\n{{#X Pronunciation\ 73 | \ (Recording and/or IPA)}}\n\t
{{X Pronunciation\ 74 | \ (Recording and/or IPA)}}\n{{/X Pronunciation (Recording and/or IPA)}}\n\ 75 | \n{{#Y Pronunciation (Recording and/or IPA)}}\n\t
{{Y Pronunciation (Recording and/or IPA)}}\n{{/Y Pronunciation (Recording\ 77 | \ and/or IPA)}}\n\n
\n{{#Extra}}\n\t
{{Extra}}\n\ 78 | {{/Extra}}" 79 | required_fields_per_template: [] -------------------------------------------------------------------------------- /tests/test_files/deck_parts/yaml/notes/note1.yaml: -------------------------------------------------------------------------------- 1 | note_groupings: 2 | - note_model: LL Noun 3 | tags: 4 | - noun 5 | - english 6 | notes: 7 | - guid: 7ysf7ysd8f8 8 | fields: 9 | - test 10 | - blah 11 | - another one 12 | - guid: sfkdsfhsd 13 | fields: 14 | - first 15 | - second 16 | - third 17 | - note_model: LL Verb 18 | tags: 19 | - verb 20 | - english 21 | notes: 22 | - guid: dhdfhsdf 23 | fields: 24 | - verby 25 | - boo 26 | - another one 27 | - guid: dfgdfhgjs 28 | fields: 29 | - first 30 | - second 31 | - third 32 | -------------------------------------------------------------------------------- /tests/test_files/media_files/buried/even_more/signals2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/test_files/media_files/buried/even_more/signals2.png -------------------------------------------------------------------------------- /tests/test_files/media_files/signals.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ohare93/brain-brew/dbe106954cd2c610959a41b459460961c7d9444b/tests/test_files/media_files/signals.png -------------------------------------------------------------------------------- /tests/test_files/tsv/test1.tsv: -------------------------------------------------------------------------------- 1 | guid English Danish Esperanto Danish Audio Esperanto Audio Japanese Tags 2 | AAAA you du vi [sound:pronunciation_da_du.mp3] funny 3 | BBBB healthy rask test tag2 tag3 4 | CCCC tired træt besttag 5 | DDDD banana en banan banano [sound:pronunciation_da_banan.mp3] 6 | EEEE cat en kat 7 | FFFF dog en hund hundo 8 | GGGG 
def debug_write_part_to_file(part, filepath: str):
    """Debug aid: dump *part* to *filepath* via a throwaway PartHolder.

    Wraps the part in a PartHolder (the part id "Blah" is a placeholder,
    never read back), points its save target at *filepath*, and writes it
    out so the serialized form can be inspected by hand.
    """
    holder = PartHolder("Blah", filepath, part)
    # NOTE(review): filepath is passed both to the constructor and assigned
    # here — presumably PartHolder's ctor arg serves a different role; verify.
    holder.save_to_file = filepath
    holder.write_to_file()
class TestHelperFunctions:
    """Tests for str_to_lowercase_no_separators."""
    # Every separator/casing variant below should normalise to one key:
    # spaces, underscores, hyphens and camelCase are all stripped/lowered.
    @pytest.mark.parametrize("str_to_tidy", [
        'Generate Csv Blah Blah',
        'Generate__Csv_Blah-Blah',
        'Generate Csv Blah Blah',
        'generateCsvBlahBlah'
    ])
    def test_remove_spacers_from_str(self, str_to_tidy):
        assert str_to_lowercase_no_separators(str_to_tidy) == "generatecsvblahblah"


class TestSplitTags:
    """Tests for split_tags."""
    # split_tags accepts commas, semicolons and whitespace as separators,
    # in any mix, with or without surrounding spaces.
    @pytest.mark.parametrize("str_to_split, expected_result", [
        ("tags1, tags2", ["tags1", "tags2"]),
        ("tags1 tags2", ["tags1", "tags2"]),
        ("tags1; tags2", ["tags1", "tags2"]),
        ("tags1 tags2", ["tags1", "tags2"]),
        ("tags1, tags2, tags3, tags4, tags5, tags6, tags7, tags8, tags9",
         ["tags1", "tags2", "tags3", "tags4", "tags5", "tags6", "tags7", "tags8", "tags9"]),
        # mixed separators in one string
        ("tags1, tags2; tags3 tags4 tags5, tags6; tags7 tags8, tags9",
         ["tags1", "tags2", "tags3", "tags4", "tags5", "tags6", "tags7", "tags8", "tags9"]),
        # no space after the separator
        ("tags1,tags2", ["tags1", "tags2"]),
        ("tags1;tags2", ["tags1", "tags2"]),
        ("tags1, tags2", ["tags1", "tags2"]),
        ("tags1; tags2", ["tags1", "tags2"]),
    ])
    def test_runs(self, str_to_split, expected_result):
        assert split_tags(str_to_split) == expected_result
("test\n\n\n\n---\n\n\n\nhighfive", html_separator_regex, ["test", "highfive"]), 70 | ("test\n\n\n\n---\n\n\n\nhighfive\n\n--\n\nbackflip", html_separator_regex, ["test", "highfive", "backflip"]), 71 | ]) 72 | def test_runs(self, str_to_split, split_by, expected_result): 73 | assert split_by_regex(str_to_split, split_by) == expected_result 74 | 75 | 76 | # class TestJoinTags: 77 | # @pytest.mark.parametrize("join_with, expected_result", [ 78 | # (", ", "test, test1, test2") 79 | # ]) 80 | # def test_joins(self, global_config, join_with, expected_result): 81 | # list_to_join = ["test", "test1", "test2"] 82 | # global_config.flags.join_values_with = join_with 83 | # 84 | # assert join_tags(list_to_join) == expected_result 85 | --------------------------------------------------------------------------------