├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── README.rst ├── TODO ├── beansoup ├── __init__.py ├── importers │ ├── __init__.py │ ├── amex.py │ ├── csv.py │ ├── filing.py │ ├── mixins.py │ └── td.py ├── plugins │ ├── __init__.py │ ├── clear_transactions.py │ ├── config.py │ └── deposit_in_transit.py ├── transactions.py ├── utils │ ├── __init__.py │ ├── dates.py │ ├── links.py │ ├── periods.py │ └── testing.py └── version.py ├── docs ├── Makefile ├── beansoup.importers.rst ├── beansoup.plugins.rst ├── beansoup.rst ├── beansoup.utils.rst ├── conf.py ├── index.rst └── make.bat ├── pylintrc ├── pytest.ini ├── requirements └── rtd.txt ├── setup.cfg ├── setup.py └── tests ├── example.beancount ├── importers ├── test_amex.py ├── test_filing.py ├── test_mixins.py └── test_td.py ├── plugins ├── test_clear_transactions.py ├── test_config.py └── test_deposit_in_transit.py ├── test_transactions.py └── utils ├── test_dates.py ├── test_links.py ├── test_periods.py └── test_testing.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | 58 | # PyBuilder 59 | target/ 60 | 61 | #Ipython Notebook 62 | .ipynb_checkpoints 63 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - 3.5 4 | sudo: false 5 | install: 6 | - travis_retry python setup.py develop 7 | - travis_retry pip install coveralls 8 | script: 9 | - python setup.py coverage 10 | after_success: 11 | coveralls 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. 
(Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. 
You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. 
You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. 
You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. 
If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 
311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | , 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # beansoup: A Companion to beancount 2 | 3 | [![PyPi Package](https://badge.fury.io/py/beansoup.svg)](https://badge.fury.io/py/beansoup) [![Coverage Status](https://coveralls.io/repos/github/fxtlabs/beansoup/badge.svg?branch=master)](https://coveralls.io/github/fxtlabs/beansoup?branch=master) [![Build Status](https://travis-ci.org/fxtlabs/beansoup.svg?branch=master)](https://travis-ci.org/fxtlabs/beansoup) [![Documentation Status](https://readthedocs.org/projects/beansoup/badge/?version=latest)](http://beansoup.readthedocs.io/en/latest/?badge=latest) 4 | 5 | 6 | ## Description 7 | 8 | [beansoup](https://github.com/fxtlabs/beansoup) offers a collection of plugins 9 | and tools that extend the functionality of 10 | [beancount](http://furius.ca/beancount) to facilitate my accounting and 11 | investment tracking activities. 12 | 13 | 14 | ## Copyright and License 15 | 16 | Copyright (C) 2016 Filippo Tampieri. All Rights Reserved. 17 | 18 | _beansoup_ is distributed under the [GNU General Public License](LICENSE), the 19 | same license used by _beancount_. 20 | 21 | 22 | ## Author 23 | 24 | Filippo Tampieri 25 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======================================== 2 | beansoup: A Companion to beancount 3 | ======================================== 4 | 5 | |PyPi Package| |Coverage Status| |Build Status| |Documentation Status| 6 | 7 | 8 | .. contents:: 9 | .. 10 | 1 Description 11 | 2 Copyright and License 12 | 3 Author 13 | 14 | 15 | Description 16 | =========== 17 | 18 | `beansoup `_ offers a collection of plugins 19 | and tools that extend the functionality of 20 | `beancount `_ to facilitate my accounting and 21 | investment tracking activities. 
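For example, an import configuration for ``bean-identify``/``bean-extract``
could instantiate a couple of the importers along these lines (a minimal
sketch: the account names, base names, and file name pattern are illustrative,
and the ``CONFIG`` list follows beancount's usual ingest convention)::

    from beansoup.importers import amex, td

    CONFIG = [
        td.Importer('Liabilities:TD:Visa',
                    currency='CAD',
                    basename='td_visa',
                    filename_regexp=r'^td_visa\.csv$'),
        amex.PdfFilingImporter('Liabilities:Amex:Card',
                               basename='amex_statement',
                               first_day=15),
    ]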
22 | 23 | 24 | Copyright and License 25 | ===================== 26 | 27 | Copyright (C) 2016 Filippo Tampieri. All Rights Reserved. 28 | 29 | *beansoup* is distributed under the `GNU General Public License `_, the 30 | same license used by *beancount*. 31 | 32 | 33 | Author 34 | ====== 35 | 36 | Filippo Tampieri 37 | 38 | .. |PyPi Package| image:: https://badge.fury.io/py/beansoup.svg 39 | :target: https://badge.fury.io/py/beansoup 40 | :alt: PyPi Status 41 | 42 | .. |Coverage Status| image:: https://coveralls.io/repos/github/fxtlabs/beansoup/badge.svg?branch=master 43 | :target: https://coveralls.io/github/fxtlabs/beansoup?branch=master 44 | :alt: Coverage Status 45 | 46 | .. |Build Status| image:: https://travis-ci.org/fxtlabs/beansoup.svg?branch=master 47 | :target: https://travis-ci.org/fxtlabs/beansoup 48 | :alt: Build Status 49 | 50 | .. |Documentation Status| image:: https://readthedocs.org/projects/beansoup/badge/?version=latest 51 | :target: http://beansoup.readthedocs.io/en/latest/?badge=latest 52 | :alt: Documentation Status 53 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | ==== 2 | TODO 3 | ==== 4 | 5 | * DIT plugin should have option to skip zero-amount DIT transactions 6 | 7 | * Reformat TODO as TODO.md or move items to Issues tracker on github 8 | 9 | * Create a DEVNOTES.md document with how-to pointers for coding style, documentation, 10 | continous integration, test coverage, versioning, pylint, etc. 11 | 12 | * Should I have release notes? 13 | 14 | * Document exceptions 15 | 16 | * Write package level docs 17 | 18 | * Use doctest in .rst doc examples 19 | https://pytest.org/latest/doctest.html 20 | 21 | * Improve docs with Google style docstrings, type hints, and examples 22 | 23 | * Consider rewriting tests as functions rather than classes using more 24 | of pytest facilities 25 | 26 | * Write an import filter to pull the payee out of the narration based on 27 | past example; run it before the transaction completer! 28 | 29 | * Make the transaction completer customizable 30 | ** User can specify a separator string to ask to ignore the narration 31 | starting from the separator pattern 32 | ** User can specify pairs of a regex to match a token and a string to replace 33 | the token with; use it to normalize dates (e.g. 2016-03-27 -> XXXX-XX-XX) 34 | and confirmation codes (e.g. H3Z2J7 -> A1A1A1) to improve matches 35 | 36 | * Update readme to point to docs on 37 | https://pythonhosted.org/beansoup/ and 38 | 39 | * Write docstrings for everything 40 | 41 | * Write beansoup.scripts.beansoup script (installed in bin/beansoup) 42 | to run various commands (in a way, an extension of bean-doctor). 43 | First command is to detect inactive accounts (no activity in the past X days) 44 | Print account name, open date, last transaction date, days since last 45 | transaction, and balance 46 | It could even have an option to print close directives 47 | It should also have an option to limit it to a certain type of directives 48 | (i.e. 
only balance sheet accounts) 49 | Maybe it should be a script on its own; call it bean-inactive 50 | 51 | * Write auto_close plugin to close any account (still open) that has not seen 52 | any activity in the past X days 53 | Trivial using beancount.core.getters.get_accounts_use_map 54 | (see auto_accounts plugin for an example) 55 | Not sure how usuful this plugin would be; the reports already exclude 56 | accounts that have no activity for the period covered. 57 | Maybe the plugin could just warn the user that there are accounts that 58 | could be closed and could show the last used dates. 59 | (see nounused plugin for an example of error reporting) 60 | 61 | * Sort rows by date at the beginning of sort_rows(). 62 | May add option to disable the fancy sorting and only decide whether to 63 | reverse the list of not (still checking the balance values) 64 | 65 | * Move transactions.py to importers/filters/completion.py 66 | 67 | * Should importers/filters/__init__.py import all filters to expose them 68 | more conveniently (e.g. 69 | from beansoup.importers import filters 70 | then filters.Completion, filters.Whatever 71 | instead of 72 | from beansoup.importers.filters import completion, whatever 73 | then completion.Completion, whatever.Whatever or 74 | completion.Filter, whatever.Filter) 75 | 76 | * Consider adding an option to transaction completer to add posting for 77 | all matching accounts even though they would not balance 78 | 79 | * Allow transaction completer to look at alternative accounts for matches 80 | (e.g. completing a transaction from a Visa statement could also look at 81 | transactions involving an Amex card since you may have charged for the 82 | same payee on one card or the other) 83 | Matches in alternative accounts would have a smaller score 84 | 85 | * Consider using Baysian inference to improve the transaction completer 86 | https://tomszilagyi.github.io/payment-matching/ 87 | Some of my accounts also use codes structured like Canadian postal codes 88 | 89 | * Improve the narrative similarity algorithm used by transaction completer 90 | Try something from difflib: 91 | difflib.SequenceMatcher(lambda x: x == ' ', 'string1', 'string2').ratio() 92 | Or try Levenshtein distance 93 | https://pypi.python.org/pypi/python-Levenshtein/0.12.0 94 | distance(), ratio(), and jaro_winkler() look interesting 95 | Probably best to lowercase everything before 96 | http://stackoverflow.com/questions/6690739/fuzzy-string-comparison-in-python-confused-with-which-library-to-use 97 | http://chairnerd.seatgeek.com/fuzzywuzzy-fuzzy-string-matching-in-python/ 98 | https://github.com/seatgeek/fuzzywuzzy 99 | https://marcobonzanini.com/2015/02/25/fuzzy-string-matching-in-python/ 100 | https://pypi.python.org/pypi/Distance/ 101 | 102 | * Add a customizable normalization step on the imported narration and 103 | existing narrations to improve robustness of similarity measure. 104 | For example, the zip-like electronic transfer code at the end of some TD 105 | entries could be normalized to A1B2C3. 106 | Maybe use a normalization dict to map accounts to normalization callables. 107 | 108 | * Maybe ignored_tag of clear_transactions plugin should be a regexp 109 | 110 | * Finish clear_transactions plugin; add documentation and example 111 | 112 | * Should clear_transactions plugin be able to print out pending transactions? 113 | 114 | * Write OFX importer (banking only?) 
that can name files according to the 115 | billing period 116 | 117 | * Write tests for everything 118 | 119 | * Consider adding examples 120 | 121 | * Should I write a plugin to support annotating transactions with doc:? 122 | 123 | -------------------------------------------------------------------------------- /beansoup/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | -------------------------------------------------------------------------------- /beansoup/importers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fxtlabs/beansoup/678a74fe558a4ed14fd6626a6d3b74ebaa68558a/beansoup/importers/__init__.py -------------------------------------------------------------------------------- /beansoup/importers/amex.py: -------------------------------------------------------------------------------- 1 | """Importers for American Express statements.""" 2 | 3 | import calendar 4 | 5 | from beansoup.importers import filing 6 | 7 | 8 | class PdfFilingImporter(filing.Importer): 9 | """A filing importer for American Express PDF monthly statements.""" 10 | filename_regexp = ('^Statement_(?P%s) (?P\d{4})\.pdf$' % 11 | '|'.join(calendar.month_abbr[1:])) 12 | -------------------------------------------------------------------------------- /beansoup/importers/csv.py: -------------------------------------------------------------------------------- 1 | """Utilities to implement CSV importers.""" 2 | 3 | import collections 4 | import csv 5 | import datetime 6 | import io 7 | import itertools 8 | import logging 9 | from os import path 10 | import re 11 | 12 | from beancount.core import account_types as atypes 13 | from beancount.core import amount 14 | from beancount.core import data 15 | from beancount.ingest import importer 16 | 17 | from beansoup.utils import periods 18 | 19 | 20 | # A row of the parsed CSV file. 21 | # 22 | # Attributes: 23 | # lineno: An int identifying the line of the CSV file where this row is found. 24 | # date: A datetime.date object; the date of the transaction. 25 | # description: A string; a description of the transaction. 26 | # amount: A beancount.core.number.Decimal object; the value of the transaction. 27 | # The sign of its value should be the same as normally used by beancount entries. 28 | # balance: A beancount.core.number.Decimal object; the balance of the account 29 | # immediately following the transaction. The sign of its value should be the 30 | # same as normally used by beancount entries. 31 | Row = collections.namedtuple('Row', 'lineno date description amount balance') 32 | 33 | 34 | class Importer(importer.ImporterProtocol): 35 | """An importer base class for CSV bank and credit card statements. 36 | 37 | Unfortunately, CSV files often do not contain any information to easily 38 | identify the account; for this reason, this importer relies on the name 39 | of the file to associate it to a particular account. 40 | 41 | Derived classes need to implement the 'parse' method. 42 | 43 | See beansoup.importers.td.Importer for a full example of how to derive a 44 | concrete importer from this class. 45 | """ 46 | def __init__(self, account, currency='CAD', basename=None, 47 | first_day=None, filename_regexp=None, account_types=None): 48 | """Create a new importer for the given account. 49 | 50 | Args: 51 | account: An account string, the account to associate to the files. 
52 | account_types: An AccountTypes object or None to use the default ones. 53 | basename: An optional string, the name of the new files. 54 | currency: A string, the currency for all extracted transactions. 55 | filename_regexp: A regular expression string used to match the 56 | basename (no path) of the target file. 57 | first_day: An int in [1,28]; the first day of the statement/billing 58 | period or None. If None, the file date will be the date of the 59 | last extracted entry; otherwise, it will be the date of the end 60 | of the monthly period containing the last extracted entry. 61 | Also, if a balance directive can be generated, if None, the balance 62 | directive will be set to the day following the date of the last 63 | extracted entry; otherwise, it will be set to the day following the 64 | end of the statement period. 65 | """ 66 | self.filename_re = re.compile(filename_regexp or self.filename_regexp) 67 | self.account = account 68 | self.currency = currency.upper() 69 | self.basename = basename 70 | self.first_day = first_day 71 | self.account_sign = atypes.get_account_sign(account, account_types) 72 | 73 | def name(self): 74 | """Include the account in the name.""" 75 | return '{}: "{}"'.format(super().name(), self.file_account(None)) 76 | 77 | def identify(self, file): 78 | """Identify whether the file can be processed by this importer.""" 79 | # Match for a compatible MIME type. 80 | if file.mimetype() != 'text/csv': 81 | return False 82 | 83 | # Match the file name. 84 | return self.filename_re.match(path.basename(file.name)) 85 | 86 | def file_account(self, _): 87 | """Return the account associated with the file""" 88 | return self.account 89 | 90 | def file_name(self, file): 91 | """Return the optional renamed account file name.""" 92 | if self.basename: 93 | return self.basename + path.splitext(file.name)[1] 94 | 95 | def file_date(self, file): 96 | """Return the filing date for the file.""" 97 | rows = self.parse(file) 98 | date = max(row.date for row in rows) 99 | if self.first_day is not None: 100 | date = periods.lowest_end(date, first_day=self.first_day) 101 | return date 102 | 103 | def extract(self, file): 104 | """Return extracted entries and errors.""" 105 | rows = self.parse(file) 106 | rows, error_lineno = sort_rows(rows) 107 | new_entries = [] 108 | if len(rows) == 0: 109 | return new_entries 110 | 111 | for index, row in enumerate(rows): 112 | posting = data.Posting( 113 | self.account, 114 | amount.Amount(row.amount, self.currency), 115 | None, None, None, None) 116 | # Use the final positional index rather than the lineno of the row because 117 | # bean-extract will sort the entries returned by its importers; doing that 118 | # using the original line number of the parsed CSV row would undo all the 119 | # effort we did to find their correct chronological order. 
120 | meta = data.new_metadata(file.name, index) 121 | payee = None 122 | narration = row.description 123 | entry = data.Transaction( 124 | meta, 125 | row.date, 126 | self.FLAG, 127 | payee, 128 | narration, 129 | data.EMPTY_SET, 130 | data.EMPTY_SET, 131 | [posting]) 132 | new_entries.append(entry) 133 | 134 | # Extract balance, but only if we can trust it 135 | if error_lineno is not None: 136 | logging.warning('{}:{}: cannot reorder rows to agree with balance values'.format( 137 | file.name, error_lineno)) 138 | elif self.first_day is None: 139 | # Create one single balance entry on the day following the last transaction 140 | last_row = rows[-1] 141 | date = last_row.date + datetime.timedelta(days=1) 142 | balance_entry = self.create_balance_entry( 143 | file.name, date, last_row.balance) 144 | new_entries.append(balance_entry) 145 | else: 146 | # Create monthly balance entries starting from the most recent one 147 | balance_date = periods.next(periods.greatest_start(rows[-1].date, 148 | first_day=self.first_day)) 149 | for row in reversed(rows): 150 | if row.date < balance_date: 151 | new_entries.append(self.create_balance_entry( 152 | file.name, balance_date, row.balance)) 153 | balance_date = periods.prev(balance_date) 154 | 155 | return new_entries 156 | 157 | def create_balance_entry(self, filename, date, balance): 158 | # Balance directives will be sorted in front of transactions, so there is no need 159 | # to have a line number to break ties. 160 | meta = data.new_metadata(filename, 0) 161 | balance_entry = data.Balance(meta, date, self.account, 162 | amount.Amount(balance, self.currency), 163 | None, None) 164 | return balance_entry 165 | 166 | def parse(self, file): 167 | """Parse the CSV file. 168 | 169 | Derived classes must implement this method to parse their CSV files. 170 | 171 | Consider using the helper function 'beansoup.importers.csv.parse' to implement 172 | your custom CSV parser. 173 | 174 | Args: 175 | file: A cache.FileMemo object. 176 | Returns: 177 | A list of Row objects; one object per row. 178 | The order of the parsed rows is irrelevant; they will be sorted in ascending 179 | chronological order in a way that agrees with the balance values associated to 180 | each row. It that is not possible, the balance values will be ignored and the 181 | importer will be unable to extract balance directive, but will otherwise work 182 | as expected. 183 | """ 184 | raise NotImplementedError('Derived classes must implement this method.') 185 | 186 | 187 | def parse(file, dialect, parse_row): 188 | """Parse a CSV file. 189 | 190 | This utility function makes it easy to parse a CSV file format for 191 | bank or credit card accounts. 192 | 193 | It takes advantage of the ability to cache the file contents, but it 194 | does not attempt to cache the parsed result. Be careful when you consider 195 | caching the result of your parser in a cache.FileMemo object; often your 196 | row parser will adjust the sign of the row balance according to the sign 197 | of the account associated with the importer using the parser; this means 198 | that CSV importers for accounts of opposite signs should not share the 199 | parsed results! 200 | 201 | Args: 202 | file: A cache.FileMemo object; the CSV file to be parsed. 203 | dialect: The name of a registered CSV dialect to use for parsing. 204 | parse_row: A function taking a row (a list of values) and its line number in 205 | the input file and returning a Row object. 
206 | Returns: 207 | A list of Row objects in the same order as encountered in the CSV file. 208 | """ 209 | with io.StringIO(file.contents()) as stream: 210 | reader = csv.reader(stream, dialect) 211 | try: 212 | rows = [parse_row(row, reader.line_num) for row in reader if row] 213 | except (csv.Error, ValueError) as exc: 214 | logging.error('{}:{}: {}'.format(file.name, reader.line_num, exc)) 215 | rows = [] 216 | return rows 217 | 218 | 219 | def sort_rows(rows): 220 | """Sort the rows of a CSV file. 221 | 222 | This function can sort the rows of a CSV file in ascending chronological order 223 | such that the balance values of each row match the sequence of transactions. 224 | 225 | Args: 226 | rows: A list of objects with a lineno, date, amount, and balance attributes. 227 | Returns 228 | A pair with a sorted list of rows and an error. The error is None if the function 229 | could find an ordering agreeing with the balance values of its rows; otherwise, 230 | it is the line number in the CSV file corresponding to the first row not agreeing 231 | with its balance value. 232 | """ 233 | if len(rows) <= 1: 234 | return rows, None 235 | 236 | # If there is more than one row sharing the earliest date of the statement, we do not 237 | # know for sure which one came first, so we have a number of opening balances and we 238 | # have to find out which one is the right one. 239 | first_date = rows[0].date 240 | opening_balances = [row.balance - row.amount for row in itertools.takewhile( 241 | lambda r: r.date == first_date, rows)] 242 | 243 | error_lineno = 0 244 | for opening_balance in opening_balances: 245 | # Given one choice of opening balance, we try to find an ordering of the rows 246 | # that agrees with the balance amount they show 247 | stack = list(reversed(rows)) 248 | prev_balance = opening_balance 249 | unbalanced_rows = [] 250 | balanced_rows = [] 251 | while stack: 252 | row = stack.pop() 253 | # Check if the current row balances with the previous one 254 | if prev_balance + row.amount == row.balance: 255 | # The current row is in the correct chronological order 256 | balanced_rows.append(row) 257 | prev_balance = row.balance 258 | if unbalanced_rows: 259 | # Put unbalanced rows back on the stack so they get another chance 260 | stack.extend(unbalanced_rows) 261 | unbalanced_rows.clear() 262 | else: 263 | # The current row is out of chronological order 264 | if unbalanced_rows and unbalanced_rows[0].date != row.date: 265 | # No ordering can be found that agrees with the 266 | # balance values of the rows 267 | break 268 | # Skip the current row for the time being 269 | unbalanced_rows.append(row) 270 | if len(balanced_rows) == len(rows): 271 | return balanced_rows, None 272 | error_lineno = unbalanced_rows[0].lineno 273 | 274 | # The rows could not be ordered in any way that would agree with the balance values 275 | return rows, error_lineno 276 | -------------------------------------------------------------------------------- /beansoup/importers/filing.py: -------------------------------------------------------------------------------- 1 | """A file-only importer.""" 2 | 3 | import calendar 4 | import datetime 5 | import logging 6 | from os import path 7 | import re 8 | 9 | from beancount.ingest import importer 10 | 11 | from beansoup.utils import dates 12 | 13 | 14 | class Importer(importer.ImporterProtocol): 15 | """A document-filing class for monthly files; it does not import anything. 16 | 17 | This importer only supports bean-identify and bean-file. 
It does not 18 | extract any transactions; in fact, it does not even open the file. 19 | It uses a regular expression to match a filename to an account and to 20 | a date (interpreted as the last day of a billing period). 21 | """ 22 | def __init__(self, account, basename=None, 23 | first_day=1, filename_regexp=None): 24 | """Create a new filing importer for the given account. 25 | 26 | Args: 27 | account: An account string, the account to associate to the files. 28 | basename: An optional string, the name of the new files. 29 | filename_regexp: A regular expression string used to match the 30 | basename (no path) of the target file. This regexp should include 31 | capturing groups for `year`, `month`, and (optional) `day` of 32 | the end of the period covered by the file; if `day` is 33 | missing, the `first_day` argument will be used to compute the 34 | end date. 35 | Example: '^Statement_(?P\w{3}) (?P\d{4}).pdf$' 36 | first_day: An int in [1,28]; the first day of the billing period. 37 | """ 38 | self.filename_re = re.compile(filename_regexp or self.filename_regexp) 39 | self.account = account 40 | self.basename = basename 41 | self.first_day = first_day 42 | 43 | def name(self): 44 | """Include the filing account in the name.""" 45 | return '{}: "{}"'.format(super().name(), self.file_account(None)) 46 | 47 | def identify(self, file): 48 | """Identify whether the file can be processed by this importer.""" 49 | # Match the file name. 50 | return self.filename_re.match(path.basename(file.name)) 51 | 52 | def file_account(self, _): 53 | """Return the account associated with the file""" 54 | return self.account 55 | 56 | def file_name(self, file): 57 | """Return the optional renamed account file name.""" 58 | if self.basename: 59 | return self.basename + path.splitext(file.name)[1] 60 | 61 | def file_date(self, file): 62 | """Return the filing date for the file.""" 63 | matches = self.filename_re.match(path.basename(file.name)) 64 | if matches: 65 | groups = matches.groupdict() 66 | today = datetime.date.today() 67 | year = int(groups['year']) if 'year' in groups else today.year 68 | month = dates.month_number(groups.get('month')) or today.month 69 | if 'day' in groups: 70 | # The filename fully specifies the document date 71 | day = int(groups['day']) 72 | date = datetime.date(year, month, day) 73 | else: 74 | # Use the first day of the billing cycle to compute the 75 | # last day of the period for the given year and month 76 | if self.first_day > 1: 77 | date = (datetime.date(year, month, self.first_day) - 78 | datetime.timedelta(days=1)) 79 | else: 80 | _, month_last_day = calendar.monthrange(year, month) 81 | date = datetime.date(year, month, month_last_day) 82 | return date 83 | 84 | def extract(self, file): 85 | """Do not attempt to extract any transactions from the file.""" 86 | logging.warning( 87 | "Cannot extract entries from file '{}'. " 88 | "Please use a proper importer and data format".format(file.name)) 89 | return [] 90 | -------------------------------------------------------------------------------- /beansoup/importers/mixins.py: -------------------------------------------------------------------------------- 1 | """Mixins for importer classes.""" 2 | 3 | 4 | class FilterChain: 5 | """A mixin to pass imported entries through a pipeline of filters. 6 | 7 | This mixin modifies the extract method of a concrete instance of 8 | ImporterProtocol to run the extracted entries through a chain of 9 | arbitrary filters. 
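A minimal usage sketch (the filter function, the combined importer class,
the account name, and the file name pattern below are illustrative, not
part of beansoup):

        from beancount.core import data

        from beansoup.importers import mixins, td

        def drop_zero_transactions(entries):
            # Example filter: discard transactions whose single posting
            # carries a zero amount.
            return [entry for entry in entries
                    if not (isinstance(entry, data.Transaction) and
                            entry.postings[0].units.number == 0)]

        class Importer(mixins.FilterChain, td.Importer):
            pass

        importer = Importer('Liabilities:TD:Visa',
                            filename_regexp=r'^td_visa\.csv$',
                            filters=[drop_zero_transactions])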
10 | """ 11 | def __init__(self, *args, **kwargs): 12 | """Set up the filter chain and pass the rest of the arguments to the 13 | base class. 14 | 15 | Args: 16 | filters: A list of callables taking a list of entries and returning 17 | a subset of them. 18 | """ 19 | self.filters = kwargs.pop('filters', []) 20 | super(FilterChain, self).__init__(*args, **kwargs) 21 | 22 | def extract(self, file): 23 | """Extract the entries using the main importer and then run all 24 | the filters on them. 25 | """ 26 | entries = super(FilterChain, self).extract(file) 27 | for filter in self.filters: 28 | entries = filter(entries) 29 | return entries 30 | -------------------------------------------------------------------------------- /beansoup/importers/td.py: -------------------------------------------------------------------------------- 1 | """Importers for TD Canada Trust.""" 2 | 3 | import csv as csvlib 4 | import datetime 5 | 6 | from beancount.core.number import D 7 | 8 | from beansoup.importers import csv 9 | 10 | 11 | csvlib.register_dialect('tdcanadatrust', delimiter=',', quoting=csvlib.QUOTE_MINIMAL) 12 | 13 | 14 | class Importer(csv.Importer): 15 | """An importer for TD Canada Trust CSV statements.""" 16 | def parse(self, file): 17 | """Parse a TD Canada Trust CSV file. 18 | 19 | Args: 20 | file: A beansoup.ingest.cache.FileMemo instance; the CSV file to be parsed. 21 | Returns: 22 | A list of Row objects. 23 | """ 24 | return csv.parse(file, 'tdcanadatrust', self.parse_row) 25 | 26 | def parse_row(self, row, lineno): 27 | """Parse a row of a TD Canada Trust CSV file. 28 | 29 | Args: 30 | row: A list of field values for the row. 31 | lineno: The line number where the row appears in the CSV file 32 | Returns: 33 | A beansoup.importers.csv.Row object. 34 | """ 35 | if len(row) != 5: 36 | raise csvlib.Error('Invalid row; expecting 5 values: {}'.format(row)) 37 | date = datetime.datetime.strptime(row[0], '%m/%d/%Y').date() 38 | description = row[1] 39 | amount = -D(row[2]) if row[2] else D(row[3]) 40 | balance = self.account_sign * D(row[4]) 41 | return csv.Row(lineno, date, description, amount, balance) 42 | -------------------------------------------------------------------------------- /beansoup/plugins/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fxtlabs/beansoup/678a74fe558a4ed14fd6626a6d3b74ebaa68558a/beansoup/plugins/__init__.py -------------------------------------------------------------------------------- /beansoup/plugins/clear_transactions.py: -------------------------------------------------------------------------------- 1 | """Work in progress. It works, but needs documentation and some cleaning. 
2 | """ 3 | 4 | import argparse 5 | import collections 6 | import datetime 7 | import itertools 8 | 9 | from beancount.core import data, getters, flags 10 | 11 | from beansoup.plugins import config 12 | from beansoup.utils import dates 13 | 14 | __plugins__ = ('clear_transactions',) 15 | 16 | 17 | class AccountPairType: 18 | def __init__(self, entries): 19 | self.existing_accounts = getters.get_accounts(entries) 20 | 21 | def __call__(self, string): 22 | accounts = string.split(',') 23 | if len(accounts) != 2: 24 | msg = "invalid account pair: '{}'; expecting clearing and main account names separated by a comma (no spaces)".format(string) 25 | raise argparse.ArgumentTypeError(msg) 26 | for account in accounts: 27 | if account not in self.existing_accounts: 28 | msg = "account '{}' does not exist".format(account) 29 | raise argparse.ArgumentTypeError(msg) 30 | return tuple(accounts) 31 | 32 | 33 | def clear_transactions(entries, options_map, config_string): 34 | # Parse plugin config; report errors if any 35 | parser = config.ArgumentParser( 36 | prog=__name__, 37 | description='A plugin that automatically tags cleared and pending transactions.', 38 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 39 | add_help=False, 40 | entries_filename=options_map['filename']) 41 | parser.add_argument( 42 | '--flag_pending', action='store_true', default=False, 43 | help='annotate pending transactions with a {} flag'.format(flags.FLAG_WARNING)) 44 | parser.add_argument( 45 | '--cleared_tag', metavar='TAG', default='CLEARED', 46 | help='tag cleared transactions with %(metavar)s') 47 | parser.add_argument( 48 | '--pending_tag', metavar='TAG', default='PENDING', 49 | help='tag pending transactions with %(metavar)s') 50 | parser.add_argument( 51 | '--ignored_tag', metavar='TAG', default='PRE_CLEARED', 52 | help='ignore transactions that have a %(metavar)s tag') 53 | parser.add_argument( 54 | '--link_prefix', metavar='PREFIX', default='cleared', 55 | help='link pairs of cleared transactions with %(metavar)s string followed by increasing count') 56 | parser.add_argument( 57 | '--max_days', metavar='N', type=int, default=7, 58 | help='only pair transactions if they occurred no more than %(metavar)s days apart') 59 | parser.add_argument( 60 | '--skip_weekends', action='store_true', default=False, 61 | help='skip weekends when measuring the time gap between transactions') 62 | parser.add_argument( 63 | 'account_pairs', metavar='CLEARING_ACCOUNT,MAIN_ACCOUNT', nargs='+', 64 | type=AccountPairType(entries), 65 | help='the names of a clearing account and its main account, separated by a comma (no space)') 66 | 67 | try: 68 | args = parser.parse_args((config_string or '').split()) 69 | except config.ParseError as error: 70 | return entries, [error] 71 | 72 | processor = Processor(args) 73 | 74 | modified_entries, errors = processor.clear_transactions(entries) 75 | 76 | # FIXME: Consider printing the pending entries. Maybe return errors for them. 
77 | 78 | return [modified_entries.get(id(entry), entry) for entry in entries], errors 79 | 80 | 81 | class Processor: 82 | def __init__(self, args): 83 | self.flag_pending = args.flag_pending 84 | self.cleared_tag_name = args.cleared_tag 85 | self.pending_tag_name = args.pending_tag 86 | self.ignored_tag_name = args.ignored_tag 87 | self.cleared_link_prefix = args.link_prefix 88 | self.max_delta_days = args.max_days 89 | self.skip_weekends = args.skip_weekends 90 | self.clearing_accounts = dict(args.account_pairs) 91 | 92 | self.modified_entries = None 93 | self.link_count = itertools.count(start=1) 94 | 95 | def clear_transactions(self, entries): 96 | errors = [] 97 | self.modified_entries = {} 98 | groups = collections.defaultdict(list) 99 | for entry in entries: 100 | if (not isinstance(entry, data.Transaction) or 101 | (entry.tags and self.ignored_tag_name in entry.tags)): 102 | continue 103 | posting = self.get_txn_clearing_posting(entry) 104 | if posting: 105 | groups[posting.account].append(data.TxnPosting(entry, posting)) 106 | 107 | # NOTE: sorting is only needed to support testing 108 | for _, txn_postings in sorted(groups.items(), key=lambda x: x[0]): 109 | self.clear_transaction_group(txn_postings) 110 | 111 | return self.modified_entries, errors 112 | 113 | def get_txn_clearing_posting(self, txn): 114 | # This code implicitly assumes that a transaction can only have 115 | # one posting to a clearing account 116 | for posting in txn.postings: 117 | if posting.account in self.clearing_accounts: 118 | return posting 119 | 120 | def clear_transaction_group(self, txn_postings): 121 | # Make sure the transactions are sorted; 122 | # other plugins could have changed their order 123 | txn_postings = collections.deque( 124 | sorted(txn_postings, key=lambda x: data.entry_sortkey(x.txn))) 125 | 126 | while txn_postings: 127 | txn_posting = txn_postings.popleft() 128 | if id(txn_posting.txn) in self.modified_entries: 129 | # This transaction has already been cleared 130 | continue 131 | # Look for matching transactions within a maximum time delta 132 | max_date = self.max_matching_date(txn_posting.txn) 133 | for txn_posting2 in itertools.takewhile(lambda x: x.txn.date <= max_date, txn_postings): 134 | if id(txn_posting2.txn) in self.modified_entries: 135 | # This transaction has already been cleared 136 | continue 137 | if self.match_txn_postings(txn_posting, txn_posting2): 138 | # Found match; link the transactions and tag them as cleared 139 | link_name = '{}-{}'.format(self.cleared_link_prefix, 140 | next(self.link_count)) 141 | txn = txn_posting.txn 142 | self.modified_entries[id(txn)] = txn._replace( 143 | tags=(txn.tags or set()) | set((self.cleared_tag_name,)), 144 | links=(txn.links or set()) | set((link_name,))) 145 | txn2 = txn_posting2.txn 146 | self.modified_entries[id(txn2)] = txn2._replace( 147 | tags=(txn2.tags or set()) | set((self.cleared_tag_name,)), 148 | links=(txn2.links or set()) | set((link_name,))) 149 | break 150 | else: 151 | # No match; mark the transaction as pending 152 | txn = txn_posting.txn 153 | self.modified_entries[id(txn)] = txn._replace( 154 | flag=flags.FLAG_WARNING if self.flag_pending else txn.flag, 155 | tags=(txn.tags or set()) | set((self.pending_tag_name,))) 156 | 157 | def max_matching_date(self, txn): 158 | if self.skip_weekends: 159 | return dates.add_biz_days(txn.date, self.max_delta_days) 160 | return txn.date + datetime.timedelta(days=self.max_delta_days) 161 | 162 | def match_txn_postings(self, txn_posting, txn_posting2): 163 | # We 
already know the two transactions are within the max time gap 164 | # and share a clearing account 165 | 166 | # We can have a match only if the postings to the clearing account 167 | # on the two transactions balance out to 0 168 | if txn_posting.posting.units != -txn_posting2.posting.units: 169 | return False 170 | 171 | # We can have a match only if one and only one of the two transactions 172 | # has a posting to the main account related to their common clearing 173 | # account 174 | main_account = self.clearing_accounts[txn_posting.posting.account] 175 | num_main_account_postings = len( 176 | [posting for posting in (txn_posting.txn.postings + txn_posting2.txn.postings) if posting.account == main_account]) 177 | return num_main_account_postings == 1 178 | -------------------------------------------------------------------------------- /beansoup/plugins/config.py: -------------------------------------------------------------------------------- 1 | """Utilities to help parse a plugin configuration string. 2 | """ 3 | 4 | import argparse 5 | import re 6 | 7 | from beancount.core import data 8 | 9 | 10 | class ParseError(Exception): 11 | def __init__(self, source, message): 12 | self.source = source 13 | self.message = message 14 | self.entry = None 15 | 16 | 17 | class ArgumentParser(argparse.ArgumentParser): 18 | def __init__(self, *args, **kwargs): 19 | entries_filename = kwargs.pop('entries_filename', '') 20 | self.source = data.new_metadata(entries_filename, 0) 21 | super(ArgumentParser, self).__init__(*args, **kwargs) 22 | 23 | def error(self, message): 24 | full_message = '{}\n\n{}'.format(message, self.format_help()) 25 | raise ParseError(self.source, full_message) 26 | 27 | def exit(self, status=0, message=None): 28 | self.error(message) 29 | 30 | 31 | def re_type(string): 32 | """Argument type for regular expressions. 33 | 34 | It returns a compiled regular expression if string is not empty; 35 | None, otherwise. It raises argparse.ArgumentTypeError if the 36 | string is not a valid regular expression. 37 | """ 38 | if string: 39 | try: 40 | string_re = re.compile(string) 41 | except re.error: 42 | msg = "invalid regular expression: '{}'".format(string) 43 | raise argparse.ArgumentTypeError(msg) 44 | return string_re 45 | -------------------------------------------------------------------------------- /beansoup/plugins/deposit_in_transit.py: -------------------------------------------------------------------------------- 1 | """Work in progress. It works, but needs documentation and some cleaning. 2 | 3 | A plugin that automatically ties split deposit-in-transit transactions. 4 | 5 | usage: beansoup.plugins.deposit_in_transit [--dit_component NAME] 6 | [--auto_open] [--same_day_merge] 7 | [--flag_pending] 8 | [--cleared_tag TAG] 9 | [--pending_tag TAG] 10 | [--ignored_tag TAG] 11 | [--link_prefix PREFIX] 12 | [--skip_re REGEX] 13 | 14 | optional arguments: 15 | --dit_component NAME use NAME as the component name distinguishing deposit- 16 | in-transit accounts (default: DIT) 17 | --auto_open automatically open deposit-in-transit accounts 18 | (default: False) 19 | --same_day_merge merge same-day transactions with matching deposit-in- 20 | transit postings (default: False) 21 | --flag_pending annotate pending transactions with a ! 
flag (default: 22 | False) 23 | --cleared_tag TAG tag cleared transactions with TAG (default: DEPOSITED) 24 | --pending_tag TAG tag pending transactions with TAG (default: IN- 25 | TRANSIT) 26 | --ignored_tag TAG ignore transactions that have a TAG tag (default: 27 | IGNORED) 28 | --link_prefix PREFIX link pairs of cleared transactions with PREFIX string 29 | followed by increasing count; otherwise it uses UUIDs 30 | (default: None) 31 | --skip_re REGEX disable plugin if REGEX matches any sys.argv (default: 32 | None) 33 | """ 34 | 35 | import argparse 36 | import collections 37 | import itertools 38 | import sys 39 | 40 | from beancount.core import data, flags, getters 41 | from beancount.core.account import has_component 42 | 43 | from beansoup.plugins import config 44 | from beansoup.utils import links 45 | 46 | __plugins__ = ('plugin',) 47 | 48 | 49 | DITError = collections.namedtuple('DITError', 'source message entry') 50 | 51 | 52 | def plugin(entries, options_map, config_string): 53 | # Parse plugin config; report errors if any 54 | parser = config.ArgumentParser( 55 | prog=__name__, 56 | description='A plugin that automatically ties split deposit-in-transit transactions.', 57 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 58 | add_help=False, 59 | entries_filename=options_map['filename']) 60 | parser.add_argument( 61 | '--dit_component', metavar='NAME', default='DIT', 62 | help='use %(metavar)s as the component name distinguishing deposit-in-transit accounts') 63 | parser.add_argument( 64 | '--auto_open', action='store_true', default=False, 65 | help='automatically open deposit-in-transit accounts') 66 | parser.add_argument( 67 | '--same_day_merge', action='store_true', default=False, 68 | help='merge same-day transactions with matching deposit-in-transit postings') 69 | parser.add_argument( 70 | '--flag_pending', action='store_true', default=False, 71 | help='annotate pending transactions with a {} flag'.format(flags.FLAG_WARNING)) 72 | parser.add_argument( 73 | '--cleared_tag', metavar='TAG', default='DEPOSITED', 74 | help='tag cleared transactions with %(metavar)s') 75 | parser.add_argument( 76 | '--pending_tag', metavar='TAG', default='IN-TRANSIT', 77 | help='tag pending transactions with %(metavar)s') 78 | parser.add_argument( 79 | '--ignored_tag', metavar='TAG', default='IGNORED', 80 | help='ignore transactions that have a %(metavar)s tag') 81 | parser.add_argument( 82 | '--link_prefix', metavar='PREFIX', default=None, 83 | help='link pairs of cleared transactions with %(metavar)s string followed by increasing count; otherwise it uses UUIDs') 84 | parser.add_argument( 85 | '--skip_re', metavar='REGEX', default=None, type=config.re_type, 86 | help='disable plugin if %(metavar)s matches any sys.argv') 87 | 88 | try: 89 | args = parser.parse_args((config_string or '').split()) 90 | except config.ParseError as error: 91 | return entries, [error] 92 | 93 | # If the plugin was called with the --skip_re option and the given 94 | # regular expression matches any of the arguments in sys.argv, 95 | # do not run the plugin and return the original entries instead. 96 | if args.skip_re and any(args.skip_re.match(arg) for arg in sys.argv): 97 | return entries, [] 98 | 99 | unchanged_entries, new_entries, errors = process_entries(entries, args) 100 | 101 | # FIXME: Consider printing the pending entries. Maybe return errors for them. 
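    # The connecting entries created by process_pair() can carry new dates and
    # metadata, so the unchanged and new entries are merged and re-sorted below
    # before being returned.
    # For illustration, a ledger would typically enable this plugin with a
    # directive such as:
    #   plugin "beansoup.plugins.deposit_in_transit" "--auto_open --same_day_merge"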
102 | 103 | return sorted(unchanged_entries + new_entries, key=data.entry_sortkey), errors 104 | 105 | 106 | def process_entries(entries, args): 107 | new_entries = [] 108 | 109 | if args.auto_open: 110 | new_entries.extend(open_dit_accounts(entries, args.dit_component)) 111 | 112 | # Find all DIT transactions to be processed; their original entries 113 | # will be replaced by new ones 114 | dits, unchanged_entries, errors = split_entries( 115 | entries, 116 | dit_component=args.dit_component, 117 | ignored_tag=args.ignored_tag) 118 | 119 | pairs, singletons, pairing_errors = pair_dits( 120 | dits, dit_component=args.dit_component) 121 | errors.extend(pairing_errors) 122 | 123 | cleared_links = links.count(args.link_prefix) 124 | for pair in pairs: 125 | new_entries.extend(process_pair( 126 | pair, 127 | cleared_tag=args.cleared_tag, 128 | cleared_links=cleared_links, 129 | same_day_merge=args.same_day_merge)) 130 | 131 | new_entries.extend( 132 | [process_singleton(singleton, 133 | flag_pending=args.flag_pending, 134 | pending_tag=args.pending_tag) for singleton in singletons]) 135 | 136 | return unchanged_entries, new_entries, errors 137 | 138 | 139 | def open_dit_accounts(entries, dit_component): 140 | """ 141 | Minimally adapted from beancount.plugins.auto_accounts. 142 | """ 143 | opened_accounts = {entry.account 144 | for entry in entries 145 | if isinstance(entry, data.Open)} 146 | 147 | new_entries = [] 148 | accounts_first, _ = getters.get_accounts_use_map(entries) 149 | for index, (account, date_first_used) in enumerate(sorted(accounts_first.items())): 150 | if ((account not in opened_accounts) and 151 | has_component(account, dit_component)): 152 | meta = data.new_metadata(__name__, index) 153 | new_entry = data.Open(meta, date_first_used, account, None, None) 154 | new_entries.append(new_entry) 155 | 156 | return new_entries 157 | 158 | 159 | def split_entries(entries, dit_component, ignored_tag): 160 | dits, unchanged_entries, errors = [], [], [] 161 | for entry in entries: 162 | if (isinstance(entry, data.Transaction) and 163 | not (entry.tags and ignored_tag in entry.tags)): 164 | dit_postings = [posting for posting in entry.postings if has_component(posting.account, dit_component)] 165 | num_dit_postings = len(dit_postings) 166 | else: 167 | num_dit_postings = 0 168 | if num_dit_postings == 0: 169 | unchanged_entries.append(entry) 170 | else: 171 | dits.append(data.TxnPosting(entry, dit_postings[0])) 172 | if num_dit_postings > 1: 173 | errors.append(DITError( 174 | entry.meta, 175 | "(deposit_in_transit) Found entry with multiple postings to DIT accounts; " 176 | "only processing posting to {} account".format( 177 | dit_postings[0].account), 178 | entry)) 179 | return dits, unchanged_entries, errors 180 | 181 | 182 | def pair_dits(dits, dit_component): 183 | # A map from amounts to all DIT postings (as a TxnPosting) sharing 184 | # that amount 185 | units_map = collections.defaultdict(list) 186 | for dit in dits: 187 | units_map[dit.posting.units].append(dit) 188 | 189 | pairs, singletons, errors = [], [], [] 190 | skip_ids = set() 191 | for dit in dits: 192 | if id(dit.txn) in skip_ids: 193 | continue 194 | units_map[dit.posting.units].remove(dit) 195 | dit2 = match_dit(dit, units_map.get(-dit.posting.units), dit_component) 196 | if dit2: 197 | # Found matching DIT transaction 198 | pairs.append((dit, dit2)) 199 | skip_ids |= {id(dit2.txn)} 200 | units_map[dit2.posting.units].remove(dit2) 201 | else: 202 | singletons.append(dit) 203 | 204 | return pairs, singletons, 
errors 205 | 206 | 207 | def match_dit(dit, candidate_dits, dit_component): 208 | # FIXME: check DIT- and base-accounts match 209 | return candidate_dits[0] if candidate_dits else None 210 | 211 | 212 | def process_pair(pair, cleared_tag, cleared_links, same_day_merge): 213 | def tag_and_link(entry, cleared_link): 214 | tags = (entry.tags or set()) | {cleared_tag} 215 | links = (entry.links or set()) | {cleared_link} 216 | return entry._replace(tags=tags, links=links) 217 | 218 | def xform_posting(posting): 219 | return data.Posting(posting.account, 220 | -posting.units, 221 | None, None, None, None) 222 | 223 | # The first in the pair should be the sender; the second, the receiver. 224 | if pair[0].posting.units < pair[1].posting.units: 225 | pair = (pair[1], pair[0]) 226 | 227 | date = max(pair[0].txn.date, pair[1].txn.date) 228 | if pair[0].txn.narration == pair[1].txn.narration: 229 | narration = pair[0].txn.narration 230 | else: 231 | narration = '{} / {}'.format(pair[0].txn.narration, pair[1].txn.narration) 232 | if pair[0].txn.payee is None: 233 | payee = pair[1].txn.payee 234 | elif pair[1].txn.payee is None: 235 | payee = pair[0].txn.payee 236 | elif pair[0].txn.payee == pair[1].txn.payee: 237 | payee = pair[0].txn.payee 238 | else: 239 | payee = '{} / {}'.format(pair[0].txn.payee, pair[1].txn.payee) 240 | 241 | if same_day_merge and is_pair_mergeable(pair): 242 | # Merge the two transactions 243 | meta = pair[0].txn.meta 244 | flag = pair[0].txn.flag 245 | tags = ((pair[0].txn.tags or set()) | 246 | (pair[1].txn.tags or set()) | 247 | {cleared_tag}) 248 | links = ((pair[0].txn.links or set()) | 249 | (pair[1].txn.links or set())) or data.EMPTY_SET 250 | postings = ([posting for posting in pair[0].txn.postings if posting is not pair[0].posting] + 251 | [posting for posting in pair[1].txn.postings if posting is not pair[1].posting]) 252 | new_entry = data.Transaction( 253 | meta, 254 | date, 255 | flag, 256 | payee, 257 | narration, 258 | tags, 259 | links, 260 | postings) 261 | return (new_entry, ) 262 | 263 | # Make sure the connecting entry will be shown between the two existing 264 | # ones when looking at the list of entries for their common link 265 | lineno = int((pair[0].txn.meta.get('lineno', 0) + 266 | pair[1].txn.meta.get('lineno', 0)) / 2) 267 | meta = data.new_metadata(__name__, lineno) 268 | cleared_link = next(cleared_links) 269 | new_entry = data.Transaction( 270 | meta, 271 | date, 272 | flags.FLAG_OKAY, 273 | payee, 274 | narration, 275 | {cleared_tag}, 276 | {cleared_link}, 277 | [xform_posting(pair[0].posting), xform_posting(pair[1].posting)]) 278 | 279 | return (tag_and_link(pair[0].txn, cleared_link), 280 | tag_and_link(pair[1].txn, cleared_link), 281 | new_entry) 282 | 283 | 284 | def is_pair_mergeable(pair): 285 | if pair[0].txn.flag != pair[1].txn.flag: 286 | return False 287 | 288 | if pair[0].txn.date != pair[1].txn.date: 289 | return False 290 | 291 | if (pair[0].posting.cost or pair[0].posting.price or 292 | pair[1].posting.cost or pair[1].posting.price): 293 | return False 294 | 295 | return True 296 | 297 | 298 | def process_singleton(singleton, flag_pending, pending_tag): 299 | entry = singleton.txn 300 | flag = flags.FLAG_WARNING if flag_pending else entry.flag 301 | tags = (entry.tags or set()) | {pending_tag} 302 | return entry._replace(flag=flag, tags=tags) 303 | -------------------------------------------------------------------------------- /beansoup/transactions.py: 
-------------------------------------------------------------------------------- 1 | """Utilities to work with beancount.core.data.Transaction objects.""" 2 | 3 | import datetime 4 | import itertools 5 | from os import path 6 | 7 | from beancount.core import data, flags, number 8 | 9 | 10 | class TransactionCompleter: 11 | """A class capable of completing partial transactions. 12 | 13 | Importers typically generate incomplete transactions with a single 14 | posting to the main account related to the imported data. This class 15 | attempts to complete those transaction by adding a second posting to 16 | an account chosen based on the existing transaction history for the 17 | main account. 18 | 19 | It looks for existing transactions that have exactly two postings 20 | and where one of the two postings is to the main account. It scores 21 | each of these transactions based on the similarity of its payee and 22 | narration fields to the narration field of the incomplete 23 | transaction and selects the one with the highest score as a model to 24 | fill in the missing posting of the incomplete transaction. Equal 25 | scores are broken by selecting the most recent transaction. 26 | """ 27 | 28 | def __init__(self, existing_entries, account, min_score=0.5, max_age=None, 29 | interpolated=False): 30 | """Initialization. 31 | 32 | Args: 33 | existing_entries: The existing entries, ordered by increasing date. 34 | account: The main account of the incomplete transactions 35 | (i.e. the account of their only posting). 36 | min_score: The minimum score an existing transaction must 37 | have to be used as a model for an incomplete transaction. 38 | max_age: A datetime.timedelta object giving the maximum age 39 | (measure from datetime.date.today()) a transaction can 40 | have in order to be used as a model to fill in an incomplete 41 | transaction. 42 | interpolated: If True, the missing posting will include an 43 | interpolated amount; otherwise, the amount will be left blank. 44 | """ 45 | def is_model(entry): 46 | """A predicate asking whether an entry can be used as a model. 47 | 48 | An entry can be considered a model for incomplete transactions 49 | if it is a transaction with exactly two postings and it 50 | involves the main account. 51 | """ 52 | return (isinstance(entry, data.Transaction) and 53 | len(entry.postings) == 2 and 54 | any(posting.account == account for posting in entry.postings)) 55 | 56 | if max_age: 57 | min_date = datetime.date.today() - max_age 58 | entries = itertools.takewhile(lambda entry: entry.date >= min_date, 59 | reversed(existing_entries or [])) 60 | else: 61 | entries = existing_entries or [] 62 | self.model_txns = [entry for entry in entries if is_model(entry)] 63 | self.account = account 64 | self.min_score = min_score 65 | self.interpolated = interpolated 66 | 67 | def __call__(self, entries): 68 | """Same as `complete_entries` method.""" 69 | return self.complete_entries(entries) 70 | 71 | def complete_entries(self, entries): 72 | """Complete the given entries. 73 | 74 | Only transactions with a single posting to the account bound to the 75 | completer may be modified. 76 | 77 | Args: 78 | entries: The entries to be completed. 79 | Returns: 80 | A list of completed entries 81 | """ 82 | for entry in entries: 83 | self.complete_entry(entry) 84 | return entries 85 | 86 | def complete_entry(self, entry): 87 | """Complete the given entry. 
88 | 89 | This method attempts to complete the entry only if it is a transaction 90 | with a single posting to the account bound to the completer. 91 | The entry will be completed only if a suitable model transaction can 92 | be found. 93 | If multiple model transactions are found that balance the transaction 94 | against different account, the missing posting will be flagged for 95 | review. 96 | 97 | Args: 98 | entry: The entry to be completed. 99 | Returns: True is the entry was completed; False, otherwise. 100 | """ 101 | if (isinstance(entry, data.Transaction) and 102 | len(entry.postings) == 1 and 103 | entry.postings[0].account == self.account): 104 | model_txn, model_accounts = self.find_best_model(entry) 105 | if model_txn: 106 | # If past transactions similar to this one were posted against 107 | # different accounts, flag the posting in the new entry. 108 | flag = flags.FLAG_WARNING if len(model_accounts) > 1 else None 109 | # Add the missing posting to balance the transaction 110 | for posting in model_txn.postings: 111 | if posting.account != self.account: 112 | units = -entry.postings[0].units if self.interpolated else None 113 | missing_posting = data.Posting( 114 | posting.account, units, None, None, flag, None) 115 | entry.postings.append(missing_posting) 116 | return True 117 | return False 118 | 119 | def find_best_model(self, txn): 120 | """Return the best model for the given incomplete transaction. 121 | 122 | Args: 123 | txn: A beancount.core.data.Transaction object; 124 | an incomplete transaction with a single posting. 125 | Returns: 126 | A pair of a beancount.core.data.Transaction object and a set of 127 | account strings; the first part is the model transaction or 128 | None, if no suitable model could be found; the second part is a 129 | set of the different accounts used by top-scoring transaction to 130 | balance the posting to the target account. 131 | """ 132 | scored_model_txns = [(self.score_model(model_txn, txn), model_txn) 133 | for model_txn in self.model_txns] 134 | # Discard low-score transactions 135 | scored_model_txns = [(score, model_txn) for score, model_txn in scored_model_txns if score >= self.min_score] 136 | if scored_model_txns: 137 | # Sort the scored transaction by descending score and date. 138 | # The first transaction in the sorted list is the best model. 139 | scored_model_txns = sorted(scored_model_txns, 140 | key=lambda p: (p[0], p[1].date), 141 | reverse=True) 142 | # Look at the other top-scoring transaction and count how many 143 | # different accounts they post to; if they post to more than one 144 | # account (other than the target account), the model is ambiguous. 145 | best_score, best_model_txn = scored_model_txns[0] 146 | top_model_txns = itertools.takewhile(lambda p: p[0] == best_score, 147 | scored_model_txns) 148 | accounts = set([posting.account for _, txn in top_model_txns for posting in txn.postings if posting.account != self.account]) 149 | return (best_model_txn, accounts) 150 | return (None, set()) 151 | 152 | def score_model(self, model_txn, txn): 153 | """Score an existing transaction for its ability to provide a model 154 | for an incomplete transaction. 155 | 156 | Args: 157 | model_txn: The transaction to be scored. 158 | txn: The incomplete transaction. 159 | Returns: 160 | A float number representing the score, normalized in [0,1]. 
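        Example:
            Illustrative sketch of the arithmetic only, using made-up
            descriptions; the score is the length of the common prefix of the
            two descriptions divided by the length of the incomplete
            transaction's description.

            >>> from os import path
            >>> len(path.commonprefix(['PAYMENT THANK YOU', 'PAYMENT RECEIVED'])) / len('PAYMENT RECEIVED')
            0.5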
161 | """ 162 | def get_description(txn): 163 | return ('{} {}'.format(txn.payee or '', txn.narration or '')).strip() 164 | 165 | # If the target transaction does not have a description, there is 166 | # nothing we can do 167 | txn_description = get_description(txn) 168 | n_max = len(txn_description) 169 | if n_max > 1: 170 | # Only consider model transactions whose posting to the target 171 | # account has the same sign as the transaction to be completed 172 | posting = [p for p in model_txn.postings if p.account == self.account][0] 173 | if number.same_sign(posting.units.number, txn.postings[0].units.number): 174 | model_txn_description = get_description(model_txn) 175 | n_match = len(path.commonprefix( 176 | [model_txn_description, txn_description])) 177 | score = float(n_match) / float(n_max) 178 | return score 179 | return 0 180 | -------------------------------------------------------------------------------- /beansoup/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fxtlabs/beansoup/678a74fe558a4ed14fd6626a6d3b74ebaa68558a/beansoup/utils/__init__.py -------------------------------------------------------------------------------- /beansoup/utils/dates.py: -------------------------------------------------------------------------------- 1 | """Utilities for working with dates. 2 | 3 | Attributes: 4 | MONTHS (Dict[str, int]): a map from month names to 5 | their ordinal values, starting at 1. The names are lowercase and 6 | can be full names, three-letter abbreviations, or one- or two-digit 7 | representations. 8 | """ 9 | 10 | import calendar 11 | import datetime 12 | import itertools 13 | 14 | 15 | MONTHS = dict((name.lower(), i) for i, name in itertools.chain( 16 | enumerate(calendar.month_abbr[1:], start=1), 17 | enumerate(calendar.month_name[1:], start=1), 18 | enumerate(['{0:1d}'.format(i) for i in range(1, 13)], start=1), 19 | enumerate(['{0:02d}'.format(i) for i in range(1, 13)], start=1))) 20 | 21 | 22 | def month_number(month): 23 | """Turns a month name into its corresponding month number. 24 | 25 | It recognizes full and abbreviated (three letters) English month names 26 | (case insensitive) as well as month number with or without a leading 0. 27 | 28 | Args: 29 | month (str): The name of a month or its three-letter abbreviation or 30 | its numerical equivalent. 31 | 32 | Returns: 33 | Optional[int]: The number in [1,12] corresponding to the given month name, 34 | or None if it does not recognize the given name. 35 | """ 36 | return MONTHS.get(month.lower()) if isinstance(month, str) else None 37 | 38 | 39 | def add_biz_days(date, num_biz_days): 40 | """Add a number of business days to a date. 41 | 42 | If the starting date falls on a weekend, it is moved to the next business 43 | day before adding the delta. 44 | 45 | Args: 46 | date (datetime.date): The starting date. 47 | num_biz_days (int): The number of business days to add to the starting date; 48 | it must be non-negative. 49 | 50 | Returns: 51 | datetime.date: the offset date. 
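    Example:
        Illustrative values: 2016-05-06 falls on a Friday and 2016-05-07 on a
        Saturday.

        >>> import datetime
        >>> add_biz_days(datetime.date(2016, 5, 6), 1)
        datetime.date(2016, 5, 9)
        >>> add_biz_days(datetime.date(2016, 5, 7), 2)
        datetime.date(2016, 5, 11)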
52 | """ 53 | assert num_biz_days >= 0, 'Invalid num_biz_days value ({}): must be non-negative'.format(num_biz_days) 54 | 55 | # Break the delta into a number of full weeks and a remainder 56 | num_weeks, num_biz_days_left = divmod(num_biz_days, 5) 57 | num_days = num_weeks * 7 + num_biz_days_left 58 | weekday = date.weekday() # Monday is weekday 0 59 | # If the starting date falls on a weekend, move it to the next business day 60 | if weekday >= 5: 61 | num_days += 7 - weekday 62 | weekday = 0 63 | # If the number of business days left in the delta spans a weekend, add 64 | # that in a well 65 | if weekday + num_biz_days_left >= 5: 66 | num_days += 2 67 | return date + datetime.timedelta(days=num_days) 68 | -------------------------------------------------------------------------------- /beansoup/utils/links.py: -------------------------------------------------------------------------------- 1 | """Utilities for working with links.""" 2 | 3 | import uuid 4 | 5 | 6 | def count(link_prefix=None, start=1): 7 | """A generator of unique link names. 8 | 9 | Args: 10 | link_prefix (Optional[str]): If a string, link names will be of 11 | the form link_prefix-#; otherwise, they will be UUIDs. 12 | start (int): The start of the number sequence when used with 13 | a link prefix. 14 | 15 | Yields: 16 | str: the next link name in the sequence. 17 | """ 18 | if link_prefix: 19 | num = start 20 | while True: 21 | yield '{}-{}'.format(link_prefix, num) 22 | num += 1 23 | else: 24 | while True: 25 | yield str(uuid.uuid4()) 26 | -------------------------------------------------------------------------------- /beansoup/utils/periods.py: -------------------------------------------------------------------------------- 1 | """Utilities to work with monthly billing periods.""" 2 | 3 | import calendar 4 | import datetime 5 | 6 | 7 | def enclose_date(date, first_day=1): 8 | """Compute the monthly period containing the given date. 9 | 10 | Args: 11 | date (datetime.date): The date to be contained. 12 | first_day (int): The first day of the monthly cycle. It must fall 13 | in the interval [1,28]. 14 | 15 | Returns: 16 | Tuple[datetime.date, datetime.date]: The start and end dates (inclusives) of the 17 | monthly period containing the given date. 18 | """ 19 | start = greatest_start(date, first_day=first_day) 20 | _, length = calendar.monthrange(start.year, start.month) 21 | return start, start + datetime.timedelta(days=length-1) 22 | 23 | 24 | def greatest_start(date, first_day=1): 25 | """Compute the starting date of the monthly period containing the given date. 26 | 27 | More formally, given a monthly cycle starting on `first_day` day of the month, 28 | it computes the greatest starting date that is less than or equal to the given 29 | `date`. 30 | 31 | Args: 32 | date (datetime.date): An arbitrary date. 33 | first_day (int): The first day of the monthly cycle. It must fall 34 | in the interval [1,28]. 35 | 36 | Returns: 37 | datetime.date: The starting date of the monthly period containing the given date. 38 | """ 39 | assert 0 < first_day < 29, "Invalid 'first_day' value {}: first day of monthly cycle must be in [1,28]".format(first_day) 40 | 41 | if date.day >= first_day: 42 | year, month = date.year, date.month 43 | elif date.month > 1: 44 | year, month = date.year, date.month - 1 45 | else: 46 | year, month = date.year - 1, 12 47 | return datetime.date(year, month, first_day) 48 | 49 | 50 | def lowest_end(date, first_day=1): 51 | """Compute the ending date of the monthly period containing the given date. 
52 | 53 | More formally, given a monthly cycle starting on `first_day` day of the month, 54 | it computes the lowest ending date that is greater than or equal to the given 55 | `date`. Note that the ending date is inclusive, i.e. it is included in the 56 | monthly period. 57 | 58 | Args: 59 | date (datetime.date): An arbitrary date. 60 | first_day (int): The first day of the monthly cycle. It must fall 61 | in the interval [1,28]. 62 | 63 | Returns: 64 | datetime.date: The ending date of the monthly period containing the given date. 65 | """ 66 | start = greatest_start(date, first_day=first_day) 67 | _, length = calendar.monthrange(start.year, start.month) 68 | return start + datetime.timedelta(days=length-1) 69 | 70 | 71 | def next(date): 72 | """Add one month to the given date. 73 | 74 | Args: 75 | date (datetime.date): The starting date. 76 | 77 | Returns: 78 | datetime.date: One month after the starting date unless the starting 79 | date falls on a day that is not in the next month; in that case, it 80 | returns the last day of the next month. 81 | 82 | Example: 83 | 84 | >>> import datetime 85 | >>> next(datetime.date(2016, 1, 31)) 86 | datetime.date(2016, 2, 29) 87 | 88 | """ 89 | year, month = (date.year, date.month + 1) if date.month < 12 else (date.year + 1, 1) 90 | _, length = calendar.monthrange(year, month) 91 | day = min(length, date.day) 92 | return datetime.date(year, month, day) 93 | 94 | 95 | def prev(date): 96 | """Subtract one month from the given date. 97 | 98 | Args: 99 | date (datetime.date): The starting date. 100 | 101 | Returns: 102 | datetime.date: One month before the starting date unless the starting 103 | date falls on a day that is not in the previous month; in that case, it 104 | returns the last day of the previous month. 105 | 106 | Example: 107 | 108 | >>> import datetime 109 | >>> prev(datetime.date(2016, 3, 31)) 110 | datetime.date(2016, 2, 29) 111 | 112 | """ 113 | year, month = (date.year, date.month - 1) if date.month > 1 else (date.year - 1, 12) 114 | _, length = calendar.monthrange(year, month) 115 | day = min(length, date.day) 116 | return datetime.date(year, month, day) 117 | 118 | 119 | def count(date, reverse=False): 120 | """A generator of monthly-spaced dates. 121 | 122 | It enumerates monthly-spaced dates, starting at the given `date`. 123 | If the starting date falls on a day that is not in a given month, the date for 124 | that month will be the last day of that month. 125 | 126 | Args: 127 | date (datetime.date): The starting date. 128 | reverse (bool): If True, it generates dates in reverse chronological order. 129 | 130 | Yields: 131 | datetime.date: the next date in the sequence. 
132 | 133 | Example: 134 | >>> import datetime 135 | >>> import itertools 136 | >>> start = datetime.date(2016, 1, 31) 137 | >>> [date.isoformat() for date in itertools.islice(count(start), 5)] 138 | ['2016-01-31', '2016-02-29', '2016-03-31', '2016-04-30', '2016-05-31'] 139 | 140 | """ 141 | preferred_day = date.day 142 | if reverse: 143 | while True: 144 | yield date 145 | year, month = (date.year, date.month - 1) if date.month > 1 else \ 146 | (date.year - 1, 12) 147 | _, length = calendar.monthrange(year, month) 148 | day = min(length, preferred_day) 149 | date = datetime.date(year, month, day) 150 | else: 151 | while True: 152 | yield date 153 | year, month = (date.year, date.month + 1) if date.month < 12 else \ 154 | (date.year + 1, 1) 155 | _, length = calendar.monthrange(year, month) 156 | day = min(length, preferred_day) 157 | date = datetime.date(year, month, day) 158 | -------------------------------------------------------------------------------- /beansoup/utils/testing.py: -------------------------------------------------------------------------------- 1 | """Utilities to facilitate testing.""" 2 | 3 | import functools 4 | import tempfile 5 | import textwrap 6 | 7 | from beancount.ingest import importer 8 | 9 | 10 | def docfile(*args, **kwargs): 11 | """A decorator that creates a temporary file from the function's docstring. 12 | 13 | This is actually a decorator builder that returns a decorator that 14 | writes the function's docstring to a temporary file and calls the 15 | decorated function with the temporary filename. This is 16 | useful for writing tests. 17 | 18 | Args: 19 | Any argument accepted by `tempfile.NamedTemporaryFile`. 20 | Returns: 21 | A decorator. 22 | """ 23 | def _docfile(function): 24 | @functools.wraps(function) 25 | def new_function(self): 26 | with tempfile.NamedTemporaryFile(*args, **kwargs) as f: 27 | f.write(textwrap.dedent(function.__doc__)) 28 | f.flush() 29 | return function(self, f.name) 30 | new_function.__doc__ = None 31 | return new_function 32 | return _docfile 33 | 34 | 35 | class ConstImporter(importer.ImporterProtocol): 36 | """ 37 | A helper importer whose extract method simply returns the entries 38 | passed to its constructor. 39 | """ 40 | def __init__(self, entries, account): 41 | self.entries = entries 42 | self.account = account 43 | 44 | def identify(self, _): 45 | return True 46 | 47 | def file_account(self, _): 48 | return self.account 49 | 50 | def file_date(self, _): 51 | if self.entries: 52 | return max([entry.date for entry in self.entries]) 53 | 54 | def extract(self, _): 55 | return self.entries 56 | -------------------------------------------------------------------------------- /beansoup/version.py: -------------------------------------------------------------------------------- 1 | """Project version.""" 2 | 3 | # Version Identification complient with PEP 440 4 | # https://www.python.org/dev/peps/pep-0440/ 5 | 6 | __version__ = '1.0a6' 7 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. 
Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help 23 | help: 24 | @echo "Please use \`make ' where is one of" 25 | @echo " html to make standalone HTML files" 26 | @echo " dirhtml to make HTML files named index.html in directories" 27 | @echo " singlehtml to make a single large HTML file" 28 | @echo " pickle to make pickle files" 29 | @echo " json to make JSON files" 30 | @echo " htmlhelp to make HTML files and a HTML help project" 31 | @echo " qthelp to make HTML files and a qthelp project" 32 | @echo " applehelp to make an Apple Help Book" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " epub3 to make an epub3" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | @echo " dummy to check syntax errors of document sources" 51 | 52 | .PHONY: clean 53 | clean: 54 | rm -rf $(BUILDDIR)/* 55 | 56 | .PHONY: html 57 | html: 58 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 61 | 62 | .PHONY: dirhtml 63 | dirhtml: 64 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 65 | @echo 66 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 67 | 68 | .PHONY: singlehtml 69 | singlehtml: 70 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 71 | @echo 72 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 73 | 74 | .PHONY: pickle 75 | pickle: 76 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 77 | @echo 78 | @echo "Build finished; now you can process the pickle files." 79 | 80 | .PHONY: json 81 | json: 82 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 83 | @echo 84 | @echo "Build finished; now you can process the JSON files." 85 | 86 | .PHONY: htmlhelp 87 | htmlhelp: 88 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 89 | @echo 90 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 91 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
92 | 93 | .PHONY: qthelp 94 | qthelp: 95 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 96 | @echo 97 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 98 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 99 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/beansoup.qhcp" 100 | @echo "To view the help file:" 101 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/beansoup.qhc" 102 | 103 | .PHONY: applehelp 104 | applehelp: 105 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 106 | @echo 107 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 108 | @echo "N.B. You won't be able to view it unless you put it in" \ 109 | "~/Library/Documentation/Help or install it in your application" \ 110 | "bundle." 111 | 112 | .PHONY: devhelp 113 | devhelp: 114 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 115 | @echo 116 | @echo "Build finished." 117 | @echo "To view the help file:" 118 | @echo "# mkdir -p $$HOME/.local/share/devhelp/beansoup" 119 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/beansoup" 120 | @echo "# devhelp" 121 | 122 | .PHONY: epub 123 | epub: 124 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 125 | @echo 126 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 127 | 128 | .PHONY: epub3 129 | epub3: 130 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 131 | @echo 132 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 133 | 134 | .PHONY: latex 135 | latex: 136 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 137 | @echo 138 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 139 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 140 | "(use \`make latexpdf' here to do that automatically)." 141 | 142 | .PHONY: latexpdf 143 | latexpdf: 144 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 145 | @echo "Running LaTeX files through pdflatex..." 146 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 147 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 148 | 149 | .PHONY: latexpdfja 150 | latexpdfja: 151 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 152 | @echo "Running LaTeX files through platex and dvipdfmx..." 153 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 154 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 155 | 156 | .PHONY: text 157 | text: 158 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 159 | @echo 160 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 161 | 162 | .PHONY: man 163 | man: 164 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 165 | @echo 166 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 167 | 168 | .PHONY: texinfo 169 | texinfo: 170 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 171 | @echo 172 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 173 | @echo "Run \`make' in that directory to run these through makeinfo" \ 174 | "(use \`make info' here to do that automatically)." 175 | 176 | .PHONY: info 177 | info: 178 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 179 | @echo "Running Texinfo files through makeinfo..." 180 | make -C $(BUILDDIR)/texinfo info 181 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 182 | 183 | .PHONY: gettext 184 | gettext: 185 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 186 | @echo 187 | @echo "Build finished. 
The message catalogs are in $(BUILDDIR)/locale." 188 | 189 | .PHONY: changes 190 | changes: 191 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 192 | @echo 193 | @echo "The overview file is in $(BUILDDIR)/changes." 194 | 195 | .PHONY: linkcheck 196 | linkcheck: 197 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 198 | @echo 199 | @echo "Link check complete; look for any errors in the above output " \ 200 | "or in $(BUILDDIR)/linkcheck/output.txt." 201 | 202 | .PHONY: doctest 203 | doctest: 204 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 205 | @echo "Testing of doctests in the sources finished, look at the " \ 206 | "results in $(BUILDDIR)/doctest/output.txt." 207 | 208 | .PHONY: coverage 209 | coverage: 210 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 211 | @echo "Testing of coverage in the sources finished, look at the " \ 212 | "results in $(BUILDDIR)/coverage/python.txt." 213 | 214 | .PHONY: xml 215 | xml: 216 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 217 | @echo 218 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 219 | 220 | .PHONY: pseudoxml 221 | pseudoxml: 222 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 223 | @echo 224 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 225 | 226 | .PHONY: dummy 227 | dummy: 228 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 229 | @echo 230 | @echo "Build finished. Dummy builder generates no files." 231 | -------------------------------------------------------------------------------- /docs/beansoup.importers.rst: -------------------------------------------------------------------------------- 1 | beansoup.importers package 2 | ========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | beansoup.importers.amex module 8 | ------------------------------ 9 | 10 | .. automodule:: beansoup.importers.amex 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | beansoup.importers.csv module 16 | ----------------------------- 17 | 18 | .. automodule:: beansoup.importers.csv 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | beansoup.importers.filing module 24 | -------------------------------- 25 | 26 | .. automodule:: beansoup.importers.filing 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | beansoup.importers.mixins module 32 | -------------------------------- 33 | 34 | .. automodule:: beansoup.importers.mixins 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | beansoup.importers.td module 40 | ---------------------------- 41 | 42 | .. automodule:: beansoup.importers.td 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: beansoup.importers 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /docs/beansoup.plugins.rst: -------------------------------------------------------------------------------- 1 | beansoup.plugins package 2 | ======================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | beansoup.plugins.clear_transactions module 8 | ------------------------------------------ 9 | 10 | .. automodule:: beansoup.plugins.clear_transactions 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | beansoup.plugins.config module 16 | ------------------------------ 17 | 18 | .. 
automodule:: beansoup.plugins.config 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | beansoup.plugins.deposit_in_transit module 24 | ------------------------------------------ 25 | 26 | .. automodule:: beansoup.plugins.deposit_in_transit 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: beansoup.plugins 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/beansoup.rst: -------------------------------------------------------------------------------- 1 | beansoup package 2 | ================ 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | beansoup.importers 10 | beansoup.plugins 11 | beansoup.utils 12 | 13 | Submodules 14 | ---------- 15 | 16 | beansoup.transactions module 17 | ---------------------------- 18 | 19 | .. automodule:: beansoup.transactions 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | 24 | beansoup.version module 25 | ----------------------- 26 | 27 | .. automodule:: beansoup.version 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | 32 | 33 | Module contents 34 | --------------- 35 | 36 | .. automodule:: beansoup 37 | :members: 38 | :undoc-members: 39 | :show-inheritance: 40 | -------------------------------------------------------------------------------- /docs/beansoup.utils.rst: -------------------------------------------------------------------------------- 1 | beansoup.utils package 2 | ====================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | beansoup.utils.dates module 8 | --------------------------- 9 | 10 | .. automodule:: beansoup.utils.dates 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | beansoup.utils.links module 16 | --------------------------- 17 | 18 | .. automodule:: beansoup.utils.links 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | beansoup.utils.periods module 24 | ----------------------------- 25 | 26 | .. automodule:: beansoup.utils.periods 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: beansoup.utils 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # beansoup documentation build configuration file, created by 5 | # sphinx-quickstart on Wed May 4 23:13:04 2016. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | import sys 17 | import os 18 | 19 | # If extensions (or modules to document with autodoc) are in another directory, 20 | # add these directories to sys.path here. If the directory is relative to the 21 | # documentation root, use os.path.abspath to make it absolute, like shown here. 22 | sys.path.insert(0, os.path.abspath('..')) 23 | 24 | 25 | # -- General configuration ------------------------------------------------ 26 | 27 | # If your documentation needs a minimal Sphinx version, state it here. 
28 | needs_sphinx = '1.3' 29 | 30 | # Add any Sphinx extension module names here, as strings. They can be 31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 32 | # ones. 33 | extensions = [ 34 | 'sphinx.ext.autodoc', 35 | 'sphinx.ext.napoleon', 36 | 'sphinx.ext.todo', 37 | 'sphinx.ext.viewcode', 38 | ] 39 | 40 | # Add any paths that contain templates here, relative to this directory. 41 | templates_path = ['_templates'] 42 | 43 | # The suffix(es) of source filenames. 44 | # You can specify multiple suffix as a list of string: 45 | # source_suffix = ['.rst', '.md'] 46 | source_suffix = '.rst' 47 | 48 | # The encoding of source files. 49 | #source_encoding = 'utf-8-sig' 50 | 51 | # The master toctree document. 52 | master_doc = 'index' 53 | 54 | # General information about the project. 55 | project = 'beansoup' 56 | copyright = '2016, Filippo Tampieri' 57 | author = 'Filippo Tampieri' 58 | 59 | # The version info for the project you're documenting, acts as replacement for 60 | # |version| and |release|, also used in various other places throughout the 61 | # built documents. 62 | # 63 | import beansoup 64 | # The short X.Y version. 65 | version = '.'.join(beansoup.__version__.split('.')[:2]) 66 | # The full version, including alpha/beta/rc tags. 67 | release = beansoup.__version__ 68 | 69 | # The language for content autogenerated by Sphinx. Refer to documentation 70 | # for a list of supported languages. 71 | # 72 | # This is also used if you do content translation via gettext catalogs. 73 | # Usually you set "language" from the command line for these cases. 74 | language = 'en' 75 | 76 | # There are two options for replacing |today|: either, you set today to some 77 | # non-false value, then it is used: 78 | #today = '' 79 | # Else, today_fmt is used as the format for a strftime call. 80 | #today_fmt = '%B %d, %Y' 81 | 82 | # List of patterns, relative to source directory, that match files and 83 | # directories to ignore when looking for source files. 84 | # This patterns also effect to html_static_path and html_extra_path 85 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'tests', 'museum'] 86 | 87 | # The reST default role (used for this markup: `text`) to use for all 88 | # documents. 89 | #default_role = None 90 | 91 | # If true, '()' will be appended to :func: etc. cross-reference text. 92 | #add_function_parentheses = True 93 | 94 | # If true, the current module name will be prepended to all description 95 | # unit titles (such as .. function::). 96 | #add_module_names = True 97 | 98 | # If true, sectionauthor and moduleauthor directives will be shown in the 99 | # output. They are ignored by default. 100 | #show_authors = False 101 | 102 | # The name of the Pygments (syntax highlighting) style to use. 103 | pygments_style = 'sphinx' 104 | 105 | # A list of ignored prefixes for module index sorting. 106 | #modindex_common_prefix = [] 107 | 108 | # If true, keep warnings as "system message" paragraphs in the built documents. 109 | #keep_warnings = False 110 | 111 | # If true, `todo` and `todoList` produce output, else they produce nothing. 112 | todo_include_todos = True 113 | 114 | 115 | # -- Options for HTML output ---------------------------------------------- 116 | 117 | # The theme to use for HTML and HTML Help pages. See the documentation for 118 | # a list of builtin themes. 119 | html_theme = 'nature' 120 | 121 | # Theme options are theme-specific and customize the look and feel of a theme 122 | # further. 
For a list of options available for each theme, see the 123 | # documentation. 124 | #html_theme_options = {} 125 | 126 | # Add any paths that contain custom themes here, relative to this directory. 127 | #html_theme_path = [] 128 | 129 | # The name for this set of Sphinx documents. 130 | # " v documentation" by default. 131 | #html_title = 'beansoup v' 132 | 133 | # A shorter title for the navigation bar. Default is the same as html_title. 134 | #html_short_title = None 135 | 136 | # The name of an image file (relative to this directory) to place at the top 137 | # of the sidebar. 138 | #html_logo = None 139 | 140 | # The name of an image file (relative to this directory) to use as a favicon of 141 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 142 | # pixels large. 143 | #html_favicon = None 144 | 145 | # Add any paths that contain custom static files (such as style sheets) here, 146 | # relative to this directory. They are copied after the builtin static files, 147 | # so a file named "default.css" will overwrite the builtin "default.css". 148 | html_static_path = ['_static'] 149 | 150 | # Add any extra paths that contain custom files (such as robots.txt or 151 | # .htaccess) here, relative to this directory. These files are copied 152 | # directly to the root of the documentation. 153 | #html_extra_path = [] 154 | 155 | # If not None, a 'Last updated on:' timestamp is inserted at every page 156 | # bottom, using the given strftime format. 157 | # The empty string is equivalent to '%b %d, %Y'. 158 | #html_last_updated_fmt = None 159 | 160 | # If true, SmartyPants will be used to convert quotes and dashes to 161 | # typographically correct entities. 162 | #html_use_smartypants = True 163 | 164 | # Custom sidebar templates, maps document names to template names. 165 | #html_sidebars = {} 166 | 167 | # Additional templates that should be rendered to pages, maps page names to 168 | # template names. 169 | #html_additional_pages = {} 170 | 171 | # If false, no module index is generated. 172 | #html_domain_indices = True 173 | 174 | # If false, no index is generated. 175 | #html_use_index = True 176 | 177 | # If true, the index is split into individual pages for each letter. 178 | #html_split_index = False 179 | 180 | # If true, links to the reST sources are added to the pages. 181 | #html_show_sourcelink = True 182 | 183 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 184 | #html_show_sphinx = True 185 | 186 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 187 | #html_show_copyright = True 188 | 189 | # If true, an OpenSearch description file will be output, and all pages will 190 | # contain a tag referring to it. The value of this option must be the 191 | # base URL from which the finished HTML is served. 192 | #html_use_opensearch = '' 193 | 194 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 195 | #html_file_suffix = None 196 | 197 | # Language to be used for generating the HTML full-text search index. 198 | # Sphinx supports the following languages: 199 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' 200 | # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh' 201 | #html_search_language = 'en' 202 | 203 | # A dictionary with options for the search language support, empty by default. 204 | # 'ja' uses this config value. 205 | # 'zh' user can custom change `jieba` dictionary path. 
206 | #html_search_options = {'type': 'default'} 207 | 208 | # The name of a javascript file (relative to the configuration directory) that 209 | # implements a search results scorer. If empty, the default will be used. 210 | #html_search_scorer = 'scorer.js' 211 | 212 | # Output file base name for HTML help builder. 213 | htmlhelp_basename = 'beansoupdoc' 214 | 215 | # -- Options for LaTeX output --------------------------------------------- 216 | 217 | latex_elements = { 218 | # The paper size ('letterpaper' or 'a4paper'). 219 | #'papersize': 'letterpaper', 220 | 221 | # The font size ('10pt', '11pt' or '12pt'). 222 | #'pointsize': '10pt', 223 | 224 | # Additional stuff for the LaTeX preamble. 225 | #'preamble': '', 226 | 227 | # Latex figure (float) alignment 228 | #'figure_align': 'htbp', 229 | } 230 | 231 | # Grouping the document tree into LaTeX files. List of tuples 232 | # (source start file, target name, title, 233 | # author, documentclass [howto, manual, or own class]). 234 | latex_documents = [ 235 | (master_doc, 'beansoup.tex', 'beansoup Documentation', 236 | author, 'manual'), 237 | ] 238 | 239 | # The name of an image file (relative to this directory) to place at the top of 240 | # the title page. 241 | #latex_logo = None 242 | 243 | # For "manual" documents, if this is true, then toplevel headings are parts, 244 | # not chapters. 245 | #latex_use_parts = False 246 | 247 | # If true, show page references after internal links. 248 | #latex_show_pagerefs = False 249 | 250 | # If true, show URL addresses after external links. 251 | #latex_show_urls = False 252 | 253 | # Documents to append as an appendix to all manuals. 254 | #latex_appendices = [] 255 | 256 | # If false, no module index is generated. 257 | #latex_domain_indices = True 258 | 259 | 260 | # -- Options for manual page output --------------------------------------- 261 | 262 | # One entry per manual page. List of tuples 263 | # (source start file, name, description, authors, manual section). 264 | man_pages = [ 265 | (master_doc, 'beansoup', 'beansoup Documentation', 266 | [author], 1) 267 | ] 268 | 269 | # If true, show URL addresses after external links. 270 | #man_show_urls = False 271 | 272 | 273 | # -- Options for Texinfo output ------------------------------------------- 274 | 275 | # Grouping the document tree into Texinfo files. List of tuples 276 | # (source start file, target name, title, author, 277 | # dir menu entry, description, category) 278 | texinfo_documents = [ 279 | (master_doc, 'beansoup', 'beansoup Documentation', 280 | author, 'beansoup', 'A companion to beancount, a command-line double-entry accounting tool.', 281 | 'Financial'), 282 | ] 283 | 284 | # Documents to append as an appendix to all manuals. 285 | #texinfo_appendices = [] 286 | 287 | # If false, no module index is generated. 288 | #texinfo_domain_indices = True 289 | 290 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 291 | #texinfo_show_urls = 'footnote' 292 | 293 | # If true, do not generate a @detailmenu in the "Top" node's menu. 294 | #texinfo_no_detailmenu = False 295 | 296 | 297 | # -- Options for Epub output ---------------------------------------------- 298 | 299 | # Bibliographic Dublin Core info. 300 | epub_title = project 301 | epub_author = author 302 | epub_publisher = author 303 | epub_copyright = copyright 304 | 305 | # The basename for the epub file. It defaults to the project name. 306 | #epub_basename = project 307 | 308 | # The HTML theme for the epub output. 
Since the default themes are not 309 | # optimized for small screen space, using the same theme for HTML and epub 310 | # output is usually not wise. This defaults to 'epub', a theme designed to save 311 | # visual space. 312 | #epub_theme = 'epub' 313 | 314 | # The language of the text. It defaults to the language option 315 | # or 'en' if the language is not set. 316 | #epub_language = '' 317 | 318 | # The scheme of the identifier. Typical schemes are ISBN or URL. 319 | #epub_scheme = '' 320 | 321 | # The unique identifier of the text. This can be a ISBN number 322 | # or the project homepage. 323 | #epub_identifier = '' 324 | 325 | # A unique identification for the text. 326 | #epub_uid = '' 327 | 328 | # A tuple containing the cover image and cover page html template filenames. 329 | #epub_cover = () 330 | 331 | # A sequence of (type, uri, title) tuples for the guide element of content.opf. 332 | #epub_guide = () 333 | 334 | # HTML files that should be inserted before the pages created by sphinx. 335 | # The format is a list of tuples containing the path and title. 336 | #epub_pre_files = [] 337 | 338 | # HTML files that should be inserted after the pages created by sphinx. 339 | # The format is a list of tuples containing the path and title. 340 | #epub_post_files = [] 341 | 342 | # A list of files that should not be packed into the epub file. 343 | epub_exclude_files = ['search.html'] 344 | 345 | # The depth of the table of contents in toc.ncx. 346 | #epub_tocdepth = 3 347 | 348 | # Allow duplicate toc entries. 349 | #epub_tocdup = True 350 | 351 | # Choose between 'default' and 'includehidden'. 352 | #epub_tocscope = 'default' 353 | 354 | # Fix unsupported image types using the Pillow. 355 | #epub_fix_images = False 356 | 357 | # Scale large images. 358 | #epub_max_image_width = 0 359 | 360 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 361 | #epub_show_urls = 'inline' 362 | 363 | # If false, no index is generated. 364 | #epub_use_index = True 365 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. beansoup documentation master file, created by 2 | sphinx-quickstart on Wed May 4 23:13:04 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to beansoup's documentation! 7 | ==================================== 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 4 13 | 14 | beansoup 15 | 16 | 17 | Indices and tables 18 | ================== 19 | 20 | * :ref:`genindex` 21 | * :ref:`modindex` 22 | * :ref:`search` 23 | 24 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. 
dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. epub3 to make an epub3 31 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 32 | echo. text to make text files 33 | echo. man to make manual pages 34 | echo. texinfo to make Texinfo files 35 | echo. gettext to make PO message catalogs 36 | echo. changes to make an overview over all changed/added/deprecated items 37 | echo. xml to make Docutils-native XML files 38 | echo. pseudoxml to make pseudoxml-XML files for display purposes 39 | echo. linkcheck to check all external links for integrity 40 | echo. doctest to run all doctests embedded in the documentation if enabled 41 | echo. coverage to run coverage check of the documentation if enabled 42 | echo. dummy to check syntax errors of document sources 43 | goto end 44 | ) 45 | 46 | if "%1" == "clean" ( 47 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 48 | del /q /s %BUILDDIR%\* 49 | goto end 50 | ) 51 | 52 | 53 | REM Check if sphinx-build is available and fallback to Python version if any 54 | %SPHINXBUILD% 1>NUL 2>NUL 55 | if errorlevel 9009 goto sphinx_python 56 | goto sphinx_ok 57 | 58 | :sphinx_python 59 | 60 | set SPHINXBUILD=python -m sphinx.__init__ 61 | %SPHINXBUILD% 2> nul 62 | if errorlevel 9009 ( 63 | echo. 64 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 65 | echo.installed, then set the SPHINXBUILD environment variable to point 66 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 67 | echo.may add the Sphinx directory to PATH. 68 | echo. 69 | echo.If you don't have Sphinx installed, grab it from 70 | echo.http://sphinx-doc.org/ 71 | exit /b 1 72 | ) 73 | 74 | :sphinx_ok 75 | 76 | 77 | if "%1" == "html" ( 78 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 79 | if errorlevel 1 exit /b 1 80 | echo. 81 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 82 | goto end 83 | ) 84 | 85 | if "%1" == "dirhtml" ( 86 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 87 | if errorlevel 1 exit /b 1 88 | echo. 89 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 90 | goto end 91 | ) 92 | 93 | if "%1" == "singlehtml" ( 94 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 95 | if errorlevel 1 exit /b 1 96 | echo. 97 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 98 | goto end 99 | ) 100 | 101 | if "%1" == "pickle" ( 102 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 103 | if errorlevel 1 exit /b 1 104 | echo. 105 | echo.Build finished; now you can process the pickle files. 106 | goto end 107 | ) 108 | 109 | if "%1" == "json" ( 110 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 111 | if errorlevel 1 exit /b 1 112 | echo. 113 | echo.Build finished; now you can process the JSON files. 114 | goto end 115 | ) 116 | 117 | if "%1" == "htmlhelp" ( 118 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 119 | if errorlevel 1 exit /b 1 120 | echo. 121 | echo.Build finished; now you can run HTML Help Workshop with the ^ 122 | .hhp project file in %BUILDDIR%/htmlhelp. 
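REM The .hhp project can typically be compiled into a .chm help file with
REM HTML Help Workshop's hhc.exe compiler.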
123 | goto end 124 | ) 125 | 126 | if "%1" == "qthelp" ( 127 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 128 | if errorlevel 1 exit /b 1 129 | echo. 130 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 131 | .qhcp project file in %BUILDDIR%/qthelp, like this: 132 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\beansoup.qhcp 133 | echo.To view the help file: 134 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\beansoup.ghc 135 | goto end 136 | ) 137 | 138 | if "%1" == "devhelp" ( 139 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 140 | if errorlevel 1 exit /b 1 141 | echo. 142 | echo.Build finished. 143 | goto end 144 | ) 145 | 146 | if "%1" == "epub" ( 147 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 148 | if errorlevel 1 exit /b 1 149 | echo. 150 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 151 | goto end 152 | ) 153 | 154 | if "%1" == "epub3" ( 155 | %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 156 | if errorlevel 1 exit /b 1 157 | echo. 158 | echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. 159 | goto end 160 | ) 161 | 162 | if "%1" == "latex" ( 163 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 164 | if errorlevel 1 exit /b 1 165 | echo. 166 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdf" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "latexpdfja" ( 181 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 182 | cd %BUILDDIR%/latex 183 | make all-pdf-ja 184 | cd %~dp0 185 | echo. 186 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 187 | goto end 188 | ) 189 | 190 | if "%1" == "text" ( 191 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 192 | if errorlevel 1 exit /b 1 193 | echo. 194 | echo.Build finished. The text files are in %BUILDDIR%/text. 195 | goto end 196 | ) 197 | 198 | if "%1" == "man" ( 199 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 200 | if errorlevel 1 exit /b 1 201 | echo. 202 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 203 | goto end 204 | ) 205 | 206 | if "%1" == "texinfo" ( 207 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 208 | if errorlevel 1 exit /b 1 209 | echo. 210 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 211 | goto end 212 | ) 213 | 214 | if "%1" == "gettext" ( 215 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 216 | if errorlevel 1 exit /b 1 217 | echo. 218 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 219 | goto end 220 | ) 221 | 222 | if "%1" == "changes" ( 223 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 224 | if errorlevel 1 exit /b 1 225 | echo. 226 | echo.The overview file is in %BUILDDIR%/changes. 227 | goto end 228 | ) 229 | 230 | if "%1" == "linkcheck" ( 231 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 232 | if errorlevel 1 exit /b 1 233 | echo. 234 | echo.Link check complete; look for any errors in the above output ^ 235 | or in %BUILDDIR%/linkcheck/output.txt. 236 | goto end 237 | ) 238 | 239 | if "%1" == "doctest" ( 240 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 241 | if errorlevel 1 exit /b 1 242 | echo. 
243 | echo.Testing of doctests in the sources finished, look at the ^ 244 | results in %BUILDDIR%/doctest/output.txt. 245 | goto end 246 | ) 247 | 248 | if "%1" == "coverage" ( 249 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 250 | if errorlevel 1 exit /b 1 251 | echo. 252 | echo.Testing of coverage in the sources finished, look at the ^ 253 | results in %BUILDDIR%/coverage/python.txt. 254 | goto end 255 | ) 256 | 257 | if "%1" == "xml" ( 258 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 259 | if errorlevel 1 exit /b 1 260 | echo. 261 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 262 | goto end 263 | ) 264 | 265 | if "%1" == "pseudoxml" ( 266 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 267 | if errorlevel 1 exit /b 1 268 | echo. 269 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 270 | goto end 271 | ) 272 | 273 | if "%1" == "dummy" ( 274 | %SPHINXBUILD% -b dummy %ALLSPHINXOPTS% %BUILDDIR%/dummy 275 | if errorlevel 1 exit /b 1 276 | echo. 277 | echo.Build finished. Dummy builder generates no files. 278 | goto end 279 | ) 280 | 281 | :end 282 | -------------------------------------------------------------------------------- /pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # Specify a configuration file. 4 | #rcfile= 5 | 6 | # Python code to execute, usually for sys.path manipulation such as 7 | # pygtk.require(). 8 | #init-hook= 9 | 10 | # Add files or directories to the blacklist. They should be base names, not 11 | # paths. 12 | ignore=.hg 13 | 14 | # Pickle collected data for later comparisons. 15 | persistent=yes 16 | 17 | # List of plugins (as comma separated values of python modules names) to load, 18 | # usually to register additional checkers. 19 | load-plugins= 20 | 21 | # DEPRECATED 22 | #include-ids=no 23 | 24 | # DEPRECATED 25 | #symbols=no 26 | 27 | 28 | [REPORTS] 29 | 30 | # Set the output format. Available formats are text, parseable, colorized, msvs 31 | # (visual studio) and html. You can also give a reporter class, eg 32 | # mypackage.mymodule.MyReporterClass. 33 | output-format=text 34 | 35 | # Put messages in a separate file for each module / package specified on the 36 | # command line instead of printing them on stdout. Reports (if any) will be 37 | # written in a file name "pylint_global.[txt|html]". 38 | files-output=no 39 | 40 | # Tells whether to display a full report or only the messages 41 | reports=no 42 | 43 | # Python expression which should return a note less than 10 (10 is the highest 44 | # note). You have access to the variables errors warning, statement which 45 | # respectively contain the number of errors / warnings messages and the total 46 | # number of statements analyzed. This is used by the global evaluation report 47 | # (RP0004). 48 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 49 | 50 | # Template used to display messages. This is a python new-style format string 51 | # used to format the message information. See doc for all details 52 | msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" 53 | 54 | 55 | [MESSAGES CONTROL] 56 | 57 | # Enable the message, report, category or checker with the given id(s). You can 58 | # either give multiple identifier separated by comma (,) or put this option 59 | # multiple time. See also the "--disable" option for examples. 
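# In this configuration "enable=all" below turns every check on first, and the
# long "disable" list that follows then switches off the checks this project
# does not want.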
60 | 61 | enable=all 62 | 63 | 64 | # Disable the message, report, category or checker with the given id(s). You 65 | # can either give multiple identifiers separated by comma (,) or put this 66 | # option multiple times (only on the command line, not in the configuration 67 | # file where it should appear only once).You can also use "--disable=all" to 68 | # disable everything first and then reenable specific checks. For example, if 69 | # you want to run only the similarities checker, you can use "--disable=all 70 | # --enable=similarities". If you want to run only the classes checker, but have 71 | # no Warning level messages displayed, use"--disable=all --enable=classes 72 | # --disable=W" 73 | 74 | disable=locally-disabled, 75 | suppressed-message, 76 | missing-docstring, 77 | too-many-lines, 78 | multiple-statements, 79 | superfluous-parens, 80 | bad-continuation, 81 | ungrouped-imports, 82 | wrong-import-position, 83 | no-self-argument, 84 | no-member, 85 | no-value-for-parameter, 86 | too-many-function-args, 87 | unsubscriptable-object, 88 | too-many-nested-blocks, 89 | no-self-use, 90 | redefined-variable-type, 91 | duplicate-code, 92 | too-few-public-methods, 93 | too-many-public-methods, 94 | too-many-branches, 95 | too-many-arguments, 96 | too-many-locals, 97 | too-many-statements, 98 | attribute-defined-outside-init, 99 | protected-access, 100 | arguments-differ, 101 | abstract-method, 102 | fixme, 103 | global-variable-undefined, 104 | global-statement, 105 | unused-variable, 106 | unused-argument, 107 | redefined-outer-name, 108 | redefined-builtin, 109 | undefined-loop-variable, 110 | broad-except, 111 | logging-format-interpolation, 112 | anomalous-backslash-in-string 113 | 114 | 115 | 116 | 117 | [VARIABLES] 118 | 119 | # Tells whether we should check for unused import in __init__ files. 120 | init-import=no 121 | 122 | # A regular expression matching the name of dummy variables (i.e. expectedly 123 | # not used). 124 | dummy-variables-rgx=_$|dummy 125 | 126 | # List of additional names supposed to be defined in builtins. Remember that 127 | # you should avoid to define new builtins when possible. 128 | additional-builtins= 129 | 130 | 131 | [TYPECHECK] 132 | 133 | # Tells whether missing members accessed in mixin class should be ignored. A 134 | # mixin class is detected if its name ends with "mixin" (case insensitive). 135 | ignore-mixin-members=yes 136 | 137 | # List of module names for which member attributes should not be checked 138 | # (useful for modules/projects where namespaces are manipulated during runtime 139 | # and thus existing member attributes cannot be deduced by static analysis 140 | ignored-modules= 141 | 142 | # List of classes names for which member attributes should not be checked 143 | # (useful for classes with attributes dynamically set). 144 | ignored-classes= 145 | 146 | # List of members which are set dynamically and missed by pylint inference 147 | # system, and so shouldn't trigger E0201 when accessed. Python regular 148 | # expressions are accepted. 149 | #generated-members=REQUEST,acl_users,aq_parent 150 | generated-members= 151 | 152 | 153 | [LOGGING] 154 | 155 | # Logging modules to check that the string format arguments are in logging 156 | # function parameter format 157 | logging-modules=logging 158 | 159 | 160 | [MISCELLANEOUS] 161 | 162 | # List of note tags to take in consideration, separated by a comma. 
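# For example, a comment such as "# TODO: handle leap years" would normally be
# reported by the fixme check, although that check is disabled above.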
163 | notes=FIXME,XXX,TODO 164 | 165 | 166 | [BASIC] 167 | 168 | # List of builtins function names that should not be used, separated by a comma 169 | bad-functions= 170 | 171 | # Good variable names which should always be accepted, separated by a comma 172 | good-names=i,j,k,ex,Run,_ 173 | 174 | # Bad variable names which should always be refused, separated by a comma 175 | bad-names=foo,bar,baz,toto,tutu,tata 176 | 177 | # Colon-delimited sets of names that determine each other's naming style when 178 | # the name regexes allow several styles. 179 | name-group= 180 | 181 | # Include a hint for the correct naming format with invalid-name 182 | include-naming-hint=yes 183 | 184 | # Regular expression matching correct module names 185 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 186 | 187 | # Naming hint for module names 188 | module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 189 | 190 | # Regular expression matching correct method names 191 | method-rgx=[a-z_][a-zA-Z0-9_]{2,64}$ 192 | 193 | # Naming hint for method names 194 | method-name-hint=[a-z_][a-zA-Z0-9_]{2,64}$ 195 | 196 | # Regular expression matching correct class names 197 | class-rgx=[A-Z_][a-zA-Z0-9]+$ 198 | 199 | # Naming hint for class names 200 | class-name-hint=[A-Z_][a-zA-Z0-9]+$ 201 | 202 | # Regular expression matching correct class attribute names 203 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 204 | 205 | # Naming hint for class attribute names 206 | class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 207 | 208 | # Regular expression matching correct inline iteration names 209 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ 210 | 211 | # Naming hint for inline iteration names 212 | inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ 213 | 214 | # Regular expression matching correct variable names 215 | variable-rgx=([a-z_][a-z0-9_]{2,30}|_+|mu)$ 216 | 217 | # Naming hint for variable names 218 | variable-name-hint=[a-z_][a-z0-9_]{2,30}$ 219 | 220 | # Regular expression matching correct constant names 221 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 222 | 223 | # Naming hint for constant names 224 | const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 225 | 226 | # Regular expression matching correct attribute names 227 | attr-rgx=[a-z_][a-z0-9_]{2,30}$ 228 | 229 | # Naming hint for attribute names 230 | attr-name-hint=[a-z_][a-z0-9_]{2,30}$ 231 | 232 | # Regular expression matching correct argument names 233 | argument-rgx=([a-z_][a-z0-9_]{2,30}|_+|mu)$ 234 | 235 | # Naming hint for argument names 236 | argument-name-hint=[a-z_][a-z0-9_]{2,30}$ 237 | 238 | # Regular expression matching correct function names 239 | function-rgx=[a-z_][a-zA-Z0-9_]{2,64}$ 240 | 241 | # Naming hint for function names 242 | function-name-hint=[a-z_][a-zA-Z0-9_]{2,64}$ 243 | 244 | # Regular expression which should only match function or class names that do 245 | # not require a docstring. 246 | no-docstring-rgx=__.*__ 247 | 248 | # Minimum line length for functions/classes that require docstrings, shorter 249 | # ones are exempt. 250 | docstring-min-length=-1 251 | 252 | 253 | [FORMAT] 254 | 255 | # Maximum number of characters on a single line. 256 | max-line-length=92 257 | 258 | # Regexp for a line that is allowed to be longer than the limit. 259 | ignore-long-lines=^\s*(# )??$ 260 | 261 | # Allow the body of an if to be on the same line as the test if there is no 262 | # else. 
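# With "no", a one-liner such as "if done: return" is treated as multiple
# statements on a single line (a check that is already disabled above).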
263 | single-line-if-stmt=no 264 | 265 | # List of optional constructs for which whitespace checking is disabled 266 | no-space-check=trailing-comma,dict-separator 267 | 268 | # Maximum number of lines in a module 269 | max-module-lines=1000 270 | 271 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 272 | # tab). 273 | indent-string=' ' 274 | 275 | # Number of spaces of indent required inside a hanging or continued line. 276 | indent-after-paren=4 277 | 278 | 279 | [SIMILARITIES] 280 | 281 | # Minimum lines number of a similarity. 282 | min-similarity-lines=4 283 | 284 | # Ignore comments when computing similarities. 285 | ignore-comments=yes 286 | 287 | # Ignore docstrings when computing similarities. 288 | ignore-docstrings=yes 289 | 290 | # Ignore imports when computing similarities. 291 | ignore-imports=no 292 | 293 | 294 | [DESIGN] 295 | 296 | # Maximum number of arguments for function / method 297 | max-args=5 298 | 299 | # Argument names that match this expression will be ignored. Default to name 300 | # with leading underscore 301 | ignored-argument-names=_.* 302 | 303 | # Maximum number of locals for function / method body 304 | max-locals=20 305 | 306 | # Maximum number of return / yield for function / method body 307 | max-returns=6 308 | 309 | # Maximum number of branch for function / method body 310 | max-branches=12 311 | 312 | # Maximum number of statements in function / method body 313 | max-statements=50 314 | 315 | # Maximum number of parents for a class (see R0901). 316 | max-parents=7 317 | 318 | # Maximum number of attributes for a class (see R0902). 319 | max-attributes=7 320 | 321 | # Minimum number of public methods for a class (see R0903). 322 | min-public-methods=2 323 | 324 | # Maximum number of public methods for a class (see R0904). 325 | max-public-methods=20 326 | 327 | 328 | [IMPORTS] 329 | 330 | # Deprecated modules which should not be used, separated by a comma 331 | deprecated-modules=stringprep,optparse 332 | 333 | # Create a graph of every (i.e. internal and external) dependencies in the 334 | # given file (report RP0402 must not be disabled) 335 | import-graph= 336 | 337 | # Create a graph of external dependencies in the given file (report RP0402 must 338 | # not be disabled) 339 | ext-import-graph= 340 | 341 | # Create a graph of internal dependencies in the given file (report RP0402 must 342 | # not be disabled) 343 | int-import-graph= 344 | 345 | 346 | [CLASSES] 347 | 348 | # List of interface methods to ignore, separated by a comma. This is used for 349 | # instance to not check methods defines in Zope's Interface base class. 350 | ## ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by 351 | 352 | # List of method names used to declare (i.e. assign) instance attributes. 353 | defining-attr-methods=__init__,__new__,setUp 354 | 355 | # List of valid names for the first argument in a class method. 356 | valid-classmethod-first-arg=cls 357 | 358 | # List of valid names for the first argument in a metaclass class method. 359 | valid-metaclass-classmethod-first-arg=mcs 360 | 361 | 362 | [EXCEPTIONS] 363 | 364 | # Exceptions that will emit a warning when being caught. 
Defaults to 365 | # "Exception" 366 | overgeneral-exceptions=Exception 367 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests beansoup 3 | addopts = --doctest-modules -------------------------------------------------------------------------------- /requirements/rtd.txt: -------------------------------------------------------------------------------- 1 | beancount --no-binary :all: 2 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | coverage=pytest --addopts='--cov=beansoup --cov-report=term-missing' 4 | 5 | [upload_docs] 6 | upload-dir = docs/_build/html -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | import re 3 | from setuptools import setup 4 | import sys 5 | 6 | # Check if the version is sufficient. 7 | if sys.version_info[:2] < (3,3): 8 | raise SystemExit("ERROR: Insufficient Python version; you need v3.3 or higher.") 9 | 10 | here = path.abspath(path.dirname(__file__)) 11 | 12 | # Get the version string without importing the package 13 | with open(path.join(here, 'beansoup', 'version.py'), 'rt') as f: 14 | version = re.search(r"__version__ = '(.*?)'", f.read()).group(1) 15 | 16 | with open(path.join(here, 'README.rst'), 'rt', encoding='utf-8') as f: 17 | long_description = f.read() 18 | 19 | setup( 20 | name='beansoup', 21 | 22 | version=version, 23 | 24 | description='A companion to beancount, a command-line double-entry accounting tool', 25 | long_description=long_description, 26 | 27 | # Project homepage 28 | url='https://github.com/fxtlabs/beansoup', 29 | 30 | # Author details 31 | author='Filippo Tampieri', 32 | author_email='fxt@fxtlabs.com', 33 | 34 | license='GPLv2', 35 | 36 | classifiers=[ 37 | 'Development Status :: 2 - Pre-Alpha', 38 | 39 | 'Intended Audience :: Developers', 40 | 'Intended Audience :: Financial and Insurance Industry', 41 | 'Topic :: Office/Business :: Financial', 42 | 'Topic :: Office/Business :: Financial :: Accounting', 43 | 'Topic :: Office/Business :: Financial :: Investment', 44 | 45 | # Use the same license as beancount 46 | 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 47 | 48 | 'Programming Language :: Python :: 3.3', 49 | 'Programming Language :: Python :: 3.4', 50 | 'Programming Language :: Python :: 3.5', 51 | 'Operating System :: OS Independent', 52 | ], 53 | 54 | keywords=['accounting', 'investing'], 55 | 56 | packages=['beansoup'], 57 | 58 | install_requires=['beancount'], 59 | setup_requires=['pytest-runner'], 60 | tests_require=['pytest', 'pytest-cov', 'coverage', 'python-dateutil'], 61 | ) 62 | -------------------------------------------------------------------------------- /tests/importers/test_amex.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.importers.amex module.""" 2 | 3 | import datetime 4 | import pytest 5 | from os import path 6 | import tempfile 7 | 8 | from beancount.ingest import cache 9 | 10 | from beansoup.importers import amex 11 | 12 | 13 | DATADIR = tempfile.gettempdir() 14 | 15 | 16 | pdf_filing_importer_data = [ 17 | (1, 'no-match.pdf', None), 18 | (1, 
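    # Each row: (first_day of the statement cycle, candidate filename,
    # expected statement date, or None when the importer should not match).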
'Statement_Jan_2016.pdf', None), 19 | (1, 'Statement_January 2016.pdf', None), 20 | (1, 'Statement_Jan 2016.pdf', datetime.date(2016, 1, 31)), 21 | (28, 'Statement_Feb 2016.pdf', datetime.date(2016, 2, 27)), 22 | (15, 'Statement_Mar 2016.pdf', datetime.date(2016, 3, 14)), 23 | ] 24 | 25 | @pytest.mark.parametrize('first_day,filename,expected_date', 26 | pdf_filing_importer_data) 27 | def test_pdf_filing_importer(first_day, filename, expected_date): 28 | account = 'Liabilities:Amex' 29 | importer = amex.PdfFilingImporter(account, 30 | basename='amex', 31 | first_day=first_day) 32 | file = cache.get_file(path.join(DATADIR, filename)) 33 | 34 | assert importer.name() == 'beansoup.importers.amex.PdfFilingImporter: "{}"'.format(account) 35 | assert importer.file_account(file) == account 36 | assert importer.file_name(file) == 'amex.pdf' 37 | assert importer.extract(file) == [] 38 | 39 | if expected_date: 40 | assert importer.identify(file) 41 | assert importer.file_date(file) == expected_date 42 | else: 43 | assert not importer.identify(file) 44 | assert not importer.file_date(file) 45 | -------------------------------------------------------------------------------- /tests/importers/test_filing.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.importers.filing module.""" 2 | 3 | import datetime 4 | import pytest 5 | from os import path 6 | import tempfile 7 | 8 | from beancount.ingest import cache 9 | 10 | from beansoup.importers import filing 11 | 12 | 13 | DATADIR = tempfile.gettempdir() 14 | 15 | 16 | def test_basics(): 17 | account = 'Assets:Checking' 18 | importer = filing.Importer( 19 | account, basename=None, filename_regexp='test.pdf') 20 | file = cache.get_file(path.join(DATADIR, 'test.pdf')) 21 | 22 | assert importer.name() == 'beansoup.importers.filing.Importer: "{}"'.format(account) 23 | assert importer.file_account(file) == account 24 | assert importer.file_name(file) == None 25 | assert importer.extract(file) == [] 26 | 27 | account = 'Liabilities:Visa' 28 | importer = filing.Importer( 29 | account, basename='filed', filename_regexp='test.pdf') 30 | file = cache.get_file(path.join(DATADIR, 'test.pdf')) 31 | assert importer.name() == 'beansoup.importers.filing.Importer: "{}"'.format(account) 32 | assert importer.file_account(file) == account 33 | assert importer.file_name(file) == 'filed.pdf' 34 | assert importer.extract(file) == [] 35 | 36 | 37 | identify_data = [ 38 | ('test-01.pdf', True), 39 | ('test-02.csv', True), 40 | ('test-03.txt', False), 41 | ('test-ab.pdf', False), 42 | ] 43 | 44 | @pytest.mark.parametrize('filename,expected', identify_data) 45 | def test_identify(filename, expected): 46 | importer = filing.Importer('Assets:Testing', 47 | filename_regexp=r'test-\d{2}\.(pdf|csv)') 48 | file = cache.get_file(path.join(DATADIR, filename)) 49 | if expected: 50 | assert importer.identify(file) 51 | else: 52 | assert not importer.identify(file) 53 | 54 | 55 | file_date_data = [ 56 | (1, 'test.pdf', 'no-match.pdf', None), 57 | (1, '^test_(?P\w{3})_(?P\d{4}).pdf$', 58 | 'test_May_2016.pdf', datetime.date(2016, 5, 31)), 59 | (2, '^test_(?P\d{2})_(?P\d{4}).pdf$', 60 | 'test_05_2016.pdf', datetime.date(2016, 5, 1)), 61 | (31, '^test_(?P\d{4})_(?P\w{3}).pdf$', 62 | 'test_2016_may.pdf', datetime.date(2016, 5, 30)), 63 | (1, '^test_(?P\w{3,})_(?P\d{1,2})_(?P\d{4}).pdf$', 64 | 'test_April_12_2016.pdf', datetime.date(2016, 4, 12)), 65 | (30, '^test_(?P\d{4})-(?P\d{2})-(?P\d{2}).pdf$', 66 | 'test_2016-04-12.pdf', 
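    # Each row: (first_day, filename_regexp, filename, expected file date or None).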
datetime.date(2016, 4, 12)), 67 | ] 68 | 69 | @pytest.mark.parametrize('first_day,filename_regexp,filename,expected', 70 | file_date_data) 71 | def test_file_date(first_day, filename_regexp, filename, expected): 72 | importer = filing.Importer('Assets:Testing', 73 | first_day=first_day, 74 | filename_regexp=filename_regexp) 75 | file = cache.get_file(path.join(DATADIR, filename)) 76 | assert importer.file_date(file) == expected 77 | -------------------------------------------------------------------------------- /tests/importers/test_mixins.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.importers.mixins module.""" 2 | 3 | from os import path 4 | import tempfile 5 | 6 | from beancount import loader 7 | from beancount.ingest import cache 8 | from beancount.parser import cmptest 9 | 10 | from beansoup.importers import mixins 11 | from beansoup.utils import testing 12 | 13 | 14 | class Importer(mixins.FilterChain, testing.ConstImporter): 15 | pass 16 | 17 | 18 | class TestFilterChainMixin(cmptest.TestCase): 19 | 20 | @loader.load_doc(expect_errors=True) 21 | def test_mixin(self, entries, errors, _): 22 | """ 23 | 2014-01-01 open Assets:US:BofA:Checking USD 24 | 25 | 2014-05-19 * "Verizon Wireless" "" 26 | Assets:US:BofA:Checking -44.34 USD 27 | 28 | 2014-05-23 * "Wine-Tarner Cable" "" 29 | Assets:US:BofA:Checking -80.17 USD 30 | 31 | 2014-06-04 * "BANK FEES" "Monthly bank fee" 32 | Assets:US:BofA:Checking -4.00 USD 33 | 34 | 2014-06-04 * "RiverBank Properties" "Paying the rent" 35 | Assets:US:BofA:Checking -2400.00 USD 36 | 37 | 2014-06-08 * "EDISON POWER" "" 38 | Assets:US:BofA:Checking -65.00 USD 39 | """ 40 | def filter_last_two(entries): 41 | return entries[-2:] 42 | 43 | file = cache.get_file(path.join(tempfile.gettempdir(), 'test')) 44 | 45 | # Running with no filters should return the extracted entries unchanged 46 | importer = Importer(entries, 'Assets:US:BofA:Checking', filters=[]) 47 | extracted_entries = importer.extract(file) 48 | self.assertEqualEntries(extracted_entries, entries) 49 | 50 | # Run with a filter that should pass only the last two entries 51 | importer = Importer(entries, 'Assets:US:BofA:Checking', 52 | filters=[filter_last_two]) 53 | extracted_entries = importer.extract(file) 54 | self.assertEqualEntries(extracted_entries, entries[-2:]) 55 | -------------------------------------------------------------------------------- /tests/importers/test_td.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.importers.td module.""" 2 | 3 | import datetime 4 | from os import path 5 | 6 | from beancount.ingest import cache 7 | from beancount.parser import cmptest 8 | 9 | from beansoup.importers import td 10 | from beansoup.utils import testing 11 | 12 | 13 | class TestTDImporter(cmptest.TestCase): 14 | 15 | @testing.docfile(mode='w', suffix='.csv') 16 | def test_importer_against_asset(self, filename): 17 | """ 18 | 04/01/2016,12-345 Smith RLS,404.38,,5194.21 19 | 04/05/2016,COSTCO #9876543,60.24,,5133.97 20 | 04/05/2016,METRO ETS 2020,34.90,,5099.07 21 | 04/05/2016,POISSONERIE DU,31.78,,5067.29 22 | 04/05/2016,LES DOUCEURS DU,12.39,,5054.90 23 | 04/05/2016,FROMAGERIE ATWA,42.17,,5012.73 24 | 04/07/2016,CHQ#00123-456789,16.00,,4996.73 25 | 04/12/2016,FROMAGERIE ATWA,39.46,,4957.27 26 | 04/12/2016,DAVID'S TEA,27.50,,4929.77 27 | 04/12/2016,PATISSERIE SAIN,32.00,,4897.77 28 | 04/12/2016,GAZ METRO BPY,247.26,,4650.51 29 | 04/14/2016,VIDEOTRON LTEE 
BPY,237.74,,4412.77 30 | 04/14/2016,TD VISA A1B2C3,74.37,,4338.40 31 | 04/16/2016,FRUITERIE ATWAT,24.65,,4313.75 32 | 04/16/2016,POISSONERIE NOU,64.79,,4248.96 33 | 04/16/2016,CHQ#00125-9876543,160.00,,4088.96 34 | 04/19/2016,CHQ#00124-9876543,900.00,,3188.96 35 | 04/22/2016,AMEX B2C3D4,734.59,,2454.37 36 | 04/23/2016,POISSONERIE DU,57.18,,2397.19 37 | 04/28/2016,BELL CANADA BPY,25.30,,2371.89 38 | 04/29/2016,CINEPLEX #9172,23.00,,2348.89 39 | 04/29/2016,CANADA RIT,,345.24,2694.13 40 | 04/29/2016,12345678900WIRE,,210.32,2904.45 41 | 04/30/2016,BARON SPORTS,21.28,,2883.17 42 | """ 43 | file = cache.get_file(filename) 44 | 45 | account = 'Assets:TD:Checking' 46 | importer = td.Importer(account, 'CAD', 'td-checking', 47 | first_day=1, 48 | filename_regexp=path.basename(filename)) 49 | 50 | assert importer.file_account(file) == account 51 | assert importer.file_name(file) == 'td-checking.csv' 52 | assert importer.identify(file) 53 | assert importer.file_date(file) == datetime.date(2016, 4, 30) 54 | 55 | entries = importer.extract(file) 56 | self.assertEqualEntries(""" 57 | 2016-04-01 * "12-345 Smith RLS" 58 | Assets:TD:Checking -404.38 CAD 59 | 60 | 2016-04-05 * "COSTCO #9876543" 61 | Assets:TD:Checking -60.24 CAD 62 | 63 | 2016-04-05 * "METRO ETS 2020" 64 | Assets:TD:Checking -34.90 CAD 65 | 66 | 2016-04-05 * "POISSONERIE DU" 67 | Assets:TD:Checking -31.78 CAD 68 | 69 | 2016-04-05 * "LES DOUCEURS DU" 70 | Assets:TD:Checking -12.39 CAD 71 | 72 | 2016-04-05 * "FROMAGERIE ATWA" 73 | Assets:TD:Checking -42.17 CAD 74 | 75 | 2016-04-07 * "CHQ#00123-456789" 76 | Assets:TD:Checking -16.00 CAD 77 | 78 | 2016-04-12 * "FROMAGERIE ATWA" 79 | Assets:TD:Checking -39.46 CAD 80 | 81 | 2016-04-12 * "DAVID'S TEA" 82 | Assets:TD:Checking -27.50 CAD 83 | 84 | 2016-04-12 * "PATISSERIE SAIN" 85 | Assets:TD:Checking -32.00 CAD 86 | 87 | 2016-04-12 * "GAZ METRO BPY" 88 | Assets:TD:Checking -247.26 CAD 89 | 90 | 2016-04-14 * "VIDEOTRON LTEE BPY" 91 | Assets:TD:Checking -237.74 CAD 92 | 93 | 2016-04-14 * "TD VISA A1B2C3" 94 | Assets:TD:Checking -74.37 CAD 95 | 96 | 2016-04-16 * "FRUITERIE ATWAT" 97 | Assets:TD:Checking -24.65 CAD 98 | 99 | 2016-04-16 * "POISSONERIE NOU" 100 | Assets:TD:Checking -64.79 CAD 101 | 102 | 2016-04-16 * "CHQ#00125-9876543" 103 | Assets:TD:Checking -160.00 CAD 104 | 105 | 2016-04-19 * "CHQ#00124-9876543" 106 | Assets:TD:Checking -900.00 CAD 107 | 108 | 2016-04-22 * "AMEX B2C3D4" 109 | Assets:TD:Checking -734.59 CAD 110 | 111 | 2016-04-23 * "POISSONERIE DU" 112 | Assets:TD:Checking -57.18 CAD 113 | 114 | 2016-04-28 * "BELL CANADA BPY" 115 | Assets:TD:Checking -25.30 CAD 116 | 117 | 2016-04-29 * "CINEPLEX #9172" 118 | Assets:TD:Checking -23.00 CAD 119 | 120 | 2016-04-29 * "CANADA RIT" 121 | Assets:TD:Checking 345.24 CAD 122 | 123 | 2016-04-29 * "12345678900WIRE" 124 | Assets:TD:Checking 210.32 CAD 125 | 126 | 2016-04-30 * "BARON SPORTS" 127 | Assets:TD:Checking -21.28 CAD 128 | 129 | 2016-05-01 balance Assets:TD:Checking 2883.17 CAD 130 | """, entries) 131 | 132 | @testing.docfile(mode='w', suffix='.csv') 133 | def test_importer_against_liability(self, filename): 134 | """ 135 | 12/06/2015,SKYPE 123456789,14.00,,97.62 136 | 12/07/2015,STM-LAURIER MONTREAL,22.50,,120.12 137 | 12/13/2015,PAYMENT - THANK YOU,,97.62,22.50 138 | 12/14/2015,RESTAURANT PHAYA THAI MONTREAL,40.00,,62.50 139 | 12/16/2015,STM-CHARLEVOIX MONTREAL,45.00,,107.50 140 | """ 141 | file = cache.get_file(filename) 142 | 143 | account = 'Liabilities:TD:Visa' 144 | importer = td.Importer(account, 'CAD', 'td-visa', 145 | 
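                               # With first_day=4 the statement period covering
                               # these December charges runs from 2015-12-04 to
                               # 2016-01-03, the file date asserted below.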
first_day=4, 146 | filename_regexp=path.basename(filename)) 147 | 148 | assert importer.file_account(file) == account 149 | assert importer.file_name(file) == 'td-visa.csv' 150 | assert importer.identify(file) 151 | assert importer.file_date(file) == datetime.date(2016, 1, 3) 152 | 153 | entries = importer.extract(file) 154 | self.assertEqualEntries(""" 155 | 2015-12-06 * "SKYPE 123456789" 156 | Liabilities:TD:Visa -14.00 CAD 157 | 158 | 2015-12-07 * "STM-LAURIER MONTREAL" 159 | Liabilities:TD:Visa -22.50 CAD 160 | 161 | 2015-12-13 * "PAYMENT - THANK YOU" 162 | Liabilities:TD:Visa 97.62 CAD 163 | 164 | 2015-12-14 * "RESTAURANT PHAYA THAI MONTREAL" 165 | Liabilities:TD:Visa -40.00 CAD 166 | 167 | 2015-12-16 * "STM-CHARLEVOIX MONTREAL" 168 | Liabilities:TD:Visa -45.00 CAD 169 | 170 | 2016-01-04 balance Liabilities:TD:Visa -107.50 CAD 171 | """, entries) 172 | -------------------------------------------------------------------------------- /tests/plugins/test_clear_transactions.py: -------------------------------------------------------------------------------- 1 | """Unit tests for deposit_in_transit plugin.""" 2 | 3 | from beancount import loader 4 | from beancount.parser import cmptest 5 | 6 | from beansoup.plugins import config 7 | from beansoup.plugins import clear_transactions 8 | 9 | 10 | class TestClearTransactions(cmptest.TestCase): 11 | 12 | @loader.load_doc() 13 | def test_plugin(self, entries, errors, _): 14 | """ 15 | plugin "beansoup.plugins.clear_transactions" " 16 | --flag_pending 17 | Assets:Clearing:Checking,Assets:Checking 18 | Liabilities:Clearing:Visa,Liabilities:Visa" 19 | 20 | 2000-01-01 open Assets:Savings 21 | 2000-01-01 open Assets:Checking 22 | 2000-01-01 open Assets:Clearing:Checking 23 | 2000-01-01 open Liabilities:Visa 24 | 2000-01-01 open Liabilities:Clearing:Visa 25 | 26 | 2000-02-01 * "This transaction should be linked to the next" 27 | Assets:Savings -500.00 USD 28 | Assets:Clearing:Checking 29 | 30 | 2000-02-01 * "This transaction should be linked to the previous" 31 | Assets:Checking 500.00 USD 32 | Assets:Clearing:Checking 33 | 34 | 2000-03-01 * "This transaction should be ignored" #PRE_CLEARED 35 | Assets:Savings -400.00 USD 36 | Assets:Clearing:Checking 37 | 38 | 2000-03-01 * "This transaction should be ignored" #PRE_CLEARED 39 | Assets:Checking 400.00 USD 40 | Assets:Clearing:Checking 41 | 42 | 2000-02-07 * "Visa" "This transaction should be linked to the next" 43 | Assets:Checking -100.00 USD 44 | Liabilities:Clearing:Visa 45 | 46 | 2000-02-10 * "This transaction should be linked to the previous" 47 | Liabilities:Visa 100.00 USD 48 | Liabilities:Clearing:Visa 49 | 50 | 2000-02-08 * "Visa" "This transaction should be linked to the next" 51 | Assets:Checking -100.00 USD 52 | Liabilities:Clearing:Visa 53 | 54 | 2000-02-11 * "This transaction should be linked to the previous" 55 | Liabilities:Visa 100.00 USD 56 | Liabilities:Clearing:Visa 57 | 58 | 2000-02-09 * "Visa" "This transaction should be linked to the next" 59 | Assets:Checking -120.00 USD 60 | Liabilities:Clearing:Visa 61 | 62 | 2000-02-09 * "This transaction should be linked to the previous" 63 | Liabilities:Visa 120.00 USD 64 | Liabilities:Clearing:Visa 65 | 66 | 2000-03-07 * "Visa" "This transaction should not be linked to the next" 67 | Assets:Checking -100.00 USD 68 | Liabilities:Clearing:Visa 69 | 70 | 2000-03-19 * "This transaction should be not linked to the previous" 71 | Liabilities:Visa 100.00 USD 72 | Liabilities:Clearing:Visa 73 | 74 | 2000-04-08 * "This transaction should be tagged 
pending" 75 | Assets:Checking -150.00 USD 76 | Liabilities:Clearing:Visa 77 | """ 78 | self.assertFalse(errors) 79 | self.assertEqualEntries(""" 80 | 2000-01-01 open Assets:Savings 81 | 2000-01-01 open Assets:Checking 82 | 2000-01-01 open Assets:Clearing:Checking 83 | 2000-01-01 open Liabilities:Visa 84 | 2000-01-01 open Liabilities:Clearing:Visa 85 | 86 | 2000-02-01 * "This transaction should be linked to the next" #CLEARED ^cleared-1 87 | Assets:Savings -500.00 USD 88 | Assets:Clearing:Checking 500.00 USD 89 | 90 | 2000-02-01 * "This transaction should be linked to the previous" #CLEARED ^cleared-1 91 | Assets:Checking 500.00 USD 92 | Assets:Clearing:Checking -500.00 USD 93 | 94 | 2000-02-07 * "Visa" "This transaction should be linked to the next" #CLEARED ^cleared-2 95 | Assets:Checking -100.00 USD 96 | Liabilities:Clearing:Visa 100.00 USD 97 | 98 | 2000-02-08 * "Visa" "This transaction should be linked to the next" #CLEARED ^cleared-3 99 | Assets:Checking -100.00 USD 100 | Liabilities:Clearing:Visa 100.00 USD 101 | 102 | 2000-02-09 * "Visa" "This transaction should be linked to the next" #CLEARED ^cleared-4 103 | Assets:Checking -120.00 USD 104 | Liabilities:Clearing:Visa 120.00 USD 105 | 106 | 2000-02-09 * "This transaction should be linked to the previous" #CLEARED ^cleared-4 107 | Liabilities:Visa 120.00 USD 108 | Liabilities:Clearing:Visa -120.00 USD 109 | 110 | 2000-02-10 * "This transaction should be linked to the previous" #CLEARED ^cleared-2 111 | Liabilities:Visa 100.00 USD 112 | Liabilities:Clearing:Visa -100.00 USD 113 | 114 | 2000-02-11 * "This transaction should be linked to the previous" #CLEARED ^cleared-3 115 | Liabilities:Visa 100.00 USD 116 | Liabilities:Clearing:Visa -100.00 USD 117 | 118 | 2000-03-01 * "This transaction should be ignored" #PRE_CLEARED 119 | Assets:Savings -400.00 USD 120 | Assets:Clearing:Checking 400.00 USD 121 | 122 | 2000-03-01 * "This transaction should be ignored" #PRE_CLEARED 123 | Assets:Checking 400.00 USD 124 | Assets:Clearing:Checking -400.00 USD 125 | 126 | 2000-03-07 ! "Visa" "This transaction should not be linked to the next" #PENDING 127 | Assets:Checking -100.00 USD 128 | Liabilities:Clearing:Visa 100.00 USD 129 | 130 | 2000-03-19 ! "This transaction should be not linked to the previous" #PENDING 131 | Liabilities:Visa 100.00 USD 132 | Liabilities:Clearing:Visa -100.00 USD 133 | 134 | 2000-04-08 ! 
"This transaction should be tagged pending" #PENDING 135 | Assets:Checking -150.00 USD 136 | Liabilities:Clearing:Visa 150.00 USD 137 | """, entries) 138 | 139 | @loader.load_doc() 140 | def test_skip_weekends_option(self, entries, errors, _): 141 | """ 142 | plugin "beansoup.plugins.clear_transactions" " 143 | --skip_weekends --flag_pending 144 | Assets:Clearing:Checking,Assets:Checking 145 | Liabilities:Clearing:Visa,Liabilities:Visa" 146 | 147 | 2000-01-01 open Assets:Savings 148 | 2000-01-01 open Assets:Checking 149 | 2000-01-01 open Assets:Clearing:Checking 150 | 2000-01-01 open Liabilities:Visa 151 | 2000-01-01 open Liabilities:Clearing:Visa 152 | 153 | 2000-02-01 * "This transaction should be linked to the next" 154 | Assets:Savings -500.00 USD 155 | Assets:Clearing:Checking 156 | 157 | 2000-02-09 * "This transaction should be linked to the previous" 158 | Assets:Checking 500.00 USD 159 | Assets:Clearing:Checking 160 | 161 | 2000-03-07 * "Visa" "This transaction should not be linked to the next" 162 | Assets:Checking -100.00 USD 163 | Liabilities:Clearing:Visa 164 | 165 | 2000-03-17 * "This transaction should not be linked to the previous" 166 | Liabilities:Visa 100.00 USD 167 | Liabilities:Clearing:Visa 168 | """ 169 | self.assertFalse(errors) 170 | self.assertEqualEntries(""" 171 | 2000-01-01 open Assets:Savings 172 | 2000-01-01 open Assets:Checking 173 | 2000-01-01 open Assets:Clearing:Checking 174 | 2000-01-01 open Liabilities:Visa 175 | 2000-01-01 open Liabilities:Clearing:Visa 176 | 177 | 2000-02-01 * "This transaction should be linked to the next" #CLEARED ^cleared-1 178 | Assets:Savings -500.00 USD 179 | Assets:Clearing:Checking 500.00 USD 180 | 181 | 2000-02-09 * "This transaction should be linked to the previous" #CLEARED ^cleared-1 182 | Assets:Checking 500.00 USD 183 | Assets:Clearing:Checking -500.00 USD 184 | 185 | 2000-03-07 ! "Visa" "This transaction should not be linked to the next" #PENDING 186 | Assets:Checking -100.00 USD 187 | Liabilities:Clearing:Visa 100.00 USD 188 | 189 | 2000-03-17 ! 
"This transaction should not be linked to the previous" #PENDING 190 | Liabilities:Visa 100.00 USD 191 | Liabilities:Clearing:Visa -100.00 USD 192 | """, entries) 193 | 194 | @loader.load_doc(expect_errors=True) 195 | def test_options_parser_1(self, entries, errors, _): 196 | """ 197 | plugin "beansoup.plugins.clear_transactions" " 198 | --flag_pending 199 | Assets:Clearing:Checking;Assets:Checking 200 | Liabilities:Clearing:Visa,Liabilities:Visa" 201 | 202 | 2000-01-01 open Assets:Savings 203 | 2000-01-01 open Assets:Checking 204 | 2000-01-01 open Assets:Clearing:Checking 205 | 2000-01-01 open Liabilities:Visa 206 | 2000-01-01 open Liabilities:Clearing:Visa 207 | """ 208 | self.assertEqual(len(errors), 1) 209 | self.assertEqual(type(errors[0]), config.ParseError) 210 | self.assertEqualEntries(""" 211 | 2000-01-01 open Assets:Savings 212 | 2000-01-01 open Assets:Checking 213 | 2000-01-01 open Assets:Clearing:Checking 214 | 2000-01-01 open Liabilities:Visa 215 | 2000-01-01 open Liabilities:Clearing:Visa 216 | """, entries) 217 | 218 | @loader.load_doc(expect_errors=True) 219 | def test_options_parser_2(self, entries, errors, _): 220 | """ 221 | plugin "beansoup.plugins.clear_transactions" " 222 | --flag_pending 223 | Assets:Clearing:Checking,Assets:Checking 224 | Assets:Clearing:Visa,Liabilities:Visa" 225 | 226 | 2000-01-01 open Assets:Savings 227 | 2000-01-01 open Assets:Checking 228 | 2000-01-01 open Assets:Clearing:Checking 229 | 2000-01-01 open Liabilities:Visa 230 | 2000-01-01 open Liabilities:Clearing:Visa 231 | """ 232 | self.assertEqual(len(errors), 1) 233 | self.assertEqual(type(errors[0]), config.ParseError) 234 | self.assertEqualEntries(""" 235 | 2000-01-01 open Assets:Savings 236 | 2000-01-01 open Assets:Checking 237 | 2000-01-01 open Assets:Clearing:Checking 238 | 2000-01-01 open Liabilities:Visa 239 | 2000-01-01 open Liabilities:Clearing:Visa 240 | """, entries) 241 | -------------------------------------------------------------------------------- /tests/plugins/test_config.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.plugins.config module.""" 2 | 3 | import pytest 4 | import re 5 | 6 | from beansoup.plugins import config 7 | 8 | 9 | def test_error(): 10 | parser = config.ArgumentParser() 11 | invalid_option = '--not-an-option' 12 | 13 | with pytest.raises(config.ParseError) as excinfo: 14 | args = parser.parse_args([invalid_option]) 15 | assert 'unrecognized argument' in excinfo.value.message 16 | assert invalid_option in excinfo.value.message 17 | 18 | 19 | def test_exit(): 20 | parser = config.ArgumentParser() 21 | 22 | with pytest.raises(config.ParseError) as excinfo: 23 | args = parser.parse_args(['--help']) 24 | assert 'usage' in excinfo.value.message 25 | 26 | 27 | def test_re_type(): 28 | parser = config.ArgumentParser() 29 | parser.add_argument( 30 | '--test_re', metavar='REGEX', default=None, type=config.re_type) 31 | 32 | args = parser.parse_args('--test_re \\d'.split()) 33 | 34 | assert args.test_re 35 | assert args.test_re.match('3') 36 | assert not args.test_re.match('X') 37 | 38 | with pytest.raises(config.ParseError) as excinfo: 39 | args = parser.parse_args('--test_re [a-'.split()) 40 | assert 'invalid regular expression' in excinfo.value.message 41 | -------------------------------------------------------------------------------- /tests/plugins/test_deposit_in_transit.py: -------------------------------------------------------------------------------- 1 | """Unit tests for 
deposit_in_transit plugin.""" 2 | 3 | from beancount import loader 4 | from beancount.parser import cmptest 5 | 6 | from beansoup.plugins import config 7 | from beansoup.plugins import deposit_in_transit 8 | 9 | 10 | class TestDepositInTransit(cmptest.TestCase): 11 | 12 | @loader.load_doc() 13 | def test_plugin(self, entries, errors, _): 14 | """ 15 | plugin "beansoup.plugins.deposit_in_transit" " 16 | --auto_open --same_day_merge --flag_pending --link_prefix=deposited" 17 | 18 | 2000-01-01 open Assets:Checking 19 | 2000-01-01 open Assets:Savings 20 | 2000-01-01 open Liabilities:Visa 21 | 22 | 2000-02-01 * "This transaction should be merged with the next" 23 | Assets:Savings -500.00 USD 24 | Assets:DIT:Checking 25 | 26 | 2000-02-01 * "This transaction should be merged with the previous" 27 | Assets:Checking 500.00 USD 28 | Assets:DIT:Savings 29 | 30 | 2000-02-07 * "Visa" "This transaction should be linked to the next" 31 | Assets:Checking -100.00 USD 32 | Liabilities:DIT:Visa 33 | 34 | 2000-02-09 * "This transaction should be linked to the previous" 35 | Liabilities:Visa 100.00 USD 36 | Assets:DIT:Checking 37 | 38 | 2000-03-08 * "This transaction should be tagged pending" 39 | Assets:Checking -150.00 USD 40 | Liabilities:DIT:Visa 41 | 42 | 2000-03-01 ! "The Bank" "Cannot merge because of different flags" 43 | Assets:Savings -300.00 USD 44 | Assets:DIT:Checking 45 | 46 | 2000-03-01 * "The Bank" "Cannot merge because of different flags" 47 | Assets:Checking 300.00 USD 48 | Assets:DIT:Savings 49 | 50 | 2000-04-01 * "Checking" "Cannot merge because of price conversion" 51 | Assets:Savings -440.00 USD 52 | Assets:DIT:Checking 400.00 CAD @ 1.1000 USD 53 | 54 | 2000-04-01 * "Savings" "Cannot merge because of price conversion" 55 | Assets:Checking 400.00 CAD 56 | Assets:DIT:Savings 57 | 58 | 2000-05-01 * "This narration should appear after the next" 59 | Assets:Checking 123.00 USD 60 | Assets:DIT:Savings 61 | 62 | 2000-05-01 * "This narration should appear before the previous" 63 | Assets:Savings -123.00 USD 64 | Assets:DIT:Checking 65 | """ 66 | self.assertFalse(errors) 67 | self.assertEqualEntries(""" 68 | 2000-01-01 open Assets:Checking 69 | 2000-01-01 open Assets:Savings 70 | 2000-01-01 open Liabilities:Visa 71 | 2000-02-01 open Assets:DIT:Checking 72 | 2000-02-01 open Assets:DIT:Savings 73 | 74 | 2000-02-01 * "This transaction should be merged with the next / This transaction should be merged with the previous" #DEPOSITED 75 | Assets:Savings -500.00 USD 76 | Assets:Checking 500.00 USD 77 | 78 | 2000-02-07 open Liabilities:DIT:Visa 79 | 80 | 2000-02-07 * "Visa" "This transaction should be linked to the next" #DEPOSITED ^deposited-1 81 | Assets:Checking -100.00 USD 82 | Liabilities:DIT:Visa 100.00 USD 83 | 84 | 2000-02-09 * "Visa" "This transaction should be linked to the next / This transaction should be linked to the previous" #DEPOSITED ^deposited-1 85 | Liabilities:DIT:Visa -100.00 USD 86 | Assets:DIT:Checking 100.00 USD 87 | 88 | 2000-02-09 * "This transaction should be linked to the previous" #DEPOSITED ^deposited-1 89 | Liabilities:Visa 100.00 USD 90 | Assets:DIT:Checking -100.00 USD 91 | 92 | 2000-03-01 ! 
"The Bank" "Cannot merge because of different flags" #DEPOSITED ^deposited-2 93 | Assets:Savings -300.00 USD 94 | Assets:DIT:Checking 300.00 USD 95 | 96 | 2000-03-01 * "The Bank" "Cannot merge because of different flags" #DEPOSITED ^deposited-2 97 | Assets:DIT:Checking -300.00 USD 98 | Assets:DIT:Savings 300.00 USD 99 | 100 | 2000-03-01 * "The Bank" "Cannot merge because of different flags" #DEPOSITED ^deposited-2 101 | Assets:Checking 300.00 USD 102 | Assets:DIT:Savings -300.00 USD 103 | 104 | 2000-03-08 ! "This transaction should be tagged pending" #IN-TRANSIT 105 | Assets:Checking -150.00 USD 106 | Liabilities:DIT:Visa 150.00 USD 107 | 108 | 2000-04-01 * "Checking" "Cannot merge because of price conversion" #DEPOSITED ^deposited-3 109 | Assets:Savings -440.00 USD 110 | Assets:DIT:Checking 400.00 CAD @ 1.1000 USD 111 | 112 | 2000-04-01 * "Checking / Savings" "Cannot merge because of price conversion" #DEPOSITED ^deposited-3 113 | Assets:DIT:Checking -400.00 CAD 114 | Assets:DIT:Savings 400.00 CAD 115 | 116 | 2000-04-01 * "Savings" "Cannot merge because of price conversion" #DEPOSITED ^deposited-3 117 | Assets:Checking 400.00 CAD 118 | Assets:DIT:Savings -400.00 CAD 119 | 120 | 2000-05-01 * "This narration should appear before the previous / This narration should appear after the next" #DEPOSITED 121 | Assets:Savings -123.00 USD 122 | Assets:Checking 123.00 USD 123 | """, entries) 124 | 125 | @loader.load_doc(expect_errors=True) 126 | def test_options_parser(self, entries, errors, _): 127 | """ 128 | plugin "beansoup.plugins.deposit_in_transit" " 129 | --not_an_option --same_day_merge --flag_pending --link_prefix=deposited" 130 | 131 | 2000-01-01 open Assets:Checking 132 | 2000-01-01 open Assets:Savings 133 | 134 | 2000-02-07 * "Just an entry" 135 | Assets:Savings -100.00 USD 136 | Assets:Checking 137 | """ 138 | # There will be warnings about the DIT unknown accounts 139 | self.assertEqual(len(errors), 1) 140 | self.assertEqual(type(errors[0]), config.ParseError) 141 | self.assertEqualEntries(""" 142 | 2000-01-01 open Assets:Checking 143 | 2000-01-01 open Assets:Savings 144 | 145 | 2000-02-07 * "Just an entry" 146 | Assets:Savings -100.00 USD 147 | Assets:Checking 100.00 USD 148 | """, entries) 149 | 150 | @loader.load_doc(expect_errors=True) 151 | def test_disable_option(self, entries, errors, _): 152 | """ 153 | plugin "beansoup.plugins.deposit_in_transit" " 154 | --skip_re=setup.py --auto_open --same_day_merge --flag_pending" 155 | 156 | 2000-01-01 open Assets:Checking 157 | 2000-01-01 open Assets:Savings 158 | 2000-01-01 open Liabilities:Visa 159 | 160 | 2000-02-01 * "This transaction should be merged with the next" 161 | Assets:Savings -500.00 USD 162 | Assets:DIT:Checking 163 | 164 | 2000-02-01 * "This transaction should be merged with the previous" 165 | Assets:Checking 500.00 USD 166 | Assets:DIT:Savings 167 | """ 168 | # There will be warnings about the DIT unknown accounts 169 | self.assertEqual(len(errors), 2) 170 | self.assertEqualEntries(""" 171 | 2000-01-01 open Assets:Checking 172 | 2000-01-01 open Assets:Savings 173 | 2000-01-01 open Liabilities:Visa 174 | 175 | 2000-02-01 * "This transaction should be merged with the next" 176 | Assets:Savings -500.00 USD 177 | Assets:DIT:Checking 500.00 USD 178 | 179 | 2000-02-01 * "This transaction should be merged with the previous" 180 | Assets:Checking 500.00 USD 181 | Assets:DIT:Savings -500.00 USD 182 | """, entries) 183 | 184 | @loader.load_doc(expect_errors=True) 185 | def test_multiple_dits_entry(self, entries, errors, _): 186 
| """ 187 | plugin "beansoup.plugins.deposit_in_transit" " 188 | --auto_open --same_day_merge --flag_pending --link_prefix=deposited" 189 | 190 | 2000-01-01 open Assets:Checking 191 | 2000-01-01 open Assets:Savings 192 | 2000-01-01 open Liabilities:Visa 193 | 194 | 2000-02-07 * "Visa" "This transaction should be linked to the next" 195 | Assets:Checking -200.00 USD 196 | Liabilities:DIT:Visa 100.00 USD 197 | Assets:DIT:Savings 198 | 199 | 2000-02-09 * "This transaction should be linked to the previous" 200 | Liabilities:Visa 100.00 USD 201 | Assets:DIT:Checking 202 | """ 203 | # There will be warnings about the DIT unknown accounts 204 | self.assertEqual(len(errors), 1) 205 | self.assertEqual(type(errors[0]), deposit_in_transit.DITError) 206 | self.assertEqualEntries(""" 207 | 2000-02-07 * "Visa" "This transaction should be linked to the next" 208 | Assets:Checking -200.00 USD 209 | Liabilities:DIT:Visa 100.00 USD 210 | Assets:DIT:Savings 100.00 USD 211 | """, [errors[0].entry]) 212 | self.assertEqualEntries(""" 213 | 2000-01-01 open Assets:Checking 214 | 2000-01-01 open Assets:Savings 215 | 2000-01-01 open Liabilities:Visa 216 | 2000-02-07 open Assets:DIT:Savings 217 | 2000-02-07 open Liabilities:DIT:Visa 218 | 219 | 2000-02-07 * "Visa" "This transaction should be linked to the next" #DEPOSITED ^deposited-1 220 | Assets:Checking -200.00 USD 221 | Liabilities:DIT:Visa 100.00 USD 222 | Assets:DIT:Savings 100.00 USD 223 | 224 | 2000-02-09 open Assets:DIT:Checking 225 | 226 | 2000-02-09 * "Visa" "This transaction should be linked to the next / This transaction should be linked to the previous" #DEPOSITED ^deposited-1 227 | Liabilities:DIT:Visa -100.00 USD 228 | Assets:DIT:Checking 100.00 USD 229 | 230 | 2000-02-09 * "This transaction should be linked to the previous" #DEPOSITED ^deposited-1 231 | Liabilities:Visa 100.00 USD 232 | Assets:DIT:Checking -100.00 USD 233 | """, entries) 234 | -------------------------------------------------------------------------------- /tests/test_transactions.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.transactions module.""" 2 | 3 | from os import path 4 | 5 | from beancount import loader 6 | from beancount.parser import cmptest 7 | 8 | from beansoup import transactions 9 | 10 | 11 | class TestTransactionCompleter(cmptest.TestCase): 12 | 13 | def setUp(self): 14 | filename = path.join(path.dirname(__file__), 'example.beancount') 15 | self.existing_entries, _, _ = loader.load_file(filename) 16 | 17 | @loader.load_doc(expect_errors=True) 18 | def test_basics_against_asset(self, entries, errors, _): 19 | """ 20 | 2016-04-04 * "BANK FEES" "Monthly bank fee" 21 | Assets:US:BofA:Checking -4.00 USD 22 | 23 | 2016-04-05 * "RiverBank Properties" "Paying the rent" 24 | Assets:US:BofA:Checking -2400.00 USD 25 | 26 | 2016-04-08 * "EDISON POWER" "" 27 | Assets:US:BofA:Checking -65.00 USD 28 | 29 | 2016-04-20 * "Verizon Wireless" "" 30 | Assets:US:BofA:Checking -72.02 USD 31 | 32 | 2016-04-23 * "Wine-Tarner Cable" "" 33 | Assets:US:BofA:Checking -79.90 USD 34 | 35 | 2016-04-26 balance Assets:US:BofA:Checking 3200.59 USD 36 | 37 | 2016-05-04 * "BANK FEES" "Monthly bank fee" 38 | Assets:US:BofA:Checking -4.00 USD 39 | 40 | 2016-05-06 * "Transfering accumulated savings to other account" 41 | Assets:US:BofA:Checking -4000 USD 42 | """ 43 | self.complete_basics('Assets:US:BofA:Checking', entries, """ 44 | 2016-04-04 * "BANK FEES" "Monthly bank fee" 45 | Assets:US:BofA:Checking -4.00 USD 46 | 
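            ; Each second posting below is the one the completer is expected to
            ; infer from the transaction history in example.beancount.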
Expenses:Financial:Fees 4.00 USD 47 | 48 | 2016-04-05 * "RiverBank Properties" "Paying the rent" 49 | Assets:US:BofA:Checking -2400.00 USD 50 | Expenses:Home:Rent 2400.00 USD 51 | 52 | 2016-04-08 * "EDISON POWER" "" 53 | Assets:US:BofA:Checking -65.00 USD 54 | Expenses:Home:Electricity 65.00 USD 55 | 56 | 2016-04-20 * "Verizon Wireless" "" 57 | Assets:US:BofA:Checking -72.02 USD 58 | Expenses:Home:Phone 72.02 USD 59 | 60 | 2016-04-23 * "Wine-Tarner Cable" "" 61 | Assets:US:BofA:Checking -79.90 USD 62 | Expenses:Home:Internet 79.90 USD 63 | 64 | 2016-05-04 * "BANK FEES" "Monthly bank fee" 65 | Assets:US:BofA:Checking -4.00 USD 66 | Expenses:Financial:Fees 4.00 USD 67 | 68 | 2016-05-06 * "Transfering accumulated savings to other account" 69 | Assets:US:BofA:Checking -4000 USD 70 | Assets:US:ETrade:Cash 4000 USD 71 | """) 72 | 73 | @loader.load_doc(expect_errors=True) 74 | def test_basics_against_liability(self, entries, errors, _): 75 | """ 76 | 2016-05-03 * "Metro Transport Authority" "Tram tickets" 77 | Liabilities:US:Chase:Slate -120.00 USD 78 | 79 | 2016-05-04 * "Corner Deli" "Buying groceries" 80 | Liabilities:US:Chase:Slate -71.88 USD 81 | 82 | 2016-05-05 * "Kin Soy" "Eating out with Bill" 83 | Liabilities:US:Chase:Slate -34.35 USD 84 | 85 | 2016-05-07 * "China Garden" "Eating out with Joe" 86 | Liabilities:US:Chase:Slate -51.64 USD 87 | 88 | 2016-05-09 * "Kin Soy" "Eating out with Joe" 89 | Liabilities:US:Chase:Slate -29.27 USD 90 | """ 91 | self.complete_basics('Liabilities:US:Chase:Slate', entries, """ 92 | 2016-05-03 * "Metro Transport Authority" "Tram tickets" 93 | Liabilities:US:Chase:Slate -120.00 USD 94 | Expenses:Transport:Tram 120.00 USD 95 | 96 | 2016-05-04 * "Corner Deli" "Buying groceries" 97 | Liabilities:US:Chase:Slate -71.88 USD 98 | Expenses:Food:Groceries 71.88 USD 99 | 100 | 2016-05-05 * "Kin Soy" "Eating out with Bill" 101 | Liabilities:US:Chase:Slate -34.35 USD 102 | Expenses:Food:Restaurant 34.35 USD 103 | 104 | 2016-05-07 * "China Garden" "Eating out with Joe" 105 | Liabilities:US:Chase:Slate -51.64 USD 106 | Expenses:Food:Restaurant 51.64 USD 107 | 108 | 2016-05-09 * "Kin Soy" "Eating out with Joe" 109 | Liabilities:US:Chase:Slate -29.27 USD 110 | Expenses:Food:Restaurant 29.27 USD 111 | """) 112 | 113 | def complete_basics(self, account, entries, expected_entries): 114 | completer = transactions.TransactionCompleter( 115 | self.existing_entries, account, interpolated=True) 116 | completed_entries = completer(entries) 117 | self.assertEqualEntries(expected_entries, completed_entries) 118 | -------------------------------------------------------------------------------- /tests/utils/test_dates.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.utils.dates module.""" 2 | 3 | import datetime 4 | from dateutil import parser 5 | import pytest 6 | 7 | from beansoup.utils import dates 8 | 9 | 10 | month_number_data = [ 11 | ('january', 1), 12 | ('FEBRUARY', 2), 13 | ('March', 3), 14 | ('aPRIL', 4), 15 | ('mAy', 5), 16 | ('jun', 6), 17 | ('Jul', 7), 18 | ('AUG', 8), 19 | ('09', 9), 20 | ('10', 10), 21 | ('nov', 11), 22 | ('12', 12), 23 | (None, None), 24 | (3, None), 25 | ([4], None), 26 | ('', None), 27 | ('janua', None), 28 | ] 29 | 30 | @pytest.mark.parametrize('s,expected', month_number_data) 31 | def test_month_number(s, expected): 32 | assert dates.month_number(s) == expected 33 | 34 | 35 | add_biz_days_data = [ 36 | # 2016-01-05 falls on a Tuesday 37 | ('2016-01-05', 0, '2016-01-05'), 38 | 
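    # e.g. adding 4 business days to Tuesday 2016-01-05 skips the weekend and
    # lands on Monday 2016-01-11, as the rows below show.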
('2016-01-05', 1, '2016-01-06'), 39 | ('2016-01-05', 2, '2016-01-07'), 40 | ('2016-01-05', 3, '2016-01-08'), 41 | ('2016-01-05', 4, '2016-01-11'), 42 | ('2016-01-05', 5, '2016-01-12'), 43 | ('2016-01-05', 6, '2016-01-13'), 44 | ('2016-01-05', 7, '2016-01-14'), 45 | ('2016-01-05', 8, '2016-01-15'), 46 | ('2016-01-05', 9, '2016-01-18'), 47 | ('2016-01-05', 10, '2016-01-19'), 48 | # 2016-02-06 falls on a Saturday 49 | ('2016-02-06', 0, '2016-02-08'), 50 | ('2016-02-06', 1, '2016-02-09'), 51 | ('2016-02-06', 2, '2016-02-10'), 52 | ('2016-02-06', 3, '2016-02-11'), 53 | ('2016-02-06', 4, '2016-02-12'), 54 | ('2016-02-06', 5, '2016-02-15'), 55 | ('2016-02-06', 6, '2016-02-16'), 56 | ('2016-02-06', 7, '2016-02-17'), 57 | ('2016-02-06', 8, '2016-02-18'), 58 | ('2016-02-06', 9, '2016-02-19'), 59 | ('2016-02-06', 10, '2016-02-22'), 60 | # 2016-02-07 falls on a Sunday 61 | ('2016-02-07', 0, '2016-02-08'), 62 | ('2016-02-07', 1, '2016-02-09'), 63 | ('2016-02-07', 2, '2016-02-10'), 64 | ('2016-02-07', 3, '2016-02-11'), 65 | ('2016-02-07', 4, '2016-02-12'), 66 | ('2016-02-07', 5, '2016-02-15'), 67 | ('2016-02-07', 6, '2016-02-16'), 68 | ('2016-02-07', 7, '2016-02-17'), 69 | ('2016-02-07', 8, '2016-02-18'), 70 | ('2016-02-07', 9, '2016-02-19'), 71 | ('2016-02-07', 10, '2016-02-22'), 72 | ] 73 | 74 | @pytest.mark.parametrize('date_str,days,expected_str', add_biz_days_data) 75 | def test_add_biz_days(date_str, days, expected_str): 76 | date = parser.parse(date_str).date() 77 | expected = parser.parse(expected_str).date() 78 | assert dates.add_biz_days(date, days) == expected 79 | 80 | 81 | def test_add_biz_days_neg(): 82 | with pytest.raises(AssertionError): 83 | dates.add_biz_days(datetime.date.today(), -1) 84 | -------------------------------------------------------------------------------- /tests/utils/test_links.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.utils.links module.""" 2 | 3 | import itertools 4 | import pytest 5 | import re 6 | 7 | from beansoup.utils import links 8 | 9 | 10 | prefix_data = [ 11 | ('pre', 0, ['pre-0', 'pre-1', 'pre-2', 'pre-3']), 12 | ('prefix', 1, ['prefix-1', 'prefix-2', 'prefix-3']), 13 | ] 14 | 15 | @pytest.mark.parametrize('prefix,start,expected', prefix_data) 16 | def test_prefix_count(prefix, start, expected): 17 | assert list(itertools.islice(links.count(prefix, start), len(expected))) == expected 18 | 19 | 20 | def test_uuid_count(): 21 | uuid_re = re.compile( 22 | r'^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$', 23 | flags=re.IGNORECASE) 24 | for link in itertools.islice(links.count(), 10): 25 | assert uuid_re.match(link) 26 | -------------------------------------------------------------------------------- /tests/utils/test_periods.py: -------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.utils.periods module.""" 2 | 3 | import datetime 4 | from dateutil import parser 5 | import itertools 6 | import pytest 7 | 8 | from beansoup.utils import periods 9 | 10 | 11 | bounds_data = [ 12 | ('2016-01-01', 1, '2016-01-01', '2016-01-31'), 13 | ('2016-01-02', 1, '2016-01-01', '2016-01-31'), 14 | ('2016-01-31', 1, '2016-01-01', '2016-01-31'), 15 | ('2016-01-01', 15, '2015-12-15', '2016-01-14'), 16 | ('2016-01-14', 15, '2015-12-15', '2016-01-14'), 17 | ('2016-01-15', 15, '2016-01-15', '2016-02-14'), 18 | ('2016-01-16', 15, '2016-01-15', '2016-02-14'), 19 | ('2015-12-14', 15, '2015-11-15', '2015-12-14'), 20 | ('2015-12-15', 15, 
'2015-12-15', '2016-01-14'), 21 | ('2015-12-16', 15, '2015-12-15', '2016-01-14'), 22 | ] 23 | 24 | @pytest.mark.parametrize('date_str,first_day,lo_expected_str,hi_expected_str', 25 | bounds_data) 26 | def test_bounds(date_str, first_day, lo_expected_str, hi_expected_str): 27 | date = parser.parse(date_str).date() 28 | lo_expected = parser.parse(lo_expected_str).date() 29 | hi_expected = parser.parse(hi_expected_str).date() 30 | assert periods.enclose_date(date, first_day) == (lo_expected, hi_expected) 31 | assert periods.greatest_start(date, first_day) == lo_expected 32 | assert periods.lowest_end(date, first_day) == hi_expected 33 | 34 | 35 | @pytest.mark.parametrize('first_day', [-1, 0, 29]) 36 | def test_bounds_exc(first_day): 37 | today = datetime.date.today() 38 | with pytest.raises(AssertionError): 39 | periods.enclose_date(today, first_day) 40 | with pytest.raises(AssertionError): 41 | periods.greatest_start(today, first_day) 42 | with pytest.raises(AssertionError): 43 | periods.lowest_end(today, first_day) 44 | 45 | 46 | next_data = [ 47 | ('2016-01-01', '2016-02-01'), 48 | ('2016-01-29', '2016-02-29'), 49 | ('2016-01-31', '2016-02-29'), 50 | ('2016-02-28', '2016-03-28'), 51 | ('2016-02-29', '2016-03-29'), 52 | ('2016-12-15', '2017-01-15'), 53 | ] 54 | 55 | @pytest.mark.parametrize('date_str,expected_str', next_data) 56 | def test_next(date_str, expected_str): 57 | date = parser.parse(date_str).date() 58 | expected = parser.parse(expected_str).date() 59 | assert periods.next(date) == expected 60 | 61 | 62 | prev_data = [ 63 | ('2016-01-01', '2015-12-01'), 64 | ('2016-01-31', '2015-12-31'), 65 | ('2016-02-29', '2016-01-29'), 66 | ('2016-03-29', '2016-02-29'), 67 | ('2016-03-31', '2016-02-29') 68 | ] 69 | 70 | @pytest.mark.parametrize('date_str,expected_str', prev_data) 71 | def test_prev(date_str,expected_str): 72 | date = parser.parse(date_str).date() 73 | expected = parser.parse(expected_str).date() 74 | assert periods.prev(date) == expected 75 | 76 | 77 | count_data = [ 78 | ('2015-01-01', False, ['2015-01-01', '2015-02-01', '2015-03-01', '2015-04-01']), 79 | ('2015-01-15', False, ['2015-01-15', '2015-02-15', '2015-03-15', '2015-04-15']), 80 | ('2015-01-28', False, ['2015-01-28', '2015-02-28', '2015-03-28', '2015-04-28']), 81 | ('2015-01-29', False, ['2015-01-29', '2015-02-28', '2015-03-29', '2015-04-29']), 82 | ('2015-01-31', False, ['2015-01-31', '2015-02-28', '2015-03-31', '2015-04-30']), 83 | ('2015-11-30', False, ['2015-11-30', '2015-12-30', '2016-01-30', '2016-02-29']), 84 | ('2016-03-01', True, ['2016-03-01', '2016-02-01', '2016-01-01', '2015-12-01']), 85 | ('2016-03-15', True, ['2016-03-15', '2016-02-15', '2016-01-15', '2015-12-15']), 86 | ('2016-03-28', True, ['2016-03-28', '2016-02-28', '2016-01-28', '2015-12-28']), 87 | ('2016-03-29', True, ['2016-03-29', '2016-02-29', '2016-01-29', '2015-12-29']), 88 | ('2016-03-30', True, ['2016-03-30', '2016-02-29', '2016-01-30', '2015-12-30']), 89 | ('2016-03-31', True, ['2016-03-31', '2016-02-29', '2016-01-31', '2015-12-31']), 90 | ] 91 | 92 | @pytest.mark.parametrize('date_str,reverse,expected_strs', count_data) 93 | def test_count(date_str, reverse, expected_strs): 94 | start_date = parser.parse(date_str).date() 95 | for date, expected_str in zip(periods.count(start_date, reverse=reverse), 96 | expected_strs): 97 | expected = parser.parse(expected_str).date() 98 | assert date == expected 99 | -------------------------------------------------------------------------------- /tests/utils/test_testing.py: 
-------------------------------------------------------------------------------- 1 | """Unit tests for beansoup.utils.testing module.""" 2 | 3 | import datetime 4 | from os import path 5 | import tempfile 6 | import unittest 7 | 8 | from beancount import loader 9 | from beancount.ingest import cache 10 | from beancount.parser import cmptest 11 | 12 | from beansoup.utils import testing 13 | 14 | 15 | class TestDocfileDecorator(unittest.TestCase): 16 | 17 | @testing.docfile(mode='w', suffix='.txt') 18 | def test_decorator(self, filename): 19 | """25341344-AFEE-2CB4-C88B-72EEAFAD5ACA""" 20 | with open(filename) as f: 21 | content = f.read() 22 | assert content == "25341344-AFEE-2CB4-C88B-72EEAFAD5ACA" 23 | _, ext = path.splitext(filename) 24 | assert ext == '.txt' 25 | 26 | 27 | class TestConstImporter(cmptest.TestCase): 28 | 29 | @loader.load_doc(expect_errors=True) 30 | def test_importer(self, entries, errors, _): 31 | """ 32 | 2014-01-01 open Assets:US:BofA:Checking USD 33 | 34 | 2014-05-19 * "Verizon Wireless" "" 35 | Assets:US:BofA:Checking -44.34 USD 36 | 37 | 2014-05-23 * "Wine-Tarner Cable" "" 38 | Assets:US:BofA:Checking -80.17 USD 39 | 40 | 2014-06-04 * "BANK FEES" "Monthly bank fee" 41 | Assets:US:BofA:Checking -4.00 USD 42 | 43 | 2014-06-04 * "RiverBank Properties" "Paying the rent" 44 | Assets:US:BofA:Checking -2400.00 USD 45 | 46 | 2014-06-08 * "EDISON POWER" "" 47 | Assets:US:BofA:Checking -65.00 USD 48 | """ 49 | account = 'Assets:US:BofA:Checking' 50 | file = cache.get_file(path.join(tempfile.gettempdir(), 'test')) 51 | importer = testing.ConstImporter(entries, account) 52 | 53 | assert importer.file_account(file) == account 54 | assert importer.file_name(file) is None 55 | assert importer.identify(file) 56 | assert importer.file_date(file) == datetime.date(2014, 6, 8) 57 | 58 | extracted_entries = importer.extract(file) 59 | self.assertEqualEntries(extracted_entries, entries) 60 | --------------------------------------------------------------------------------
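For context on the last file above: `TestDocfileDecorator` exercises `beansoup.utils.testing.docfile`, a decorator that materializes a test method's docstring as a temporary file and passes the file name to the test. The sketch below is an illustration only, assuming the decorator simply forwards its keyword arguments (such as `mode='w'` and `suffix='.txt'`) to `tempfile.NamedTemporaryFile`; the actual beansoup implementation may differ.

```python
import functools
import os
import tempfile


def docfile(**kwargs):
    """Hypothetical stand-in for beansoup.utils.testing.docfile: write the
    decorated test method's docstring to a temporary file and call the test
    with that file's name, removing the file afterwards."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self):
            # Keyword arguments (e.g. mode='w', suffix='.txt') go straight
            # through to NamedTemporaryFile.
            with tempfile.NamedTemporaryFile(delete=False, **kwargs) as f:
                f.write(func.__doc__)
                filename = f.name
            try:
                return func(self, filename)
            finally:
                os.remove(filename)
        return wrapper
    return decorator
```

With a sketch like this, `@docfile(mode='w', suffix='.txt')` behaves the way the unit test above expects: the docstring becomes the file content and the suffix appears in the generated file name.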