├── .gitignore ├── CHANGES.txt ├── DEV_NOTES.markdown ├── DEV_todo.markdown ├── EXAMPLES.markdown ├── MANIFEST.in ├── README.markdown ├── apiary.apib ├── docs ├── Makefile ├── api.rst ├── conf.py ├── diagrams │ ├── system.graffle │ └── system.png ├── index.rst └── make.bat ├── mm ├── __init__.py ├── color_converter.py ├── composer_base.py ├── composer_xls.py ├── config_base.py ├── contrib │ ├── __init__.py │ ├── django │ │ ├── __init__.py │ │ ├── data_model.py │ │ └── grid.py │ └── prettytable │ │ ├── __init__.py │ │ └── composers.py ├── document_base.py ├── document_writers.py ├── grid_base.py ├── lib │ ├── __init__.py │ ├── font_data │ │ ├── __init__.py │ │ ├── core.py │ │ ├── decorators.py │ │ ├── font_data_ms_fonts.bin │ │ └── tests.py │ ├── xldate │ │ ├── __init__.py │ │ ├── convert.py │ │ └── tests.py │ └── xlwt_0_7_2 │ │ ├── BIFFRecords.py │ │ ├── Bitmap.py │ │ ├── Cell.py │ │ ├── Column.py │ │ ├── CompoundDoc.py │ │ ├── ExcelFormula.py │ │ ├── ExcelFormulaLexer.py │ │ ├── ExcelFormulaParser.py │ │ ├── ExcelMagic.py │ │ ├── Formatting.py │ │ ├── Row.py │ │ ├── Style.py │ │ ├── UnicodeUtils.py │ │ ├── Utils.py │ │ ├── Workbook.py │ │ ├── Worksheet.py │ │ ├── __init__.py │ │ ├── antlr.py │ │ └── licences.py ├── model_base.py ├── serializer_base.py └── style_base.py ├── requirements.txt ├── requirements_extra.txt ├── requirements_unittest.txt ├── scripts └── textwidths.py ├── setup.py └── tests ├── api_tests.py ├── author.bmp ├── basic_tests.py ├── customize_tests.py ├── django_tests.py ├── dummy_django_project ├── __init__.py ├── manage.py ├── marmir_test │ ├── __init__.py │ ├── fixtures │ │ └── initial_data.json │ └── models.py └── settings.py ├── dup_type_fix_tests.py ├── formula_test.py ├── gdata_tests.py ├── generated_files └── .gitignore ├── model_base_tests.py ├── more_data_tests.py ├── multi_sheets.py ├── pretty_table_tests.py ├── psycopg2_tests.py └── xlrd_helper.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.xls 3 | -------------------------------------------------------------------------------- /CHANGES.txt: -------------------------------------------------------------------------------- 1 | v0.1, 2013-01-20 -- Initial Release 2 | v0.1.1, 2013-01-25 -- Bug fixes 3 | 4 | * style compression .bmp path in tests, style_compression, check if date exists, included lib package in setup thx https://github.com/mbraak 5 | * added order argument, pointed out defect with duplicate column types https://github.com/Alexandre-D 6 | * fixes for max column widths https://github.com/python-excel/xlwt/issues/10 7 | * sepreated field titles out 8 | v0.1.2, 9 | v0.1.3, 2013-04-21 10 | 11 | * Child (multi) spreadsheet support 12 | * Support for dict-like objects 13 | * added ability to explicitly state column types 14 | * fixed logs statements by initializing basic configuration 15 | 16 | 17 | -------------------------------------------------------------------------------- /DEV_NOTES.markdown: -------------------------------------------------------------------------------- 1 | 2 | The basic top level elements are broken up around a Document instance. It is a two step process: 3 | 4 | 1. Constructing a *Document* with a DataModel and serialzed data via Serializers. 5 | 1. Writing of the document. 6 | 7 | Pseudo code example: 8 | 9 | # 1. 
construction
10 |     data = dict | list
11 |     optional: data_model = DataModel(data)
12 |     optional: serializer_class = Serializer(data_model, data)
13 |     doc = Document(data, data_model, serializer_class, config)
14 | 
15 |     # 2. generation
16 |     doc.write()
17 | 
18 | 
19 | # 1. Document #
20 | 
21 |     doc.grid = serializer.serialize()  # returns GridBase()
22 | 
23 | ## GridBase population ##
24 | 
25 |     grid.populate(data)
26 | 
27 | The grid must have the attributes: row_count, col_count, headers.
28 | 
29 | 
30 | grid_data is a 2-D list of data instances of whatever class is found in grid.headers. Every cell has an instance.
31 | 
32 | For example, if grid.headers are:
33 | 
34 |     [<IntFieldType>, <StringFieldType>, <DateFieldType>]
35 | 
36 | Each of these classes has an attribute 'header_title' assigned to a key in the data:
37 | 
38 |     <IntFieldType>.header_title = id
39 |     <StringFieldType>.header_title = msg
40 |     <DateFieldType>.header_title = when
41 | 
42 | The data looks like this:
43 | 
44 |     [{'msg': 'My first Cell', 'when': <DateFieldType instance>, 'id': 1}, {'msg': 'My second Cell', 'homepage': <URLFieldType instance>, 'when': datetime.datetime(2013, 1, 1, 11, 12, 50, 194609), 'id': 2}]
45 | 
46 | 
47 | So, in the example above, the first cell in the first row (after the headers) is set to 1; the second column in that same row, msg, is 'My first Cell'; and so on.
48 | 
49 | The 'when' in the first row is an actual user-defined instance of a DateFieldType.
50 | 
51 | 
52 | 
53 | # 2. Writing Documents #
54 | 
55 | The DocumentWriter class provides the means by which a document is written: to disk, to a string, or (in the future) to a remote server or custom protocol.
56 | 
57 | Composer classes decide the format in which the spreadsheet is written. For example, the default, ComposerXLS, writes an Excel document (the same format xlwt currently creates).
58 | 
59 | Once the data grid is all set, the output is ready to be generated via the composer set on the DocumentWriter. Currently this is doc.write() or doc.writestr(), which handles the data returned by composer.run():
60 | 
61 |     composer = Composer(data_model, grid, doc)
62 |     composer.run()
63 | 
64 | 
65 | # ComposerBase #
66 | 
67 | comp.iterate_grid() is available to all Composer classes that inherit from ComposerBase. It does most of the work.
68 | 
69 | write_header() iterates over the field_headers and writes their header_titles. In the above example it would write: 'id', 'msg', 'when'. It calls write_cell but always points at the first row.
70 | 
71 | It iterates over every row and calls 'start_new_row()', which can be overridden in the ComposerXLS subclass. 'end_row()' is called at the end of every row.
72 | 
73 | Then, on each cell, the method 'write_cell' is called, passing the arguments row_id, col_id, and the cell data.
74 | 
75 | 
76 | 
77 | ## ComposerXLS Internals ##
78 | 
79 | Whether or not headers are written depends on the config you pass in to Document. Otherwise, the default configuration will be used.
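
For example (a minimal sketch, assuming the ConfigBase defaults in mm/config_base.py and the sample data above), the header row can be turned off by passing a config_dict when constructing the Document:

    import mm

    # 'headers' is a ConfigBase setting; False skips write_header() in the composer
    doc = mm.Document(my_data, config_dict={'headers': False})
    doc.write("no_headers.xls")  # output filename is arbitrary
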
80 | 81 | 82 | 83 | 84 | # Configuation Settings # 85 | 86 | 87 | SuppressDebuggerExceptionDialogs=1 88 | 89 | 90 | # Running Tests # 91 | 92 | python -m unittest discover tests '*_tests.py' 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | -------------------------------------------------------------------------------- /DEV_todo.markdown: -------------------------------------------------------------------------------- 1 | TODO 2 | ==== 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /EXAMPLES.markdown: -------------------------------------------------------------------------------- 1 | Index of Marmir Examples 2 | ======================== 3 | 4 | ## General 5 | 6 | * [including images](tests/basic_tests.py#L106-L120) 7 | * [explicitly defining column data types](tests/basic_tests.py#L106-L120) 8 | * [Multiple Sheets - child spreadsheets](tests/multi_sheets.py#L13-L40) 9 | * [Custom Date Formats](tests/api_tests.py#L8) 10 | 11 | ## Styles 12 | 13 | * [Removing headers](tests/customize_tests.py#L25-L38) 14 | * [Removing all styles](tests/customize_tests.py#L41-L54) 15 | * [Setting font face on custom row](tests/customize_tests.py#L57-L69) 16 | 17 | ## Contributions 18 | 19 | * [Django Extension](tests/django_tests.py#L32-L40) 20 | * [Writing to Google Data gdata](tests/gdata_tests.py#L21-L45) 21 | * [Working with Psycopg cursor results sets](tests/psycopg2_tests.py#L34-L43) 22 | * [Output Tables as Text](tests/pretty_table_tests.py#L25-L49) 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include mm/lib/font_data/font_data_ms_fonts.bin -------------------------------------------------------------------------------- /README.markdown: -------------------------------------------------------------------------------- 1 | Marmir is powerful and fun 2 | ========================== 3 | 4 | [Marmir](http://brianray.github.com/mm/) takes Python data structures and turns them into spreadsheets. 5 | 6 | It is xlwt and google spreadsheets on steroids. 7 | 8 | It also supports: input from Django models; taking Psycopg cursors; writing out ascii tables (like psql does); and will soon support HTML tables as output. The goal is to make it easy to generate many types of useful table files with the least amount of configuration. 9 | 10 | ## Marmir melts in your mouth 11 | 12 | Installing: 13 | 14 | ``` 15 | $ pip install Marmir 16 | ``` 17 | 18 | Talk about simple to use, wow. 
Marmir is just this:
19 | 
20 | ``` python
21 | 
22 | import datetime
23 | import mm
24 | 
25 | now = datetime.datetime.now().replace(microsecond=0)
26 | 
27 | my_data = [
28 |     {
29 |         'msg': "My first Row",
30 |         'id': 1,
31 |         'when': now,
32 |     },
33 |     {
34 |         'msg': "My second Row",
35 |         'id': 2,
36 |         'when': now,
37 |     },
38 | 
39 | ]
40 | 
41 | mm_doc = mm.Document(my_data)
42 | mm_doc.write("example.xls")
43 | ```
44 | The same example as above using lists (note that the 'order' argument works in the dict example above as well):
45 | ```python
46 | my_headers = ('id', 'msg', 'when')
47 | my_data = (
48 |     (1, "My First Row", now),
49 |     (2, "My Second Row", now)
50 | )
51 | 
52 | mm_doc = mm.Document(my_data, order=my_headers)
53 | mm_doc.write("example.xls")
54 | ```
55 | Or you can get fancier:
56 | 
57 | ``` python
58 | import datetime
59 | import mm
60 | 
61 | my_data = [
62 |     {
63 |         'msg': "My first Row",
64 |         'id': 1,
65 |         'when': mm.Date(datetime.datetime.now(), "%Y-%m-%dT%H:%M:%S"),
66 |         'homepage': mm.URL("https://github.com/brianray")
67 |     },
68 |     {
69 |         'msg': "My second Row",
70 |         'id': 2,
71 |         'when': datetime.datetime.now(),
72 |         'homepage': mm.URL("http://twitter.com/brianray", "Tweet Me")
73 |     },
74 | 
75 | ]
76 | 
77 | mm_doc = mm.Document(my_data)
78 | mm_doc.write("example.xls")
79 | 
80 | # you can also publish to google spreadsheets
81 | mm_doc.write_gdata("Example Spreadsheet", "Username", "Pass")
82 | ```
83 | 
84 | Now for a little Django (https://www.djangoproject.com/) example:
85 | 
86 | ``` python
87 | 
88 | from yourproject.models import TestModel
89 | from mm.contrib.django.data_model import DjangoDataModel
90 | from mm.contrib.django.grid import DjangoGrid
91 | 
92 | django_query_set = TestModel.objects.all()
93 | mm_doc = mm.Document(django_query_set,
94 |                      data_model_class=DjangoDataModel,
95 |                      grid_class=DjangoGrid)
96 | mm_doc.write("django_example.xls")
97 | ```
98 | 
99 | There is a lot more. Check out the [Examples](https://github.com/brianray/mm/blob/master/EXAMPLES.markdown).
100 | 
101 | ## ... Not in your hand
102 | 
103 | So the primary goals are:
104 | 
105 | * make XLS spreadsheets better than xlwt
106 | * create spreadsheets in Google Docs
107 | * convert Python types automagically: a date is a date, an int is an int, a string is a string, ...
108 | * do stuff you expect, like making columns wider to fit and wrapping in some cases
109 | * make stuff pretty (colors) and easier to read
110 | * generate directly from Django queries
111 | 
112 | Some other stuff:
113 | 
114 | * do summaries and break-out tables
115 | * add logic and math functions
116 | 
117 | ## Marmir is written with love
118 | 
119 | Brian Ray [@brianray](http://twitter.com/brianray) wrote Marmir. Brian is the organizer of ChiPy
120 | (http://chipy.org); one of Chicago's Top Tech 25 (according to Crain's, Oct
121 | 2011); has been professionally developing software for business for 15 years; and
122 | is sick of sub-par Python libraries for creating business spreadsheets.
123 | 
124 | The name Marmir comes from parts of the names Maura and Miranda, the author's girls.
125 | 
126 | 
127 | Copyright
128 | ---------
129 | 
130 | Copyright (c) 2013 Brian Ray
131 | 
132 | 
--------------------------------------------------------------------------------
/apiary.apib:
--------------------------------------------------------------------------------
1 | FORMAT: 1A
2 | HOST: http://www.google.com
3 | 
4 | # marmir
5 | Notes API is a *short texts saving* service similar to its physical paper presence on your table.
6 | 7 | # Group Notes 8 | Notes related resources of the **Notes API** 9 | 10 | ## Notes Collection [/notes] 11 | ### List all Notes [GET] 12 | + Response 200 (application/json) 13 | 14 | [{ 15 | "id": 1, "title": "Jogging in park" 16 | }, { 17 | "id": 2, "title": "Pick-up posters from post-office" 18 | }] 19 | 20 | ### Create a Note [POST] 21 | + Request (application/json) 22 | 23 | { "title": "Buy cheese and bread for breakfast." } 24 | 25 | + Response 201 (application/json) 26 | 27 | { "id": 3, "title": "Buy cheese and bread for breakfast." } 28 | 29 | ## Note [/notes/{id}] 30 | A single Note object with all its details 31 | 32 | + Parameters 33 | + id (required, number, `1`) ... Numeric `id` of the Note to perform action with. Has example value. 34 | 35 | ### Retrieve a Note [GET] 36 | + Response 200 (application/json) 37 | 38 | + Header 39 | 40 | X-My-Header: The Value 41 | 42 | + Body 43 | 44 | { "id": 2, "title": "Pick-up posters from post-office" } 45 | 46 | ### Remove a Note [DELETE] 47 | + Response 204 48 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 16 | 17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 18 | 19 | help: 20 | @echo "Please use \`make ' where is one of" 21 | @echo " html to make standalone HTML files" 22 | @echo " dirhtml to make HTML files named index.html in directories" 23 | @echo " singlehtml to make a single large HTML file" 24 | @echo " pickle to make pickle files" 25 | @echo " json to make JSON files" 26 | @echo " htmlhelp to make HTML files and a HTML help project" 27 | @echo " qthelp to make HTML files and a qthelp project" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 31 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 32 | @echo " text to make text files" 33 | @echo " man to make manual pages" 34 | @echo " texinfo to make Texinfo files" 35 | @echo " info to make Texinfo files and run them through makeinfo" 36 | @echo " gettext to make PO message catalogs" 37 | @echo " changes to make an overview of all changed/added/deprecated items" 38 | @echo " linkcheck to check all external links for integrity" 39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 40 | 41 | clean: 42 | -rm -rf $(BUILDDIR)/* 43 | 44 | html: 45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 48 | 49 | dirhtml: 50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 51 | @echo 52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
53 | 54 | singlehtml: 55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 56 | @echo 57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 58 | 59 | pickle: 60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 61 | @echo 62 | @echo "Build finished; now you can process the pickle files." 63 | 64 | json: 65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 66 | @echo 67 | @echo "Build finished; now you can process the JSON files." 68 | 69 | htmlhelp: 70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 71 | @echo 72 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 73 | ".hhp project file in $(BUILDDIR)/htmlhelp." 74 | 75 | qthelp: 76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 77 | @echo 78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Marmir.qhcp" 81 | @echo "To view the help file:" 82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Marmir.qhc" 83 | 84 | devhelp: 85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 86 | @echo 87 | @echo "Build finished." 88 | @echo "To view the help file:" 89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Marmir" 90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Marmir" 91 | @echo "# devhelp" 92 | 93 | epub: 94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 95 | @echo 96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 97 | 98 | latex: 99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 100 | @echo 101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 103 | "(use \`make latexpdf' here to do that automatically)." 104 | 105 | latexpdf: 106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 107 | @echo "Running LaTeX files through pdflatex..." 108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 110 | 111 | text: 112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 113 | @echo 114 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 115 | 116 | man: 117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 118 | @echo 119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 120 | 121 | texinfo: 122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 123 | @echo 124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 125 | @echo "Run \`make' in that directory to run these through makeinfo" \ 126 | "(use \`make info' here to do that automatically)." 127 | 128 | info: 129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 130 | @echo "Running Texinfo files through makeinfo..." 131 | make -C $(BUILDDIR)/texinfo info 132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 133 | 134 | gettext: 135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 136 | @echo 137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 138 | 139 | changes: 140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 141 | @echo 142 | @echo "The overview file is in $(BUILDDIR)/changes." 
143 | 144 | linkcheck: 145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 146 | @echo 147 | @echo "Link check complete; look for any errors in the above output " \ 148 | "or in $(BUILDDIR)/linkcheck/output.txt." 149 | 150 | doctest: 151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 152 | @echo "Testing of doctests in the sources finished, look at the " \ 153 | "results in $(BUILDDIR)/doctest/output.txt." 154 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | 2 | API Documentation 3 | ================= 4 | 5 | 6 | .. automodule:: mm 7 | 8 | .. autoclass:: Document 9 | :members: 10 | 11 | 12 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Marmir documentation build configuration file, created by 4 | # sphinx-quickstart on Sat Nov 26 18:37:26 2011. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | sys.path.insert(0, os.path.abspath('..')) 20 | 21 | # -- General configuration ----------------------------------------------------- 22 | 23 | # If your documentation needs a minimal Sphinx version, state it here. 24 | #needs_sphinx = '1.0' 25 | 26 | # Add any Sphinx extension module names here, as strings. They can be extensions 27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 28 | extensions = ['sphinx.ext.autodoc',] 29 | 30 | # Add any paths that contain templates here, relative to this directory. 31 | templates_path = ['_templates'] 32 | 33 | # The suffix of source filenames. 34 | source_suffix = '.rst' 35 | 36 | # The encoding of source files. 37 | #source_encoding = 'utf-8-sig' 38 | 39 | # The master toctree document. 40 | master_doc = 'index' 41 | 42 | # General information about the project. 43 | project = u'Marmir' 44 | copyright = u'2011, Brian Ray' 45 | 46 | # The version info for the project you're documenting, acts as replacement for 47 | # |version| and |release|, also used in various other places throughout the 48 | # built documents. 49 | # 50 | # The short X.Y version. 51 | version = '0.1' 52 | # The full version, including alpha/beta/rc tags. 53 | release = '0.1' 54 | 55 | # The language for content autogenerated by Sphinx. Refer to documentation 56 | # for a list of supported languages. 57 | #language = None 58 | 59 | # There are two options for replacing |today|: either, you set today to some 60 | # non-false value, then it is used: 61 | #today = '' 62 | # Else, today_fmt is used as the format for a strftime call. 63 | #today_fmt = '%B %d, %Y' 64 | 65 | # List of patterns, relative to source directory, that match files and 66 | # directories to ignore when looking for source files. 
67 | exclude_patterns = ['_build'] 68 | 69 | # The reST default role (used for this markup: `text`) to use for all documents. 70 | #default_role = None 71 | 72 | # If true, '()' will be appended to :func: etc. cross-reference text. 73 | #add_function_parentheses = True 74 | 75 | # If true, the current module name will be prepended to all description 76 | # unit titles (such as .. function::). 77 | #add_module_names = True 78 | 79 | # If true, sectionauthor and moduleauthor directives will be shown in the 80 | # output. They are ignored by default. 81 | #show_authors = False 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | # A list of ignored prefixes for module index sorting. 87 | #modindex_common_prefix = [] 88 | 89 | 90 | # -- Options for HTML output --------------------------------------------------- 91 | 92 | # The theme to use for HTML and HTML Help pages. See the documentation for 93 | # a list of builtin themes. 94 | html_theme = 'default' 95 | 96 | # Theme options are theme-specific and customize the look and feel of a theme 97 | # further. For a list of options available for each theme, see the 98 | # documentation. 99 | #html_theme_options = {} 100 | 101 | # Add any paths that contain custom themes here, relative to this directory. 102 | #html_theme_path = [] 103 | 104 | # The name for this set of Sphinx documents. If None, it defaults to 105 | # " v documentation". 106 | #html_title = None 107 | 108 | # A shorter title for the navigation bar. Default is the same as html_title. 109 | #html_short_title = None 110 | 111 | # The name of an image file (relative to this directory) to place at the top 112 | # of the sidebar. 113 | #html_logo = None 114 | 115 | # The name of an image file (within the static path) to use as favicon of the 116 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 117 | # pixels large. 118 | #html_favicon = None 119 | 120 | # Add any paths that contain custom static files (such as style sheets) here, 121 | # relative to this directory. They are copied after the builtin static files, 122 | # so a file named "default.css" will overwrite the builtin "default.css". 123 | html_static_path = ['_static'] 124 | 125 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 126 | # using the given strftime format. 127 | #html_last_updated_fmt = '%b %d, %Y' 128 | 129 | # If true, SmartyPants will be used to convert quotes and dashes to 130 | # typographically correct entities. 131 | #html_use_smartypants = True 132 | 133 | # Custom sidebar templates, maps document names to template names. 134 | #html_sidebars = {} 135 | 136 | # Additional templates that should be rendered to pages, maps page names to 137 | # template names. 138 | #html_additional_pages = {} 139 | 140 | # If false, no module index is generated. 141 | #html_domain_indices = True 142 | 143 | # If false, no index is generated. 144 | #html_use_index = True 145 | 146 | # If true, the index is split into individual pages for each letter. 147 | #html_split_index = False 148 | 149 | # If true, links to the reST sources are added to the pages. 150 | #html_show_sourcelink = True 151 | 152 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 153 | #html_show_sphinx = True 154 | 155 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
156 | #html_show_copyright = True 157 | 158 | # If true, an OpenSearch description file will be output, and all pages will 159 | # contain a tag referring to it. The value of this option must be the 160 | # base URL from which the finished HTML is served. 161 | #html_use_opensearch = '' 162 | 163 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 164 | #html_file_suffix = None 165 | 166 | # Output file base name for HTML help builder. 167 | htmlhelp_basename = 'Marmirdoc' 168 | 169 | 170 | # -- Options for LaTeX output -------------------------------------------------- 171 | 172 | latex_elements = { 173 | # The paper size ('letterpaper' or 'a4paper'). 174 | #'papersize': 'letterpaper', 175 | 176 | # The font size ('10pt', '11pt' or '12pt'). 177 | #'pointsize': '10pt', 178 | 179 | # Additional stuff for the LaTeX preamble. 180 | #'preamble': '', 181 | } 182 | 183 | # Grouping the document tree into LaTeX files. List of tuples 184 | # (source start file, target name, title, author, documentclass [howto/manual]). 185 | latex_documents = [ 186 | ('index', 'Marmir.tex', u'Marmir Documentation', 187 | u'Brian Ray', 'manual'), 188 | ] 189 | 190 | # The name of an image file (relative to this directory) to place at the top of 191 | # the title page. 192 | #latex_logo = None 193 | 194 | # For "manual" documents, if this is true, then toplevel headings are parts, 195 | # not chapters. 196 | #latex_use_parts = False 197 | 198 | # If true, show page references after internal links. 199 | #latex_show_pagerefs = False 200 | 201 | # If true, show URL addresses after external links. 202 | #latex_show_urls = False 203 | 204 | # Documents to append as an appendix to all manuals. 205 | #latex_appendices = [] 206 | 207 | # If false, no module index is generated. 208 | #latex_domain_indices = True 209 | 210 | 211 | # -- Options for manual page output -------------------------------------------- 212 | 213 | # One entry per manual page. List of tuples 214 | # (source start file, name, description, authors, manual section). 215 | man_pages = [ 216 | ('index', 'marmir', u'Marmir Documentation', 217 | [u'Brian Ray'], 1) 218 | ] 219 | 220 | # If true, show URL addresses after external links. 221 | #man_show_urls = False 222 | 223 | 224 | # -- Options for Texinfo output ------------------------------------------------ 225 | 226 | # Grouping the document tree into Texinfo files. List of tuples 227 | # (source start file, target name, title, author, 228 | # dir menu entry, description, category) 229 | texinfo_documents = [ 230 | ('index', 'Marmir', u'Marmir Documentation', 231 | u'Brian Ray', 'Marmir', 'One line description of project.', 232 | 'Miscellaneous'), 233 | ] 234 | 235 | # Documents to append as an appendix to all manuals. 236 | #texinfo_appendices = [] 237 | 238 | # If false, no module index is generated. 239 | #texinfo_domain_indices = True 240 | 241 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 
242 | #texinfo_show_urls = 'footnote' 243 | 244 | 245 | -------------------------------------------------------------------------------- /docs/diagrams/system.graffle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/docs/diagrams/system.graffle -------------------------------------------------------------------------------- /docs/diagrams/system.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/docs/diagrams/system.png -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Marmir documentation master file, created by 2 | sphinx-quickstart on Sat Nov 26 18:37:26 2011. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Marmir's documentation! 7 | ================================== 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | api 15 | 16 | 17 | 18 | Indices and tables 19 | ================== 20 | 21 | * :ref:`genindex` 22 | * :ref:`modindex` 23 | * :ref:`search` 24 | 25 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. linkcheck to check all external links for integrity 37 | echo. doctest to run all doctests embedded in the documentation if enabled 38 | goto end 39 | ) 40 | 41 | if "%1" == "clean" ( 42 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 43 | del /q /s %BUILDDIR%\* 44 | goto end 45 | ) 46 | 47 | if "%1" == "html" ( 48 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 49 | if errorlevel 1 exit /b 1 50 | echo. 51 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 52 | goto end 53 | ) 54 | 55 | if "%1" == "dirhtml" ( 56 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 57 | if errorlevel 1 exit /b 1 58 | echo. 59 | echo.Build finished. 
The HTML pages are in %BUILDDIR%/dirhtml. 60 | goto end 61 | ) 62 | 63 | if "%1" == "singlehtml" ( 64 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 65 | if errorlevel 1 exit /b 1 66 | echo. 67 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 68 | goto end 69 | ) 70 | 71 | if "%1" == "pickle" ( 72 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 73 | if errorlevel 1 exit /b 1 74 | echo. 75 | echo.Build finished; now you can process the pickle files. 76 | goto end 77 | ) 78 | 79 | if "%1" == "json" ( 80 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 81 | if errorlevel 1 exit /b 1 82 | echo. 83 | echo.Build finished; now you can process the JSON files. 84 | goto end 85 | ) 86 | 87 | if "%1" == "htmlhelp" ( 88 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 89 | if errorlevel 1 exit /b 1 90 | echo. 91 | echo.Build finished; now you can run HTML Help Workshop with the ^ 92 | .hhp project file in %BUILDDIR%/htmlhelp. 93 | goto end 94 | ) 95 | 96 | if "%1" == "qthelp" ( 97 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 98 | if errorlevel 1 exit /b 1 99 | echo. 100 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 101 | .qhcp project file in %BUILDDIR%/qthelp, like this: 102 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Marmir.qhcp 103 | echo.To view the help file: 104 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Marmir.ghc 105 | goto end 106 | ) 107 | 108 | if "%1" == "devhelp" ( 109 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 110 | if errorlevel 1 exit /b 1 111 | echo. 112 | echo.Build finished. 113 | goto end 114 | ) 115 | 116 | if "%1" == "epub" ( 117 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 118 | if errorlevel 1 exit /b 1 119 | echo. 120 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 121 | goto end 122 | ) 123 | 124 | if "%1" == "latex" ( 125 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 129 | goto end 130 | ) 131 | 132 | if "%1" == "text" ( 133 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 134 | if errorlevel 1 exit /b 1 135 | echo. 136 | echo.Build finished. The text files are in %BUILDDIR%/text. 137 | goto end 138 | ) 139 | 140 | if "%1" == "man" ( 141 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 142 | if errorlevel 1 exit /b 1 143 | echo. 144 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 145 | goto end 146 | ) 147 | 148 | if "%1" == "texinfo" ( 149 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 150 | if errorlevel 1 exit /b 1 151 | echo. 152 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 153 | goto end 154 | ) 155 | 156 | if "%1" == "gettext" ( 157 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 158 | if errorlevel 1 exit /b 1 159 | echo. 160 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 161 | goto end 162 | ) 163 | 164 | if "%1" == "changes" ( 165 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 166 | if errorlevel 1 exit /b 1 167 | echo. 168 | echo.The overview file is in %BUILDDIR%/changes. 169 | goto end 170 | ) 171 | 172 | if "%1" == "linkcheck" ( 173 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 174 | if errorlevel 1 exit /b 1 175 | echo. 
176 | echo.Link check complete; look for any errors in the above output ^ 177 | or in %BUILDDIR%/linkcheck/output.txt. 178 | goto end 179 | ) 180 | 181 | if "%1" == "doctest" ( 182 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 183 | if errorlevel 1 exit /b 1 184 | echo. 185 | echo.Testing of doctests in the sources finished, look at the ^ 186 | results in %BUILDDIR%/doctest/output.txt. 187 | goto end 188 | ) 189 | 190 | :end 191 | -------------------------------------------------------------------------------- /mm/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | __version__ = "0.1.3" 4 | __author__ = [ 5 | "Brian Ray ", 6 | ] 7 | __license__ = "TBD" 8 | 9 | from document_base import * 10 | import config_base 11 | import model_base 12 | import logging 13 | logging.basicConfig() 14 | 15 | 16 | Date = model_base.DateFieldType 17 | URL = model_base.URLFieldType 18 | Image = model_base.ImageFieldType 19 | Formula = model_base.FormulaFieldType 20 | 21 | Config = config_base.ConfigBase 22 | -------------------------------------------------------------------------------- /mm/color_converter.py: -------------------------------------------------------------------------------- 1 | from lib.font_data.decorators import memoized 2 | 3 | excel_color_dict = {} 4 | 5 | excel_color_dict['0, 0, 0'] = 0x08 6 | excel_color_dict['255, 255, 255'] = 0x09 7 | excel_color_dict['255, 0, 0'] = 0x0A 8 | excel_color_dict['0, 255, 0'] = 0x0B 9 | excel_color_dict['0, 0, 255'] = 0x0C 10 | excel_color_dict['255, 255, 0'] = 0x0D 11 | excel_color_dict['255, 0, 255'] = 0x0E 12 | excel_color_dict['0, 255, 255'] = 0x0F 13 | excel_color_dict['128, 0, 0'] = 0x10 14 | excel_color_dict['0, 128, 0'] = 0x11 15 | excel_color_dict['0, 0, 128'] = 0x12 16 | excel_color_dict['128, 128, 0'] = 0x13 17 | excel_color_dict['128, 0, 128'] = 0x14 18 | excel_color_dict['0, 128, 128'] = 0x15 19 | excel_color_dict['192, 192, 192'] = 0x16 20 | excel_color_dict['128, 128, 128'] = 0x17 21 | excel_color_dict['153, 153, 255'] = 0x18 22 | excel_color_dict['153, 51, 102'] = 0x1A 23 | excel_color_dict['255, 255, 204'] = 0x1C 24 | excel_color_dict['204, 255, 255'] = 0x1D 25 | excel_color_dict['102, 0, 102'] = 0x1E 26 | excel_color_dict['255, 128, 128'] = 0x1F 27 | excel_color_dict['0, 102, 204'] = 0x28 28 | excel_color_dict['204, 204, 255'] = 0x29 29 | excel_color_dict['0, 204, 255'] = 0x2A 30 | excel_color_dict['204, 255, 204'] = 0x2B 31 | excel_color_dict['255, 255, 153'] = 0x2C 32 | excel_color_dict['153, 204, 255'] = 0x2D 33 | excel_color_dict['255, 153, 204'] = 0x2E 34 | excel_color_dict['204, 153, 255'] = 0x2F 35 | excel_color_dict['255, 204, 153'] = 0x30 36 | excel_color_dict['51, 102, 255'] = 0x31 37 | excel_color_dict['51, 204, 204'] = 0x32 38 | excel_color_dict['153, 204, 0'] = 0x33 39 | excel_color_dict['255, 204, 0'] = 0x34 40 | excel_color_dict['255, 153, 0'] = 0x35 41 | excel_color_dict['255, 102, 0'] = 0x36 42 | excel_color_dict['102, 102, 153'] = 0x37 43 | excel_color_dict['150, 150, 150'] = 0x38 44 | excel_color_dict['0, 51, 102'] = 0x39 45 | excel_color_dict['51, 153, 102'] = 0x3A 46 | excel_color_dict['0, 51, 0'] = 0x3B 47 | excel_color_dict['51, 51, 0'] = 0x3C 48 | excel_color_dict['153, 51, 0'] = 0x3D 49 | excel_color_dict['51, 51, 153'] = 0x3E 50 | excel_color_dict['51, 51, 51'] = 0x3F 51 | 52 | 53 | @memoized 54 | def rgb(c): 55 | split = (c[0:2], c[2:4], c[4:6]) 56 | out = [] 57 | for x in split: 58 | out.append(int(x,16)) 59 | return out 
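# Illustrative examples (not in the original source): rgb("ff0000") returns [255, 0, 0].
# get_closest_rgb_match() below converts a hex string such as "#fe0101" to RGB, measures the
# squared distance to every entry in excel_color_dict, and returns the index of the nearest
# palette colour (here 0x0A, the entry for '255, 0, 0').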
60 | 61 | @memoized 62 | def get_closest_rgb_match(hex): 63 | hex = hex.replace("#",'').strip() 64 | color_dict = excel_color_dict 65 | orig_rgb = rgb(hex) 66 | new_color = '' 67 | min_distance = 195075 68 | orig_r = orig_rgb[0] 69 | orig_g = orig_rgb[1] 70 | orig_b = orig_rgb[2] 71 | for key in color_dict.iterkeys(): 72 | new_r = int(key.split(',')[0]) 73 | new_g = int(key.split(',')[1]) 74 | new_b = int(key.split(',')[2]) 75 | r_distance = orig_r - new_r 76 | g_distance = orig_g - new_g 77 | b_distance = orig_b - new_b 78 | current_distance = (r_distance * r_distance) + (g_distance * g_distance) + (b_distance * b_distance) 79 | if current_distance < min_distance: 80 | min_distance = current_distance 81 | new_color = key 82 | return color_dict.get(new_color) 83 | 84 | 85 | -------------------------------------------------------------------------------- /mm/composer_base.py: -------------------------------------------------------------------------------- 1 | 2 | import logging 3 | from model_base import HeaderFieldType 4 | 5 | log = logging.getLogger(__name__) 6 | 7 | 8 | class ComposerBase(object): 9 | """ Used by Composers """ 10 | def run(self): 11 | raise Exception("Overwrite run() in subclass") 12 | 13 | def __init__(self, data_model, grid, document): 14 | self.data_model = data_model 15 | self.grid = grid 16 | self.document = document 17 | self.row_id = 0 18 | self.col_id = 0 19 | 20 | def row(self, row): 21 | self.col_id = 0 22 | self.start_new_row(self.row_id) 23 | for cell in row: 24 | self.write_cell(self.row_id, self.col_id, cell) 25 | self.col_id += 1 26 | self.end_row(self.row_id) 27 | self.row_id += 1 28 | 29 | def iterate_grid(self): 30 | for row in self.grid.grid_data: 31 | self.row(row) 32 | 33 | def write_header(self): 34 | i = 0 35 | for header in self.data_model.field_titles: 36 | cell = HeaderFieldType(data=header) 37 | log.info(cell.__dict__) 38 | self.write_cell(0, i, cell) 39 | i += 1 40 | self.row_id += 1 41 | 42 | def set_option(self, x): 43 | log.warn("%s not supported with this composer %s" % (x, self.__class__.__name__)) 44 | 45 | def finish(self): 46 | """ Things we do after we are done """ 47 | for key in [x for x in dir(self.document.config) if not x.startswith("_")]: 48 | self.set_option(key) 49 | -------------------------------------------------------------------------------- /mm/composer_xls.py: -------------------------------------------------------------------------------- 1 | import re 2 | from composer_base import ComposerBase 3 | import lib.xlwt_0_7_2 as xlwt 4 | from lib.font_data.core import get_string_width 5 | from lib.xldate.convert import to_excel_from_C_codes 6 | import logging 7 | import StringIO 8 | import model_base 9 | import style_base 10 | import color_converter 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | def get_string_width_from_style(char_string, style): 16 | if type(char_string) not in (unicode, str): 17 | return 0 18 | point_size = style.font.height / 0x14 # convert back to points 19 | font_name = style.font.name 20 | if not font_name: 21 | font_name = 'Arial' 22 | return int(get_string_width(font_name, point_size, char_string) * 50) 23 | 24 | 25 | class styleXLS(style_base.StyleBase): 26 | 27 | font_points = 12 28 | 29 | def get_pattern(self): 30 | pattern = xlwt.Pattern() 31 | # see issue #27 https://github.com/brianray/mm/issues/27 32 | if not self.background_color: 33 | return pattern 34 | pattern.pattern = 1 35 | color = color_converter.get_closest_rgb_match(self.background_color) 36 | pattern.pattern_fore_colour 
= color 37 | return pattern 38 | 39 | def get_font_color(self): 40 | color = 0 41 | if self.color: 42 | color = color_converter.get_closest_rgb_match(self.color) 43 | return color 44 | 45 | def get_border(self): 46 | border = xlwt.Borders() 47 | if False: # TODO borders 48 | border.left = border.right = border.top = border.bottom = 3000 49 | if self.border_color: 50 | color = color_converter.get_closest_rgb_match(self.border_color) 51 | border.top_color = color 52 | border.bottom_color = color 53 | border.left_color = color 54 | border.right_color = color 55 | return border 56 | 57 | def is_bold(self): 58 | if self.font_style == 'bold': 59 | return True 60 | return False 61 | 62 | def get_font_points(self): 63 | if self.font_size: 64 | return self.font_size 65 | return 12 # TODO: default from config? 66 | 67 | def get_font_name(self): 68 | if not self.font_family: 69 | return 'Arial' 70 | return self.font_family 71 | 72 | def get_text_align(self): 73 | text_align = xlwt.Alignment() 74 | # HORZ - (0-General, 1-Left, 2-Center, 3-Right, 4-Filled, 5-Justified, 6-CenterAcrossSel, 7-Distributed) 75 | horz = 0 76 | if self.text_align == 'center': 77 | horz = 2 78 | elif self.text_align == 'right': 79 | horz = 3 80 | elif self.text_align == 'left': 81 | horz = 1 # left 82 | elif self.text_align is not None: 83 | log.warn("Unknown text_align %s" % self.text_align) 84 | 85 | text_align.horz = horz 86 | return text_align 87 | 88 | 89 | class ComposerXLS(ComposerBase): 90 | 91 | def convert_style(self, stylestr): 92 | in_style = styleXLS() 93 | in_style.style_from_string(stylestr) 94 | 95 | style = xlwt.XFStyle() 96 | fnt1 = xlwt.Font() 97 | fnt1.name = in_style.get_font_name() 98 | fnt1.bold = in_style.is_bold() 99 | fnt1.height = in_style.get_font_points() * 0x14 100 | fnt1.colour_index = in_style.get_font_color() 101 | style.font = fnt1 102 | style.alignment = in_style.get_text_align() 103 | style.pattern = in_style.get_pattern() 104 | style.borders = in_style.get_border() 105 | 106 | return style 107 | 108 | def cell_to_value(self, cell, row_id): 109 | 110 | if self.document.config.headers and row_id == 0: 111 | css_like_style = self.document.config.header_style 112 | elif len(self.document.config.row_styles) == 0: 113 | css_like_style = '' 114 | else: 115 | style_index = row_id % len(self.document.config.row_styles) 116 | css_like_style = self.document.config.row_styles[style_index] 117 | 118 | style = self.convert_style(css_like_style) 119 | 120 | if type(cell) == model_base.HeaderFieldType: 121 | style = self.convert_style(self.document.config.header_style) 122 | return cell.data, style 123 | 124 | elif type(cell) in (model_base.IntFieldType, model_base.StringFieldType): 125 | return cell.data, style 126 | 127 | elif type(cell) == model_base.DateTimeFieldType: 128 | style.num_format_str = self.document.config.get('datetime_format', 'M/D/YY h:mm') 129 | return cell.data, style 130 | elif type(cell) == model_base.DateFieldType: 131 | num_string_format = self.document.config.get('date_format', 'M/D/YY') 132 | if cell.format: 133 | num_string_format = to_excel_from_C_codes(cell.format, self.document.config) 134 | style.num_format_str = num_string_format 135 | return cell.data, style 136 | 137 | else: 138 | return cell.data, style 139 | 140 | def start_new_row(self, id): 141 | pass 142 | 143 | def end_row(self, id): 144 | pass 145 | 146 | def write_cell(self, row_id, col_id, cell): 147 | 148 | value, style = self.cell_to_value(cell, row_id) 149 | if type(cell) == model_base.ImageFieldType: 150 | if 
cell.width: 151 | self.sheet.col(col_id).width = cell.width * 256 152 | if cell.height: 153 | self.sheet.col(col_id).height = cell.height * 256 154 | self.sheet.insert_bitmap(value, row_id, col_id) 155 | 156 | elif type(cell) == model_base.URLFieldType: 157 | self.sheet.write( 158 | row_id, 159 | col_id, 160 | xlwt.Formula('HYPERLINK("%s";"%s")' % (value, cell.displayname)), 161 | style 162 | ) 163 | elif type(cell) == model_base.FormulaFieldType: 164 | self.sheet.write( 165 | row_id, 166 | col_id, 167 | xlwt.Formula(re.sub('^=', '', value)), 168 | style 169 | ) 170 | 171 | else: 172 | # most cases 173 | self.sheet.write(row_id, col_id, value, style) 174 | self.done_write_cell(row_id, col_id, cell, value, style) 175 | 176 | def done_write_cell(self, row_id, col_id, cell, value, style): 177 | 178 | if self.document.config.get('adjust_all_col_width', False): 179 | 180 | current_width = self.sheet.col_width(col_id) + 0x0d00 181 | log.info("current width is %s" % current_width) 182 | new_width = None 183 | 184 | if type(cell) == model_base.StringFieldType: 185 | new_width = get_string_width_from_style(value, style) 186 | 187 | elif type(cell) == model_base.DateTimeFieldType: 188 | new_width = 6550 # todo: different date formats 189 | 190 | elif type(cell) == model_base.URLFieldType: 191 | new_width = get_string_width_from_style(cell.displayname, style) 192 | 193 | if new_width and new_width > current_width: 194 | log.info("setting col #%s form width %s to %s" % (col_id, current_width, new_width)) 195 | col = self.sheet.col(col_id) 196 | if new_width > 65535: # USHRT_MAX 197 | new_width = 65534 198 | current_width = new_width 199 | col.width = new_width 200 | 201 | def set_option(self, key): 202 | 203 | val = getattr(self.document.config, key) 204 | if key == 'freeze_col' and val and val > 0: 205 | self.sheet.panes_frozen = True 206 | self.sheet.vert_split_pos = val 207 | 208 | elif key == 'freeze_row' and val and val > 0: 209 | self.sheet.panes_frozen = True 210 | self.sheet.horz_split_pos = val 211 | 212 | else: 213 | 214 | log.info("Nothing to be done for %s" % key) 215 | 216 | return 217 | log.info("Set option %s" % key) 218 | 219 | def run(self, child=None): 220 | top = False 221 | if not child: 222 | self.w = xlwt.Workbook(style_compression=2) 223 | top = True 224 | else: 225 | self.w = child 226 | self.sheet = self.w.add_sheet(self.document.name or "Sheet 1") 227 | if self.document.config.headers: 228 | self.write_header() 229 | self.iterate_grid() 230 | self.finish() 231 | 232 | # process any childern 233 | for doc_child in self.document.children: 234 | doc_child.writestr(child=self.w) 235 | 236 | if top: 237 | # write the file to string 238 | output = StringIO.StringIO() 239 | self.w.save(output) 240 | contents = output.getvalue() 241 | output.close() 242 | 243 | return contents 244 | -------------------------------------------------------------------------------- /mm/config_base.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class ConfigBase(object): 4 | """ Holds the configuration """ 5 | 6 | def get(self,key,default=None): 7 | if hasattr(self,key): 8 | return getattr(self,key) 9 | return default 10 | 11 | def __init__(self, config_dict=None): 12 | if config_dict: 13 | self.set_dict(config_dict) 14 | 15 | def set_dict(self, config_dict): 16 | for k,v in config_dict.items(): 17 | setattr(self, k, v) 18 | 19 | 20 | # default settings 21 | headers = True 22 | header_style = "color: #ffffff; font-family: arial; background-color: #0000B3; 
font-size: 12pt; text-align: center" 23 | freeze_col = 0 24 | freeze_row = 1 25 | row_styles = ( 26 | "color: #000000; font-family: arial; background-color: #666666; border-color: #ff0000", 27 | "color: #000000; font-family: arial; background-color: #FFFFFF" # Alternate 28 | ) 29 | adjust_all_col_width = True 30 | datetime_format = 'M/D/YY h:mm:ss' 31 | date_format = 'M/D/YY' 32 | time_format = "h:mm:ss" 33 | INGORE_DATA_MISMATCH = True 34 | -------------------------------------------------------------------------------- /mm/contrib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/mm/contrib/__init__.py -------------------------------------------------------------------------------- /mm/contrib/django/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/mm/contrib/django/__init__.py -------------------------------------------------------------------------------- /mm/contrib/django/data_model.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from mm import model_base 3 | from django.db import models 4 | 5 | log = logging.getLogger(__name__) 6 | 7 | 8 | class DjangoDataModel(object): 9 | """ Data Model creates a list of system defined data types in self.field_headers""" 10 | 11 | def __init__(self, data, order=None, column_types=None): 12 | """ constructor takes data as a tuple or list""" 13 | 14 | self.field_headers = [] 15 | self.field_titles = [] 16 | if data.count() == 0: 17 | raise Exception("Can not make spreadsheets with an empty set") 18 | first_data = data[0] 19 | 20 | header_types = {} 21 | for f in first_data._meta.fields: # TODO: + obj._meta.many_to_many 22 | header_types[f.name] = self.figure_out_type(f) 23 | 24 | #TODO: 'if order' sort the list 25 | 26 | # assign data 27 | for verbose_name, field_type_class in header_types.items(): 28 | 29 | # we add it to the 'class' so to be 30 | # used in every instance 31 | self.field_titles.append(verbose_name) 32 | self.field_headers.append(field_type_class) 33 | log.info("created field type %s for %s" % (field_type_class, verbose_name)) 34 | 35 | def __serialize(obj): 36 | obj._meta.fields 37 | 38 | def _string_types(self): 39 | return [ 40 | models.CharField, 41 | models.CommaSeparatedIntegerField, 42 | models.IPAddressField, 43 | models.SlugField, 44 | models.TextField, 45 | models.EmailField, 46 | models.FilePathField, 47 | models.GenericIPAddressField, 48 | ] 49 | 50 | def _int_types(self): 51 | return [ 52 | models.AutoField, 53 | models.IntegerField, 54 | models.PositiveIntegerField, 55 | models.PositiveSmallIntegerField, 56 | models.SmallIntegerField, 57 | models.BigIntegerField, 58 | 59 | ] 60 | 61 | def _bool_types(self): 62 | return [ 63 | models.BooleanField, 64 | models.NullBooleanField, 65 | ] 66 | 67 | def _date_types(self): 68 | return [ 69 | models.DateField, 70 | ] 71 | 72 | def _time_types(self): 73 | return [ 74 | models.TimeField 75 | ] 76 | 77 | def _datetime_types(self): 78 | return [ 79 | models.DateTimeField, 80 | ] 81 | 82 | def _decimal_types(self): 83 | return [ 84 | models.DecimalField, 85 | ] 86 | 87 | def _float_types(self): 88 | return [ 89 | models.FloatField, 90 | ] 91 | 92 | def _none_types(self): 93 | l = [ 94 | models.NullBooleanField, 95 | models.FileField, 96 | models.ImageField, 97 | ] 
98 | if "BinaryField" in dir(models): 99 | l.append(models.BinaryField) 100 | return l 101 | 102 | def _url_types(self): 103 | return [ 104 | models.URLField 105 | ] 106 | 107 | def type_mapping(self): 108 | return ( 109 | (self._string_types, model_base.StringFieldType), 110 | (self._int_types, model_base.IntFieldType), 111 | (self._bool_types, model_base.BoolFieldType), 112 | (self._date_types, model_base.DateFieldType), 113 | (self._time_types, model_base.TimeFieldType), 114 | (self._datetime_types, model_base.DateTimeFieldType), 115 | (self._decimal_types, model_base.DecimalFieldType), 116 | (self._float_types, model_base.FloatFieldType), 117 | (self._none_types, model_base.NoneFieldType), 118 | (self._url_types, model_base.URLFieldType), 119 | ) 120 | 121 | def figure_out_type(self, item): 122 | """ 123 | 124 | This is how django stores types in sqlite3: 125 | 126 | "AutoField" integer NOT NULL PRIMARY KEY, 127 | "BigInteger" bigint NOT NULL, 128 | "BinaryField" BLOB NOT NULL, 129 | "BooleanField" bool NOT NULL, 130 | "CharField" varchar(50) NOT NULL, 131 | "CommaSeparatedIntegerField" varchar(25) NOT NULL, 132 | "DateField" date NOT NULL, 133 | "DateTimeField" datetime NOT NULL, 134 | "DecimalField" decimal NOT NULL, 135 | "EmailField" varchar(75) NOT NULL, 136 | "FileField" varchar(100) NOT NULL, 137 | "FilePathField" varchar(100) NOT NULL, 138 | "FloatField" real NOT NULL, 139 | "ImageField" varchar(100) NOT NULL, 140 | "IntegerField" integer NOT NULL, 141 | "IPAddressField" char(15) NOT NULL, 142 | "GenericIPAddressField" char(39) NOT NULL, 143 | "NullBooleanField" bool, 144 | "PositiveIntegerField" integer unsigned NOT NULL, 145 | "PositiveSmallIntegerField" smallint unsigned NOT NULL, 146 | "SlugField" varchar(50) NOT NULL, 147 | "SmallIntegerField" smallint NOT NULL, 148 | "TextField" text NOT NULL, 149 | "TimeField" time NOT NULL, 150 | "URLField" varchar(200) NOT NULL 151 | """ 152 | item_type = type(item) 153 | for func, mm_type in self.type_mapping(): 154 | if item_type in func(): 155 | return mm_type 156 | 157 | log.warn("Returning None type for type %s" % item_type) 158 | return model_base.NoneFieldType 159 | -------------------------------------------------------------------------------- /mm/contrib/django/grid.py: -------------------------------------------------------------------------------- 1 | import logging 2 | log = logging.getLogger(__name__) 3 | 4 | 5 | class DjangoGrid(object): 6 | 7 | def populate(self, indata, config=None): 8 | for required in ('row_count', 'col_count', 'headers'): 9 | if not hasattr(self, required): 10 | raise Exception("missing required attribute to Grid: %s" % required) 11 | # create a grid 12 | self.grid_data = [[None] * self.col_count for i in range(self.row_count)] 13 | 14 | # now populate 15 | # this is pass one 16 | # want to do as much processing here as we can 17 | # we populate left to right, top to bottom 18 | for row_id in range(self.row_count): 19 | row = indata[row_id] 20 | for col_id in range(self.col_count): 21 | field_type_class = self.headers[col_id] 22 | data = getattr(row, self.titles[col_id]) 23 | self.grid_data[row_id][col_id] = field_type_class(data) 24 | 25 | #Ilport pdb; pdb.set_trace() 26 | log.info("populated grid %sX%s" % (self.row_count, self.col_count)) 27 | -------------------------------------------------------------------------------- /mm/contrib/prettytable/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/mm/contrib/prettytable/__init__.py -------------------------------------------------------------------------------- /mm/contrib/prettytable/composers.py: -------------------------------------------------------------------------------- 1 | from mm.composer_base import ComposerBase 2 | import logging 3 | log = logging.getLogger(__name__) 4 | pretty_table = False 5 | try: 6 | 7 | import prettytable # NOQA 8 | pretty_table = True 9 | except ImportError: 10 | pass 11 | 12 | 13 | class ComposerPrettyTable(ComposerBase): 14 | 15 | def write_header(self): 16 | self.pt.field_names = self.data_model.field_titles 17 | 18 | def row(self, row): 19 | self.pt.add_row([cell.data for cell in row]) 20 | 21 | def run(self, child=None): 22 | if not pretty_table: 23 | raise Exception("Module 'prettytable' required for text table output") 24 | self.pt = prettytable.PrettyTable() 25 | if self.document.config.headers: 26 | self.write_header() 27 | self.iterate_grid() 28 | self.finish() 29 | 30 | # process any childern 31 | for doc_child in self.document.children: 32 | doc_child.writestr(child=self.pt) 33 | 34 | return self.pt 35 | -------------------------------------------------------------------------------- /mm/document_base.py: -------------------------------------------------------------------------------- 1 | from document_writers import DocumentWriter 2 | from model_base import DataModel 3 | from config_base import ConfigBase 4 | from serializer_base import Serializer 5 | from grid_base import GridBase 6 | import logging 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | 11 | class Document(DocumentWriter): 12 | """ 13 | Document reporesents the abstact view you interact with in order to send 14 | data for your document and ultimately get output. 15 | """ 16 | def __init__( 17 | self, 18 | data, 19 | data_model_class=None, 20 | grid_class=None, 21 | serializer_class=None, 22 | config=None, 23 | config_dict=None, 24 | order=None, 25 | column_types=None): 26 | """ 27 | data -- a dict or a list of data you wish to use for a the 28 | spreadsheet 29 | data_model -- (optional) fields defenitions 30 | grid_model -- (optional) takes data_model and data and fills grid 31 | serializer_class -- (optional) class to use to serialize raw data 32 | config -- (optional) Configuration (ConfigBase) instance 33 | config_dict -- (optional) a dictionary of key/values of settings 34 | order -- (optional) also headers 35 | column_types -- (optional) a dictionary of column types; e.g. 
column_name1 is a date column: {'column_name1': mm.Date) 36 | 37 | """ 38 | self.data = data 39 | self.config = config 40 | self.name = None 41 | self.children = [] 42 | if not self.config: 43 | self.config = ConfigBase() 44 | if config_dict: 45 | self.config.set_dict(config_dict) 46 | 47 | # make a data model if one does not exist 48 | self.data_model_class = data_model_class 49 | if not data_model_class: 50 | self.data_model_class = DataModel 51 | 52 | self.data_model = self.data_model_class(data, order=order, column_types=column_types) 53 | 54 | # grid base 55 | if not grid_class: 56 | grid_class = GridBase 57 | 58 | # Serialize the data 59 | # we look at it here once and only once 60 | # we look at it again when we write 61 | # goal to pass over data no more than twice, if possible 62 | if not serializer_class: 63 | serializer_class = Serializer 64 | serializer = serializer_class( 65 | self.data_model, 66 | self.data, 67 | self.config, 68 | grid_class=grid_class 69 | ) 70 | 71 | # returns a grid instance 72 | self.grid = serializer.serialize() 73 | 74 | log.info("Documnet Created") 75 | 76 | def set_composer_class(self, composer_class): 77 | self.composer_class = composer_class 78 | 79 | def set_composer(self, composer): 80 | self.composer = composer 81 | 82 | def set_name(self, name): 83 | self.name = name 84 | 85 | def add_child(self, document): 86 | self.children.append(document) 87 | -------------------------------------------------------------------------------- /mm/document_writers.py: -------------------------------------------------------------------------------- 1 | from composer_xls import ComposerXLS 2 | from mm.contrib.prettytable.composers import ComposerPrettyTable, pretty_table 3 | 4 | import os 5 | import tempfile 6 | import logging 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | 11 | class DocumentWriter(object): 12 | "runs a composer" 13 | 14 | composer_class = None 15 | composer = None 16 | 17 | def writestr(self, child=False): 18 | composer_class = self.composer_class 19 | if not composer_class: 20 | # default format is XLS 21 | composer_class = ComposerXLS 22 | log.info("Setting output format to XLS") 23 | self.composer = composer_class(self.data_model, self.grid, self) 24 | return self.composer.run(child=child) 25 | 26 | def write(self, filename): 27 | ext = os.path.splitext(filename)[-1].lower() 28 | if ext == "xls": 29 | self.composer = ComposerXLS(self.data_model, self.grid, self) 30 | log.info("Setting output format to XLS, based on file extension") 31 | elif ext == "txt" and pretty_table: 32 | self.composer = ComposerPrettyTable(self.data_model, self.grid, self) 33 | log.info("Setting output format to TXT, based on file extension") 34 | 35 | with open(filename, "wb") as f: 36 | f.write(self.writestr()) 37 | 38 | log.info("wrote file: %s" % filename) 39 | 40 | def write_gdata(self, name, username, password, auth_token=None): 41 | try: 42 | import gdata 43 | import gdata.docs.service 44 | except ImportError: 45 | raise Exception("Must install package 'gdata' to use write_gdata()") 46 | 47 | tmp_file, tmp_file_path = tempfile.mkstemp() 48 | self.write(tmp_file_path) 49 | 50 | gd_client = gdata.docs.service.DocsService() 51 | gd_client.ssl = True 52 | if not auth_token: 53 | gd_client.ClientLogin( 54 | username, 55 | password, 56 | "marmir-1.0") 57 | else: 58 | #TODO: use the token 59 | raise Exception("oauth not yet supported") 60 | 61 | ms = gdata.MediaSource( 62 | file_path=tmp_file_path, 63 | content_type='application/vnd.ms-excel') 64 | entry = 
gd_client.Upload(ms, name) # NOQA 65 | 66 | #cleanup 67 | os.unlink(tmp_file_path) 68 | 69 | return gd_client.GetClientLoginToken() 70 | -------------------------------------------------------------------------------- /mm/grid_base.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from model_base import is_custom_mm_type 3 | log = logging.getLogger(__name__) 4 | 5 | 6 | class GridBase(object): 7 | 8 | def populate(self, indata, config): 9 | for required in ('row_count', 'col_count', 'headers', 'titles'): 10 | if not hasattr(self, required): 11 | raise Exception("missing required attribute to Grid: %s" % 12 | required) 13 | # create a grid 14 | self.grid_data = [[None] * self.col_count for i in range(self.row_count)] 15 | 16 | using_lists = False # support for lists #2 17 | if type(indata[0]) != dict: 18 | using_lists = True 19 | 20 | # now populate 21 | # this is pass one 22 | # want to do as much processing here as we can 23 | # we populate left to right, top to bottom 24 | n_missing = 0 25 | for row_id in range(self.row_count): 26 | for col_id in range(self.col_count): 27 | field_type_class = self.headers[col_id] 28 | 29 | # headers from seelf.data_model.field_headers, sorted 30 | if using_lists: 31 | try: 32 | # direct data access lists 33 | data = indata[row_id][col_id] 34 | except IndexError: 35 | log.warning('No index found in row %d column %d' % 36 | (row_id, col_id)) 37 | if config.INGORE_DATA_MISMATCH: 38 | data = '' 39 | n_missing += 1 40 | 41 | else: 42 | if len(indata[row_id]) > self.col_count: 43 | raise Exception("Data mismatch: Row %d has %d more columns than row 1" % 44 | ((row_id + 1), len(indata[row_id])-self.col_count)) 45 | try: 46 | #direct data access dicts 47 | data = indata[row_id][self.titles[col_id]] 48 | except IndexError: 49 | log.warning('No index found in row %d column %d' % 50 | (row_id, col_id)) 51 | n_missing += 1 52 | if config.INGORE_DATA_MISMATCH: 53 | data = '' 54 | except KeyError: 55 | log.warning('No key found in row %d column %d' % 56 | (row_id, col_id)) 57 | n_missing += 1 58 | if config.INGORE_DATA_MISMATCH: 59 | data = '' 60 | 61 | if is_custom_mm_type(data): 62 | # explicit type 63 | try: 64 | self.grid_data[row_id][col_id] = data 65 | except IndexError: 66 | log.warning('No index found in row %d column %d' % 67 | (row_id, col_id)) 68 | n_missing += 1 69 | if config.INGORE_DATA_MISMATCH: 70 | data = '' 71 | else: 72 | # wrap in type from headers 73 | try: 74 | self.grid_data[row_id][col_id] = field_type_class(data) 75 | except IndexError: 76 | log.warning('No index found in row %d column %d' % 77 | (row_id, col_id)) 78 | n_missing += 1 79 | if config.INGORE_DATA_MISMATCH: 80 | data = '' 81 | log.info("populated grid %sX%s" % (self.row_count, self.col_count)) 82 | if n_missing > 0: 83 | log.info('%d missing items' % n_missing) 84 | -------------------------------------------------------------------------------- /mm/lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/mm/lib/__init__.py -------------------------------------------------------------------------------- /mm/lib/font_data/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /mm/lib/font_data/core.py: 
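DocumentWriter (document_writers.py, above) picks a composer per call: writestr() falls
back to ComposerXLS when no composer_class has been set, and write() is meant to key off
the filename extension. A hedged usage sketch, assuming list-of-dict data that the
default DataModel can type on its own:

    from mm.document_base import Document
    from mm.contrib.prettytable.composers import ComposerPrettyTable

    rows = [{'id': 1, 'msg': 'My first Cell'},
            {'id': 2, 'msg': 'My second Cell'}]

    doc = Document(rows)
    doc.write('report.xls')                 # default composer: ComposerXLS

    doc.set_composer_class(ComposerPrettyTable)
    print(doc.writestr())                   # returns a prettytable.PrettyTable

Note that write() compares the extension against "xls"/"txt" without the leading dot
that os.path.splitext() returns, so those branches never match as written; the default
XLS path still works because writestr() builds its composer from composer_class.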
-------------------------------------------------------------------------------- 1 | import os.path as path 2 | from decorators import memoized 3 | try: 4 | import cPickle as pickle 5 | except: 6 | import pickle 7 | 8 | LATEST_FONT_DATA = "font_data_ms_fonts.bin" 9 | 10 | @memoized 11 | def get_font_data(): 12 | this_file_path = path.abspath(__file__) 13 | data_path = path.join( path.dirname(this_file_path), LATEST_FONT_DATA) 14 | pkl_file = open(data_path, 'rb') 15 | FONT_DATA = pickle.load(pkl_file) 16 | pkl_file.close() 17 | return FONT_DATA 18 | 19 | 20 | @memoized 21 | def get_character_data(font_name, char): 22 | font_data = get_font_data() 23 | font_name = font_name.replace("-","_").replace(" ","_") 24 | if font_name not in font_data: 25 | font_name = font_name.capitalize() 26 | if font_name not in font_data: 27 | raise Exception("no font data for font %s" % font_name) 28 | font_set = font_data[font_name] 29 | if char not in font_set['values']: 30 | return font_set['default_width'], {} # empty kerns 31 | return font_set['values'][char] 32 | 33 | @memoized 34 | def get_character_width(font_name, char): 35 | width, kerns = get_character_data(font_name, char) 36 | return width 37 | 38 | @memoized 39 | def get_kern_offset(font_name, char1, char2): 40 | width, kerns = get_character_data(font_name, char1) 41 | if char2 in kerns: 42 | return kerns[char2] 43 | return 0 44 | 45 | @memoized 46 | def get_string_width(font_name, point_size, char_string): 47 | out_width_256 = 0 48 | current_pos = 0 49 | str_length = len(char_string) 50 | for char in char_string: 51 | out_width_256 += get_character_width(font_name, char) 52 | if current_pos != (str_length-1): 53 | out_width_256 += get_kern_offset(font_name, char, char_string[current_pos+1]) 54 | current_pos += 1 55 | return out_width_256 * ( point_size / 256.0 ) 56 | 57 | if __name__ == "__main__": 58 | get_character_data('Arial', 'A') 59 | -------------------------------------------------------------------------------- /mm/lib/font_data/decorators.py: -------------------------------------------------------------------------------- 1 | 2 | import collections 3 | import functools 4 | 5 | class memoized(object): 6 | '''Decorator. Caches a function's return value each time it is called. 7 | If called later with the same arguments, the cached value is returned 8 | (not reevaluated). 9 | ''' 10 | def __init__(self, func): 11 | self.func = func 12 | self.cache = {} 13 | def __call__(self, *args): 14 | if not isinstance(args, collections.Hashable): 15 | # uncacheable. a list, for instance. 16 | # better to not cache than blow up. 
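            # (args is the positional-argument tuple; a tuple still registers as
            # collections.Hashable even when it contains unhashable items such as
            # lists, so this guard is best-effort and the cache lookup below can
            # still raise TypeError for nested unhashable values.)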
17 | return self.func(*args) 18 | if args in self.cache: 19 | return self.cache[args] 20 | else: 21 | value = self.func(*args) 22 | self.cache[args] = value 23 | return value 24 | def __repr__(self): 25 | '''Return the function's docstring.''' 26 | return self.func.__doc__ 27 | def __get__(self, obj, objtype): 28 | '''Support instance methods.''' 29 | return functools.partial(self.__call__, obj) 30 | -------------------------------------------------------------------------------- /mm/lib/font_data/font_data_ms_fonts.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/mm/lib/font_data/font_data_ms_fonts.bin -------------------------------------------------------------------------------- /mm/lib/font_data/tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import core 3 | 4 | 5 | class test_font_data(unittest.TestCase): 6 | 7 | def test_checkwidth1(self): 8 | 9 | width = core.get_string_width('Arial', 11, 'hello world') 10 | self.assertEqual(width, 52.421875) 11 | 12 | def test_checkwidth2(self): 13 | width = core.get_string_width('Times New Roman', 23 ,'The quick brown fox jumps over the lazy dog') 14 | self.assertEqual(width, 420.46875) 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /mm/lib/xldate/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /mm/lib/xldate/convert.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | class UnsupportedFormatCodeException(Exception): 5 | pass 6 | 7 | def to_excel_from_C_codes(cdate_str, config): 8 | """ 9 | ref http://office.microsoft.com/en-us/excel-help/create-a-custom-number-format-HP010342372.aspx 10 | and http://docs.python.org/2/library/datetime.html 11 | 12 | """ 13 | pairs = ( 14 | 15 | ('%a', None, "abbreviated weekday names require special function in excel"), # not supported 16 | ('%A', None, "weeday naes require a special funtion in excel"), # " 17 | ('%b', 'mmm'), # month as an abbreviation (Jan to Dec). 18 | ('%B', 'mmmm'), # month as a full name (January to December) 19 | ('%c', config.get('datetime_format', 'M/D/YY h:mm:ss') ), # date and time representation. 20 | ('%d', 'dd'), 21 | ('%f', '[ss].00'), # Microsecond as a decimal number [0,999999], zero-padded on the left || Elapsed time (seconds and hundredths) 3735.80 [ss].00 22 | ('%H', 'hh'), # Hours 00-23 hh 23 | ('%I', None, "AM or PM required for 12 hour clock"), # AM or PM required 24 | ('%j', None, "Day of year not supported in Excel"), 25 | ('%m', 'mm'), 26 | ('%M', 'mm'), 27 | ('%p', 'AM/PM'), # 28 | ('%S', 'ss'), 29 | ('%U', None, "Excel has no support for week number"), 30 | ('%w', None, "Excel has no support for week day"), 31 | ('%W', None, "Excel has no support for Week Number of year"), 32 | ('%x', config.get('datetime_format', 'M/D/YY') ), # date 33 | ('%X', config.get('time_format', 'h:mm:ss') ), # time 34 | ('%y', 'yy'), # year as a two-digit number. 35 | ('%Y', 'yyyy'), # year as a four-digit number. 
36 | ('%z', None, "Excel has no time zone support"), 37 | ('%Z', None, "Excel has no time zone support"), 38 | ('%%', r'\%'), 39 | ) 40 | 41 | original_str = cdate_str 42 | for t in pairs: 43 | if not t[1] and cdate_str.find(t[0]) > -1: 44 | reason = "Excel does not support" 45 | if len(t) > 2: 46 | reason = t[2] 47 | raise UnsupportedFormatCodeException("Could not replace %s (%s) found in %s" % (t[0], 48 | reason, 49 | original_str)) 50 | elif not t[1]: 51 | continue 52 | 53 | cdate_str = re.sub(t[0], t[1], cdate_str) 54 | 55 | return cdate_str 56 | 57 | 58 | 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /mm/lib/xldate/tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import convert 3 | 4 | 5 | class TestsConvert(unittest.TestCase): 6 | 7 | def test_formats(self): 8 | tests = ( 9 | ("%b %d %H:%M:%S %Y", "mmm dd hh:mm:ss yyyy"), # Jul 08 08:08:10 2011 10 | ('%b|%B|%c|%d|%f|%H|%m|%M|%p|%S|%x|%X|%y|%Y|%%','mmm|mmmm|M/D/YY h:mm:ss|dd|[ss].00|hh|mm|mm|AM/PM|ss|M/D/YY|h:mm:ss|yy|yyyy|\%'), 11 | ) 12 | for test in tests: 13 | excel = convert.to_excel_from_C_codes(test[0],{}) 14 | self.assertEqual(test[1], excel) 15 | 16 | 17 | tests = ( 18 | ('%a', '%A', '%I', '%j', '%U', '%w', '%W', '%z', '%Z',) 19 | ) 20 | for test in tests: 21 | self.assertRaises(convert.UnsupportedFormatCodeException, 22 | convert.to_excel_from_C_codes, test, {}) 23 | 24 | if __name__ == "__main__": 25 | unittest.main() 26 | 27 | 28 | -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/BIFFRecords.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/mm/lib/xlwt_0_7_2/BIFFRecords.py -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/Bitmap.py: -------------------------------------------------------------------------------- 1 | # -*- coding: windows-1251 -*- 2 | 3 | # Portions are Copyright (C) 2005 Roman V. Kiseliov 4 | # Portions are Copyright (c) 2004 Evgeny Filatov 5 | # Portions are Copyright (c) 2002-2004 John McNamara (Perl Spreadsheet::WriteExcel) 6 | 7 | from BIFFRecords import BiffRecord 8 | from struct import * 9 | 10 | 11 | def _size_col(sheet, col): 12 | return sheet.col_width(col) 13 | 14 | 15 | def _size_row(sheet, row): 16 | return sheet.row_height(row) 17 | 18 | 19 | def _position_image(sheet, row_start, col_start, x1, y1, width, height): 20 | """Calculate the vertices that define the position of the image as required by 21 | the OBJ record. 22 | 23 | +------------+------------+ 24 | | A | B | 25 | +-----+------------+------------+ 26 | | |(x1,y1) | | 27 | | 1 |(A1)._______|______ | 28 | | | | | | 29 | | | | | | 30 | +-----+----| BITMAP |-----+ 31 | | | | | | 32 | | 2 | |______________. | 33 | | | | (B2)| 34 | | | | (x2,y2)| 35 | +---- +------------+------------+ 36 | 37 | Example of a bitmap that covers some of the area from cell A1 to cell B2. 38 | 39 | Based on the width and height of the bitmap we need to calculate 8 vars: 40 | col_start, row_start, col_end, row_end, x1, y1, x2, y2. 41 | The width and height of the cells are also variable and have to be taken into 42 | account. 43 | The values of col_start and row_start are passed in from the calling 44 | function. 
The values of col_end and row_end are calculated by subtracting 45 | the width and height of the bitmap from the width and height of the 46 | underlying cells. 47 | The vertices are expressed as a percentage of the underlying cell width as 48 | follows (rhs values are in pixels): 49 | 50 | x1 = X / W *1024 51 | y1 = Y / H *256 52 | x2 = (X-1) / W *1024 53 | y2 = (Y-1) / H *256 54 | 55 | Where: X is distance from the left side of the underlying cell 56 | Y is distance from the top of the underlying cell 57 | W is the width of the cell 58 | H is the height of the cell 59 | 60 | Note: the SDK incorrectly states that the height should be expressed as a 61 | percentage of 1024. 62 | 63 | col_start - Col containing upper left corner of object 64 | row_start - Row containing top left corner of object 65 | x1 - Distance to left side of object 66 | y1 - Distance to top of object 67 | width - Width of image frame 68 | height - Height of image frame 69 | 70 | """ 71 | # Adjust start column for offsets that are greater than the col width 72 | while x1 >= _size_col(sheet, col_start): 73 | x1 -= _size_col(sheet, col_start) 74 | col_start += 1 75 | # Adjust start row for offsets that are greater than the row height 76 | while y1 >= _size_row(sheet, row_start): 77 | y1 -= _size_row(sheet, row_start) 78 | row_start += 1 79 | # Initialise end cell to the same as the start cell 80 | row_end = row_start # Row containing bottom right corner of object 81 | col_end = col_start # Col containing lower right corner of object 82 | width = width + x1 - 1 83 | height = height + y1 - 1 84 | # Subtract the underlying cell widths to find the end cell of the image 85 | while (width >= _size_col(sheet, col_end)): 86 | width -= _size_col(sheet, col_end) 87 | col_end += 1 88 | # Subtract the underlying cell heights to find the end cell of the image 89 | while (height >= _size_row(sheet, row_end)): 90 | height -= _size_row(sheet, row_end) 91 | row_end += 1 92 | # Bitmap isn't allowed to start or finish in a hidden cell, i.e. a cell 93 | # with zero height or width. 94 | if ((_size_col(sheet, col_start) == 0) or (_size_col(sheet, col_end) == 0) 95 | or (_size_row(sheet, row_start) == 0) or (_size_row(sheet, row_end) == 0)): 96 | return 97 | # Convert the pixel values to the percentage value expected by Excel 98 | x1 = int(float(x1) / _size_col(sheet, col_start) * 1024) 99 | y1 = int(float(y1) / _size_row(sheet, row_start) * 256) 100 | # Distance to right side of object 101 | x2 = int(float(width) / _size_col(sheet, col_end) * 1024) 102 | # Distance to bottom of object 103 | y2 = int(float(height) / _size_row(sheet, row_end) * 256) 104 | return (col_start, x1, row_start, y1, col_end, x2, row_end, y2) 105 | 106 | 107 | class ObjBmpRecord(BiffRecord): 108 | _REC_ID = 0x005D # Record identifier 109 | 110 | def __init__(self, row, col, sheet, im_data_bmp, x, y, scale_x, scale_y): 111 | # Scale the frame of the image. 112 | width = im_data_bmp.width * scale_x 113 | height = im_data_bmp.height * scale_y 114 | 115 | # Calculate the vertices of the image and write the OBJ record 116 | coordinates = _position_image(sheet, row, col, x, y, width, height) 117 | # print coordinates 118 | col_start, x1, row_start, y1, col_end, x2, row_end, y2 = coordinates 119 | 120 | """Store the OBJ record that precedes an IMDATA record. This could be generalise 121 | to support other Excel objects. 122 | 123 | """ 124 | cObj = 0x0001 # Count of objects in file (set to 1) 125 | OT = 0x0008 # Object type. 
8 = Picture 126 | id = 0x0001 # Object ID 127 | grbit = 0x0614 # Option flags 128 | colL = col_start # Col containing upper left corner of object 129 | dxL = x1 # Distance from left side of cell 130 | rwT = row_start # Row containing top left corner of object 131 | dyT = y1 # Distance from top of cell 132 | colR = col_end # Col containing lower right corner of object 133 | dxR = x2 # Distance from right of cell 134 | rwB = row_end # Row containing bottom right corner of object 135 | dyB = y2 # Distance from bottom of cell 136 | cbMacro = 0x0000 # Length of FMLA structure 137 | Reserved1 = 0x0000 # Reserved 138 | Reserved2 = 0x0000 # Reserved 139 | icvBack = 0x09 # Background colour 140 | icvFore = 0x09 # Foreground colour 141 | fls = 0x00 # Fill pattern 142 | fAuto = 0x00 # Automatic fill 143 | icv = 0x08 # Line colour 144 | lns = 0xff # Line style 145 | lnw = 0x01 # Line weight 146 | fAutoB = 0x00 # Automatic border 147 | frs = 0x0000 # Frame style 148 | cf = 0x0009 # Image format, 9 = bitmap 149 | Reserved3 = 0x0000 # Reserved 150 | cbPictFmla = 0x0000 # Length of FMLA structure 151 | Reserved4 = 0x0000 # Reserved 152 | grbit2 = 0x0001 # Option flags 153 | Reserved5 = 0x0000 # Reserved 154 | 155 | data = pack(" 0xFFFF): 222 | raise Exception("bitmap: largest image width supported is 65k.") 223 | if (height > 0xFFFF): 224 | raise Exception("bitmap: largest image height supported is 65k.") 225 | # Read and remove the bitmap planes and bpp data. Verify them. 226 | planes, bitcount = unpack(" USHRT_MAX: 39 | raise ValueError( 40 | 'width must be <= USHRT_MAX: %d > %d' % (width, USHRT_MAX)) 41 | self._width = width 42 | 43 | def get_width(self): 44 | return self._width 45 | 46 | width = property(get_width, set_width) 47 | -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/CompoundDoc.py: -------------------------------------------------------------------------------- 1 | # -*- coding: windows-1252 -*- 2 | 3 | import sys 4 | import struct 5 | 6 | class Reader: 7 | def __init__(self, filename, dump = False): 8 | self.dump = dump 9 | self.STREAMS = {} 10 | 11 | doc = file(filename, 'rb').read() 12 | self.header, self.data = doc[0:512], doc[512:] 13 | del doc 14 | 15 | self.__build_header() 16 | self.__build_MSAT() 17 | self.__build_SAT() 18 | self.__build_directory() 19 | self.__build_short_sectors_data() 20 | 21 | if len(self.short_sectors_data) > 0: 22 | self.__build_SSAT() 23 | else: 24 | if self.dump and (self.total_ssat_sectors != 0 or self.ssat_start_sid != -2): 25 | print 'NOTE: header says that must be', self.total_ssat_sectors, 'short sectors' 26 | print 'NOTE: starting at', self.ssat_start_sid, 'sector' 27 | print 'NOTE: but file does not contains data in short sectors' 28 | self.ssat_start_sid = -2 29 | self.total_ssat_sectors = 0 30 | self.SSAT = [-2] 31 | 32 | for dentry in self.dir_entry_list[1:]: 33 | (did, 34 | sz, name, 35 | t, c, 36 | did_left, did_right, did_root, 37 | dentry_start_sid, 38 | stream_size 39 | ) = dentry 40 | stream_data = '' 41 | if stream_size > 0: 42 | if stream_size >= self.min_stream_size: 43 | args = (self.data, self.SAT, dentry_start_sid, self.sect_size) 44 | else: 45 | args = (self.short_sectors_data, self.SSAT, dentry_start_sid, self.short_sect_size) 46 | stream_data = self.get_stream_data(*args) 47 | 48 | if name != '': 49 | # BAD IDEA: names may be equal. NEED use full paths... 
50 | self.STREAMS[name] = stream_data 51 | 52 | 53 | def __build_header(self): 54 | self.doc_magic = self.header[0:8] 55 | 56 | if self.doc_magic != '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1': 57 | raise Exception, 'Not an OLE file.' 58 | 59 | self.file_uid = self.header[8:24] 60 | self.rev_num = self.header[24:26] 61 | self.ver_num = self.header[26:28] 62 | self.byte_order = self.header[28:30] 63 | self.log2_sect_size, = struct.unpack(' 0: 109 | msat_sector = struct.unpack('<128l', self.data[next*self.sect_size:(next+1)*self.sect_size]) 110 | self.MSAT.extend(msat_sector[:127]) 111 | next = msat_sector[-1] 112 | 113 | if self.dump: 114 | print 'MSAT (header part): \n', self.MSAT[:109] 115 | print 'additional MSAT sectors: \n', self.MSAT[109:] 116 | 117 | 118 | def __build_SAT(self): 119 | sat_stream = ''.join([self.data[i*self.sect_size:(i+1)*self.sect_size] for i in self.MSAT if i >= 0]) 120 | 121 | sat_sids_count = len(sat_stream) >> 2 122 | self.SAT = struct.unpack('<%dl' % sat_sids_count, sat_stream) # SIDs tuple 123 | 124 | if self.dump: 125 | print 'SAT sid count:\n', sat_sids_count 126 | print 'SAT content:\n', self.SAT 127 | 128 | 129 | def __build_SSAT(self): 130 | ssat_stream = self.get_stream_data(self.data, self.SAT, self.ssat_start_sid, self.sect_size) 131 | 132 | ssids_count = len(ssat_stream) >> 2 133 | self.SSAT = struct.unpack('<%dl' % ssids_count, ssat_stream) 134 | 135 | if self.dump: 136 | print 'SSID count:', ssids_count 137 | print 'SSAT content:\n', self.SSAT 138 | 139 | 140 | def __build_directory(self): 141 | dir_stream = self.get_stream_data(self.data, self.SAT, self.dir_start_sid, self.sect_size) 142 | 143 | self.dir_entry_list = [] 144 | 145 | i = 0 146 | while i < len(dir_stream): 147 | dentry = dir_stream[i:i+128] # 128 -- dir entry size 148 | i += 128 149 | 150 | did = len(self.dir_entry_list) 151 | sz, = struct.unpack(' 0 : 153 | name = dentry[0:sz-2].decode('utf_16_le', 'replace') 154 | else: 155 | name = u'' 156 | t, = struct.unpack('B', dentry[66]) 157 | c, = struct.unpack('B', dentry[67]) 158 | did_left , = struct.unpack('= self.min_stream_size: 200 | print 'stream stored as normal stream' 201 | else: 202 | print 'stream stored as short-stream' 203 | 204 | 205 | def __build_short_sectors_data(self): 206 | (did, sz, name, t, c, 207 | did_left, did_right, did_root, 208 | dentry_start_sid, stream_size) = self.dir_entry_list[0] 209 | assert t == 0x05 # Short-Stream Container Stream (SSCS) resides in Root Storage 210 | if stream_size == 0: 211 | self.short_sectors_data = '' 212 | else: 213 | self.short_sectors_data = self.get_stream_data(self.data, self.SAT, dentry_start_sid, self.sect_size) 214 | 215 | 216 | def get_stream_data(self, data, SAT, start_sid, sect_size): 217 | sid = start_sid 218 | chunks = [(sid, sid)] 219 | stream_data = '' 220 | 221 | while SAT[sid] >= 0: 222 | next_in_chain = SAT[sid] 223 | last_chunk_start, last_chunk_finish = chunks[-1] 224 | if next_in_chain == last_chunk_finish + 1: 225 | chunks[-1] = last_chunk_start, next_in_chain 226 | else: 227 | chunks.extend([(next_in_chain, next_in_chain)]) 228 | sid = next_in_chain 229 | for s, f in chunks: 230 | stream_data += data[s*sect_size:(f+1)*sect_size] 231 | #print chunks 232 | return stream_data 233 | 234 | 235 | def print_bin_data(data): 236 | i = 0 237 | while i < len(data): 238 | j = 0 239 | while (i < len(data)) and (j < 16): 240 | c = '0x%02X' % ord(data[i]) 241 | sys.stdout.write(c) 242 | sys.stdout.write(' ') 243 | i += 1 244 | j += 1 245 | print 246 | if i == 0: 247 | print '' 248 | 
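# A hedged illustration (toy helper, not part of this module) of the sector-chain walk
# Reader.get_stream_data() performs above: every stream is a linked list of sector ids
# (SIDs), the SAT maps each SID to the next one, and a negative sentinel (-2, end of
# chain) terminates the walk.
#
#     def follow_chain(SAT, start_sid):
#         sid, chain = start_sid, []
#         while sid >= 0:
#             chain.append(sid)
#             sid = SAT[sid]
#         return chain
#
#     follow_chain((1, 2, -2, 5, -2, -1), 0)   # -> [0, 1, 2]
#
# get_stream_data() does the same walk but coalesces consecutive SIDs into
# (start, finish) chunks so that contiguous sectors are sliced out of the data in
# one string operation per chunk.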
249 | 250 | 251 | # This implementation writes only 'Root Entry', 'Workbook' streams 252 | # and 2 empty streams for aligning directory stream on sector boundary 253 | # 254 | # LAYOUT: 255 | # 0 header 256 | # 76 MSAT (1st part: 109 SID) 257 | # 512 workbook stream 258 | # ... additional MSAT sectors if streams' size > about 7 Mb == (109*512 * 128) 259 | # ... SAT 260 | # ... directory stream 261 | # 262 | # NOTE: this layout is "ad hoc". It can be more general. RTFM 263 | 264 | class XlsDoc: 265 | SECTOR_SIZE = 0x0200 266 | MIN_LIMIT = 0x1000 267 | 268 | SID_FREE_SECTOR = -1 269 | SID_END_OF_CHAIN = -2 270 | SID_USED_BY_SAT = -3 271 | SID_USED_BY_MSAT = -4 272 | 273 | def __init__(self): 274 | #self.book_stream = '' # padded 275 | self.book_stream_sect = [] 276 | 277 | self.dir_stream = '' 278 | self.dir_stream_sect = [] 279 | 280 | self.packed_SAT = '' 281 | self.SAT_sect = [] 282 | 283 | self.packed_MSAT_1st = '' 284 | self.packed_MSAT_2nd = '' 285 | self.MSAT_sect_2nd = [] 286 | 287 | self.header = '' 288 | 289 | def __build_directory(self): # align on sector boundary 290 | self.dir_stream = '' 291 | 292 | dentry_name = '\x00'.join('Root Entry\x00') + '\x00' 293 | dentry_name_sz = len(dentry_name) 294 | dentry_name_pad = '\x00'*(64 - dentry_name_sz) 295 | dentry_type = 0x05 # root storage 296 | dentry_colour = 0x01 # black 297 | dentry_did_left = -1 298 | dentry_did_right = -1 299 | dentry_did_root = 1 300 | dentry_start_sid = -2 301 | dentry_stream_sz = 0 302 | 303 | self.dir_stream += struct.pack('<64s H 2B 3l 9L l L L', 304 | dentry_name + dentry_name_pad, 305 | dentry_name_sz, 306 | dentry_type, 307 | dentry_colour, 308 | dentry_did_left, 309 | dentry_did_right, 310 | dentry_did_root, 311 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 312 | dentry_start_sid, 313 | dentry_stream_sz, 314 | 0 315 | ) 316 | 317 | dentry_name = '\x00'.join('Workbook\x00') + '\x00' 318 | dentry_name_sz = len(dentry_name) 319 | dentry_name_pad = '\x00'*(64 - dentry_name_sz) 320 | dentry_type = 0x02 # user stream 321 | dentry_colour = 0x01 # black 322 | dentry_did_left = -1 323 | dentry_did_right = -1 324 | dentry_did_root = -1 325 | dentry_start_sid = 0 326 | dentry_stream_sz = self.book_stream_len 327 | 328 | self.dir_stream += struct.pack('<64s H 2B 3l 9L l L L', 329 | dentry_name + dentry_name_pad, 330 | dentry_name_sz, 331 | dentry_type, 332 | dentry_colour, 333 | dentry_did_left, 334 | dentry_did_right, 335 | dentry_did_root, 336 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 337 | dentry_start_sid, 338 | dentry_stream_sz, 339 | 0 340 | ) 341 | 342 | # padding 343 | dentry_name = '' 344 | dentry_name_sz = len(dentry_name) 345 | dentry_name_pad = '\x00'*(64 - dentry_name_sz) 346 | dentry_type = 0x00 # empty 347 | dentry_colour = 0x01 # black 348 | dentry_did_left = -1 349 | dentry_did_right = -1 350 | dentry_did_root = -1 351 | dentry_start_sid = -2 352 | dentry_stream_sz = 0 353 | 354 | self.dir_stream += struct.pack('<64s H 2B 3l 9L l L L', 355 | dentry_name + dentry_name_pad, 356 | dentry_name_sz, 357 | dentry_type, 358 | dentry_colour, 359 | dentry_did_left, 360 | dentry_did_right, 361 | dentry_did_root, 362 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 363 | dentry_start_sid, 364 | dentry_stream_sz, 365 | 0 366 | ) * 2 367 | 368 | def __build_sat(self): 369 | # Build SAT 370 | book_sect_count = self.book_stream_len >> 9 371 | dir_sect_count = len(self.dir_stream) >> 9 372 | 373 | total_sect_count = book_sect_count + dir_sect_count 374 | SAT_sect_count = 0 375 | MSAT_sect_count = 0 376 | SAT_sect_count_limit = 109 377 | while total_sect_count > 
128*SAT_sect_count or SAT_sect_count > SAT_sect_count_limit: 378 | SAT_sect_count += 1 379 | total_sect_count += 1 380 | if SAT_sect_count > SAT_sect_count_limit: 381 | MSAT_sect_count += 1 382 | total_sect_count += 1 383 | SAT_sect_count_limit += 127 384 | 385 | 386 | SAT = [self.SID_FREE_SECTOR]*128*SAT_sect_count 387 | 388 | sect = 0 389 | while sect < book_sect_count - 1: 390 | self.book_stream_sect.append(sect) 391 | SAT[sect] = sect + 1 392 | sect += 1 393 | self.book_stream_sect.append(sect) 394 | SAT[sect] = self.SID_END_OF_CHAIN 395 | sect += 1 396 | 397 | while sect < book_sect_count + MSAT_sect_count: 398 | self.MSAT_sect_2nd.append(sect) 399 | SAT[sect] = self.SID_USED_BY_MSAT 400 | sect += 1 401 | 402 | while sect < book_sect_count + MSAT_sect_count + SAT_sect_count: 403 | self.SAT_sect.append(sect) 404 | SAT[sect] = self.SID_USED_BY_SAT 405 | sect += 1 406 | 407 | while sect < book_sect_count + MSAT_sect_count + SAT_sect_count + dir_sect_count - 1: 408 | self.dir_stream_sect.append(sect) 409 | SAT[sect] = sect + 1 410 | sect += 1 411 | self.dir_stream_sect.append(sect) 412 | SAT[sect] = self.SID_END_OF_CHAIN 413 | sect += 1 414 | 415 | self.packed_SAT = struct.pack('<%dl' % (SAT_sect_count*128), *SAT) 416 | 417 | MSAT_1st = [self.SID_FREE_SECTOR]*109 418 | for i, SAT_sect_num in zip(range(0, 109), self.SAT_sect): 419 | MSAT_1st[i] = SAT_sect_num 420 | self.packed_MSAT_1st = struct.pack('<109l', *MSAT_1st) 421 | 422 | MSAT_2nd = [self.SID_FREE_SECTOR]*128*MSAT_sect_count 423 | if MSAT_sect_count > 0: 424 | MSAT_2nd[- 1] = self.SID_END_OF_CHAIN 425 | 426 | i = 109 427 | msat_sect = 0 428 | sid_num = 0 429 | while i < SAT_sect_count: 430 | if (sid_num + 1) % 128 == 0: 431 | #print 'link: ', 432 | msat_sect += 1 433 | if msat_sect < len(self.MSAT_sect_2nd): 434 | MSAT_2nd[sid_num] = self.MSAT_sect_2nd[msat_sect] 435 | else: 436 | #print 'sid: ', 437 | MSAT_2nd[sid_num] = self.SAT_sect[i] 438 | i += 1 439 | #print sid_num, MSAT_2nd[sid_num] 440 | sid_num += 1 441 | 442 | self.packed_MSAT_2nd = struct.pack('<%dl' % (MSAT_sect_count*128), *MSAT_2nd) 443 | 444 | #print vars() 445 | #print zip(range(0, sect), SAT) 446 | #print self.book_stream_sect 447 | #print self.MSAT_sect_2nd 448 | #print MSAT_2nd 449 | #print self.SAT_sect 450 | #print self.dir_stream_sect 451 | 452 | 453 | def __build_header(self): 454 | doc_magic = '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' 455 | file_uid = '\x00'*16 456 | rev_num = '\x3E\x00' 457 | ver_num = '\x03\x00' 458 | byte_order = '\xFE\xFF' 459 | log_sect_size = struct.pack('" 31 | ge_pattern = r">=" 32 | le_pattern = r"<=" 33 | 34 | pattern_type_tuples = ( 35 | (flt_const_pattern, ExcelFormulaParser.NUM_CONST), 36 | (int_const_pattern, ExcelFormulaParser.INT_CONST), 37 | (str_const_pattern, ExcelFormulaParser.STR_CONST), 38 | # (range2d_pattern , ExcelFormulaParser.RANGE2D), 39 | (ref2d_r1c1_pattern, ExcelFormulaParser.REF2D_R1C1), 40 | (ref2d_pattern , ExcelFormulaParser.REF2D), 41 | (true_pattern , ExcelFormulaParser.TRUE_CONST), 42 | (false_pattern , ExcelFormulaParser.FALSE_CONST), 43 | (if_pattern , ExcelFormulaParser.FUNC_IF), 44 | (choose_pattern , ExcelFormulaParser.FUNC_CHOOSE), 45 | (name_pattern , ExcelFormulaParser.NAME), 46 | (quotename_pattern, ExcelFormulaParser.QUOTENAME), 47 | (ne_pattern, ExcelFormulaParser.NE), 48 | (ge_pattern, ExcelFormulaParser.GE), 49 | (le_pattern, ExcelFormulaParser.LE), 50 | ) 51 | 52 | _re = recompile( 53 | '(' + ')|('.join([i[0] for i in pattern_type_tuples]) + ')', 54 | VERBOSE+LOCALE+IGNORECASE) 55 | 56 | 
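# A hedged aside on the alternation + lastindex dispatch built just above: every token
# pattern becomes its own capturing group, so the 1-based number of whichever group
# matched identifies the token type, e.g.
#
#     import re
#     rx = re.compile('(' + ')|('.join([r'\d+', r'[A-Za-z]+']) + ')')
#     rx.match('abc').lastindex    # -> 2: the second (alphabetic) pattern matched
#
# which is why _toktype below is padded with a leading dummy entry.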
_toktype = [None] + [i[1] for i in pattern_type_tuples] 57 | # need dummy at start because re.MatchObject.lastindex counts from 1 58 | 59 | single_char_lookup = { 60 | '=': ExcelFormulaParser.EQ, 61 | '<': ExcelFormulaParser.LT, 62 | '>': ExcelFormulaParser.GT, 63 | '+': ExcelFormulaParser.ADD, 64 | '-': ExcelFormulaParser.SUB, 65 | '*': ExcelFormulaParser.MUL, 66 | '/': ExcelFormulaParser.DIV, 67 | ':': ExcelFormulaParser.COLON, 68 | ';': ExcelFormulaParser.SEMICOLON, 69 | ',': ExcelFormulaParser.COMMA, 70 | '(': ExcelFormulaParser.LP, 71 | ')': ExcelFormulaParser.RP, 72 | '&': ExcelFormulaParser.CONCAT, 73 | '%': ExcelFormulaParser.PERCENT, 74 | '^': ExcelFormulaParser.POWER, 75 | '!': ExcelFormulaParser.BANG, 76 | } 77 | 78 | class Lexer(TokenStream): 79 | def __init__(self, text): 80 | self._text = text[:] 81 | self._pos = 0 82 | self._line = 0 83 | 84 | def isEOF(self): 85 | return len(self._text) <= self._pos 86 | 87 | def curr_ch(self): 88 | return self._text[self._pos] 89 | 90 | def next_ch(self, n = 1): 91 | self._pos += n 92 | 93 | def is_whitespace(self): 94 | return self.curr_ch() in " \t\n\r\f\v" 95 | 96 | def match_pattern(self): 97 | m = _re.match(self._text, self._pos) 98 | if not m: 99 | return None 100 | self._pos = m.end(0) 101 | return Tok(type = _toktype[m.lastindex], text = m.group(0), col = m.start(0) + 1) 102 | 103 | def nextToken(self): 104 | # skip whitespace 105 | while not self.isEOF() and self.is_whitespace(): 106 | self.next_ch() 107 | if self.isEOF(): 108 | return Tok(type = EOF) 109 | # first, try to match token with 2 or more chars 110 | t = self.match_pattern() 111 | if t: 112 | return t 113 | # second, we want 1-char tokens 114 | te = self.curr_ch() 115 | try: 116 | ty = single_char_lookup[te] 117 | except KeyError: 118 | raise TokenStreamException( 119 | "Unexpected char %r in column %u." % (self.curr_ch(), self._pos)) 120 | self.next_ch() 121 | return Tok(type=ty, text=te, col=self._pos) 122 | 123 | if __name__ == '__main__': 124 | try: 125 | for t in Lexer(""" 1.23 456 "abcd" R2C2 a1 iv65536 true false if choose a_name 'qname' <> >= <= """): 126 | print t 127 | except TokenStreamException, e: 128 | print "error:", e 129 | -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/Formatting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ''' 3 | The XF record is able to store explicit cell formatting attributes or the 4 | attributes of a cell style. Explicit formatting includes the reference to 5 | a cell style XF record. This allows to extend a defined cell style with 6 | some explicit attributes. The formatting attributes are divided into 7 | 6 groups: 8 | 9 | Group Attributes 10 | ------------------------------------- 11 | Number format Number format index (index to FORMAT record) 12 | Font Font index (index to FONT record) 13 | Alignment Horizontal and vertical alignment, text wrap, indentation, 14 | orientation/rotation, text direction 15 | Border Border line styles and colours 16 | Background Background area style and colours 17 | Protection Cell locked, formula hidden 18 | 19 | For each group a flag in the cell XF record specifies whether to use the 20 | attributes contained in that XF record or in the referenced style 21 | XF record. In style XF records, these flags specify whether the attributes 22 | will overwrite explicit cell formatting when the style is applied to 23 | a cell. 
Changing a cell style (without applying this style to a cell) will 24 | change all cells which already use that style and do not contain explicit 25 | cell attributes for the changed style attributes. If a cell XF record does 26 | not contain explicit attributes in a group (if the attribute group flag 27 | is not set), it repeats the attributes of its style XF record. 28 | 29 | ''' 30 | 31 | import BIFFRecords 32 | 33 | class Font(object): 34 | 35 | ESCAPEMENT_NONE = 0x00 36 | ESCAPEMENT_SUPERSCRIPT = 0x01 37 | ESCAPEMENT_SUBSCRIPT = 0x02 38 | 39 | UNDERLINE_NONE = 0x00 40 | UNDERLINE_SINGLE = 0x01 41 | UNDERLINE_SINGLE_ACC = 0x21 42 | UNDERLINE_DOUBLE = 0x02 43 | UNDERLINE_DOUBLE_ACC = 0x22 44 | 45 | FAMILY_NONE = 0x00 46 | FAMILY_ROMAN = 0x01 47 | FAMILY_SWISS = 0x02 48 | FAMILY_MODERN = 0x03 49 | FAMILY_SCRIPT = 0x04 50 | FAMILY_DECORATIVE = 0x05 51 | 52 | CHARSET_ANSI_LATIN = 0x00 53 | CHARSET_SYS_DEFAULT = 0x01 54 | CHARSET_SYMBOL = 0x02 55 | CHARSET_APPLE_ROMAN = 0x4D 56 | CHARSET_ANSI_JAP_SHIFT_JIS = 0x80 57 | CHARSET_ANSI_KOR_HANGUL = 0x81 58 | CHARSET_ANSI_KOR_JOHAB = 0x82 59 | CHARSET_ANSI_CHINESE_GBK = 0x86 60 | CHARSET_ANSI_CHINESE_BIG5 = 0x88 61 | CHARSET_ANSI_GREEK = 0xA1 62 | CHARSET_ANSI_TURKISH = 0xA2 63 | CHARSET_ANSI_VIETNAMESE = 0xA3 64 | CHARSET_ANSI_HEBREW = 0xB1 65 | CHARSET_ANSI_ARABIC = 0xB2 66 | CHARSET_ANSI_BALTIC = 0xBA 67 | CHARSET_ANSI_CYRILLIC = 0xCC 68 | CHARSET_ANSI_THAI = 0xDE 69 | CHARSET_ANSI_LATIN_II = 0xEE 70 | CHARSET_OEM_LATIN_I = 0xFF 71 | 72 | def __init__(self): 73 | # twip = 1/20 of a point = 1/1440 of a inch 74 | # usually resolution == 96 pixels per 1 inch 75 | # (rarely 120 pixels per 1 inch or another one) 76 | 77 | self.height = 0x00C8 # 200: this is font with height 10 points 78 | self.italic = False 79 | self.struck_out = False 80 | self.outline = False 81 | self.shadow = False 82 | self.colour_index = 0x7FFF 83 | self.bold = False 84 | self._weight = 0x0190 # 0x02BC gives bold font 85 | self.escapement = self.ESCAPEMENT_NONE 86 | self.underline = self.UNDERLINE_NONE 87 | self.family = self.FAMILY_NONE 88 | self.charset = self.CHARSET_SYS_DEFAULT 89 | self.name = 'Arial' 90 | 91 | def get_biff_record(self): 92 | height = self.height 93 | 94 | options = 0x00 95 | if self.bold: 96 | options |= 0x01 97 | self._weight = 0x02BC 98 | if self.italic: 99 | options |= 0x02 100 | if self.underline != self.UNDERLINE_NONE: 101 | options |= 0x04 102 | if self.struck_out: 103 | options |= 0x08 104 | if self.outline: 105 | options |= 0x010 106 | if self.shadow: 107 | options |= 0x020 108 | 109 | colour_index = self.colour_index 110 | weight = self._weight 111 | escapement = self.escapement 112 | underline = self.underline 113 | family = self.family 114 | charset = self.charset 115 | name = self.name 116 | 117 | return BIFFRecords.FontRecord(height, options, colour_index, weight, escapement, 118 | underline, family, charset, 119 | name) 120 | 121 | def _search_key(self): 122 | return ( 123 | self.height, 124 | self.italic, 125 | self.struck_out, 126 | self.outline, 127 | self.shadow, 128 | self.colour_index, 129 | self.bold, 130 | self._weight, 131 | self.escapement, 132 | self.underline, 133 | self.family, 134 | self.charset, 135 | self.name, 136 | ) 137 | 138 | class Alignment(object): 139 | HORZ_GENERAL = 0x00 140 | HORZ_LEFT = 0x01 141 | HORZ_CENTER = 0x02 142 | HORZ_RIGHT = 0x03 143 | HORZ_FILLED = 0x04 144 | HORZ_JUSTIFIED = 0x05 # BIFF4-BIFF8X 145 | HORZ_CENTER_ACROSS_SEL = 0x06 # Centred across selection (BIFF4-BIFF8X) 146 | HORZ_DISTRIBUTED = 
0x07 # Distributed (BIFF8X) 147 | 148 | VERT_TOP = 0x00 149 | VERT_CENTER = 0x01 150 | VERT_BOTTOM = 0x02 151 | VERT_JUSTIFIED = 0x03 # Justified (BIFF5-BIFF8X) 152 | VERT_DISTRIBUTED = 0x04 # Distributed (BIFF8X) 153 | 154 | DIRECTION_GENERAL = 0x00 # BIFF8X 155 | DIRECTION_LR = 0x01 156 | DIRECTION_RL = 0x02 157 | 158 | ORIENTATION_NOT_ROTATED = 0x00 159 | ORIENTATION_STACKED = 0x01 160 | ORIENTATION_90_CC = 0x02 161 | ORIENTATION_90_CW = 0x03 162 | 163 | ROTATION_0_ANGLE = 0x00 164 | ROTATION_STACKED = 0xFF 165 | 166 | WRAP_AT_RIGHT = 0x01 167 | NOT_WRAP_AT_RIGHT = 0x00 168 | 169 | SHRINK_TO_FIT = 0x01 170 | NOT_SHRINK_TO_FIT = 0x00 171 | 172 | def __init__(self): 173 | self.horz = self.HORZ_GENERAL 174 | self.vert = self.VERT_BOTTOM 175 | self.dire = self.DIRECTION_GENERAL 176 | self.orie = self.ORIENTATION_NOT_ROTATED 177 | self.rota = self.ROTATION_0_ANGLE 178 | self.wrap = self.NOT_WRAP_AT_RIGHT 179 | self.shri = self.NOT_SHRINK_TO_FIT 180 | self.inde = 0 181 | self.merg = 0 182 | 183 | def _search_key(self): 184 | return ( 185 | self.horz, self.vert, self.dire, self.orie, self.rota, 186 | self.wrap, self.shri, self.inde, self.merg, 187 | ) 188 | 189 | class Borders(object): 190 | NO_LINE = 0x00 191 | THIN = 0x01 192 | MEDIUM = 0x02 193 | DASHED = 0x03 194 | DOTTED = 0x04 195 | THICK = 0x05 196 | DOUBLE = 0x06 197 | HAIR = 0x07 198 | #The following for BIFF8 199 | MEDIUM_DASHED = 0x08 200 | THIN_DASH_DOTTED = 0x09 201 | MEDIUM_DASH_DOTTED = 0x0A 202 | THIN_DASH_DOT_DOTTED = 0x0B 203 | MEDIUM_DASH_DOT_DOTTED = 0x0C 204 | SLANTED_MEDIUM_DASH_DOTTED = 0x0D 205 | 206 | NEED_DIAG1 = 0x01 207 | NEED_DIAG2 = 0x01 208 | NO_NEED_DIAG1 = 0x00 209 | NO_NEED_DIAG2 = 0x00 210 | 211 | def __init__(self): 212 | self.left = self.NO_LINE 213 | self.right = self.NO_LINE 214 | self.top = self.NO_LINE 215 | self.bottom = self.NO_LINE 216 | self.diag = self.NO_LINE 217 | 218 | self.left_colour = 0x40 219 | self.right_colour = 0x40 220 | self.top_colour = 0x40 221 | self.bottom_colour = 0x40 222 | self.diag_colour = 0x40 223 | 224 | self.need_diag1 = self.NO_NEED_DIAG1 225 | self.need_diag2 = self.NO_NEED_DIAG2 226 | 227 | def _search_key(self): 228 | return ( 229 | self.left, self.right, self.top, self.bottom, self.diag, 230 | self.left_colour, self.right_colour, self.top_colour, 231 | self.bottom_colour, self.diag_colour, 232 | self.need_diag1, self.need_diag2, 233 | ) 234 | 235 | class Pattern(object): 236 | # patterns 0x00 - 0x12 237 | NO_PATTERN = 0x00 238 | SOLID_PATTERN = 0x01 239 | 240 | def __init__(self): 241 | self.pattern = self.NO_PATTERN 242 | self.pattern_fore_colour = 0x40 243 | self.pattern_back_colour = 0x41 244 | 245 | def _search_key(self): 246 | return ( 247 | self.pattern, 248 | self.pattern_fore_colour, 249 | self.pattern_back_colour, 250 | ) 251 | 252 | class Protection(object): 253 | def __init__(self): 254 | self.cell_locked = 1 255 | self.formula_hidden = 0 256 | 257 | def _search_key(self): 258 | return ( 259 | self.cell_locked, 260 | self.formula_hidden, 261 | ) 262 | -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/Row.py: -------------------------------------------------------------------------------- 1 | # -*- coding: windows-1252 -*- 2 | 3 | import BIFFRecords 4 | import Style 5 | from Cell import StrCell, BlankCell, NumberCell, FormulaCell, MulBlankCell, BooleanCell, ErrorCell, \ 6 | _get_cells_biff_data_mul 7 | import ExcelFormula 8 | import datetime as dt 9 | try: 10 | from decimal import Decimal 11 | except 
ImportError: 12 | # Python 2.3: decimal not supported; create dummy Decimal class 13 | class Decimal(object): 14 | pass 15 | 16 | 17 | class Row(object): 18 | __slots__ = [# private variables 19 | "__idx", 20 | "__parent", 21 | "__parent_wb", 22 | "__cells", 23 | "__min_col_idx", 24 | "__max_col_idx", 25 | "__xf_index", 26 | "__has_default_xf_index", 27 | "__height_in_pixels", 28 | # public variables 29 | "height", 30 | "has_default_height", 31 | "height_mismatch", 32 | "level", 33 | "collapse", 34 | "hidden", 35 | "space_above", 36 | "space_below"] 37 | 38 | def __init__(self, rowx, parent_sheet): 39 | if not (isinstance(rowx, int) and 0 <= rowx <= 65535): 40 | raise ValueError("row index (%r) not an int in range(65536)" % rowx) 41 | self.__idx = rowx 42 | self.__parent = parent_sheet 43 | self.__parent_wb = parent_sheet.get_parent() 44 | self.__cells = {} 45 | self.__min_col_idx = 0 46 | self.__max_col_idx = 0 47 | self.__xf_index = 0x0F 48 | self.__has_default_xf_index = 0 49 | self.__height_in_pixels = 0x11 50 | 51 | self.height = 0x00FF 52 | self.has_default_height = 0x00 53 | self.height_mismatch = 0 54 | self.level = 0 55 | self.collapse = 0 56 | self.hidden = 0 57 | self.space_above = 0 58 | self.space_below = 0 59 | 60 | 61 | def __adjust_height(self, style): 62 | twips = style.font.height 63 | points = float(twips)/20.0 64 | # Cell height in pixels can be calcuted by following approx. formula: 65 | # cell height in pixels = font height in points * 83/50 + 2/5 66 | # It works when screen resolution is 96 dpi 67 | pix = int(round(points*83.0/50.0 + 2.0/5.0)) 68 | if pix > self.__height_in_pixels: 69 | self.__height_in_pixels = pix 70 | 71 | 72 | def __adjust_bound_col_idx(self, *args): 73 | for arg in args: 74 | iarg = int(arg) 75 | if not ((0 <= iarg <= 255) and arg == iarg): 76 | raise ValueError("column index (%r) not an int in range(256)" % arg) 77 | sheet = self.__parent 78 | if iarg < self.__min_col_idx: 79 | self.__min_col_idx = iarg 80 | if iarg > self.__max_col_idx: 81 | self.__max_col_idx = iarg 82 | if iarg < sheet.first_used_col: 83 | sheet.first_used_col = iarg 84 | if iarg > sheet.last_used_col: 85 | sheet.last_used_col = iarg 86 | 87 | def __excel_date_dt(self, date): 88 | if isinstance(date, dt.date) and (not isinstance(date, dt.datetime)): 89 | epoch = dt.date(1899, 12, 31) 90 | elif isinstance(date, dt.time): 91 | date = dt.datetime.combine(dt.datetime(1900, 1, 1), date) 92 | epoch = dt.datetime(1900, 1, 1, 0, 0, 0) 93 | else: 94 | epoch = dt.datetime(1899, 12, 31, 0, 0, 0) 95 | delta = date - epoch 96 | xldate = delta.days + float(delta.seconds) / (24*60*60) 97 | # Add a day for Excel's missing leap day in 1900 98 | if xldate > 59: 99 | xldate += 1 100 | return xldate 101 | 102 | def get_height_in_pixels(self): 103 | return self.__height_in_pixels 104 | 105 | 106 | def set_style(self, style): 107 | self.__adjust_height(style) 108 | self.__xf_index = self.__parent_wb.add_style(style) 109 | self.__has_default_xf_index = 1 110 | 111 | 112 | def get_xf_index(self): 113 | return self.__xf_index 114 | 115 | 116 | def get_cells_count(self): 117 | return len(self.__cells) 118 | 119 | 120 | def get_min_col(self): 121 | return self.__min_col_idx 122 | 123 | 124 | def get_max_col(self): 125 | return self.__max_col_idx 126 | 127 | 128 | def get_row_biff_data(self): 129 | height_options = (self.height & 0x07FFF) 130 | height_options |= (self.has_default_height & 0x01) << 15 131 | 132 | options = (self.level & 0x07) << 0 133 | options |= (self.collapse & 0x01) << 4 134 | 
options |= (self.hidden & 0x01) << 5 135 | options |= (self.height_mismatch & 0x01) << 6 136 | options |= (self.__has_default_xf_index & 0x01) << 7 137 | options |= (0x01 & 0x01) << 8 138 | options |= (self.__xf_index & 0x0FFF) << 16 139 | options |= (self.space_above & 1) << 28 140 | options |= (self.space_below & 1) << 29 141 | 142 | return BIFFRecords.RowRecord(self.__idx, self.__min_col_idx, 143 | self.__max_col_idx, height_options, options).get() 144 | 145 | def insert_cell(self, col_index, cell_obj): 146 | if col_index in self.__cells: 147 | if not self.__parent._cell_overwrite_ok: 148 | msg = "Attempt to overwrite cell: sheetname=%r rowx=%d colx=%d" \ 149 | % (self.__parent.name, self.__idx, col_index) 150 | raise Exception(msg) 151 | prev_cell_obj = self.__cells[col_index] 152 | sst_idx = getattr(prev_cell_obj, 'sst_idx', None) 153 | if sst_idx is not None: 154 | self.__parent_wb.del_str(sst_idx) 155 | self.__cells[col_index] = cell_obj 156 | 157 | def insert_mulcells(self, colx1, colx2, cell_obj): 158 | self.insert_cell(colx1, cell_obj) 159 | for col_index in xrange(colx1+1, colx2+1): 160 | self.insert_cell(col_index, None) 161 | 162 | def get_cells_biff_data(self): 163 | cell_items = [item for item in self.__cells.iteritems() if item[1] is not None] 164 | cell_items.sort() # in column order 165 | return _get_cells_biff_data_mul(self.__idx, cell_items) 166 | # previously: 167 | # return ''.join([cell.get_biff_data() for colx, cell in cell_items]) 168 | 169 | def get_index(self): 170 | return self.__idx 171 | 172 | def set_cell_text(self, colx, value, style=Style.default_style): 173 | self.__adjust_height(style) 174 | self.__adjust_bound_col_idx(colx) 175 | xf_index = self.__parent_wb.add_style(style) 176 | self.insert_cell(colx, StrCell(self.__idx, colx, xf_index, self.__parent_wb.add_str(value))) 177 | 178 | def set_cell_blank(self, colx, style=Style.default_style): 179 | self.__adjust_height(style) 180 | self.__adjust_bound_col_idx(colx) 181 | xf_index = self.__parent_wb.add_style(style) 182 | self.insert_cell(colx, BlankCell(self.__idx, colx, xf_index)) 183 | 184 | def set_cell_mulblanks(self, first_colx, last_colx, style=Style.default_style): 185 | assert 0 <= first_colx <= last_colx <= 255 186 | self.__adjust_height(style) 187 | self.__adjust_bound_col_idx(first_colx, last_colx) 188 | xf_index = self.__parent_wb.add_style(style) 189 | # ncols = last_colx - first_colx + 1 190 | self.insert_mulcells(first_colx, last_colx, MulBlankCell(self.__idx, first_colx, last_colx, xf_index)) 191 | 192 | def set_cell_number(self, colx, number, style=Style.default_style): 193 | self.__adjust_height(style) 194 | self.__adjust_bound_col_idx(colx) 195 | xf_index = self.__parent_wb.add_style(style) 196 | self.insert_cell(colx, NumberCell(self.__idx, colx, xf_index, number)) 197 | 198 | def set_cell_date(self, colx, datetime_obj, style=Style.default_style): 199 | self.__adjust_height(style) 200 | self.__adjust_bound_col_idx(colx) 201 | xf_index = self.__parent_wb.add_style(style) 202 | self.insert_cell(colx, 203 | NumberCell(self.__idx, colx, xf_index, self.__excel_date_dt(datetime_obj))) 204 | 205 | def set_cell_formula(self, colx, formula, style=Style.default_style, calc_flags=0): 206 | self.__adjust_height(style) 207 | self.__adjust_bound_col_idx(colx) 208 | xf_index = self.__parent_wb.add_style(style) 209 | self.__parent_wb.add_sheet_reference(formula) 210 | self.insert_cell(colx, FormulaCell(self.__idx, colx, xf_index, formula, calc_flags=0)) 211 | 212 | def set_cell_boolean(self, colx, 
value, style=Style.default_style): 213 | self.__adjust_height(style) 214 | self.__adjust_bound_col_idx(colx) 215 | xf_index = self.__parent_wb.add_style(style) 216 | self.insert_cell(colx, BooleanCell(self.__idx, colx, xf_index, bool(value))) 217 | 218 | def set_cell_error(self, colx, error_string_or_code, style=Style.default_style): 219 | self.__adjust_height(style) 220 | self.__adjust_bound_col_idx(colx) 221 | xf_index = self.__parent_wb.add_style(style) 222 | self.insert_cell(colx, ErrorCell(self.__idx, colx, xf_index, error_string_or_code)) 223 | 224 | def write(self, col, label, style=Style.default_style): 225 | self.__adjust_height(style) 226 | self.__adjust_bound_col_idx(col) 227 | style_index = self.__parent_wb.add_style(style) 228 | if isinstance(label, basestring): 229 | if len(label) > 0: 230 | self.insert_cell(col, 231 | StrCell(self.__idx, col, style_index, self.__parent_wb.add_str(label)) 232 | ) 233 | else: 234 | self.insert_cell(col, BlankCell(self.__idx, col, style_index)) 235 | elif isinstance(label, bool): # bool is subclass of int; test bool first 236 | self.insert_cell(col, BooleanCell(self.__idx, col, style_index, label)) 237 | elif isinstance(label, (float, int, long, Decimal)): 238 | self.insert_cell(col, NumberCell(self.__idx, col, style_index, label)) 239 | elif isinstance(label, (dt.datetime, dt.date, dt.time)): 240 | date_number = self.__excel_date_dt(label) 241 | self.insert_cell(col, NumberCell(self.__idx, col, style_index, date_number)) 242 | elif label is None: 243 | self.insert_cell(col, BlankCell(self.__idx, col, style_index)) 244 | elif isinstance(label, ExcelFormula.Formula): 245 | self.__parent_wb.add_sheet_reference(label) 246 | self.insert_cell(col, FormulaCell(self.__idx, col, style_index, label)) 247 | else: 248 | raise Exception("Unexpected data type %r" % type(label)) 249 | 250 | write_blanks = set_cell_mulblanks 251 | 252 | 253 | 254 | -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/Style.py: -------------------------------------------------------------------------------- 1 | # -*- coding: windows-1252 -*- 2 | 3 | import Formatting 4 | from BIFFRecords import * 5 | 6 | FIRST_USER_DEFINED_NUM_FORMAT_IDX = 164 7 | 8 | class XFStyle(object): 9 | 10 | def __init__(self): 11 | self.num_format_str = 'General' 12 | self.font = Formatting.Font() 13 | self.alignment = Formatting.Alignment() 14 | self.borders = Formatting.Borders() 15 | self.pattern = Formatting.Pattern() 16 | self.protection = Formatting.Protection() 17 | 18 | default_style = XFStyle() 19 | 20 | class StyleCollection(object): 21 | _std_num_fmt_list = [ 22 | 'general', 23 | '0', 24 | '0.00', 25 | '#,##0', 26 | '#,##0.00', 27 | '"$"#,##0_);("$"#,##', 28 | '"$"#,##0_);[Red]("$"#,##', 29 | '"$"#,##0.00_);("$"#,##', 30 | '"$"#,##0.00_);[Red]("$"#,##', 31 | '0%', 32 | '0.00%', 33 | '0.00E+00', 34 | '# ?/?', 35 | '# ??/??', 36 | 'M/D/YY', 37 | 'D-MMM-YY', 38 | 'D-MMM', 39 | 'MMM-YY', 40 | 'h:mm AM/PM', 41 | 'h:mm:ss AM/PM', 42 | 'h:mm', 43 | 'h:mm:ss', 44 | 'M/D/YY h:mm', 45 | '_(#,##0_);(#,##0)', 46 | '_(#,##0_);[Red](#,##0)', 47 | '_(#,##0.00_);(#,##0.00)', 48 | '_(#,##0.00_);[Red](#,##0.00)', 49 | '_("$"* #,##0_);_("$"* (#,##0);_("$"* "-"_);_(@_)', 50 | '_(* #,##0_);_(* (#,##0);_(* "-"_);_(@_)', 51 | '_("$"* #,##0.00_);_("$"* (#,##0.00);_("$"* "-"??_);_(@_)', 52 | '_(* #,##0.00_);_(* (#,##0.00);_(* "-"??_);_(@_)', 53 | 'mm:ss', 54 | '[h]:mm:ss', 55 | 'mm:ss.0', 56 | '##0.0E+0', 57 | '@' 58 | ] 59 | 60 | def __init__(self, 
style_compression=0): 61 | self.style_compression = style_compression 62 | self.stats = [0, 0, 0, 0, 0, 0] 63 | self._font_id2x = {} 64 | self._font_x2id = {} 65 | self._font_val2x = {} 66 | 67 | for x in (0, 1, 2, 3, 5): # The font with index 4 is omitted in all BIFF versions 68 | font = Formatting.Font() 69 | search_key = font._search_key() 70 | self._font_id2x[font] = x 71 | self._font_x2id[x] = font 72 | self._font_val2x[search_key] = x 73 | 74 | self._xf_id2x = {} 75 | self._xf_x2id = {} 76 | self._xf_val2x = {} 77 | 78 | self._num_formats = {} 79 | for fmtidx, fmtstr in zip(range(0, 23), StyleCollection._std_num_fmt_list[0:23]): 80 | self._num_formats[fmtstr] = fmtidx 81 | for fmtidx, fmtstr in zip(range(37, 50), StyleCollection._std_num_fmt_list[23:]): 82 | self._num_formats[fmtstr] = fmtidx 83 | 84 | self.default_style = XFStyle() 85 | self._default_xf = self._add_style(self.default_style)[0] 86 | 87 | def add(self, style): 88 | if style == None: 89 | return 0x10 90 | return self._add_style(style)[1] 91 | 92 | def _add_style(self, style): 93 | num_format_str = style.num_format_str 94 | if num_format_str in self._num_formats: 95 | num_format_idx = self._num_formats[num_format_str] 96 | else: 97 | num_format_idx = ( 98 | FIRST_USER_DEFINED_NUM_FORMAT_IDX 99 | + len(self._num_formats) 100 | - len(StyleCollection._std_num_fmt_list) 101 | ) 102 | self._num_formats[num_format_str] = num_format_idx 103 | 104 | font = style.font 105 | if font in self._font_id2x: 106 | font_idx = self._font_id2x[font] 107 | self.stats[0] += 1 108 | elif self.style_compression: 109 | search_key = font._search_key() 110 | font_idx = self._font_val2x.get(search_key) 111 | if font_idx is not None: 112 | self._font_id2x[font] = font_idx 113 | self.stats[1] += 1 114 | else: 115 | font_idx = len(self._font_x2id) + 1 # Why plus 1? 
Font 4 is missing 116 | self._font_id2x[font] = font_idx 117 | self._font_val2x[search_key] = font_idx 118 | self._font_x2id[font_idx] = font 119 | self.stats[2] += 1 120 | else: 121 | font_idx = len(self._font_id2x) + 1 122 | self._font_id2x[font] = font_idx 123 | self.stats[2] += 1 124 | 125 | gof = (style.alignment, style.borders, style.pattern, style.protection) 126 | xf = (font_idx, num_format_idx) + gof 127 | if xf in self._xf_id2x: 128 | xf_index = self._xf_id2x[xf] 129 | self.stats[3] += 1 130 | elif self.style_compression == 2: 131 | xf_key = (font_idx, num_format_idx) + tuple([obj._search_key() for obj in gof]) 132 | xf_index = self._xf_val2x.get(xf_key) 133 | if xf_index is not None: 134 | self._xf_id2x[xf] = xf_index 135 | self.stats[4] += 1 136 | else: 137 | xf_index = 0x10 + len(self._xf_x2id) 138 | self._xf_id2x[xf] = xf_index 139 | self._xf_val2x[xf_key] = xf_index 140 | self._xf_x2id[xf_index] = xf 141 | self.stats[5] += 1 142 | else: 143 | xf_index = 0x10 + len(self._xf_id2x) 144 | self._xf_id2x[xf] = xf_index 145 | self.stats[5] += 1 146 | 147 | if xf_index >= 0xFFF: 148 | # 12 bits allowed, 0xFFF is a sentinel value 149 | raise ValueError("More than 4094 XFs (styles)") 150 | 151 | return xf, xf_index 152 | 153 | def get_biff_data(self): 154 | result = '' 155 | result += self._all_fonts() 156 | result += self._all_num_formats() 157 | result += self._all_cell_styles() 158 | result += self._all_styles() 159 | return result 160 | 161 | def _all_fonts(self): 162 | result = '' 163 | if self.style_compression: 164 | alist = self._font_x2id.items() 165 | else: 166 | alist = [(x, o) for o, x in self._font_id2x.items()] 167 | alist.sort() 168 | for font_idx, font in alist: 169 | result += font.get_biff_record().get() 170 | return result 171 | 172 | def _all_num_formats(self): 173 | result = '' 174 | alist = [ 175 | (v, k) 176 | for k, v in self._num_formats.items() 177 | if v >= FIRST_USER_DEFINED_NUM_FORMAT_IDX 178 | ] 179 | alist.sort() 180 | for fmtidx, fmtstr in alist: 181 | result += NumberFormatRecord(fmtidx, fmtstr).get() 182 | return result 183 | 184 | def _all_cell_styles(self): 185 | result = '' 186 | for i in range(0, 16): 187 | result += XFRecord(self._default_xf, 'style').get() 188 | if self.style_compression == 2: 189 | alist = self._xf_x2id.items() 190 | else: 191 | alist = [(x, o) for o, x in self._xf_id2x.items()] 192 | alist.sort() 193 | for xf_idx, xf in alist: 194 | result += XFRecord(xf).get() 195 | return result 196 | 197 | def _all_styles(self): 198 | return StyleRecord().get() 199 | 200 | # easyxf and its supporting objects ################################### 201 | 202 | class EasyXFException(Exception): 203 | pass 204 | 205 | class EasyXFCallerError(EasyXFException): 206 | pass 207 | 208 | class EasyXFAuthorError(EasyXFException): 209 | pass 210 | 211 | class IntULim(object): 212 | # If astring represents a valid unsigned integer ('123', '0xabcd', etc) 213 | # and it is <= limit, return the int value; otherwise return None. 
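# Added illustration (not in the original xlwt source): IntULim instances serve as
# value validators in xf_dict below, e.g. 'indent' is checked with IntULim(15), so
# IntULim(15)('0x0e') returns 14 while IntULim(15)('16') and IntULim(15)('abc') return None.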
214 | 215 | def __init__(self, limit): 216 | self.limit = limit 217 | 218 | def __call__(self, astring): 219 | try: 220 | value = int(astring, 0) 221 | except ValueError: 222 | return None 223 | if not 0 <= value <= self.limit: 224 | return None 225 | return value 226 | 227 | bool_map = { 228 | # Text values for all Boolean attributes 229 | '1': 1, 'yes': 1, 'true': 1, 'on': 1, 230 | '0': 0, 'no': 0, 'false': 0, 'off': 0, 231 | } 232 | 233 | border_line_map = { 234 | # Text values for these borders attributes: 235 | # left, right, top, bottom and diag 236 | 'no_line': 0x00, 237 | 'thin': 0x01, 238 | 'medium': 0x02, 239 | 'dashed': 0x03, 240 | 'dotted': 0x04, 241 | 'thick': 0x05, 242 | 'double': 0x06, 243 | 'hair': 0x07, 244 | 'medium_dashed': 0x08, 245 | 'thin_dash_dotted': 0x09, 246 | 'medium_dash_dotted': 0x0a, 247 | 'thin_dash_dot_dotted': 0x0b, 248 | 'medium_dash_dot_dotted': 0x0c, 249 | 'slanted_medium_dash_dotted': 0x0d, 250 | } 251 | 252 | charset_map = { 253 | # Text values for font.charset 254 | 'ansi_latin': 0x00, 255 | 'sys_default': 0x01, 256 | 'symbol': 0x02, 257 | 'apple_roman': 0x4d, 258 | 'ansi_jap_shift_jis': 0x80, 259 | 'ansi_kor_hangul': 0x81, 260 | 'ansi_kor_johab': 0x82, 261 | 'ansi_chinese_gbk': 0x86, 262 | 'ansi_chinese_big5': 0x88, 263 | 'ansi_greek': 0xa1, 264 | 'ansi_turkish': 0xa2, 265 | 'ansi_vietnamese': 0xa3, 266 | 'ansi_hebrew': 0xb1, 267 | 'ansi_arabic': 0xb2, 268 | 'ansi_baltic': 0xba, 269 | 'ansi_cyrillic': 0xcc, 270 | 'ansi_thai': 0xde, 271 | 'ansi_latin_ii': 0xee, 272 | 'oem_latin_i': 0xff, 273 | } 274 | 275 | 276 | # Text values for colour indices. "grey" is a synonym of "gray". 277 | # The names are those given by Microsoft Excel 2003 to the colours 278 | # in the default palette. There is no great correspondence with 279 | # any W3C name-to-RGB mapping. 280 | _colour_map_text = """\ 281 | aqua 0x31 282 | black 0x08 283 | blue 0x0C 284 | blue_gray 0x36 285 | bright_green 0x0B 286 | brown 0x3C 287 | coral 0x1D 288 | cyan_ega 0x0F 289 | dark_blue 0x12 290 | dark_blue_ega 0x12 291 | dark_green 0x3A 292 | dark_green_ega 0x11 293 | dark_purple 0x1C 294 | dark_red 0x10 295 | dark_red_ega 0x10 296 | dark_teal 0x38 297 | dark_yellow 0x13 298 | gold 0x33 299 | gray_ega 0x17 300 | gray25 0x16 301 | gray40 0x37 302 | gray50 0x17 303 | gray80 0x3F 304 | green 0x11 305 | ice_blue 0x1F 306 | indigo 0x3E 307 | ivory 0x1A 308 | lavender 0x2E 309 | light_blue 0x30 310 | light_green 0x2A 311 | light_orange 0x34 312 | light_turquoise 0x29 313 | light_yellow 0x2B 314 | lime 0x32 315 | magenta_ega 0x0E 316 | ocean_blue 0x1E 317 | olive_ega 0x13 318 | olive_green 0x3B 319 | orange 0x35 320 | pale_blue 0x2C 321 | periwinkle 0x18 322 | pink 0x0E 323 | plum 0x3D 324 | purple_ega 0x14 325 | red 0x0A 326 | rose 0x2D 327 | sea_green 0x39 328 | silver_ega 0x16 329 | sky_blue 0x28 330 | tan 0x2F 331 | teal 0x15 332 | teal_ega 0x15 333 | turquoise 0x0F 334 | violet 0x14 335 | white 0x09 336 | yellow 0x0D""" 337 | 338 | colour_map = {} 339 | for _line in _colour_map_text.splitlines(): 340 | _name, _num = _line.split() 341 | _num = int(_num, 0) 342 | colour_map[_name] = _num 343 | if 'gray' in _name: 344 | colour_map[_name.replace('gray', 'grey')] = _num 345 | del _colour_map_text, _line, _name, _num 346 | 347 | 348 | pattern_map = { 349 | # Text values for pattern.pattern 350 | # xlwt/doc/pattern_examples.xls showcases all of these patterns. 
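# Added note (not in the original xlwt source): easyxf, defined later in this module,
# looks string values up here; e.g. easyxf('pattern: pattern solid, fore_colour red')
# resolves 'solid' to 1 via this map and 'red' to 0x0A via colour_map above.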
351 | 'no_fill': 0, 352 | 'none': 0, 353 | 'solid': 1, 354 | 'solid_fill': 1, 355 | 'solid_pattern': 1, 356 | 'fine_dots': 2, 357 | 'alt_bars': 3, 358 | 'sparse_dots': 4, 359 | 'thick_horz_bands': 5, 360 | 'thick_vert_bands': 6, 361 | 'thick_backward_diag': 7, 362 | 'thick_forward_diag': 8, 363 | 'big_spots': 9, 364 | 'bricks': 10, 365 | 'thin_horz_bands': 11, 366 | 'thin_vert_bands': 12, 367 | 'thin_backward_diag': 13, 368 | 'thin_forward_diag': 14, 369 | 'squares': 15, 370 | 'diamonds': 16, 371 | } 372 | 373 | def any_str_func(s): 374 | return s.strip() 375 | 376 | def colour_index_func(s, maxval=0x7F): 377 | try: 378 | value = int(s, 0) 379 | except ValueError: 380 | return None 381 | if not (0 <= value <= maxval): 382 | return None 383 | return value 384 | 385 | colour_index_func_7 = colour_index_func 386 | 387 | def colour_index_func_15(s): 388 | return colour_index_func(s, maxval=0x7FFF) 389 | 390 | def rotation_func(s): 391 | try: 392 | value = int(s, 0) 393 | except ValueError: 394 | return None 395 | if not (-90 <= value <= 90): 396 | raise EasyXFCallerError("rotation %d: should be -90 to +90 degrees" % value) 397 | if value < 0: 398 | value = 90 - value # encode as 91 to 180 (clockwise) 399 | return value 400 | 401 | xf_dict = { 402 | 'align': 'alignment', # synonym 403 | 'alignment': { 404 | 'dire': { 405 | 'general': 0, 406 | 'lr': 1, 407 | 'rl': 2, 408 | }, 409 | 'direction': 'dire', 410 | 'horiz': 'horz', 411 | 'horizontal': 'horz', 412 | 'horz': { 413 | 'general': 0, 414 | 'left': 1, 415 | 'center': 2, 416 | 'centre': 2, # "align: horiz centre" means xf.alignment.horz is set to 2 417 | 'right': 3, 418 | 'filled': 4, 419 | 'justified': 5, 420 | 'center_across_selection': 6, 421 | 'centre_across_selection': 6, 422 | 'distributed': 7, 423 | }, 424 | 'inde': IntULim(15), # restriction: 0 <= value <= 15 425 | 'indent': 'inde', 426 | 'rota': [{'stacked': 255, 'none': 0, }, rotation_func], 427 | 'rotation': 'rota', 428 | 'shri': bool_map, 429 | 'shrink': 'shri', 430 | 'shrink_to_fit': 'shri', 431 | 'vert': { 432 | 'top': 0, 433 | 'center': 1, 434 | 'centre': 1, 435 | 'bottom': 2, 436 | 'justified': 3, 437 | 'distributed': 4, 438 | }, 439 | 'vertical': 'vert', 440 | 'wrap': bool_map, 441 | }, 442 | 'border': 'borders', 443 | 'borders': { 444 | 'left': [border_line_map, IntULim(0x0d)], 445 | 'right': [border_line_map, IntULim(0x0d)], 446 | 'top': [border_line_map, IntULim(0x0d)], 447 | 'bottom': [border_line_map, IntULim(0x0d)], 448 | 'diag': [border_line_map, IntULim(0x0d)], 449 | 'top_colour': [colour_map, colour_index_func_7], 450 | 'bottom_colour': [colour_map, colour_index_func_7], 451 | 'left_colour': [colour_map, colour_index_func_7], 452 | 'right_colour': [colour_map, colour_index_func_7], 453 | 'diag_colour': [colour_map, colour_index_func_7], 454 | 'top_color': 'top_colour', 455 | 'bottom_color': 'bottom_colour', 456 | 'left_color': 'left_colour', 457 | 'right_color': 'right_colour', 458 | 'diag_color': 'diag-colour', 459 | 'need_diag_1': bool_map, 460 | 'need_diag_2': bool_map, 461 | }, 462 | 'font': { 463 | 'bold': bool_map, 464 | 'charset': charset_map, 465 | 'color': 'colour_index', 466 | 'color_index': 'colour_index', 467 | 'colour': 'colour_index', 468 | 'colour_index': [colour_map, colour_index_func_15], 469 | 'escapement': {'none': 0, 'superscript': 1, 'subscript': 2}, 470 | 'family': {'none': 0, 'roman': 1, 'swiss': 2, 'modern': 3, 'script': 4, 'decorative': 5, }, 471 | 'height': IntULim(0xFFFF), # practical limits are much narrower e.g. 
160 to 1440 (8pt to 72pt) 472 | 'italic': bool_map, 473 | 'name': any_str_func, 474 | 'outline': bool_map, 475 | 'shadow': bool_map, 476 | 'struck_out': bool_map, 477 | 'underline': [bool_map, {'none': 0, 'single': 1, 'single_acc': 0x21, 'double': 2, 'double_acc': 0x22, }], 478 | }, 479 | 'pattern': { 480 | 'back_color': 'pattern_back_colour', 481 | 'back_colour': 'pattern_back_colour', 482 | 'fore_color': 'pattern_fore_colour', 483 | 'fore_colour': 'pattern_fore_colour', 484 | 'pattern': [pattern_map, IntULim(16)], 485 | 'pattern_back_color': 'pattern_back_colour', 486 | 'pattern_back_colour': [colour_map, colour_index_func_7], 487 | 'pattern_fore_color': 'pattern_fore_colour', 488 | 'pattern_fore_colour': [colour_map, colour_index_func_7], 489 | }, 490 | 'protection': { 491 | 'cell_locked' : bool_map, 492 | 'formula_hidden': bool_map, 493 | }, 494 | } 495 | 496 | def _esplit(s, split_char, esc_char="\\"): 497 | escaped = False 498 | olist = [''] 499 | for c in s: 500 | if escaped: 501 | olist[-1] += c 502 | escaped = False 503 | elif c == esc_char: 504 | escaped = True 505 | elif c == split_char: 506 | olist.append('') 507 | else: 508 | olist[-1] += c 509 | return olist 510 | 511 | def _parse_strg_to_obj(strg, obj, parse_dict, 512 | field_sep=",", line_sep=";", intro_sep=":", esc_char="\\", debug=False): 513 | for line in _esplit(strg, line_sep, esc_char): 514 | line = line.strip() 515 | if not line: 516 | break 517 | split_line = _esplit(line, intro_sep, esc_char) 518 | if len(split_line) != 2: 519 | raise EasyXFCallerError('line %r should have exactly 1 "%c"' % (line, intro_sep)) 520 | section, item_str = split_line 521 | section = section.strip().lower() 522 | for counter in range(2): 523 | result = parse_dict.get(section) 524 | if result is None: 525 | raise EasyXFCallerError('section %r is unknown' % section) 526 | if isinstance(result, dict): 527 | break 528 | if not isinstance(result, str): 529 | raise EasyXFAuthorError( 530 | 'section %r should map to dict or str object; found %r' % (section, type(result))) 531 | # synonym 532 | old_section = section 533 | section = result 534 | else: 535 | raise EasyXFAuthorError('Attempt to define synonym of synonym (%r: %r)' % (old_section, result)) 536 | section_dict = result 537 | section_obj = getattr(obj, section, None) 538 | if section_obj is None: 539 | raise EasyXFAuthorError('instance of %s class has no attribute named %s' % (obj.__class__.__name__, section)) 540 | for kv_str in _esplit(item_str, field_sep, esc_char): 541 | guff = kv_str.split() 542 | if not guff: 543 | continue 544 | k = guff[0].lower().replace('-', '_') 545 | v = ' '.join(guff[1:]) 546 | if not v: 547 | raise EasyXFCallerError("no value supplied for %s.%s" % (section, k)) 548 | for counter in xrange(2): 549 | result = section_dict.get(k) 550 | if result is None: 551 | raise EasyXFCallerError('%s.%s is not a known attribute' % (section, k)) 552 | if not isinstance(result, basestring): 553 | break 554 | # synonym 555 | old_k = k 556 | k = result 557 | else: 558 | raise EasyXFAuthorError('Attempt to define synonym of synonym (%r: %r)' % (old_k, result)) 559 | value_info = result 560 | if not isinstance(value_info, list): 561 | value_info = [value_info] 562 | for value_rule in value_info: 563 | if isinstance(value_rule, dict): 564 | # dict maps strings to integer field values 565 | vl = v.lower().replace('-', '_') 566 | if vl in value_rule: 567 | value = value_rule[vl] 568 | break 569 | elif callable(value_rule): 570 | value = value_rule(v) 571 | if value is not None: 
572 | break 573 | else: 574 | raise EasyXFAuthorError("unknown value rule for attribute %r: %r" % (k, value_rule)) 575 | else: 576 | raise EasyXFCallerError("unexpected value %r for %s.%s" % (v, section, k)) 577 | try: 578 | orig = getattr(section_obj, k) 579 | except AttributeError: 580 | raise EasyXFAuthorError('%s.%s in dictionary but not in supplied object' % (section, k)) 581 | if debug: print "+++ %s.%s = %r # %s; was %r" % (section, k, value, v, orig) 582 | setattr(section_obj, k, value) 583 | 584 | def easyxf(strg_to_parse="", num_format_str=None, 585 | field_sep=",", line_sep=";", intro_sep=":", esc_char="\\", debug=False): 586 | xfobj = XFStyle() 587 | if num_format_str is not None: 588 | xfobj.num_format_str = num_format_str 589 | if strg_to_parse: 590 | _parse_strg_to_obj(strg_to_parse, xfobj, xf_dict, 591 | field_sep=field_sep, line_sep=line_sep, intro_sep=intro_sep, esc_char=esc_char, debug=debug) 592 | return xfobj 593 | -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/UnicodeUtils.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/mm/lib/xlwt_0_7_2/UnicodeUtils.py -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/Utils.py: -------------------------------------------------------------------------------- 1 | # pyXLWriter: A library for generating Excel Spreadsheets 2 | # Copyright (c) 2004 Evgeny Filatov 3 | # Copyright (c) 2002-2004 John McNamara (Perl Spreadsheet::WriteExcel) 4 | # 5 | # This library is free software; you can redistribute it and/or modify it 6 | # under the terms of the GNU Lesser General Public License as published by 7 | # the Free Software Foundation; either version 2.1 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # This library is distributed in the hope that it will be useful, but 11 | # WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 13 | # General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU Lesser General Public License 16 | # along with this library; if not, write to the Free Software Foundation, 17 | # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 | #---------------------------------------------------------------------------- 19 | # This module was written/ported from PERL Spreadsheet::WriteExcel module 20 | # The author of the PERL Spreadsheet::WriteExcel module is John McNamara 21 | # 22 | #---------------------------------------------------------------------------- 23 | # See the README.txt distributed with pyXLWriter for more details. 24 | 25 | # Portions are (C) Roman V. 
Kiseliov, 2005 26 | 27 | 28 | # Utilities for work with reference to cells and with sheetnames 29 | 30 | 31 | __rev_id__ = """$Id: Utils.py 3844 2009-05-20 01:02:54Z sjmachin $""" 32 | 33 | import re 34 | from struct import pack 35 | from ExcelMagic import MAX_ROW, MAX_COL 36 | 37 | 38 | _re_cell_ex = re.compile(r"(\$?)([A-I]?[A-Z])(\$?)(\d+)", re.IGNORECASE) 39 | _re_row_range = re.compile(r"\$?(\d+):\$?(\d+)") 40 | _re_col_range = re.compile(r"\$?([A-I]?[A-Z]):\$?([A-I]?[A-Z])", re.IGNORECASE) 41 | _re_cell_range = re.compile(r"\$?([A-I]?[A-Z]\$?\d+):\$?([A-I]?[A-Z]\$?\d+)", re.IGNORECASE) 42 | _re_cell_ref = re.compile(r"\$?([A-I]?[A-Z]\$?\d+)", re.IGNORECASE) 43 | 44 | 45 | def col_by_name(colname): 46 | """ 47 | """ 48 | col = 0 49 | pow = 1 50 | for i in xrange(len(colname)-1, -1, -1): 51 | ch = colname[i] 52 | col += (ord(ch) - ord('A') + 1) * pow 53 | pow *= 26 54 | return col - 1 55 | 56 | 57 | def cell_to_rowcol(cell): 58 | """Convert an Excel cell reference string in A1 notation 59 | to numeric row/col notation. 60 | 61 | Returns: row, col, row_abs, col_abs 62 | 63 | """ 64 | m = _re_cell_ex.match(cell) 65 | if not m: 66 | raise Exception("Ill-formed single_cell reference: %s" % cell) 67 | col_abs, col, row_abs, row = m.groups() 68 | row_abs = bool(row_abs) 69 | col_abs = bool(col_abs) 70 | row = int(row) - 1 71 | col = col_by_name(col.upper()) 72 | return row, col, row_abs, col_abs 73 | 74 | 75 | def cell_to_rowcol2(cell): 76 | """Convert an Excel cell reference string in A1 notation 77 | to numeric row/col notation. 78 | 79 | Returns: row, col 80 | 81 | """ 82 | m = _re_cell_ex.match(cell) 83 | if not m: 84 | raise Exception("Error in cell format") 85 | col_abs, col, row_abs, row = m.groups() 86 | # Convert base26 column string to number 87 | # All your Base are belong to us. 88 | row = int(row) - 1 89 | col = col_by_name(col.upper()) 90 | return row, col 91 | 92 | 93 | def rowcol_to_cell(row, col, row_abs=False, col_abs=False): 94 | """Convert numeric row/col notation to an Excel cell reference string in 95 | A1 notation. 96 | 97 | """ 98 | assert 0 <= row < MAX_ROW # MAX_ROW counts from 1 99 | assert 0 <= col < MAX_COL # MAX_COL counts from 1 100 | d = col // 26 101 | m = col % 26 102 | chr1 = "" # Most significant character in AA1 103 | if row_abs: 104 | row_abs = '$' 105 | else: 106 | row_abs = '' 107 | if col_abs: 108 | col_abs = '$' 109 | else: 110 | col_abs = '' 111 | if d > 0: 112 | chr1 = chr(ord('A') + d - 1) 113 | chr2 = chr(ord('A') + m) 114 | # Zero index to 1-index 115 | return col_abs + chr1 + chr2 + row_abs + str(row + 1) 116 | 117 | def rowcol_pair_to_cellrange(row1, col1, row2, col2, 118 | row1_abs=False, col1_abs=False, row2_abs=False, col2_abs=False): 119 | """Convert two (row,column) pairs 120 | into a cell range string in A1:B2 notation. 121 | 122 | Returns: cell range string 123 | """ 124 | assert row1 <= row2 125 | assert col1 <= col2 126 | return ( 127 | rowcol_to_cell(row1, col1, row1_abs, col1_abs) 128 | + ":" 129 | + rowcol_to_cell(row2, col2, row2_abs, col2_abs) 130 | ) 131 | 132 | def cellrange_to_rowcol_pair(cellrange): 133 | """Convert cell range string in A1 notation to numeric row/col 134 | pair. 
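    Illustrative examples (added, not in the original docstring):
    '1:3' -> (0, 0, 2, -1); 'A:A' -> (0, 0, -1, 0); 'A1:B7' -> (0, 0, 6, 1).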
135 | 136 | Returns: row1, col1, row2, col2 137 | 138 | """ 139 | cellrange = cellrange.upper() 140 | # Convert a row range: '1:3' 141 | res = _re_row_range.match(cellrange) 142 | if res: 143 | row1 = int(res.group(1)) - 1 144 | col1 = 0 145 | row2 = int(res.group(2)) - 1 146 | col2 = -1 147 | return row1, col1, row2, col2 148 | # Convert a column range: 'A:A' or 'B:G'. 149 | # A range such as A:A is equivalent to A1:A16384, so add rows as required 150 | res = _re_col_range.match(cellrange) 151 | if res: 152 | col1 = col_by_name(res.group(1).upper()) 153 | row1 = 0 154 | col2 = col_by_name(res.group(2).upper()) 155 | row2 = -1 156 | return row1, col1, row2, col2 157 | # Convert a cell range: 'A1:B7' 158 | res = _re_cell_range.match(cellrange) 159 | if res: 160 | row1, col1 = cell_to_rowcol2(res.group(1)) 161 | row2, col2 = cell_to_rowcol2(res.group(2)) 162 | return row1, col1, row2, col2 163 | # Convert a cell reference: 'A1' or 'AD2000' 164 | res = _re_cell_ref.match(cellrange) 165 | if res: 166 | row1, col1 = cell_to_rowcol2(res.group(1)) 167 | return row1, col1, row1, col1 168 | raise Exception("Unknown cell reference %s" % (cell)) 169 | 170 | 171 | def cell_to_packed_rowcol(cell): 172 | """ pack row and column into the required 4 byte format """ 173 | row, col, row_abs, col_abs = cell_to_rowcol(cell) 174 | if col >= MAX_COL: 175 | raise Exception("Column %s greater than IV in formula" % cell) 176 | if row >= MAX_ROW: # this for BIFF8. for BIFF7 available 2^14 177 | raise Exception("Row %s greater than %d in formula" % (cell, MAX_ROW)) 178 | col |= int(not row_abs) << 15 179 | col |= int(not col_abs) << 14 180 | return row, col 181 | 182 | # === sheetname functions === 183 | 184 | def valid_sheet_name(sheet_name): 185 | if sheet_name == u"" or sheet_name[0] == u"'" or len(sheet_name) > 31: 186 | return False 187 | for c in sheet_name: 188 | if c in u"[]:\\?/*\x00": 189 | return False 190 | return True 191 | 192 | def quote_sheet_name(unquoted_sheet_name): 193 | if not valid_sheet_name(unquoted_sheet_name): 194 | raise Exception( 195 | 'attempt to quote an invalid worksheet name %r' % unquoted_sheet_name) 196 | return u"'" + unquoted_sheet_name.replace(u"'", u"''") + u"'" 197 | -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: windows-1252 -*- 2 | 3 | __VERSION__ = '0.7.2' 4 | 5 | import sys 6 | if sys.version_info[:2] < (2, 3): 7 | print >> sys.stderr, "Sorry, xlwt requires Python 2.3 or later" 8 | sys.exit(1) 9 | 10 | from Workbook import Workbook 11 | from Worksheet import Worksheet 12 | from Row import Row 13 | from Column import Column 14 | from Formatting import Font, Alignment, Borders, Pattern, Protection 15 | from Style import XFStyle, easyxf 16 | from ExcelFormula import * 17 | -------------------------------------------------------------------------------- /mm/lib/xlwt_0_7_2/licences.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/mm/lib/xlwt_0_7_2/licences.py -------------------------------------------------------------------------------- /mm/model_base.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from datetime import datetime 3 | import inspect 4 | import sys 5 | from types import NoneType 6 | from lib.font_data.decorators import 
memoized 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | 11 | class BaseFieldType(object): 12 | 13 | def __init__(self, data): 14 | self.data = data 15 | 16 | def __repr__(self): 17 | return unicode(self) 18 | 19 | def __unicode__(self): 20 | return u"%s" % type(self) 21 | 22 | 23 | class HeaderFieldType(BaseFieldType): 24 | pass 25 | 26 | 27 | class DateFieldType(BaseFieldType): 28 | def __init__(self, data, format=None): 29 | self.format = format 30 | super(DateFieldType, self).__init__(data) 31 | 32 | 33 | class TimeFieldType(BaseFieldType): 34 | pass 35 | 36 | 37 | class DateTimeFieldType(BaseFieldType): 38 | 39 | def __init__(self, data): 40 | if data and data.tzinfo: 41 | data = data.replace(tzinfo=None) # excel can't handle 42 | super(DateTimeFieldType, self).__init__(data) 43 | 44 | 45 | class IntFieldType(BaseFieldType): 46 | pass 47 | 48 | 49 | class FloatFieldType(BaseFieldType): 50 | pass 51 | 52 | 53 | class DecimalFieldType(BaseFieldType): 54 | pass 55 | 56 | 57 | class StringFieldType(BaseFieldType): 58 | pass 59 | 60 | 61 | class BoolFieldType(BaseFieldType): 62 | pass 63 | 64 | 65 | class URLFieldType(BaseFieldType): 66 | def __init__(self, path, displayname=None): 67 | if not displayname: 68 | displayname = path 69 | self.displayname = displayname 70 | super(URLFieldType, self).__init__(path) 71 | 72 | 73 | class ImageFieldType(BaseFieldType): 74 | def __init__(self, path, width=None, height=None): 75 | self.width = width 76 | self.height = height 77 | super(ImageFieldType, self).__init__(path) 78 | 79 | 80 | class FormulaFieldType(BaseFieldType): 81 | pass 82 | 83 | 84 | class NoneFieldType(BaseFieldType): 85 | pass 86 | 87 | 88 | @memoized 89 | def get_members_list(): 90 | return [ 91 | x[1] for x 92 | in inspect.getmembers(sys.modules[__name__], inspect.isclass) 93 | if issubclass(x[1], BaseFieldType)] 94 | 95 | 96 | def is_custom_mm_type(inst): 97 | members = get_members_list() 98 | if type(inst) in members: 99 | return True 100 | return False 101 | 102 | 103 | class DataModel(object): 104 | """ Data Model creates a list of system defined data types in self.field_headers""" 105 | 106 | def __init__(self, data, order=None, column_types=None): 107 | """ constructor takes data as a tuple or list""" 108 | 109 | def get_field_type_class(title, value): 110 | if column_types and title in column_types: 111 | return column_types[title] 112 | elif is_custom_mm_type(value): 113 | return type(value) 114 | else: 115 | # we figure out the type 116 | return self.figure_out_type(value) 117 | 118 | self.field_headers = [] 119 | self.field_titles = [] 120 | if len(data) == 0: 121 | raise Exception("Can not make spreadsheets with an empty set") 122 | first_data = data[0] 123 | if type(data[0]) != dict and not hasattr(data[0], "iteritems"): 124 | # they sent a list #2 125 | if not order: 126 | raise Exception("use 'order' to set headers") 127 | self.field_titles = order 128 | for i in range(len(self.field_titles)): 129 | log.info("looking at %s ..." 
% data[0][i]) 130 | 131 | field_type_class = get_field_type_class(i, data[0][i]) 132 | 133 | # we add it to the 'class' so to be 134 | # used in every instance 135 | self.field_headers.append(field_type_class) 136 | log.info("created field type %s for column %s" % (field_type_class, i)) 137 | 138 | elif hasattr(first_data, "iteritems") and len(first_data) > 0: 139 | if order: 140 | # add in this order it was explicitly set 141 | self.field_titles = order 142 | else: 143 | # no order set, just get 144 | self.field_titles = first_data.keys() 145 | 146 | for k in self.field_titles: 147 | log.info("looking at %s ..." % data[0][k]) 148 | 149 | field_type_class = get_field_type_class(k, data[0][k]) 150 | 151 | # we add it to the 'class' so to be 152 | # used in every instance 153 | self.field_headers.append(field_type_class) 154 | log.info("created field type %s for %s" % (field_type_class, k)) 155 | 156 | def figure_out_type(self, item): 157 | item_type = type(item) 158 | 159 | if item_type == unicode or item_type == str: 160 | if item.startswith('=') : 161 | return FormulaFieldType 162 | else : 163 | return StringFieldType 164 | 165 | elif item_type == int: 166 | return IntFieldType 167 | 168 | elif item_type == datetime: 169 | return DateTimeFieldType 170 | 171 | elif item_type == bool: 172 | return BoolFieldType 173 | 174 | elif item_type == NoneType: # NOQA 175 | return NoneFieldType 176 | 177 | log.warn("Returning None type for type %s" % item_type) 178 | return NoneFieldType 179 | -------------------------------------------------------------------------------- /mm/serializer_base.py: -------------------------------------------------------------------------------- 1 | from grid_base import GridBase 2 | 3 | 4 | class Serializer(object): 5 | """ Class that pairs data with models """ 6 | def __init__(self, data_model, data, config, grid_class=GridBase): 7 | self.data = data 8 | self.data_model = data_model 9 | self.grid_class = grid_class 10 | self.config = config 11 | 12 | def serialize(self): 13 | """ returnes serialzed data into a Grid """ 14 | grid = self.grid_class() 15 | 16 | # we care about order 17 | # that was set in model_base 18 | grid.row_count = len(self.data) 19 | field_headers = self.data_model.field_headers 20 | grid.col_count = len(field_headers) 21 | grid.headers = field_headers 22 | grid.titles = self.data_model.field_titles 23 | grid.populate(self.data,self.config) 24 | 25 | return grid 26 | -------------------------------------------------------------------------------- /mm/style_base.py: -------------------------------------------------------------------------------- 1 | import logging as log 2 | 3 | class StyleBase(object): 4 | 5 | color = None 6 | font_family = None 7 | background_color = None 8 | border_color = None 9 | font_style = None 10 | text_align = None 11 | _font_size_points = None # points 12 | 13 | @property 14 | def font_size(self): 15 | return self._font_size_points 16 | 17 | @font_size.setter 18 | def font_size(self, value): 19 | if 'pt' in value: 20 | value = int(value.replace('pt','')) 21 | elif 'px' in value: 22 | log.warning("font-size does not (yet) support pixel sizes") # TODO: support pixel 23 | self._font_size_points = None 24 | return 25 | else: 26 | log.warning("assuming font-size is in points") 27 | 28 | self._font_size_points = int(value) 29 | 30 | def style_from_string(self, in_str): 31 | 32 | for attr in in_str.split(";"): 33 | if attr.strip() == '': continue 34 | k,v = attr.split(":") 35 | value = v.strip() 36 | key = k.strip().replace("-", 
"_") 37 | if not hasattr(self, key): 38 | log.warn("Unknown style attribute %s: %s" % (key,value)) 39 | continue 40 | 41 | setattr(self, key, value) 42 | 43 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /requirements_extra.txt: -------------------------------------------------------------------------------- 1 | gdata==2.0.17 2 | prettytable==0.7.2 3 | 4 | -------------------------------------------------------------------------------- /requirements_unittest.txt: -------------------------------------------------------------------------------- 1 | xlrd 2 | django 3 | gdata 4 | 5 | -------------------------------------------------------------------------------- /scripts/textwidths.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | r''' 3 | Output is in format [fontname]{defualt_width:x, values: {character: (width, kern_pairs)}} and an example looks like this:: 4 | 5 | 6 | {'Arial': {'default_width': 152, 7 | 'values': {'\x00': (192, 8 | {'A': -1, 9 | '\\': -1, 10 | '_': -3, 11 | 'j': -12, 12 | '\xaf': -4, 13 | '\xc0': -1, 14 | '\xc1': -1, 15 | '\xc2': -1, 16 | '\xc3': -1, 17 | '\xc4': -1, 18 | '\xc5': -1, 19 | '\xce': -4, 20 | '\xee': -3}), 21 | .... 22 | 'P': (171, 23 | {' ': -5, 24 | ',': -33, 25 | '.': -33, 26 | 'A': -20, 27 | '\\': -1, 28 | '_': -3, 29 | 'j': -12, 30 | '\xa0': -5, 31 | '\xaf': -4, 32 | '\xc0': -1, 33 | '\xc1': -1, 34 | '\xc2': -1, 35 | '\xc3': -1, 36 | '\xc4': -1, 37 | '\xc5': -1, 38 | '\xce': -4, 39 | '\xee': -3}), 40 | 41 | The scale of all numbers are 256 points. 
42 | 43 | ''' 44 | 45 | import Image, ImageDraw, ImageFont 46 | 47 | 48 | FONT_PATH = "/usr/share/fonts/truetype/msttcorefonts/" 49 | FONT_EXT = ".ttf" 50 | 51 | def get_width(font, char_str): 52 | im = Image.new('RGBA', (100, 100), (0, 0, 0, 0)) 53 | draw = ImageDraw.Draw(im) 54 | font = ImageFont.truetype(FONT_PATH+font+FONT_EXT, 256) 55 | width, height = draw.textsize(char_str, font) 56 | del draw 57 | return width, char_str 58 | 59 | 60 | font_list = [ 61 | "Andale_Mono", 62 | "Arial_Black", 63 | "Arial_Bold_Italic", 64 | "Arial_Bold", 65 | "Arial_Italic", 66 | "Arial", 67 | "Comic_Sans_MS_Bold", 68 | "Comic_Sans_MS", 69 | "Courier_New", 70 | "Georgia_Bold", 71 | "Georgia_Italic", 72 | "Georgia", 73 | "Impact", 74 | "Times_New_Roman_Bold_Italic", 75 | "Times_New_Roman_Bold", 76 | "Times_New_Roman_Italic", 77 | "Times_New_Roman", 78 | "Trebuchet_MS_Bold_Italic", 79 | "Trebuchet_MS_Bold", 80 | "Trebuchet_MS_Italic", 81 | "Trebuchet_MS", 82 | "Verdana_Bold_Italic", 83 | "Verdana_Bold", 84 | "Verdana_Italic", 85 | "Verdana", 86 | "Webdings", 87 | ] 88 | 89 | fonts = {} 90 | 91 | for font in font_list: 92 | 93 | values = {} 94 | fonts[font] = dict(values=values,default_width=128) 95 | all_width = 0 96 | width_count = 0 97 | 98 | for i in range(256): 99 | 100 | width, char = get_width(font, chr(i)) 101 | values[char] = (width, {}) 102 | if width != 0: 103 | all_width += width 104 | width_count += 1 105 | 106 | fonts[font]['default_width'] = int(all_width / width_count) 107 | 108 | 109 | for lchar,ltables in values.items(): 110 | lwidth = ltables[0] 111 | kerntable = ltables[1] 112 | for rchar,rtables in values.items(): 113 | rwidth = rtables[0] 114 | pairwidth, pair = get_width(font, lchar+rchar ) 115 | kern = pairwidth - (lwidth + rwidth) 116 | if kern != 0: 117 | kerntable[rchar] = kern 118 | 119 | # write in binary pickle mode 120 | import cPickle as pickle 121 | with open("font_data.bin","wb") as f: 122 | pickle.dump(fonts,f,True) 123 | 124 | 125 | 126 | 127 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | import mm 4 | 5 | 6 | setup( 7 | name='Marmir', 8 | version=mm.__version__, 9 | description='Marmir: makes spreadsheets easy', 10 | author='Brian Ray', 11 | author_email='brianhray@gmail.com', 12 | maintainer='Brian Ray', 13 | maintainer_email='brianhray@gmail.com', 14 | url='http://brianray.github.com/mm', 15 | long_description="Python power spreadsheets on steroids", 16 | # license="Brian Ray", TBD 17 | platforms=["any"], 18 | packages=find_packages(), 19 | include_package_data=True, 20 | ) 21 | -------------------------------------------------------------------------------- /tests/api_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import datetime 3 | import mm 4 | 5 | class ApiTestSuite(unittest.TestCase): 6 | 7 | def test_simple_types(self): 8 | date = mm.Date(datetime.datetime.now(), "%Y-%m-%dT%H:%M:%S") 9 | 10 | 11 | 12 | if __name__ == "__main__": 13 | unittest.main() 14 | -------------------------------------------------------------------------------- /tests/author.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/tests/author.bmp -------------------------------------------------------------------------------- 
/tests/basic_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import datetime 3 | import mm 4 | import os 5 | import filecmp 6 | from xlrd_helper import XLSReader 7 | from mm.config_base import ConfigBase 8 | path = os.path.dirname(__file__) 9 | now = datetime.datetime.now().replace(microsecond=0) 10 | 11 | 12 | class TestBasicSuite(unittest.TestCase): 13 | 14 | def test_minimal(self): 15 | 16 | my_data = [ 17 | { 18 | 'msg': "My first Cell", 19 | 'id': 1, 20 | 'when': now, 21 | }, 22 | { 23 | 'msg': "My second Cell", 24 | 'id': 2, 25 | 'when': now, 26 | }, 27 | 28 | ] 29 | mm_doc = mm.Document(my_data) 30 | str = mm_doc.writestr() 31 | self.assertTrue( 32 | len(str) > 10, 33 | msg="String should be longer than %s" % len(str)) 34 | with open("tests/generated_files/test_doc.xls", "wb") as f: 35 | f.write(str) 36 | self.check("tests/generated_files/test_doc.xls", my_data) 37 | 38 | def test_minimal_lists(self): 39 | 40 | my_headers = ('id', 'msg', 'when') 41 | my_data = ( 42 | (1, "my first row", now), 43 | (2, "my second row", now), 44 | ) 45 | 46 | mm_doc = mm.Document(my_data, order=my_headers) 47 | str = mm_doc.writestr() 48 | self.assertTrue( 49 | len(str) > 10, 50 | msg="String should be longer than %s" % len(str)) 51 | with open("tests/generated_files/test_list_doc.xls", "wb") as f: 52 | f.write(str) 53 | 54 | as_dict = [dict(zip(my_headers, row)) for row in my_data] 55 | self.check("tests/generated_files/test_list_doc.xls", as_dict) 56 | 57 | def check(self, filename, my_data): 58 | xls = XLSReader(filename) 59 | row = 0 60 | headers = [] 61 | for ddict in my_data: 62 | col = 0 63 | for header, value in ddict.items(): 64 | cell_type = xls.get_type(row, col) 65 | cell_value = xls.get_value(row, col) 66 | if row == 0: 67 | #headers 68 | headers.append(cell_value) 69 | else: 70 | column_header = headers[col] 71 | data = my_data[row - 1] 72 | self.assertEquals(cell_value, data[column_header]) 73 | self.assertEquals(cell_type, type(data[column_header])) 74 | 75 | col += 1 76 | row += 1 77 | 78 | def test_mid_complex(self): 79 | 80 | my_data = [ 81 | { 82 | 'msg': "My first Cell", 83 | 'id': 1, 84 | 'when': mm.Date(now, "%Y-%m-%dT%H:%M:%S"), 85 | 'homepage': mm.URL("https://github.com/brianray") 86 | }, 87 | { 88 | 'msg': "My second Cell", 89 | 'id': 2, 90 | 'when': now, 91 | 'homepage': mm.URL("http://twitter.com/brianray", 92 | "Tweet Tweet") 93 | }, 94 | 95 | ] 96 | mm_doc = mm.Document(my_data) 97 | str = mm_doc.writestr() 98 | self.assertTrue( 99 | len(str) > 10, 100 | msg="String should be longer than %s" % len(str)) 101 | with open("tests/generated_files/test_doc2.xls", "wb") as f: 102 | f.write(str) 103 | 104 | #TODO self.check("tests/generated_files/test_doc2.xls", my_data) 105 | 106 | def test_image(self): 107 | 108 | my_data = [ 109 | { 110 | 'profile': mm.Image(os.path.join(path, "author.bmp"), 230, 326) 111 | }, 112 | ] 113 | mm_doc = mm.Document(my_data) 114 | str = mm_doc.writestr() 115 | self.assertTrue( 116 | len(str) > 10, 117 | msg="String should be longer than %s" % len(str)) 118 | with open("tests/generated_files/test_doc_image.xls", "wb") as f: 119 | f.write(str) 120 | 121 | def test_col_type(self): 122 | 123 | my_data = [ 124 | { 125 | 'date': "2003-01-01" 126 | }, 127 | ] 128 | col_types = {'date': mm.Date} 129 | mm_doc = mm.Document(my_data, column_types=col_types) 130 | str = mm_doc.writestr() 131 | self.assertTrue( 132 | len(str) > 10, 133 | msg="String should be longer than %s" % len(str)) 134 | 
with open("tests/generated_files/test_col_types.xls", "wb") as f: 135 | f.write(str) 136 | 137 | def test_missing_1(self): 138 | my_data = [ 139 | { 140 | 'msg': "My first Cell", 141 | 'id': 1, 142 | 'when': now, 143 | }, 144 | { 145 | 'msg': "My second Cell has missing data", 146 | 'id': 2, 147 | }, 148 | 149 | ] 150 | mm_doc = mm.Document(my_data) 151 | str = mm_doc.writestr() 152 | self.assertTrue( 153 | len(str) > 10, 154 | msg="String should be longer than %s" % len(str)) 155 | with open("tests/generated_files/test_doc.xls", "wb") as f: 156 | f.write(str) 157 | self.check("tests/generated_files/test_doc.xls", my_data) 158 | 159 | def test_missing_2(self): 160 | my_data = [ 161 | { 162 | 'msg': "My first Cell", 163 | 'id': 1, 164 | }, 165 | { 166 | 'msg': "My second Cell", 167 | 'id': 2, 168 | }, 169 | { 170 | 'msg': "My third Cell has missing data", 171 | 'id': 3, 172 | 'when': now, 173 | }, 174 | ] 175 | with self.assertRaises(Exception): 176 | mm_doc = mm.Document(my_data) 177 | str = mm_doc.writestr() 178 | self.assertTrue( 179 | len(str) > 10, 180 | msg="String should be longer than %s" % len(str)) 181 | with open("test_doc.xls", "wb") as f: 182 | f.write(str) 183 | self.check("tests/generated_files/test_doc.xls", my_data) 184 | 185 | def test_write_and_writestr_binary_output(self): 186 | """ 187 | write() and writestr() should generate the same files 188 | """ 189 | my_data = [ 190 | { 191 | 'msg': "My first Cell", 192 | 'id': 1, 193 | 'when': now, 194 | }, 195 | { 196 | 'msg': "My second Cell", 197 | 'id': 2, 198 | 'when': now, 199 | }, 200 | ] 201 | 202 | # Write using write() 203 | mm_doc = mm.Document(my_data) 204 | mm_doc.write("tests/generated_files/test_file1.xls") 205 | 206 | # Write using writestr() in binary mode 207 | str = mm_doc.writestr() 208 | with open("tests/generated_files/test_file2.xls", "wb") as f: 209 | f.write(str) 210 | 211 | self.assertTrue(filecmp.cmp("tests/generated_files/test_file1.xls", 212 | "tests/generated_files/test_file2.xls")) 213 | 214 | if __name__ == "__main__": 215 | unittest.main() 216 | -------------------------------------------------------------------------------- /tests/customize_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import datetime 3 | import mm 4 | import os 5 | 6 | path = os.path.dirname(__file__) 7 | 8 | class CustomTestSuite(unittest.TestCase): 9 | 10 | def setUp(self): 11 | self.my_data = [ 12 | { 13 | 'msg': "My first Cell", 14 | 'id': 1, 15 | 'when': datetime.datetime.now(), 16 | }, 17 | { 18 | 'msg': "My second Cell", 19 | 'id': 2, 20 | 'when': datetime.datetime.now(), 21 | }, 22 | 23 | ] 24 | 25 | def test_no_header_row(self): 26 | 27 | config = { 28 | 'headers': False, 29 | 'freeze_row': 0 30 | } 31 | 32 | mm_doc = mm.Document(self.my_data, config_dict=config) 33 | str = mm_doc.writestr() 34 | self.assertTrue(len(str) > 10, 35 | msg="String should be longer than %s" % len(str)) 36 | with open("tests/generated_files/test_custom_no_header.xls", "wb") as f: 37 | f.write(str) 38 | 39 | 40 | 41 | def test_no_style(self): 42 | 43 | config = { 44 | 'header_style': '', 45 | 'row_styles': () 46 | } 47 | 48 | mm_doc = mm.Document(self.my_data, config_dict=config) 49 | str = mm_doc.writestr() 50 | self.assertTrue(len(str) > 10, 51 | msg="String should be longer than %s" % len(str)) 52 | with open("tests/generated_files/test_custom_no_styles.xls", "wb") as f: 53 | f.write(str) 54 | 55 | 56 | 57 | def test_row_style(self): 58 | 59 | config = { 60 | 'row_styles': 
( "font-family: Times-New-Roman;",) 61 | } 62 | 63 | mm_doc = mm.Document(self.my_data, config_dict=config) 64 | str = mm_doc.writestr() 65 | self.assertTrue(len(str) > 10, 66 | msg="String should be longer than %s" % len(str)) 67 | with open("tests/generated_files/test_custom_row_styles.xls", "wb") as f: 68 | f.write(str) 69 | 70 | 71 | 72 | if __name__ == "__main__": 73 | unittest.main() 74 | -------------------------------------------------------------------------------- /tests/django_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import mm 3 | import os 4 | path = os.path.dirname(__file__) 5 | no_django = True 6 | try: 7 | os.environ['PYTHONPATH'] = path +':'+path + "/dummy_django_project" 8 | os.environ['DJANGO_SETTINGS_MODULE'] = 'dummy_django_project.settings' 9 | import django 10 | from dummy_django_project.marmir_test.models import TestAllBaseTypes 11 | from mm.contrib.django.data_model import DjangoDataModel 12 | from mm.contrib.django.grid import DjangoGrid 13 | no_django = False 14 | except ImportError: 15 | print "could not import django" 16 | 17 | 18 | class DjangoTestSuite(unittest.TestCase): 19 | 20 | def setUp(self): 21 | os.environ['PYTHONPATH'] = path +':'+path + "/dummy_django_project" 22 | if not no_django: 23 | from django.core import management 24 | management.call_command('syncdb', interactive=False, verbosity=0) 25 | 26 | def tearDown(self): 27 | os.unlink('marmir_django_test.sql') 28 | 29 | 30 | @unittest.skipIf(no_django, 'Skipping django test (django package not installed)') 31 | def test_custom_django_serializer(self): 32 | django_query_set = TestAllBaseTypes.objects.all() 33 | mm_doc = mm.Document(django_query_set, data_model_class=DjangoDataModel, grid_class=DjangoGrid) 34 | str = mm_doc.writestr() 35 | self.assertTrue(len(str) > 10, 36 | msg="String should be longer than %s" % len(str)) 37 | with open("tests/generated_files/test_django_serializer.xls", "wb") as f: 38 | f.write(str) 39 | 40 | 41 | 42 | 43 | if __name__ == "__main__": 44 | unittest.main() 45 | -------------------------------------------------------------------------------- /tests/dummy_django_project/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/tests/dummy_django_project/__init__.py -------------------------------------------------------------------------------- /tests/dummy_django_project/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | import sys 4 | 5 | if __name__ == "__main__": 6 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings") 7 | 8 | from django.core.management import execute_from_command_line 9 | 10 | execute_from_command_line(sys.argv) 11 | -------------------------------------------------------------------------------- /tests/dummy_django_project/marmir_test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brianray/mm/65744d46a134f72444b5a5962fb81a91ace459bd/tests/dummy_django_project/marmir_test/__init__.py -------------------------------------------------------------------------------- /tests/dummy_django_project/marmir_test/models.py: -------------------------------------------------------------------------------- 1 | #from django-excel-templates/test/det_testproject/testapp/models.py 2 | from django.db 
import models 3 | 4 | class TestAllBaseTypes(models.Model): 5 | AutoField = models.AutoField(primary_key=True) # int 6 | BooleanField = models.BooleanField() # bool 7 | CharField = models.CharField(max_length=50) # unicode 8 | CommaSeparatedIntegerField = models.CommaSeparatedIntegerField(max_length=25) # unicode 9 | DateField = models.DateField(auto_now_add=True) # datetime.date 10 | DateTimeField = models.DateTimeField(auto_now_add=True) # datetime.datetime 11 | DecimalField = models.DecimalField(max_digits=10,decimal_places=2) # decimal.Decimal 12 | EmailField = models.EmailField(max_length=75) # unicode 13 | #FileField = models.FileField() 14 | #FilePathField = models.FilePathField() 15 | FloatField = models.FloatField() # float 16 | #ImageField = models.ImageField() 17 | IntegerField = models.IntegerField() # int 18 | IPAddressField = models.IPAddressField() # unicode 19 | NullBooleanField = models.NullBooleanField() # bool 20 | PositiveIntegerField = models.PositiveIntegerField() # int 21 | PositiveSmallIntegerField = models.PositiveSmallIntegerField() # int 22 | SlugField = models.SlugField(max_length=30) # unicode 23 | SmallIntegerField = models.SmallIntegerField() # int 24 | BigIntegerField = models.BigIntegerField() # int 25 | TextField = models.TextField() # unicode 26 | TimeField = models.TimeField(auto_now_add=True) # datetime.time 27 | URLField = models.URLField(max_length=100) # unicode 28 | #XMLField = models.XMLField() 29 | 30 | # https://github.com/brianray/mm/issues/11 31 | null_char = models.CharField(max_length=10, null=True, blank=True) 32 | 33 | def __unicode__(self): 34 | return u"%s" % (self.CharField) 35 | 36 | 37 | -------------------------------------------------------------------------------- /tests/dummy_django_project/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | # Django settings for dummy_django_project project 3 | this_path = os.path.realpath(__file__) 4 | 5 | DEBUG = True 6 | TEMPLATE_DEBUG = DEBUG 7 | 8 | ADMINS = ( 9 | # ('Your Name', 'your_email@example.com'), 10 | ) 11 | 12 | MANAGERS = ADMINS 13 | 14 | DATABASES = { 15 | 'default': { 16 | 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 17 | 'NAME': 'marmir_django_test.sql', # Or path to database file if using sqlite3. 18 | 'USER': '', # Not used with sqlite3. 19 | 'PASSWORD': '', # Not used with sqlite3. 20 | 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 21 | 'PORT': '', # Set to empty string for default. Not used with sqlite3. 22 | } 23 | } 24 | 25 | # Local time zone for this installation. Choices can be found here: 26 | # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name 27 | # although not all choices may be available on all operating systems. 28 | # In a Windows environment this must be set to your system time zone. 29 | TIME_ZONE = 'America/Chicago' 30 | 31 | # Language code for this installation. All choices can be found here: 32 | # http://www.i18nguy.com/unicode/language-identifiers.html 33 | LANGUAGE_CODE = 'en-us' 34 | 35 | SITE_ID = 1 36 | 37 | # If you set this to False, Django will make some optimizations so as not 38 | # to load the internationalization machinery. 39 | USE_I18N = True 40 | 41 | # If you set this to False, Django will not format dates, numbers and 42 | # calendars according to the current locale. 43 | USE_L10N = True 44 | 45 | # If you set this to False, Django will not use timezone-aware datetimes. 
46 | USE_TZ = True 47 | 48 | # Absolute filesystem path to the directory that will hold user-uploaded files. 49 | # Example: "/home/media/media.lawrence.com/media/" 50 | MEDIA_ROOT = '' 51 | 52 | # URL that handles the media served from MEDIA_ROOT. Make sure to use a 53 | # trailing slash. 54 | # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" 55 | MEDIA_URL = '' 56 | 57 | # Absolute path to the directory static files should be collected to. 58 | # Don't put anything in this directory yourself; store your static files 59 | # in apps' "static/" subdirectories and in STATICFILES_DIRS. 60 | # Example: "/home/media/media.lawrence.com/static/" 61 | STATIC_ROOT = '' 62 | 63 | # URL prefix for static files. 64 | # Example: "http://media.lawrence.com/static/" 65 | STATIC_URL = '/static/' 66 | 67 | # Additional locations of static files 68 | STATICFILES_DIRS = ( 69 | # Put strings here, like "/home/html/static" or "C:/www/django/static". 70 | # Always use forward slashes, even on Windows. 71 | # Don't forget to use absolute paths, not relative paths. 72 | ) 73 | 74 | # List of finder classes that know how to find static files in 75 | # various locations. 76 | STATICFILES_FINDERS = ( 77 | 'django.contrib.staticfiles.finders.FileSystemFinder', 78 | 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 79 | # 'django.contrib.staticfiles.finders.DefaultStorageFinder', 80 | ) 81 | 82 | # Make this unique, and don't share it with anybody. 83 | SECRET_KEY = 'tcvj45_-*zqq5y+4#utdr34#ic542o57)w9h29=#7bfp&h3^xq' 84 | 85 | # List of callables that know how to import templates from various sources. 86 | TEMPLATE_LOADERS = ( 87 | 'django.template.loaders.filesystem.Loader', 88 | 'django.template.loaders.app_directories.Loader', 89 | # 'django.template.loaders.eggs.Loader', 90 | ) 91 | 92 | MIDDLEWARE_CLASSES = ( 93 | 'django.middleware.common.CommonMiddleware', 94 | 'django.contrib.sessions.middleware.SessionMiddleware', 95 | 'django.middleware.csrf.CsrfViewMiddleware', 96 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 97 | 'django.contrib.messages.middleware.MessageMiddleware', 98 | # Uncomment the next line for simple clickjacking protection: 99 | # 'django.middleware.clickjacking.XFrameOptionsMiddleware', 100 | ) 101 | 102 | ROOT_URLCONF = 'dummy_django_project.urls' 103 | 104 | # Python dotted path to the WSGI application used by Django's runserver. 105 | WSGI_APPLICATION = 'dummy_django_project.wsgi.application' 106 | 107 | TEMPLATE_DIRS = ( 108 | # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". 109 | # Always use forward slashes, even on Windows. 110 | # Don't forget to use absolute paths, not relative paths. 111 | ) 112 | 113 | INSTALLED_APPS = ( 114 | 'django.contrib.auth', 115 | 'django.contrib.contenttypes', 116 | 'django.contrib.sessions', 117 | 'django.contrib.sites', 118 | 'django.contrib.messages', 119 | 'django.contrib.staticfiles', 120 | # Uncomment the next line to enable the admin: 121 | 'django.contrib.admin', 122 | # Uncomment the next line to enable admin documentation: 123 | # 'django.contrib.admindocs', 124 | 'dummy_django_project.marmir_test' 125 | ) 126 | 127 | # A sample logging configuration. The only tangible logging 128 | # performed by this configuration is to send an email to 129 | # the site admins on every HTTP 500 error when DEBUG=False. 130 | # See http://docs.djangoproject.com/en/dev/topics/logging for 131 | # more details on how to customize your logging configuration. 
132 | LOGGING = { 133 | 'version': 1, 134 | 'disable_existing_loggers': False, 135 | 'filters': { 136 | 'require_debug_false': { 137 | '()': 'django.utils.log.RequireDebugFalse' 138 | } 139 | }, 140 | 'handlers': { 141 | 'mail_admins': { 142 | 'level': 'ERROR', 143 | 'filters': ['require_debug_false'], 144 | 'class': 'django.utils.log.AdminEmailHandler' 145 | } 146 | }, 147 | 'loggers': { 148 | 'django.request': { 149 | 'handlers': ['mail_admins'], 150 | 'level': 'ERROR', 151 | 'propagate': True, 152 | }, 153 | } 154 | } 155 | 156 | import warnings 157 | warnings.filterwarnings( 158 | 'ignore', r"DateTimeField received a naive datetime", 159 | RuntimeWarning, r'django\.db\.models\.fields') 160 | 161 | -------------------------------------------------------------------------------- /tests/dup_type_fix_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import datetime 3 | import mm 4 | import os 5 | from xlrd_helper import XLSReader 6 | 7 | path = os.path.dirname(__file__) 8 | now = datetime.datetime.now().replace(microsecond=0) 9 | class TestBasicSuite(unittest.TestCase): 10 | 11 | def test_minimal(self): 12 | 13 | my_data = [ 14 | { 15 | 'id dup': 2, 16 | 'id': 1, 17 | }, 18 | { 19 | 'id dup': 4, 20 | 'id': 3, 21 | }, 22 | 23 | ] 24 | mm_doc = mm.Document(my_data) 25 | str = mm_doc.writestr() 26 | self.assertTrue(len(str) > 10, 27 | msg="String should be longer than %s" % len(str)) 28 | with open("tests/generated_files/test_dup.xls", "wb") as f: 29 | f.write(str) 30 | 31 | self.check("tests/generated_files/test_dup.xls", my_data) 32 | 33 | def check(self, filename, my_data): 34 | xls = XLSReader(filename) 35 | row = 0 36 | headers = [] 37 | for ddict in my_data: 38 | col = 0 39 | for header, value in ddict.items(): 40 | cell_type = xls.get_type(row, col) 41 | cell_value = xls.get_value(row, col) 42 | if row == 0: 43 | #headers 44 | if cell_value in headers: 45 | raise Exception("duplicate header %s" % cell_value) 46 | headers.append(cell_value) 47 | else: 48 | column_header = headers[col] 49 | data = my_data[row-1] 50 | self.assertEquals(cell_value, data[column_header]) 51 | self.assertEquals(cell_type, type(data[column_header])) 52 | 53 | col += 1 54 | row += 1 55 | 56 | 57 | 58 | 59 | if __name__ == "__main__": 60 | unittest.main() 61 | -------------------------------------------------------------------------------- /tests/formula_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import datetime 4 | import unittest 5 | import mm 6 | 7 | now = datetime.datetime.now().replace(microsecond=0) 8 | 9 | class TestFormula(unittest.TestCase): 10 | def test_simple_formula(self): 11 | my_data = [ 12 | { 13 | 'msg': "My first Row", 14 | 'id': 1, 15 | 'when': now, 16 | }, 17 | { 18 | 'msg': "My second Row", 19 | 'id': 2, 20 | 'when': now, 21 | }, 22 | { 23 | 'msg': "The total", 24 | 'id': mm.Formula("SUM(C2:C3)"), 25 | 'when': now, 26 | }, 27 | ] 28 | 29 | mm_doc = mm.Document(my_data) 30 | mm_doc.write("example_formula.xls") 31 | 32 | if __name__ == "__main__": 33 | unittest.main() 34 | -------------------------------------------------------------------------------- /tests/gdata_tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import getpass 3 | import datetime 4 | import mm 5 | import os 6 | from xlrd_helper import XLSReader 7 | no_gdata = True 8 | try: 9 | import gdata 10 | no_gdata = False 11 | 
12 |     print "no gdata"
13 | 
14 | 
15 | path = os.path.dirname(__file__)
16 | now = datetime.datetime.now().replace(microsecond=0)
17 | class TestGdataSuite(unittest.TestCase):
18 | 
19 | 
20 |     @unittest.skipIf(no_gdata, 'Skipping gdata test (gdata package not installed)')
21 |     def test_minimal(self):
22 | 
23 |         my_data = [
24 |             {
25 |                 'msg': "My first Cell",
26 |                 'id': 1,
27 |                 'when': now,
28 |             },
29 |             {
30 |                 'msg': "My second Cell",
31 |                 'id': 2,
32 |                 'when': now,
33 |             },
34 | 
35 |         ]
36 |         mm_doc = mm.Document(my_data)
37 | 
38 |         # TODO: store this in a config
39 |         username = raw_input("Google Username: ")
40 |         password = getpass.getpass("Google password: ")
41 | 
42 |         str = mm_doc.write_gdata(
43 |             "Test MarMir File",
44 |             username,
45 |             password)
46 | 
47 |         # TODO: check if file actually got there or not
48 | 
49 | 
50 | 
51 | 
52 | if __name__ == "__main__":
53 |     unittest.main()
54 | 
--------------------------------------------------------------------------------
/tests/generated_files/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
--------------------------------------------------------------------------------
/tests/model_base_tests.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import mm.model_base as model_base
3 | 
4 | class ModelBaseSuite(unittest.TestCase):
5 | 
6 | 
7 |     def test_get_members_list(self):
8 |         members = model_base.get_members_list()
9 |         self.assertIn(model_base.IntFieldType, members)
10 | 
11 | 
12 | if __name__ == "__main__":
13 |     unittest.main()
14 | 
--------------------------------------------------------------------------------
/tests/more_data_tests.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import datetime
3 | import mm
4 | import os
5 | from decimal import Decimal
6 | 
7 | from xlrd_helper import XLSReader
8 | 
9 | path = os.path.dirname(__file__)
10 | now = datetime.datetime.now().replace(microsecond=0)
11 | 
12 | 
13 | class TestMoreDataSuite(unittest.TestCase):
14 | 
15 |     def test_minimal(self):
16 | 
17 |         my_data = [
18 |             {
19 |                 'none type': None,
20 |                 'Bool True': True,
21 |                 'Bool False': False,
22 |                 'Decimal': Decimal("3.14"),
23 |             },
24 | 
25 |         ]
26 |         mm_doc = mm.Document(my_data)
27 |         str = mm_doc.writestr()
28 |         self.assertTrue(
29 |             len(str) > 10,
30 |             msg="String should be longer than %s" % len(str))
31 |         with open("tests/generated_files/test_multi_data.xls", "wb") as f:
32 |             f.write(str)
33 | 
34 |         self.check("tests/generated_files/test_multi_data.xls", my_data)
35 | 
36 |     def check(self, filename, my_data):
37 |         xls = XLSReader(filename)
38 |         row = 0
39 |         headers = []
40 |         for ddict in my_data:
41 |             col = 0
42 |             for header, value in ddict.items():
43 |                 cell_type = xls.get_type(row, col)
44 |                 cell_value = xls.get_value(row, col)
45 |                 if row == 0:
46 |                     #headers
47 |                     headers.append(cell_value)
48 |                 else:
49 |                     column_header = headers[col]
50 |                     data = my_data[row - 1]
51 |                     self.assertEquals(cell_value, data[column_header])
52 |                     self.assertEquals(cell_type, type(data[column_header]))
53 | 
54 |                 col += 1
55 |             row += 1
56 | 
57 | if __name__ == "__main__":
58 |     unittest.main()
59 | 
--------------------------------------------------------------------------------
/tests/multi_sheets.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import datetime
3 | import mm
4 | import os
5 | from xlrd_helper import XLSReader
6 | 
7 | path = os.path.dirname(__file__)
8 | now = datetime.datetime.now().replace(microsecond=0)
9 | 
10 | 
11 | class TestMultiSuite(unittest.TestCase):
12 | 
13 |     def test_minimal(self):
14 | 
15 |         my_data = [
16 |             {
17 |                 'msg': "My first Cell",
18 |                 'id': 1,
19 |                 'when': now,
20 |             },
21 |             {
22 |                 'msg': "My second Cell",
23 |                 'id': 2,
24 |                 'when': now,
25 |             },
26 | 
27 |         ]
28 |         mm_doc = mm.Document(my_data)
29 |         mm_doc.set_name("My Sheet 1")
30 |         mm_child = mm.Document(my_data)
31 |         mm_child.set_name("My Sheet 2")
32 |         mm_doc.add_child(mm_child)
33 |         str = mm_doc.writestr()
34 |         self.assertTrue(
35 |             len(str) > 10,
36 |             msg="String should be longer than %s" % len(str))
37 |         with open("tests/generated_files/test_multi.xls", "wb") as f:
38 |             f.write(str)
39 | 
40 |         self.check("tests/generated_files/test_multi.xls", my_data)
41 | 
42 |     def check(self, filename, my_data):
43 |         xls = XLSReader(filename)
44 |         for worksheet_id in (0, 1):
45 |             row = 0
46 |             headers = []
47 |             for ddict in my_data:
48 |                 col = 0
49 |                 for header, value in ddict.items():
50 |                     cell_type = xls.get_type(row, col, worksheet_idx=worksheet_id)
51 |                     cell_value = xls.get_value(row, col, worksheet_idx=worksheet_id)
52 |                     if row == 0:
53 |                         #headers
54 |                         headers.append(cell_value)
55 |                     else:
56 |                         column_header = headers[col]
57 |                         data = my_data[row - 1]
58 |                         self.assertEquals(cell_value, data[column_header])
59 |                         self.assertEquals(cell_type, type(data[column_header]))
60 | 
61 |                     col += 1
62 |                 row += 1
63 | 
64 | if __name__ == "__main__":
65 |     unittest.main()
66 | 
--------------------------------------------------------------------------------
/tests/pretty_table_tests.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import datetime
3 | import mm
4 | no_pretty_table = True
5 | try:
6 |     import prettytable  # NOQA
7 |     from mm.contrib.prettytable.composers import ComposerPrettyTable
8 | 
9 |     no_pretty_table = False
10 | except ImportError:
11 |     print "could not import pretty_table"
12 | 
13 | now = datetime.datetime.now().replace(microsecond=0)
14 | 
15 | 
16 | class PrettyTableTestSuite(unittest.TestCase):
17 | 
18 |     def setUp(self):
19 |         pass
20 | 
21 |     def tearDown(self):
22 |         pass
23 | 
24 |     @unittest.skipIf(no_pretty_table, 'Skipping PrettyTable test (prettytable package not installed)')
25 |     def test_custom_pretty_table_serializer(self):
26 | 
27 |         my_data = [
28 |             {
29 |                 'msg': "My first Cell",
30 |                 'id': 1,
31 |                 'when': now,
32 |             },
33 |             {
34 |                 'msg': "My second Cell",
35 |                 'id': 2,
36 |                 'when': now,
37 |             },
38 | 
39 |         ]
40 |         mm_doc = mm.Document(my_data)  # data_model_class=PrettyTableModel)
41 |         mm_doc.set_composer_class(ComposerPrettyTable)
42 |         out = '''
43 | +----------------+---------------------+----+
44 | |      msg       |         when        | id |
45 | +----------------+---------------------+----+
46 | | My first Cell  | %(now)s | 1  |
47 | | My second Cell | %(now)s | 2  |
48 | +----------------+---------------------+----+''' % dict(now=now)
49 |         self.assertEquals(str(mm_doc.writestr()), out.strip())
50 | 
51 | 
52 | if __name__ == "__main__":
53 |     unittest.main()
54 | 
--------------------------------------------------------------------------------
/tests/psycopg2_tests.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import mm
3 | import os
4 | from xlrd_helper import XLSReader
5 | path = os.path.dirname(__file__)
6 | no_psycopg = True
7 | try:
8 |     import psycopg2
9 |     import psycopg2.extras
10 |     no_psycopg = False
11 | except ImportError:
12 |     print "could not import psycopg2"
13 | 
14 | 
15 | class PsycopgTestSuite(unittest.TestCase):
16 | 
17 |     def setUp(self):
18 |         if no_psycopg:
19 |             return
20 | 
21 |         self.conn = psycopg2.connect("dbname='template1' user='testuser' host='localhost' password='password'")
22 |         dict_cur = self.conn.cursor()
23 |         dict_cur.execute("CREATE TABLE marmir_test (num INT, data CHAR(12))")
24 |         dict_cur.close()
25 | 
26 |     def tearDown(self):
27 |         if no_psycopg:
28 |             return
29 |         dict_cur = self.conn.cursor()
30 |         dict_cur.execute("DROP TABLE marmir_test;")
31 |         dict_cur.close()
32 | 
33 |     def test_psycopg(self):
34 | 
35 |         if no_psycopg:
36 |             print 'Skipping psycopg2 test (psycopg2 package not installed)'
37 |             return
38 | 
39 |         dict_cur = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
40 |         dict_cur.execute("INSERT INTO marmir_test (num, data) VALUES(%s, %s)",
41 |                          (100, "abc'def"))
42 |         dict_cur.execute("INSERT INTO marmir_test (num, data) VALUES(%s, %s)",
43 |                          (200, "mmmmaaarrrr"))
44 |         dict_cur.execute("SELECT * FROM marmir_test")
45 |         my_data = dict_cur.fetchall()
46 |         dict_cur.close()
47 | 
48 |         mm_doc = mm.Document(my_data)
49 |         str = mm_doc.writestr()
50 |         self.assertTrue(
51 |             len(str) > 10,
52 |             msg="String should be longer than %s" % len(str))
53 |         with open("tests/generated_files/test_psycopg.xls", "wb") as f:
54 |             f.write(str)
55 | 
56 |         self.check("tests/generated_files/test_psycopg.xls", my_data)
57 | 
58 |     def check(self, filename, my_data):
59 |         xls = XLSReader(filename)
60 |         row = 0
61 |         headers = []
62 |         for ddict in my_data:
63 |             col = 0
64 |             for header, value in ddict.items():
65 |                 cell_type = xls.get_type(row, col)
66 |                 cell_value = xls.get_value(row, col)
67 |                 if row == 0:
68 |                     #headers
69 |                     headers.append(cell_value)
70 |                 else:
71 |                     column_header = headers[col]
72 |                     data = my_data[row - 1]
73 |                     self.assertEquals(cell_value, data[column_header])
74 |                     self.assertEquals(cell_type, type(data[column_header]))
75 | 
76 |                 col += 1
77 |             row += 1
78 | 
79 | 
80 | if __name__ == "__main__":
81 |     unittest.main()
82 | 
--------------------------------------------------------------------------------
/tests/xlrd_helper.py:
--------------------------------------------------------------------------------
1 | import xlrd
2 | from datetime import datetime
3 | 
4 | 
5 | class XLSReader:
6 | 
7 |     def __init__(self, file):
8 |         workbook = xlrd.open_workbook(file)
9 |         self.book = workbook
10 |         self.worksheets = [workbook.sheet_by_name(x) for x in workbook.sheet_names()]
11 | 
12 |         # Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank
13 |         self.cell_types = {
14 |             '1': str,
15 |             '2': int,
16 |             '3': datetime
17 |         }
18 | 
19 |     def get_type(self, row, colm, worksheet_idx=0):
20 |         worksheet = self.worksheets[worksheet_idx]
21 |         return self.cell_types[str(worksheet.cell_type(row, colm))]
22 | 
23 |     def get_value(self, row, colm, worksheet_idx=0):
24 |         worksheet = self.worksheets[worksheet_idx]
25 | 
26 |         val = worksheet.cell_value(row, colm)
27 |         cell_type = self.get_type(row, colm)
28 |         if cell_type == datetime:
29 |             return datetime(*xlrd.xldate_as_tuple(val, self.book.datemode))
30 |         return val
31 | 
--------------------------------------------------------------------------------
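
The test modules above all exercise the same small round trip: build an mm.Document from a list of dicts, write it out, then read the result back with the XLSReader helper. A minimal sketch of that round trip, not taken from any file in the repository, assuming mm is installed, xlrd_helper.py is on the path, and tests/generated_files/ is writable (the output filename here is made up for illustration):

    import mm
    from xlrd_helper import XLSReader

    my_data = [
        {'msg': "My first Cell", 'id': 1},
        {'msg': "My second Cell", 'id': 2},
    ]

    # Build and write the document, as the test cases above do.
    doc = mm.Document(my_data)
    doc.set_name("My Sheet 1")
    doc.write("tests/generated_files/example_roundtrip.xls")

    # Read it back with the xlrd-based helper and spot-check a cell.
    # The tests compare columns in dict iteration order, so under Python 2
    # the header in column 0 may be either 'msg' or 'id'.
    reader = XLSReader("tests/generated_files/example_roundtrip.xls")
    print reader.get_value(0, 0)  # a header title
    print reader.get_value(1, 0)  # the matching value from the first data row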