├── .coveragerc ├── .github └── workflows │ └── testing.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.rst ├── dev_requirements.txt ├── setup.py ├── tests ├── __init__.py ├── test_binary_vdf.py ├── test_vdf.py └── test_vdf_dict.py ├── vdf ├── __init__.py └── vdict.py └── vdf2json ├── Makefile ├── README.rst ├── setup.py └── vdf2json ├── __init__.py └── cli.py /.coveragerc: -------------------------------------------------------------------------------- 1 | 2 | # Docs: https://coverage.readthedocs.org/en/latest/config.html 3 | 4 | [run] 5 | branch = False 6 | 7 | # If True, stores relative file paths in data file (needed for Github Actions). 8 | # Using this parameter requires coverage>=5.0 9 | relative_files = True 10 | -------------------------------------------------------------------------------- /.github/workflows/testing.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | paths-ignore: 7 | - '.gitignore' 8 | - '*.md' 9 | - '*.rst' 10 | - 'LICENSE' 11 | - 'requirements.txt' 12 | - 'vdf2json/**' 13 | pull_request: 14 | branches: [ master ] 15 | paths-ignore: 16 | - '.gitignore' 17 | - '*.md' 18 | - '*.rst' 19 | - 'LICENSE' 20 | - 'requirements.txt' 21 | - 'vdf2json/**' 22 | 23 | jobs: 24 | test: 25 | runs-on: ${{ matrix.os }} 26 | strategy: 27 | matrix: 28 | os: [ubuntu-latest, macos-latest, windows-latest] 29 | python-version: [2.7, 3.5, 3.6, 3.7, 3.8, 3.9, '3.10'] 30 | no-coverage: [0] 31 | include: 32 | - os: ubuntu-latest 33 | python-version: pypy-2.7 34 | no-coverage: 1 35 | - os: ubuntu-latest 36 | python-version: pypy-3.6 37 | no-coverage: 1 38 | steps: 39 | - uses: actions/checkout@v2 40 | - name: Set up Python Env 41 | uses: actions/setup-python@v2 42 | with: 43 | python-version: ${{ matrix.python-version }} 44 | - name: Display Python version 45 | run: python -c "import sys; print(sys.version)" 46 | - name: Install dependencies 47 | run: | 48 | make init 49 | - name: Run Tests 50 | env: 51 | NOCOV: ${{ matrix.no-coverage }} 52 | run: | 53 | make test 54 | - name: Upload to Coveralls 55 | # pypy + concurrenct=gevent not supported in coveragepy. 
See https://github.com/nedbat/coveragepy/issues/560 56 | if: matrix.no-coverage == 0 57 | env: 58 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 59 | COVERALLS_PARALLEL: true 60 | COVERALLS_FLAG_NAME: "${{ matrix.os }}_${{ matrix.python-version }}" 61 | run: | 62 | coveralls --service=github 63 | 64 | coveralls: 65 | name: Finish Coveralls 66 | needs: test 67 | runs-on: ubuntu-latest 68 | container: python:3-slim 69 | steps: 70 | - name: Install coveralls 71 | run: | 72 | pip3 install --upgrade coveralls 73 | - name: Send coverage finish to coveralls.io 74 | env: 75 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 76 | run: | 77 | coveralls --finish 78 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dist 2 | *.egg-info 3 | *.pyc 4 | .coverage 5 | *.swp 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 Rossen Georgiev 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 7 | of the Software, and to permit persons to whom the Software is furnished to do 8 | so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for VDF module 2 | 3 | define HELPBODY 4 | Available commands: 5 | 6 | make help - this thing. 
7 | make init - install python dependencies 8 | make test - run tests and coverage 9 | make pylint - code analysis 10 | make build - pylint + test 11 | 12 | endef 13 | 14 | export HELPBODY 15 | help: 16 | @echo "$$HELPBODY" 17 | 18 | init: 19 | pip install -r dev_requirements.txt 20 | 21 | COVOPTS = --cov-config .coveragerc --cov=vdf 22 | 23 | ifeq ($(NOCOV), 1) 24 | COVOPTS = 25 | endif 26 | 27 | test: 28 | rm -f .coverage vdf/*.pyc tests/*.pyc 29 | PYTHONHASHSEED=0 pytest --tb=short $(COVOPTS) tests 30 | 31 | pylint: 32 | pylint -r n -f colorized vdf || true 33 | 34 | build: pylint test 35 | 36 | clean: 37 | rm -rf dist vdf.egg-info vdf/*.pyc 38 | 39 | dist: clean 40 | python setup.py sdist 41 | python setup.py bdist_wheel --universal 42 | 43 | register: 44 | python setup.py register -r pypi 45 | 46 | upload: dist register 47 | twine upload -r pypi dist/* 48 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | | |pypi| |license| |coverage| |master_build| 2 | | |sonar_maintainability| |sonar_reliability| |sonar_security| 3 | 4 | Pure python module for (de)serialization to and from VDF that works just like ``json``. 5 | 6 | Tested and works on ``py2.7``, ``py3.3+``, ``pypy`` and ``pypy3``. 7 | 8 | VDF is Valve's KeyValue text file format 9 | 10 | https://developer.valvesoftware.com/wiki/KeyValues 11 | 12 | | Supported versions: ``kv1`` 13 | | Unsupported: ``kv2`` and ``kv3`` 14 | 15 | Install 16 | ------- 17 | 18 | You can grab the latest release from https://pypi.org/project/vdf/ or via ``pip`` 19 | 20 | .. code:: bash 21 | 22 | pip install vdf 23 | 24 | Install the current dev version from ``github`` 25 | 26 | .. code:: bash 27 | 28 | pip install git+https://github.com/ValvePython/vdf 29 | 30 | 31 | Problems & solutions 32 | -------------------- 33 | 34 | - There are known files that contain duplicate keys. This is supported by the format and 35 | makes mapping to ``dict`` impossible. For this case the module provides ``vdf.VDFDict`` 36 | that can be used as mapper instead of ``dict``. See the example section for details. 37 | 38 | - By default de-serialization will return a ``dict``, which doesn't preserve nor guarantee 39 | key order on Python versions prior to 3.6, due to `hash randomization`_. If key order is 40 | important on old Pythons, I suggest using ``collections.OrderedDict``, or ``vdf.VDFDict``. 41 | 42 | Example usage 43 | ------------- 44 | 45 | For text representation 46 | 47 | .. code:: python 48 | 49 | import vdf 50 | 51 | # parsing vdf from file or string 52 | d = vdf.load(open('file.txt')) 53 | d = vdf.loads(vdf_text) 54 | d = vdf.parse(open('file.txt')) 55 | # note: vdf.parse() expects a file-like object; use vdf.loads() for plain strings 56 | 57 | # dumping dict as vdf to string 58 | vdf_text = vdf.dumps(d) 59 | indented_vdf = vdf.dumps(d, pretty=True) 60 | 61 | # dumping dict as vdf to file 62 | vdf.dump(d, open('file2.txt','w'), pretty=True) 63 | 64 | 65 | For binary representation 66 | 67 | .. code:: python 68 | 69 | d = vdf.binary_loads(vdf_bytes) 70 | b = vdf.binary_dumps(d) 71 | 72 | # alternative format - VBKV 73 | 74 | d = vdf.binary_loads(vdf_bytes, alt_format=True) 75 | b = vdf.binary_dumps(d, alt_format=True) 76 | 77 | # VBKV with header and CRC checking 78 | 79 | d = vdf.vbkv_loads(vbkv_bytes) 80 | b = vdf.vbkv_dumps(d) 81 | 82 | Using an alternative mapper 83 | 84 | ..
code:: python 85 | 86 | d = vdf.loads(vdf_string, mapper=collections.OrderedDict) 87 | d = vdf.loads(vdf_string, mapper=vdf.VDFDict) 88 | 89 | ``VDFDict`` works much like the regular ``dict``, except it handles duplicates and remembers 90 | insert order. Additionally, keys can only be of type ``str``. The most important difference 91 | is that assigning to a key that already exists creates a duplicate entry instead 92 | of reassigning the value of the existing key. 93 | 94 | .. code:: python 95 | 96 | >>> d = vdf.VDFDict() 97 | >>> d['key'] = 111 98 | >>> d['key'] = 222 99 | >>> d 100 | VDFDict([('key', 111), ('key', 222)]) 101 | >>> d.items() 102 | [('key', 111), ('key', 222)] 103 | >>> d['key'] 104 | 111 105 | >>> d[(0, 'key')] # get the first duplicate 106 | 111 107 | >>> d[(1, 'key')] # get the second duplicate 108 | 222 109 | >>> d.get_all_for('key') 110 | [111, 222] 111 | 112 | >>> d[(1, 'key')] = 123 # reassign specific duplicate 113 | >>> d.get_all_for('key') 114 | [111, 123] 115 | 116 | >>> d['key'] = 333 117 | >>> d.get_all_for('key') 118 | [111, 123, 333] 119 | >>> del d[(1, 'key')] 120 | >>> d.get_all_for('key') 121 | [111, 333] 122 | >>> d[(1, 'key')] 123 | 333 124 | 125 | >>> print(vdf.dumps(d)) 126 | "key" "111" 127 | "key" "333" 128 | 129 | >>> d.has_duplicates() 130 | True 131 | >>> d.remove_all_for('key') 132 | >>> len(d) 133 | 0 134 | >>> d.has_duplicates() 135 | False 136 | 137 | 138 | .. |pypi| image:: https://img.shields.io/pypi/v/vdf.svg?style=flat&label=latest%20version 139 | :target: https://pypi.org/project/vdf/ 140 | :alt: Latest version released on PyPi 141 | 142 | .. |license| image:: https://img.shields.io/pypi/l/vdf.svg?style=flat&label=license 143 | :target: https://pypi.org/project/vdf/ 144 | :alt: MIT License 145 | 146 | .. |coverage| image:: https://img.shields.io/coveralls/ValvePython/vdf/master.svg?style=flat 147 | :target: https://coveralls.io/r/ValvePython/vdf?branch=master 148 | :alt: Test coverage 149 | 150 | .. |sonar_maintainability| image:: https://sonarcloud.io/api/project_badges/measure?project=ValvePython_vdf&metric=sqale_rating 151 | :target: https://sonarcloud.io/dashboard?id=ValvePython_vdf 152 | :alt: SonarCloud Rating 153 | 154 | .. |sonar_reliability| image:: https://sonarcloud.io/api/project_badges/measure?project=ValvePython_vdf&metric=reliability_rating 155 | :target: https://sonarcloud.io/dashboard?id=ValvePython_vdf 156 | :alt: SonarCloud Rating 157 | 158 | .. |sonar_security| image:: https://sonarcloud.io/api/project_badges/measure?project=ValvePython_vdf&metric=security_rating 159 | :target: https://sonarcloud.io/dashboard?id=ValvePython_vdf 160 | :alt: SonarCloud Rating 161 | 162 | .. |master_build| image:: https://github.com/ValvePython/vdf/workflows/Tests/badge.svg?branch=master 163 | :target: https://github.com/ValvePython/vdf/actions?query=workflow%3A%22Tests%22+branch%3Amaster 164 | :alt: Build status of master branch 165 | 166 | .. _DuplicateOrderedDict: https://github.com/rossengeorgiev/dota2_notebooks/blob/master/DuplicateOrderedDict_for_VDF.ipynb 167 | 168 | ..
_hash randomization: https://docs.python.org/2/using/cmdline.html#envvar-PYTHONHASHSEED 169 | -------------------------------------------------------------------------------- /dev_requirements.txt: -------------------------------------------------------------------------------- 1 | mock; python_version < '3.3' 2 | 3 | coverage>=5.0; python_version == '2.7' or python_version >= '3.5' 4 | pytest-cov>=2.7.0; python_version == '2.7' or python_version >= '3.5' 5 | 6 | # coveralls 2.0 has removed support for Python 2.7 and 3.4 7 | git+https://github.com/andy-maier/coveralls-python.git@andy/add-py27#egg=coveralls; python_version == '2.7' 8 | coveralls>=2.1.2; python_version >= '3.5' 9 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | from codecs import open 5 | from os import path 6 | import vdf 7 | 8 | here = path.abspath(path.dirname(__file__)) 9 | with open(path.join(here, 'README.rst'), encoding='utf-8') as f: 10 | long_description = f.read() 11 | 12 | setup( 13 | name='vdf', 14 | version=vdf.__version__, 15 | description='Library for working with Valve\'s VDF text format', 16 | long_description=long_description, 17 | url='https://github.com/ValvePython/vdf', 18 | author='Rossen Georgiev', 19 | author_email='rossen@rgp.io', 20 | license='MIT', 21 | classifiers=[ 22 | 'Development Status :: 5 - Production/Stable', 23 | 'Intended Audience :: Developers', 24 | 'License :: OSI Approved :: MIT License', 25 | 'Topic :: Software Development :: Libraries :: Python Modules', 26 | 'Natural Language :: English', 27 | 'Operating System :: OS Independent', 28 | 'Programming Language :: Python :: 2.7', 29 | 'Programming Language :: Python :: 3.4', 30 | 'Programming Language :: Python :: 3.5', 31 | 'Programming Language :: Python :: 3.6', 32 | 'Programming Language :: Python :: 3.7', 33 | 'Programming Language :: Python :: 3.8', 34 | 'Programming Language :: Python :: 3.9', 35 | 'Programming Language :: Python :: Implementation :: PyPy', 36 | ], 37 | keywords='valve keyvalue vdf tf2 dota2 csgo', 38 | packages=['vdf'], 39 | zip_safe=True, 40 | ) 41 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ValvePython/vdf/d76292623e326fb165fe3bdb684832cdf30959d4/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_binary_vdf.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | 4 | import vdf 5 | from io import BytesIO 6 | from collections import OrderedDict 7 | 8 | u = str if sys.version_info >= (3,) else unicode 9 | 10 | 11 | class BinaryVDF(unittest.TestCase): 12 | def test_BASE_INT(self): 13 | repr(vdf.BASE_INT()) 14 | 15 | def test_simple(self): 16 | pairs = [ 17 | ('a', 'test'), 18 | ('a2', b'\xd0\xb0\xd0\xb1\xd0\xb2\xd0\xb3'.decode('utf-8')), 19 | ('bb', 1), 20 | ('bb2', -500), 21 | ('ccc', 1.0), 22 | ('dddd', vdf.POINTER(1234)), 23 | ('fffff', vdf.COLOR(1234)), 24 | ('gggggg', vdf.UINT_64(1234)), 25 | ('hhhhhhh', vdf.INT_64(-1234)), 26 | ] 27 | 28 | data = OrderedDict(pairs) 29 | data['level1-1'] = OrderedDict(pairs) 30 | data['level1-1']['level2-1'] = OrderedDict(pairs) 31 | data['level1-1']['level2-2'] = OrderedDict(pairs) 32 | 
data['level1-2'] = OrderedDict(pairs) 33 | 34 | result = vdf.binary_loads(vdf.binary_dumps(data), mapper=OrderedDict) 35 | 36 | self.assertEqual(data, result) 37 | 38 | result = vdf.binary_loads(vdf.binary_dumps(data, alt_format=True), mapper=OrderedDict, alt_format=True) 39 | 40 | self.assertEqual(data, result) 41 | 42 | result = vdf.vbkv_loads(vdf.vbkv_dumps(data), mapper=OrderedDict) 43 | 44 | self.assertEqual(data, result) 45 | 46 | def test_vbkv_empty(self): 47 | with self.assertRaises(ValueError): 48 | vdf.vbkv_loads(b'') 49 | 50 | def test_loads_empty(self): 51 | self.assertEqual(vdf.binary_loads(b''), {}) 52 | self.assertEqual(vdf.binary_load(BytesIO(b'')), {}) 53 | 54 | def test_dumps_empty(self): 55 | self.assertEqual(vdf.binary_dumps({}), b'') 56 | 57 | buf = BytesIO() 58 | vdf.binary_dump({}, buf) 59 | 60 | self.assertEqual(buf.getvalue(), b'') 61 | 62 | def test_dumps_unicode(self): 63 | self.assertEqual(vdf.binary_dumps({u('a'): u('b')}), b'\x01a\x00b\x00\x08') 64 | 65 | def test_dumps_unicode_alternative(self): 66 | self.assertEqual(vdf.binary_dumps({u('a'): u('b')}, alt_format=True), b'\x01a\x00b\x00\x0b') 67 | 68 | def test_dump_params_invalid(self): 69 | with self.assertRaises(TypeError): 70 | vdf.binary_dump([], BytesIO()) 71 | with self.assertRaises(TypeError): 72 | vdf.binary_dump({}, b'aaaa') 73 | 74 | def test_dumps_params_invalid(self): 75 | with self.assertRaises(TypeError): 76 | vdf.binary_dumps([]) 77 | with self.assertRaises(TypeError): 78 | vdf.binary_dumps(b'aaaa') 79 | 80 | def test_dumps_key_invalid_type(self): 81 | with self.assertRaises(TypeError): 82 | vdf.binary_dumps({1:1}) 83 | with self.assertRaises(TypeError): 84 | vdf.binary_dumps({None:1}) 85 | 86 | def test_dumps_value_invalid_type(self): 87 | with self.assertRaises(TypeError): 88 | vdf.binary_dumps({'': None}) 89 | 90 | def test_alternative_format(self): 91 | with self.assertRaises(SyntaxError): 92 | vdf.binary_loads(b'\x00a\x00\x00b\x00\x0b\x0b') 93 | with self.assertRaises(SyntaxError): 94 | vdf.binary_loads(b'\x00a\x00\x00b\x00\x08\x08', alt_format=True) 95 | 96 | def test_load_params_invalid(self): 97 | with self.assertRaises(TypeError): 98 | vdf.binary_load(b'aaaa') 99 | with self.assertRaises(TypeError): 100 | vdf.binary_load(1234) 101 | with self.assertRaises(TypeError): 102 | vdf.binary_load(BytesIO(b'aaaa'), b'bbbb') 103 | 104 | def test_loads_params_invalid(self): 105 | with self.assertRaises(TypeError): 106 | vdf.binary_loads([]) 107 | with self.assertRaises(TypeError): 108 | vdf.binary_loads(11111) 109 | with self.assertRaises(TypeError): 110 | vdf.binary_loads(BytesIO()) 111 | with self.assertRaises(TypeError): 112 | vdf.binary_load(b'', b'bbbb') 113 | 114 | def test_loads_unbalanced_nesting(self): 115 | with self.assertRaises(SyntaxError): 116 | vdf.binary_loads(b'\x00a\x00\x00b\x00\x08') 117 | with self.assertRaises(SyntaxError): 118 | vdf.binary_loads(b'\x00a\x00\x00b\x00\x08\x08\x08\x08') 119 | 120 | def test_loads_unknown_type(self): 121 | with self.assertRaises(SyntaxError): 122 | vdf.binary_loads(b'\x33a\x00\x08') 123 | 124 | def test_loads_unterminated_string(self): 125 | with self.assertRaises(SyntaxError): 126 | vdf.binary_loads(b'\x01abbbb') 127 | 128 | def test_loads_type_checks(self): 129 | with self.assertRaises(TypeError): 130 | vdf.binary_loads(None) 131 | with self.assertRaises(TypeError): 132 | vdf.binary_loads(b'', mapper=list) 133 | 134 | def test_merge_multiple_keys_on(self): 135 | # VDFDict([('a', VDFDict([('a', '1'), ('b', '2')])), ('a', VDFDict([('a', '3'), 
('c', '4')]))]) 136 | test = b'\x00a\x00\x01a\x001\x00\x01b\x002\x00\x08\x00a\x00\x01a\x003\x00\x01c\x004\x00\x08\x08' 137 | result = {'a': {'a': '3', 'b': '2', 'c': '4'}} 138 | 139 | self.assertEqual(vdf.binary_loads(test, merge_duplicate_keys=True), result) 140 | 141 | def test_merge_multiple_keys_off(self): 142 | # VDFDict([('a', VDFDict([('a', '1'), ('b', '2')])), ('a', VDFDict([('a', '3'), ('c', '4')]))]) 143 | test = b'\x00a\x00\x01a\x001\x00\x01b\x002\x00\x08\x00a\x00\x01a\x003\x00\x01c\x004\x00\x08\x08' 144 | result = {'a': {'a': '3', 'c': '4'}} 145 | 146 | self.assertEqual(vdf.binary_loads(test, merge_duplicate_keys=False), result) 147 | 148 | def test_raise_on_remaining(self): 149 | # default binary_loads is to raise 150 | with self.assertRaises(SyntaxError): 151 | vdf.binary_loads(b'\x01key\x00value\x00\x08' + b'aaaa') 152 | 153 | # do not raise 154 | self.assertEqual(vdf.binary_loads(b'\x01key\x00value\x00\x08' + b'aaaa', raise_on_remaining=False), {'key': 'value'}) 155 | 156 | def test_raise_on_remaining_with_file(self): 157 | buf = BytesIO(b'\x01key\x00value\x00\x08' + b'aaaa') 158 | 159 | # binary_load doesn't raise by default 160 | self.assertEqual(vdf.binary_load(buf), {'key': 'value'}) 161 | self.assertEqual(buf.read(), b'aaaa') 162 | 163 | # raise when extra data remains 164 | buf.seek(0) 165 | with self.assertRaises(SyntaxError): 166 | vdf.binary_load(buf, raise_on_remaining=True) 167 | self.assertEqual(buf.read(), b'aaaa') 168 | 169 | def test_vbkv_loads_empty(self): 170 | with self.assertRaises(ValueError): 171 | vdf.vbkv_loads(b'') 172 | 173 | def test_vbkv_dumps_empty(self): 174 | self.assertEqual(vdf.vbkv_dumps({}), b'VBKV\x00\x00\x00\x00') 175 | 176 | def test_vbkv_loads_invalid_header(self): 177 | with self.assertRaises(ValueError): 178 | vdf.vbkv_loads(b'DD1235764tdffhghsdf') 179 | 180 | def test_vbkv_loads_invalid_checksum(self): 181 | with self.assertRaises(ValueError): 182 | vdf.vbkv_loads(b'VBKV\x01\x02\x03\x04\x00a\x00\x0b\x0b') 183 | 184 | def test_loads_utf8_invalmid(self): 185 | self.assertEqual({'aaa': b'bb\xef\xbf\xbdbb'.decode('utf-8')}, vdf.binary_loads(b'\x01aaa\x00bb\xffbb\x00\x08')) 186 | 187 | def test_loads_utf16(self): 188 | self.assertEqual({'aaa': b'b\x00b\x00\xff\xffb\x00b\x00'.decode('utf-16le')}, vdf.binary_loads(b'\x05aaa\x00b\x00b\x00\xff\xffb\x00b\x00\x00\x00\x08')) 189 | -------------------------------------------------------------------------------- /tests/test_vdf.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | 4 | try: 5 | from unittest import mock 6 | except ImportError: 7 | import mock 8 | 9 | try: 10 | from StringIO import StringIO 11 | except ImportError: 12 | from io import StringIO 13 | 14 | import vdf 15 | 16 | 17 | class testcase_helpers_escapes(unittest.TestCase): 18 | # https://github.com/ValveSoftware/source-sdk-2013/blob/0d8dceea4310fde5706b3ce1c70609d72a38efdf/sp/src/tier1/utlbuffer.cpp#L57-L68 19 | esc_chars_raw = "aa\n\t\v\b\r\f\a\\?\"'bb" 20 | esc_chars_escaped = 'aa\\n\\t\\v\\b\\r\\f\\a\\\\\\?\\"\\\'bb' 21 | 22 | def test_escape(self): 23 | self.assertEqual(vdf._escape(self.esc_chars_raw), self.esc_chars_escaped) 24 | 25 | def test_unescape(self): 26 | self.assertEqual(vdf._unescape(self.esc_chars_escaped), self.esc_chars_raw) 27 | 28 | def test_escape_unescape(self): 29 | self.assertEqual(vdf._unescape(vdf._escape(self.esc_chars_raw)), self.esc_chars_raw) 30 | 31 | 32 | class testcase_helpers_load(unittest.TestCase): 33 | def setUp(self): 
34 | self.f = StringIO() 35 | 36 | def tearDown(self): 37 | self.f.close() 38 | 39 | @mock.patch("vdf.parse") 40 | def test_routine_loads(self, mock_parse): 41 | vdf.loads("") 42 | 43 | (fp,), _ = mock_parse.call_args 44 | 45 | self.assertIsInstance(fp, StringIO) 46 | 47 | def test_routine_loads_assert(self): 48 | for t in [5, 5.5, 1.0j, None, [], (), {}, lambda: 0, sys.stdin, self.f]: 49 | self.assertRaises(TypeError, vdf.loads, t) 50 | 51 | @mock.patch("vdf.parse") 52 | def test_routine_load(self, mock_parse): 53 | vdf.load(sys.stdin) 54 | mock_parse.assert_called_with(sys.stdin) 55 | 56 | vdf.load(self.f) 57 | mock_parse.assert_called_with(self.f) 58 | 59 | @mock.patch("vdf.parse") 60 | def test_routines_mapper_passing(self, mock_parse): 61 | vdf.load(sys.stdin, mapper=dict) 62 | mock_parse.assert_called_with(sys.stdin, mapper=dict) 63 | 64 | vdf.loads("", mapper=dict) 65 | (fp,), kw = mock_parse.call_args 66 | self.assertIsInstance(fp, StringIO) 67 | self.assertIs(kw['mapper'], dict) 68 | 69 | class CustomDict(dict): 70 | pass 71 | 72 | vdf.load(sys.stdin, mapper=CustomDict) 73 | mock_parse.assert_called_with(sys.stdin, mapper=CustomDict) 74 | vdf.loads("", mapper=CustomDict) 75 | (fp,), kw = mock_parse.call_args 76 | self.assertIsInstance(fp, StringIO) 77 | self.assertIs(kw['mapper'], CustomDict) 78 | 79 | def test_routine_load_assert(self): 80 | for t in [5, 5.5, 1.0j, None, [], (), {}, lambda: 0, '']: 81 | self.assertRaises(TypeError, vdf.load, t) 82 | 83 | 84 | class testcase_helpers_dump(unittest.TestCase): 85 | def setUp(self): 86 | self.f = StringIO() 87 | 88 | def tearDown(self): 89 | self.f.close() 90 | 91 | def test_dump_params_invalid(self): 92 | # pretty/escaped only accept bool 93 | with self.assertRaises(TypeError): 94 | vdf.dump({'a': 1}, StringIO(), pretty=1) 95 | with self.assertRaises(TypeError): 96 | vdf.dumps({'a': 1}, pretty=1) 97 | with self.assertRaises(TypeError): 98 | vdf.dump({'a': 1}, StringIO(), escaped=1) 99 | with self.assertRaises(TypeError): 100 | vdf.dumps({'a': 1}, escaped=1) 101 | 102 | def test_routine_dumps_asserts(self): 103 | for x in [5, 5.5, 1.0j, True, None, (), {}, lambda: 0, sys.stdin, self.f]: 104 | for y in [5, 5.5, 1.0j, None, [], (), {}, lambda: 0, sys.stdin, self.f]: 105 | self.assertRaises(TypeError, vdf.dumps, x, y) 106 | 107 | def test_routine_dump_asserts(self): 108 | for x in [5, 5.5, 1.0j, True, None, (), {}, lambda: 0, sys.stdin, self.f]: 109 | for y in [5, 5.5, 1.0j, True, None, [], (), {}, lambda: 0]: 110 | self.assertRaises(TypeError, vdf.dump, x, y) 111 | 112 | def test_routine_dump_writing(self): 113 | class CustomDict(dict): 114 | pass 115 | 116 | for mapper in (dict, CustomDict): 117 | src = mapper({"asd": "123"}) 118 | expected = vdf.dumps(src) 119 | 120 | vdf.dump(src, self.f) 121 | self.f.seek(0) 122 | 123 | self.assertEqual(expected, self.f.read()) 124 | 125 | 126 | class testcase_routine_parse(unittest.TestCase): 127 | def test_parse_bom_removal(self): 128 | result = vdf.loads(vdf.BOMS + '"asd" "123"') 129 | self.assertEqual(result, {'asd': '123'}) 130 | 131 | if sys.version_info[0] == 2: 132 | result = vdf.loads(vdf.BOMS_UNICODE + '"asd" "123"') 133 | self.assertEqual(result, {'asd': '123'}) 134 | 135 | def test_parse_source_asserts(self): 136 | for t in ['', 5, 5.5, 1.0j, True, None, (), {}, lambda: 0]: 137 | self.assertRaises(TypeError, vdf.parse, t) 138 | 139 | def test_parse_mapper_assert(self): 140 | self.assertRaises(TypeError, vdf.parse, StringIO(" "), mapper=list) 141 | 142 | def test_parse_file_source(self): 
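        # parse() accepts any object exposing readline(); a whitespace-only document parses to {}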
143 | self.assertEqual(vdf.parse(StringIO(" ")), {}) 144 | 145 | class CustomDict(dict): 146 | pass 147 | 148 | self.assertEqual(vdf.parse(StringIO(" "), mapper=CustomDict), {}) 149 | 150 | 151 | class testcase_VDF(unittest.TestCase): 152 | def test_empty(self): 153 | self.assertEqual(vdf.loads(""), {}) 154 | 155 | def test_keyvalue_pairs(self): 156 | INPUT = ''' 157 | "key1" "value1" 158 | key2 "value2" 159 | KEY3 "value3" 160 | "key4" value4 161 | "key5" VALUE5 162 | ''' 163 | 164 | EXPECTED = { 165 | 'key1': 'value1', 166 | 'key2': 'value2', 167 | 'KEY3': 'value3', 168 | 'key4': 'value4', 169 | 'key5': 'VALUE5', 170 | } 171 | 172 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 173 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 174 | 175 | def test_keyvalue_open_quoted(self): 176 | INPUT = ( 177 | '"key1" "a\n' 178 | 'b\n' 179 | 'c"\n' 180 | 'key2 "a\n' 181 | 'b\n' 182 | 'c"\n' 183 | ) 184 | 185 | EXPECTED = { 186 | 'key1': 'a\nb\nc', 187 | 'key2': 'a\nb\nc', 188 | } 189 | 190 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 191 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 192 | 193 | def test_multi_keyvalue_pairs(self): 194 | INPUT = ''' 195 | "root1" 196 | { 197 | "key1" "value1" 198 | key2 "value2" 199 | "key3" value3 200 | } 201 | root2 202 | { 203 | "key1" "value1" 204 | key2 "value2" 205 | "key3" value3 206 | } 207 | ''' 208 | 209 | EXPECTED = { 210 | 'root1': { 211 | 'key1': 'value1', 212 | 'key2': 'value2', 213 | 'key3': 'value3', 214 | }, 215 | 'root2': { 216 | 'key1': 'value1', 217 | 'key2': 'value2', 218 | 'key3': 'value3', 219 | } 220 | } 221 | 222 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 223 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 224 | 225 | def test_deep_nesting(self): 226 | INPUT = ''' 227 | "root" 228 | { 229 | node1 230 | { 231 | "node2" 232 | { 233 | NODE3 234 | { 235 | "node4" 236 | { 237 | node5 238 | { 239 | "node6" 240 | { 241 | NODE7 242 | { 243 | "node8" 244 | { 245 | "key" "value" 246 | } 247 | } 248 | } 249 | } 250 | } 251 | } 252 | } 253 | } 254 | } 255 | ''' 256 | 257 | EXPECTED = { 258 | 'root': { 259 | 'node1': { 260 | 'node2': { 261 | 'NODE3': { 262 | 'node4': { 263 | 'node5': { 264 | 'node6': { 265 | 'NODE7': { 266 | 'node8': { 267 | 'key': 'value' 268 | }}}}}}}}} 269 | } 270 | 271 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 272 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 273 | 274 | def test_comments_and_blank_lines(self): 275 | INPUT = ''' 276 | // this is comment 277 | "key1" "value1" // another comment 278 | key2 "value2" // further comments 279 | "key3" value3 // useless comment 280 | 281 | key4 // comments comments comments 282 | { // is this a comment? 283 | 284 | k v // comment 285 | 286 | } // you only comment once 287 | 288 | // comment out of nowhere 289 | 290 | "key5" // pretty much anything here 291 | { // is this a comment? 
292 | 293 | K V //comment 294 | 295 | } 296 | ''' 297 | 298 | EXPECTED = { 299 | 'key1': 'value1', 300 | 'key2': 'value2', 301 | 'key3': 'value3', 302 | 'key4': { 303 | 'k': 'v' 304 | }, 305 | 'key5': { 306 | 'K': 'V' 307 | }, 308 | } 309 | 310 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 311 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 312 | 313 | def test_hash_key(self): 314 | INPUT = '#include "asd.vdf"' 315 | EXPECTED = {'#include': 'asd.vdf'} 316 | 317 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 318 | 319 | INPUT = '#base asd.vdf' 320 | EXPECTED = {'#base': 'asd.vdf'} 321 | 322 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 323 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 324 | 325 | def test_wierd_symbols_for_unquoted(self): 326 | INPUT = 'a asd.vdf\nb language_*lol*\nc zxc_-*.sss//\nd<2?$% /cde/$fgh/' 327 | EXPECTED = { 328 | 'a': 'asd.vdf', 329 | 'b': 'language_*lol*', 330 | 'c': 'zxc_-*.sss', 331 | 'd<2?$%': '/cde/$fgh/', 332 | } 333 | 334 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 335 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 336 | 337 | def test_space_for_unquoted(self): 338 | INPUT = 'a b c d \n efg h i\t // j k\n' 339 | EXPECTED= { 340 | 'a': 'b c d', 341 | 'efg': 'h i', 342 | } 343 | 344 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 345 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 346 | 347 | def test_merge_multiple_keys_on(self): 348 | INPUT = ''' 349 | a 350 | { 351 | a 1 352 | b 2 353 | } 354 | a 355 | { 356 | a 3 357 | c 4 358 | } 359 | ''' 360 | 361 | EXPECTED = {'a': {'a': '3', 'b': '2', 'c': '4'}} 362 | 363 | self.assertEqual(vdf.loads(INPUT, merge_duplicate_keys=True), EXPECTED) 364 | self.assertEqual(vdf.loads(INPUT, escaped=False, merge_duplicate_keys=True), EXPECTED) 365 | 366 | def test_merge_multiple_keys_off(self): 367 | INPUT = ''' 368 | a 369 | { 370 | a 1 371 | b 2 372 | } 373 | a 374 | { 375 | a 3 376 | c 4 377 | } 378 | ''' 379 | 380 | EXPECTED = {'a': {'a': '3', 'c': '4'}} 381 | 382 | self.assertEqual(vdf.loads(INPUT, merge_duplicate_keys=False), EXPECTED) 383 | self.assertEqual(vdf.loads(INPUT, escaped=False, merge_duplicate_keys=False), EXPECTED) 384 | 385 | def test_escape_before_last(self): 386 | INPUT = r''' 387 | "aaa\\" "1" 388 | "1" "bbb\\" 389 | ''' 390 | 391 | EXPECTED = { 392 | "aaa\\": "1", 393 | "1": "bbb\\", 394 | } 395 | 396 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 397 | 398 | def test_escape_before_last_unescaped(self): 399 | INPUT = r''' 400 | "aaa\\" "1" 401 | "1" "bbb\\" 402 | ''' 403 | 404 | EXPECTED = { 405 | "aaa\\\\": "1", 406 | "1": "bbb\\\\", 407 | } 408 | 409 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 410 | 411 | def test_single_line_empty_block(self): 412 | INPUT = ''' 413 | "root1" 414 | { 415 | "key1" {} 416 | key2 "value2" 417 | "key3" value3 418 | } 419 | root2 { } 420 | root3 421 | { 422 | "key1" "value1" 423 | key2 { } 424 | "key3" value3 425 | } 426 | ''' 427 | 428 | EXPECTED = { 429 | 'root1': { 430 | 'key1': {}, 431 | 'key2': 'value2', 432 | 'key3': 'value3', 433 | }, 434 | 'root2': {}, 435 | 'root3': { 436 | 'key1': 'value1', 437 | 'key2': {}, 438 | 'key3': 'value3', 439 | } 440 | } 441 | 442 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 443 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 444 | 445 | def test_inline_opening_bracker(self): 446 | INPUT = ''' 447 | "root1" { 448 | "key1" { 449 | } 450 | key2 "value2" 451 | "key3" value3 452 | } 453 | root2 { } 454 | root3 { 455 | "key1" "value1" 456 | 
key2 { 457 | 458 | } 459 | "key3" value3 460 | } 461 | ''' 462 | 463 | EXPECTED = { 464 | 'root1': { 465 | 'key1': {}, 466 | 'key2': 'value2', 467 | 'key3': 'value3', 468 | }, 469 | 'root2': {}, 470 | 'root3': { 471 | 'key1': 'value1', 472 | 'key2': {}, 473 | 'key3': 'value3', 474 | } 475 | } 476 | 477 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 478 | self.assertEqual(vdf.loads(INPUT, escaped=False), EXPECTED) 479 | 480 | def test_duplicate_key_with_value_from_str_to_mapper(self): 481 | INPUT = r''' 482 | level1 483 | { 484 | key1 text1 485 | key2 text2 486 | } 487 | level1 488 | { 489 | key2 490 | { 491 | key3 text3 492 | } 493 | } 494 | ''' 495 | 496 | EXPECTED = { 497 | "level1": { 498 | "key1": "text1", 499 | "key2": { 500 | "key3": "text3" 501 | } 502 | } 503 | } 504 | 505 | self.assertEqual(vdf.loads(INPUT), EXPECTED) 506 | 507 | class testcase_VDF_other(unittest.TestCase): 508 | def test_dumps_pretty_output(self): 509 | tests = [ 510 | [ 511 | {'1': '1'}, 512 | '"1" "1"\n', 513 | ], 514 | [ 515 | {'1': {'2': '2'}}, 516 | '"1"\n{\n\t"2" "2"\n}\n', 517 | ], 518 | [ 519 | {'1': {'2': {'3': '3'}}}, 520 | '"1"\n{\n\t"2"\n\t{\n\t\t"3" "3"\n\t}\n}\n', 521 | ], 522 | ] 523 | for test, expected in tests: 524 | self.assertEqual(vdf.dumps(test, pretty=True), expected) 525 | 526 | def test_parse_exceptions(self): 527 | tests = [ 528 | 529 | # expect bracket - invalid syntax 530 | '"asd"\n"zxc" "333"\n"', 531 | 'asd\nzxc 333\n"', 532 | 533 | # invalid syntax 534 | '"asd" "123"\n"zxc" "333"\n"', 535 | 'asd 123\nzxc 333\n"', 536 | '"asd\n\n\n\n\nzxc', 537 | '"asd" "bbb\n\n\n\n\nzxc', 538 | 539 | # one too many closing parenthasis 540 | '"asd"\n{\n"zxc" "123"\n}\n}\n}\n}\n', 541 | 'asd\n{\nzxc 123\n}\n}\n}\n}\n', 542 | 543 | # unclosed parenthasis 544 | '"asd"\n{\n"zxc" "333"\n' 545 | 'asd\n{\nzxc 333\n' 546 | ] 547 | 548 | for test in tests: 549 | self.assertRaises(SyntaxError, vdf.loads, test) 550 | -------------------------------------------------------------------------------- /tests/test_vdf_dict.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from vdf import VDFDict 3 | 4 | 5 | class VDFDictCase(unittest.TestCase): 6 | def test_init(self): 7 | with self.assertRaises(ValueError): 8 | VDFDict("asd zxc") 9 | with self.assertRaises(ValueError): 10 | VDFDict(5) 11 | with self.assertRaises(ValueError): 12 | VDFDict((('1',1), ('2', 2))) 13 | 14 | def test_repr(self): 15 | self.assertIsInstance(repr(VDFDict()), str) 16 | 17 | def test_len(self): 18 | self.assertEqual(len(VDFDict()), 0) 19 | self.assertEqual(len(VDFDict({'1':1})), 1) 20 | 21 | def test_verify_key_tuple(self): 22 | a = VDFDict() 23 | with self.assertRaises(ValueError): 24 | a._verify_key_tuple([]) 25 | with self.assertRaises(ValueError): 26 | a._verify_key_tuple((1,)) 27 | with self.assertRaises(ValueError): 28 | a._verify_key_tuple((1,1,1)) 29 | with self.assertRaises(TypeError): 30 | a._verify_key_tuple((None, 'asd')) 31 | with self.assertRaises(TypeError): 32 | a._verify_key_tuple(('1', 'asd')) 33 | with self.assertRaises(TypeError): 34 | a._verify_key_tuple((1, 1)) 35 | with self.assertRaises(TypeError): 36 | a._verify_key_tuple((1, None)) 37 | 38 | def test_normalize_key(self): 39 | a = VDFDict() 40 | self.assertEqual(a._normalize_key('AAA'), (0, 'AAA')) 41 | self.assertEqual(a._normalize_key((5, 'BBB')), (5, 'BBB')) 42 | 43 | def test_normalize_key_exception(self): 44 | a = VDFDict() 45 | with self.assertRaises(TypeError): 46 | a._normalize_key(5) 47 | with 
self.assertRaises(TypeError): 48 | a._normalize_key([]) 49 | with self.assertRaises(TypeError): 50 | a._normalize_key(None) 51 | 52 | def test_setitem(self): 53 | a = list(zip(map(str, range(5, 0, -1)), range(50, 0, -10))) 54 | b = VDFDict() 55 | for k,v in a: 56 | b[k] = v 57 | self.assertEqual(a, list(b.items())) 58 | 59 | def test_setitem_with_duplicates(self): 60 | a = list(zip(['5']*5, range(50, 0, -10))) 61 | b = VDFDict() 62 | for k,v in a: 63 | b[k] = v 64 | self.assertEqual(a, list(b.items())) 65 | 66 | def test_setitem_key_exceptions(self): 67 | with self.assertRaises(TypeError): 68 | VDFDict()[5] = None 69 | with self.assertRaises(TypeError): 70 | VDFDict()[(0, 5)] = None 71 | with self.assertRaises(ValueError): 72 | VDFDict()[(0, '5', 1)] = None 73 | 74 | def test_setitem_key_valid_types(self): 75 | VDFDict()['5'] = None 76 | VDFDict({'5': None})[(0, '5')] = None 77 | 78 | def test_setitem_keyerror_fullkey(self): 79 | with self.assertRaises(KeyError): 80 | VDFDict([("1", None)])[(1, "1")] = None 81 | 82 | def test_getitem(self): 83 | a = VDFDict([('1',2), ('1',3)]) 84 | self.assertEqual(a['1'], 2) 85 | self.assertEqual(a[(0, '1')], 2) 86 | self.assertEqual(a[(1, '1')], 3) 87 | 88 | def test_del(self): 89 | a = VDFDict([("1",1),("1",2),("5",51),("1",3),("5",52)]) 90 | b = [("1",1),("1",2),("1",3),("5",52)] 91 | del a["5"] 92 | self.assertEqual(list(a.items()), b) 93 | 94 | def test_del_by_fullkey(self): 95 | a = VDFDict([("1",1),("1",2),("5",51),("1",3),("5",52)]) 96 | b = [("1",1),("1",2),("1",3),("5",52)] 97 | del a[(0, "5")] 98 | self.assertEqual(list(a.items()), b) 99 | 100 | def test_del_first_duplicate(self): 101 | a = [("1",1),("1",2),("1",3),("1",4)] 102 | b = VDFDict(a) 103 | 104 | del b["1"] 105 | del b["1"] 106 | del b[(0, "1")] 107 | del b[(0, "1")] 108 | 109 | self.assertEqual(len(b), 0) 110 | 111 | def test_del_exception(self): 112 | with self.assertRaises(KeyError): 113 | a = VDFDict() 114 | del a["1"] 115 | with self.assertRaises(KeyError): 116 | a = VDFDict({'1':1}) 117 | del a[(1, "1")] 118 | 119 | def test_iter(self): 120 | a = VDFDict({"1": 1}) 121 | iter(a).__iter__ 122 | self.assertEqual(len(list(iter(a))), 1) 123 | 124 | def test_in(self): 125 | a = VDFDict({"1":2, "3":4, "5":6}) 126 | self.assertTrue('1' in a) 127 | self.assertTrue((0, '1') in a) 128 | self.assertFalse('6' in a) 129 | self.assertFalse((1, '1') in a) 130 | 131 | def test_eq(self): 132 | self.assertEqual(VDFDict(), VDFDict()) 133 | self.assertNotEqual(VDFDict(), VDFDict({'1':1})) 134 | self.assertNotEqual(VDFDict(), {'1':1}) 135 | a = [("a", 1), ("b", 5), ("a", 11)] 136 | self.assertEqual(VDFDict(a), VDFDict(a)) 137 | self.assertNotEqual(VDFDict(a), VDFDict(a[1:])) 138 | 139 | def test_clear(self): 140 | a = VDFDict([("1",2),("1",2),("5",3),("1",2)]) 141 | a.clear() 142 | self.assertEqual(len(a), 0) 143 | self.assertEqual(len(a.keys()), 0) 144 | self.assertEqual(len(list(a.iterkeys())), 0) 145 | self.assertEqual(len(a.values()), 0) 146 | self.assertEqual(len(list(a.itervalues())), 0) 147 | self.assertEqual(len(a.items()), 0) 148 | self.assertEqual(len(list(a.iteritems())), 0) 149 | 150 | def test_get(self): 151 | a = VDFDict([('1',11), ('1',22)]) 152 | self.assertEqual(a.get('1'), 11) 153 | self.assertEqual(a.get((1, '1')), 22) 154 | self.assertEqual(a.get('2', 33), 33) 155 | self.assertEqual(a.get((0, '2'), 44), 44) 156 | 157 | def test_setdefault(self): 158 | a = VDFDict([('1',11), ('1',22)]) 159 | self.assertEqual(a.setdefault('1'), 11) 160 | self.assertEqual(a.setdefault((0, '1')), 11) 
161 | self.assertEqual(a.setdefault('2'), None) 162 | self.assertEqual(a.setdefault((0, '2')), None) 163 | self.assertEqual(a.setdefault('3', 33), 33) 164 | 165 | def test_pop(self): 166 | a = VDFDict([('1',11),('2',22),('1',33),('2',44),('2',55)]) 167 | self.assertEqual(a.pop('1'), 11) 168 | self.assertEqual(a.pop('1'), 33) 169 | with self.assertRaises(KeyError): 170 | a.pop('1') 171 | self.assertEqual(a.pop((1, '2')), 44) 172 | self.assertEqual(a.pop((1, '2')), 55) 173 | 174 | def test_popitem(self): 175 | a = [('1',11),('2',22),('1',33)] 176 | b = VDFDict(a) 177 | self.assertEqual(b.popitem(), a.pop()) 178 | self.assertEqual(b.popitem(), a.pop()) 179 | self.assertEqual(b.popitem(), a.pop()) 180 | with self.assertRaises(KeyError): 181 | b.popitem() 182 | 183 | def test_update(self): 184 | a = VDFDict([("1",2),("1",2),("5",3),("1",2)]) 185 | b = VDFDict() 186 | b.update([("1",2),("1",2)]) 187 | b.update([("5",3),("1",2)]) 188 | self.assertEqual(list(a.items()), list(b.items())) 189 | 190 | def test_update_exceptions(self): 191 | a = VDFDict() 192 | with self.assertRaises(TypeError): 193 | a.update(None) 194 | with self.assertRaises(TypeError): 195 | a.update(1) 196 | with self.assertRaises(TypeError): 197 | a.update("asd zxc") 198 | with self.assertRaises(ValueError): 199 | a.update([(1,1,1), (2,2,2)]) 200 | 201 | map_test = [ 202 | ("1", 2), 203 | ("4", 3),("4", 3),("4", 2), 204 | ("7", 2), 205 | ("1", 2), 206 | ] 207 | 208 | def test_keys(self): 209 | _dict = VDFDict(self.map_test) 210 | self.assertSequenceEqual( 211 | list(_dict.keys()), 212 | list(x[0] for x in self.map_test)) 213 | 214 | def test_values(self): 215 | _dict = VDFDict(self.map_test) 216 | self.assertSequenceEqual( 217 | list(_dict.values()), 218 | list(x[1] for x in self.map_test)) 219 | 220 | def test_items(self): 221 | _dict = VDFDict(self.map_test) 222 | self.assertSequenceEqual( 223 | list(_dict.items()), 224 | self.map_test) 225 | 226 | def test_direct_access_get(self): 227 | b = dict() 228 | a = VDFDict({"1":2, "3":4, "5":6}) 229 | for k,v in a.items(): 230 | b[k] = v 231 | self.assertEqual(dict(a.items()), b) 232 | 233 | def test_duplicate_keys(self): 234 | items = [('key1', 1), ('key1', 2), ('key3', 3), ('key1', 1)] 235 | keys = [x[0] for x in items] 236 | values = [x[1] for x in items] 237 | _dict = VDFDict(items) 238 | self.assertEqual(list(_dict.items()), items) 239 | self.assertEqual(list(_dict.keys()), keys) 240 | self.assertEqual(list(_dict.values()), values) 241 | 242 | def test_same_type_init(self): 243 | self.assertSequenceEqual( 244 | tuple(VDFDict(self.map_test).items()), 245 | tuple(VDFDict(VDFDict(self.map_test)).items())) 246 | 247 | def test_get_all_for(self): 248 | a = VDFDict([("1",2),("1",2**31),("5",3),("1",2)]) 249 | self.assertEqual( 250 | list(a.get_all_for("1")), 251 | [2,2**31,2], 252 | ) 253 | 254 | def test_get_all_for_invalid_key(self): 255 | a = VDFDict() 256 | with self.assertRaises(TypeError): 257 | a.get_all_for(None) 258 | with self.assertRaises(TypeError): 259 | a.get_all_for(5) 260 | with self.assertRaises(TypeError): 261 | a.get_all_for((0, '5')) 262 | 263 | def test_remove_all_for(self): 264 | a = VDFDict([("1",2),("1",2),("5",3),("1",2)]) 265 | a.remove_all_for("1") 266 | self.assertEqual(list(a.items()), [("5",3)]) 267 | self.assertEqual(len(a), 1) 268 | 269 | def test_remove_all_for_invalid_key(self): 270 | a = VDFDict() 271 | with self.assertRaises(TypeError): 272 | a.remove_all_for(None) 273 | with self.assertRaises(TypeError): 274 | a.remove_all_for(5) 275 | with 
self.assertRaises(TypeError): 276 | a.remove_all_for((0, '5')) 277 | 278 | def test_has_duplicates(self): 279 | # single level duplicate 280 | a = [('1', 11), ('1', 22)] 281 | b = VDFDict(a) 282 | self.assertTrue(b.has_duplicates()) 283 | 284 | # duplicate in nested 285 | c = VDFDict({'1': b}) 286 | self.assertTrue(c.has_duplicates()) 287 | 288 | # duplicate in nested dict 289 | d = VDFDict({'1': {'2': {'3': b}}}) 290 | self.assertTrue(d.has_duplicates()) 291 | 292 | # duplicate in nested dict 293 | d = VDFDict({'1': {'2': {'3': None}}}) 294 | self.assertFalse(d.has_duplicates()) 295 | -------------------------------------------------------------------------------- /vdf/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for deserializing/serializing to and from VDF 3 | """ 4 | __version__ = "3.4" 5 | __author__ = "Rossen Georgiev" 6 | 7 | import re 8 | import sys 9 | import struct 10 | from binascii import crc32 11 | from io import BytesIO 12 | from io import StringIO as unicodeIO 13 | 14 | try: 15 | from collections.abc import Mapping 16 | except: 17 | from collections import Mapping 18 | 19 | from vdf.vdict import VDFDict 20 | 21 | # Py2 & Py3 compatibility 22 | if sys.version_info[0] >= 3: 23 | string_type = str 24 | int_type = int 25 | BOMS = '\ufffe\ufeff' 26 | 27 | def strip_bom(line): 28 | return line.lstrip(BOMS) 29 | else: 30 | from StringIO import StringIO as strIO 31 | string_type = basestring 32 | int_type = long 33 | BOMS = '\xef\xbb\xbf\xff\xfe\xfe\xff' 34 | BOMS_UNICODE = '\\ufffe\\ufeff'.decode('unicode-escape') 35 | 36 | def strip_bom(line): 37 | return line.lstrip(BOMS if isinstance(line, str) else BOMS_UNICODE) 38 | 39 | # string escaping 40 | _unescape_char_map = { 41 | r"\n": "\n", 42 | r"\t": "\t", 43 | r"\v": "\v", 44 | r"\b": "\b", 45 | r"\r": "\r", 46 | r"\f": "\f", 47 | r"\a": "\a", 48 | r"\\": "\\", 49 | r"\?": "?", 50 | r"\"": "\"", 51 | r"\'": "\'", 52 | } 53 | _escape_char_map = {v: k for k, v in _unescape_char_map.items()} 54 | 55 | def _re_escape_match(m): 56 | return _escape_char_map[m.group()] 57 | 58 | def _re_unescape_match(m): 59 | return _unescape_char_map[m.group()] 60 | 61 | def _escape(text): 62 | return re.sub(r"[\n\t\v\b\r\f\a\\\?\"']", _re_escape_match, text) 63 | 64 | def _unescape(text): 65 | return re.sub(r"(\\n|\\t|\\v|\\b|\\r|\\f|\\a|\\\\|\\\?|\\\"|\\')", _re_unescape_match, text) 66 | 67 | # parsing and dumping for KV1 68 | def parse(fp, mapper=dict, merge_duplicate_keys=True, escaped=True): 69 | """ 70 | Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a VDF) 71 | to a Python object. 72 | 73 | ``mapper`` specifies the Python object used after deserializetion. ``dict` is 74 | used by default. Alternatively, ``collections.OrderedDict`` can be used if you 75 | wish to preserve key order. Or any object that acts like a ``dict``. 76 | 77 | ``merge_duplicate_keys`` when ``True`` will merge multiple KeyValue lists with the 78 | same key into one instead of overwriting. You can se this to ``False`` if you are 79 | using ``VDFDict`` and need to preserve the duplicates. 
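    A minimal sketch of the difference (key names are made up; the behaviour mirrors
    ``tests/test_vdf.py``)::

        import io

        text = '''
        "a"
        {
            "x" "1"
        }
        "a"
        {
            "y" "2"
        }
        '''

        parse(io.StringIO(text))
        # default: duplicate blocks merge -> {'a': {'x': '1', 'y': '2'}}

        parse(io.StringIO(text), mapper=VDFDict, merge_duplicate_keys=False)
        # keeps two separate 'a' entries instead of merging them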
80 | """ 81 | if not issubclass(mapper, Mapping): 82 | raise TypeError("Expected mapper to be subclass of dict, got %s" % type(mapper)) 83 | if not hasattr(fp, 'readline'): 84 | raise TypeError("Expected fp to be a file-like object supporting line iteration") 85 | 86 | stack = [mapper()] 87 | expect_bracket = False 88 | 89 | re_keyvalue = re.compile(r'^("(?P(?:\\.|[^\\"])*)"|(?P#?[a-z0-9\-\_\\\?$%<>]+))' 90 | r'([ \t]*(' 91 | r'"(?P(?:\\.|[^\\"])*)(?P")?' 92 | r'|(?P(?:(? ])+)' 93 | r'|(?P{[ \t]*)(?P})?' 94 | r'))?', 95 | flags=re.I) 96 | 97 | for lineno, line in enumerate(fp, 1): 98 | if lineno == 1: 99 | line = strip_bom(line) 100 | 101 | line = line.lstrip() 102 | 103 | # skip empty and comment lines 104 | if line == "" or line[0] == '/': 105 | continue 106 | 107 | # one level deeper 108 | if line[0] == "{": 109 | expect_bracket = False 110 | continue 111 | 112 | if expect_bracket: 113 | raise SyntaxError("vdf.parse: expected openning bracket", 114 | (getattr(fp, 'name', '<%s>' % fp.__class__.__name__), lineno, 1, line)) 115 | 116 | # one level back 117 | if line[0] == "}": 118 | if len(stack) > 1: 119 | stack.pop() 120 | continue 121 | 122 | raise SyntaxError("vdf.parse: one too many closing parenthasis", 123 | (getattr(fp, 'name', '<%s>' % fp.__class__.__name__), lineno, 0, line)) 124 | 125 | # parse keyvalue pairs 126 | while True: 127 | match = re_keyvalue.match(line) 128 | 129 | if not match: 130 | try: 131 | line += next(fp) 132 | continue 133 | except StopIteration: 134 | raise SyntaxError("vdf.parse: unexpected EOF (open key quote?)", 135 | (getattr(fp, 'name', '<%s>' % fp.__class__.__name__), lineno, 0, line)) 136 | 137 | key = match.group('key') if match.group('qkey') is None else match.group('qkey') 138 | val = match.group('qval') 139 | if val is None: 140 | val = match.group('val') 141 | if val is not None: 142 | val = val.rstrip() 143 | if val == "": 144 | val = None 145 | 146 | if escaped: 147 | key = _unescape(key) 148 | 149 | # we have a key with value in parenthesis, so we make a new dict obj (level deeper) 150 | if val is None: 151 | if merge_duplicate_keys and key in stack[-1]: 152 | _m = stack[-1][key] 153 | # we've descended a level deeper, if value is str, we have to overwrite it to mapper 154 | if not isinstance(_m, mapper): 155 | _m = stack[-1][key] = mapper() 156 | else: 157 | _m = mapper() 158 | stack[-1][key] = _m 159 | 160 | if match.group('eblock') is None: 161 | # only expect a bracket if it's not already closed or on the same line 162 | stack.append(_m) 163 | if match.group('sblock') is None: 164 | expect_bracket = True 165 | 166 | # we've matched a simple keyvalue pair, map it to the last dict obj in the stack 167 | else: 168 | # if the value is line consume one more line and try to match again, 169 | # until we get the KeyValue pair 170 | if match.group('vq_end') is None and match.group('qval') is not None: 171 | try: 172 | line += next(fp) 173 | continue 174 | except StopIteration: 175 | raise SyntaxError("vdf.parse: unexpected EOF (open quote for value?)", 176 | (getattr(fp, 'name', '<%s>' % fp.__class__.__name__), lineno, 0, line)) 177 | 178 | stack[-1][key] = _unescape(val) if escaped else val 179 | 180 | # exit the loop 181 | break 182 | 183 | if len(stack) != 1: 184 | raise SyntaxError("vdf.parse: unclosed parenthasis or quotes (EOF)", 185 | (getattr(fp, 'name', '<%s>' % fp.__class__.__name__), lineno, 0, line)) 186 | 187 | return stack.pop() 188 | 189 | 190 | def loads(s, **kwargs): 191 | """ 192 | Deserialize ``s`` (a ``str`` or ``unicode`` 
instance containing a JSON 193 | document) to a Python object. 194 | """ 195 | if not isinstance(s, string_type): 196 | raise TypeError("Expected s to be a str, got %s" % type(s)) 197 | 198 | try: 199 | fp = unicodeIO(s) 200 | except TypeError: 201 | fp = strIO(s) 202 | 203 | return parse(fp, **kwargs) 204 | 205 | 206 | def load(fp, **kwargs): 207 | """ 208 | Deserialize ``fp`` (a ``.readline()``-supporting file-like object containing 209 | a JSON document) to a Python object. 210 | """ 211 | return parse(fp, **kwargs) 212 | 213 | 214 | def dumps(obj, pretty=False, escaped=True): 215 | """ 216 | Serialize ``obj`` to a VDF formatted ``str``. 217 | """ 218 | if not isinstance(obj, Mapping): 219 | raise TypeError("Expected data to be an instance of``dict``") 220 | if not isinstance(pretty, bool): 221 | raise TypeError("Expected pretty to be of type bool") 222 | if not isinstance(escaped, bool): 223 | raise TypeError("Expected escaped to be of type bool") 224 | 225 | return ''.join(_dump_gen(obj, pretty, escaped)) 226 | 227 | 228 | def dump(obj, fp, pretty=False, escaped=True): 229 | """ 230 | Serialize ``obj`` as a VDF formatted stream to ``fp`` (a 231 | ``.write()``-supporting file-like object). 232 | """ 233 | if not isinstance(obj, Mapping): 234 | raise TypeError("Expected data to be an instance of``dict``") 235 | if not hasattr(fp, 'write'): 236 | raise TypeError("Expected fp to have write() method") 237 | if not isinstance(pretty, bool): 238 | raise TypeError("Expected pretty to be of type bool") 239 | if not isinstance(escaped, bool): 240 | raise TypeError("Expected escaped to be of type bool") 241 | 242 | for chunk in _dump_gen(obj, pretty, escaped): 243 | fp.write(chunk) 244 | 245 | 246 | def _dump_gen(data, pretty=False, escaped=True, level=0): 247 | indent = "\t" 248 | line_indent = "" 249 | 250 | if pretty: 251 | line_indent = indent * level 252 | 253 | for key, value in data.items(): 254 | if escaped and isinstance(key, string_type): 255 | key = _escape(key) 256 | 257 | if isinstance(value, Mapping): 258 | yield '%s"%s"\n%s{\n' % (line_indent, key, line_indent) 259 | for chunk in _dump_gen(value, pretty, escaped, level+1): 260 | yield chunk 261 | yield "%s}\n" % line_indent 262 | else: 263 | if escaped and isinstance(value, string_type): 264 | value = _escape(value) 265 | 266 | yield '%s"%s" "%s"\n' % (line_indent, key, value) 267 | 268 | 269 | # binary VDF 270 | class BASE_INT(int_type): 271 | def __repr__(self): 272 | return "%s(%d)" % (self.__class__.__name__, self) 273 | 274 | class UINT_64(BASE_INT): 275 | pass 276 | 277 | class INT_64(BASE_INT): 278 | pass 279 | 280 | class POINTER(BASE_INT): 281 | pass 282 | 283 | class COLOR(BASE_INT): 284 | pass 285 | 286 | BIN_NONE = b'\x00' 287 | BIN_STRING = b'\x01' 288 | BIN_INT32 = b'\x02' 289 | BIN_FLOAT32 = b'\x03' 290 | BIN_POINTER = b'\x04' 291 | BIN_WIDESTRING = b'\x05' 292 | BIN_COLOR = b'\x06' 293 | BIN_UINT64 = b'\x07' 294 | BIN_END = b'\x08' 295 | BIN_INT64 = b'\x0A' 296 | BIN_END_ALT = b'\x0B' 297 | 298 | def binary_loads(b, mapper=dict, merge_duplicate_keys=True, alt_format=False, raise_on_remaining=True): 299 | """ 300 | Deserialize ``b`` (``bytes`` containing a VDF in "binary form") 301 | to a Python object. 302 | 303 | ``mapper`` specifies the Python object used after deserializetion. ``dict` is 304 | used by default. Alternatively, ``collections.OrderedDict`` can be used if you 305 | wish to preserve key order. Or any object that acts like a ``dict``. 
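    For instance, ``binary_loads(data, mapper=collections.OrderedDict)`` keeps key order,
    and ``mapper=VDFDict`` can additionally hold duplicate keys (see ``merge_duplicate_keys``
    below); both calls are illustrative rather than required.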
306 | 307 | ``merge_duplicate_keys`` when ``True`` will merge multiple KeyValue lists with the 308 | same key into one instead of overwriting. You can se this to ``False`` if you are 309 | using ``VDFDict`` and need to preserve the duplicates. 310 | """ 311 | if not isinstance(b, bytes): 312 | raise TypeError("Expected s to be bytes, got %s" % type(b)) 313 | 314 | return binary_load(BytesIO(b), mapper, merge_duplicate_keys, alt_format, raise_on_remaining) 315 | 316 | def binary_load(fp, mapper=dict, merge_duplicate_keys=True, alt_format=False, raise_on_remaining=False): 317 | """ 318 | Deserialize ``fp`` (a ``.read()``-supporting file-like object containing 319 | binary VDF) to a Python object. 320 | 321 | ``mapper`` specifies the Python object used after deserializetion. ``dict` is 322 | used by default. Alternatively, ``collections.OrderedDict`` can be used if you 323 | wish to preserve key order. Or any object that acts like a ``dict``. 324 | 325 | ``merge_duplicate_keys`` when ``True`` will merge multiple KeyValue lists with the 326 | same key into one instead of overwriting. You can se this to ``False`` if you are 327 | using ``VDFDict`` and need to preserve the duplicates. 328 | """ 329 | if not hasattr(fp, 'read') or not hasattr(fp, 'tell') or not hasattr(fp, 'seek'): 330 | raise TypeError("Expected fp to be a file-like object with tell()/seek() and read() returning bytes") 331 | if not issubclass(mapper, Mapping): 332 | raise TypeError("Expected mapper to be subclass of dict, got %s" % type(mapper)) 333 | 334 | # helpers 335 | int32 = struct.Struct(' 1: 381 | stack.pop() 382 | continue 383 | break 384 | 385 | key = read_string(fp) 386 | 387 | if t == BIN_NONE: 388 | if merge_duplicate_keys and key in stack[-1]: 389 | _m = stack[-1][key] 390 | else: 391 | _m = mapper() 392 | stack[-1][key] = _m 393 | stack.append(_m) 394 | elif t == BIN_STRING: 395 | stack[-1][key] = read_string(fp) 396 | elif t == BIN_WIDESTRING: 397 | stack[-1][key] = read_string(fp, wide=True) 398 | elif t in (BIN_INT32, BIN_POINTER, BIN_COLOR): 399 | val = int32.unpack(fp.read(int32.size))[0] 400 | 401 | if t == BIN_POINTER: 402 | val = POINTER(val) 403 | elif t == BIN_COLOR: 404 | val = COLOR(val) 405 | 406 | stack[-1][key] = val 407 | elif t == BIN_UINT64: 408 | stack[-1][key] = UINT_64(uint64.unpack(fp.read(int64.size))[0]) 409 | elif t == BIN_INT64: 410 | stack[-1][key] = INT_64(int64.unpack(fp.read(int64.size))[0]) 411 | elif t == BIN_FLOAT32: 412 | stack[-1][key] = float32.unpack(fp.read(float32.size))[0] 413 | else: 414 | raise SyntaxError("Unknown data type at offset %d: %s" % (fp.tell() - 1, repr(t))) 415 | 416 | if len(stack) != 1: 417 | raise SyntaxError("Reached EOF, but Binary VDF is incomplete") 418 | if raise_on_remaining and fp.read(1) != b'': 419 | fp.seek(-1, 1) 420 | raise SyntaxError("Binary VDF ended at offset %d, but there is more data remaining" % (fp.tell() - 1)) 421 | 422 | return stack.pop() 423 | 424 | def binary_dumps(obj, alt_format=False): 425 | """ 426 | Serialize ``obj`` to a binary VDF formatted ``bytes``. 
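    For example (mirroring ``tests/test_binary_vdf.py``), ``binary_dumps({'a': 'b'})``
    produces six bytes: the string type byte ``0x01``, the null-terminated key ``a``,
    the null-terminated value ``b``, and the section-end byte ``0x08`` (``0x0B`` when
    ``alt_format=True``).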
427 | """ 428 | buf = BytesIO() 429 | binary_dump(obj, buf, alt_format) 430 | return buf.getvalue() 431 | 432 | def binary_dump(obj, fp, alt_format=False): 433 | """ 434 | Serialize ``obj`` to a binary VDF formatted ``bytes`` and write it to ``fp`` filelike object 435 | """ 436 | if not isinstance(obj, Mapping): 437 | raise TypeError("Expected obj to be type of Mapping") 438 | if not hasattr(fp, 'write'): 439 | raise TypeError("Expected fp to have write() method") 440 | 441 | for chunk in _binary_dump_gen(obj, alt_format=alt_format): 442 | fp.write(chunk) 443 | 444 | def _binary_dump_gen(obj, level=0, alt_format=False): 445 | if level == 0 and len(obj) == 0: 446 | return 447 | 448 | int32 = struct.Struct('= 3: 5 | _iter_values = 'values' 6 | _range = range 7 | _string_type = str 8 | import collections.abc as _c 9 | class _kView(_c.KeysView): 10 | def __iter__(self): 11 | return self._mapping.iterkeys() 12 | class _vView(_c.ValuesView): 13 | def __iter__(self): 14 | return self._mapping.itervalues() 15 | class _iView(_c.ItemsView): 16 | def __iter__(self): 17 | return self._mapping.iteritems() 18 | else: 19 | _iter_values = 'itervalues' 20 | _range = xrange 21 | _string_type = basestring 22 | _kView = lambda x: list(x.iterkeys()) 23 | _vView = lambda x: list(x.itervalues()) 24 | _iView = lambda x: list(x.iteritems()) 25 | 26 | 27 | class VDFDict(dict): 28 | def __init__(self, data=None): 29 | """ 30 | This is a dictionary that supports duplicate keys and preserves insert order 31 | 32 | ``data`` can be a ``dict``, or a sequence of key-value tuples. (e.g. ``[('key', 'value'),..]``) 33 | The only supported type for key is str. 34 | 35 | Get/set duplicates is done by tuples ``(index, key)``, where index is the duplicate index 36 | for the specified key. (e.g. ``(0, 'key')``, ``(1, 'key')``...) 
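        For example (mirroring ``tests/test_vdf_dict.py``; key names are illustrative),
        ``d = VDFDict([('k', 1), ('k', 2)])`` gives ``d[(0, 'k')] == 1`` and
        ``d[(1, 'k')] == 2``, while plain ``d['k']`` returns the first value, ``1``.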
37 | 38 | When the ``key`` is ``str``, instead of tuple, set will create a duplicate and get will look up ``(0, key)`` 39 | """ 40 | self.__omap = [] 41 | self.__kcount = Counter() 42 | 43 | if data is not None: 44 | if not isinstance(data, (list, dict)): 45 | raise ValueError("Expected data to be list of pairs or dict, got %s" % type(data)) 46 | self.update(data) 47 | 48 | def __repr__(self): 49 | out = "%s(" % self.__class__.__name__ 50 | out += "%s)" % repr(list(self.iteritems())) 51 | return out 52 | 53 | def __len__(self): 54 | return len(self.__omap) 55 | 56 | def _verify_key_tuple(self, key): 57 | if len(key) != 2: 58 | raise ValueError("Expected key tuple length to be 2, got %d" % len(key)) 59 | if not isinstance(key[0], int): 60 | raise TypeError("Key index should be an int") 61 | if not isinstance(key[1], _string_type): 62 | raise TypeError("Key value should be a str") 63 | 64 | def _normalize_key(self, key): 65 | if isinstance(key, _string_type): 66 | key = (0, key) 67 | elif isinstance(key, tuple): 68 | self._verify_key_tuple(key) 69 | else: 70 | raise TypeError("Expected key to be a str or tuple, got %s" % type(key)) 71 | return key 72 | 73 | def __setitem__(self, key, value): 74 | if isinstance(key, _string_type): 75 | key = (self.__kcount[key], key) 76 | self.__omap.append(key) 77 | elif isinstance(key, tuple): 78 | self._verify_key_tuple(key) 79 | if key not in self: 80 | raise KeyError("%s doesn't exist" % repr(key)) 81 | else: 82 | raise TypeError("Expected either a str or tuple for key") 83 | super(VDFDict, self).__setitem__(key, value) 84 | self.__kcount[key[1]] += 1 85 | 86 | def __getitem__(self, key): 87 | return super(VDFDict, self).__getitem__(self._normalize_key(key)) 88 | 89 | def __delitem__(self, key): 90 | key = self._normalize_key(key) 91 | result = super(VDFDict, self).__delitem__(key) 92 | 93 | start_idx = self.__omap.index(key) 94 | del self.__omap[start_idx] 95 | 96 | dup_idx, skey = key 97 | self.__kcount[skey] -= 1 98 | tail_count = self.__kcount[skey] - dup_idx 99 | 100 | if tail_count > 0: 101 | for idx in _range(start_idx, len(self.__omap)): 102 | if self.__omap[idx][1] == skey: 103 | oldkey = self.__omap[idx] 104 | newkey = (dup_idx, skey) 105 | super(VDFDict, self).__setitem__(newkey, self[oldkey]) 106 | super(VDFDict, self).__delitem__(oldkey) 107 | self.__omap[idx] = newkey 108 | 109 | dup_idx += 1 110 | tail_count -= 1 111 | if tail_count == 0: 112 | break 113 | 114 | if self.__kcount[skey] == 0: 115 | del self.__kcount[skey] 116 | 117 | return result 118 | 119 | def __iter__(self): 120 | return iter(self.iterkeys()) 121 | 122 | def __contains__(self, key): 123 | return super(VDFDict, self).__contains__(self._normalize_key(key)) 124 | 125 | def __eq__(self, other): 126 | if isinstance(other, VDFDict): 127 | return list(self.items()) == list(other.items()) 128 | else: 129 | return False 130 | 131 | def __ne__(self, other): 132 | return not self.__eq__(other) 133 | 134 | def clear(self): 135 | super(VDFDict, self).clear() 136 | self.__kcount.clear() 137 | self.__omap = list() 138 | 139 | def get(self, key, *args): 140 | return super(VDFDict, self).get(self._normalize_key(key), *args) 141 | 142 | def setdefault(self, key, default=None): 143 | if key not in self: 144 | self.__setitem__(key, default) 145 | return self.__getitem__(key) 146 | 147 | def pop(self, key): 148 | key = self._normalize_key(key) 149 | value = self.__getitem__(key) 150 | self.__delitem__(key) 151 | return value 152 | 153 | def popitem(self): 154 | if not self.__omap: 155 | 
            raise KeyError("VDFDict is empty")
156 |         key = self.__omap[-1]
157 |         return key[1], self.pop(key)
158 | 
159 |     def update(self, data=None, **kwargs):
160 |         if isinstance(data, dict):
161 |             data = data.items()
162 |         elif not isinstance(data, list):
163 |             raise TypeError("Expected data to be a list or dict, got %s" % type(data))
164 | 
165 |         for key, value in data:
166 |             self.__setitem__(key, value)
167 | 
168 |     def iterkeys(self):
169 |         return (key[1] for key in self.__omap)
170 | 
171 |     def keys(self):
172 |         return _kView(self)
173 | 
174 |     def itervalues(self):
175 |         return (self[key] for key in self.__omap)
176 | 
177 |     def values(self):
178 |         return _vView(self)
179 | 
180 |     def iteritems(self):
181 |         return ((key[1], self[key]) for key in self.__omap)
182 | 
183 |     def items(self):
184 |         return _iView(self)
185 | 
186 |     def get_all_for(self, key):
187 |         """ Returns all values of the given key """
188 |         if not isinstance(key, _string_type):
189 |             raise TypeError("Key needs to be a string.")
190 |         return [self[(idx, key)] for idx in _range(self.__kcount[key])]
191 | 
192 |     def remove_all_for(self, key):
193 |         """ Removes all items with the given key """
194 |         if not isinstance(key, _string_type):
195 |             raise TypeError("Key needs to be a string.")
196 | 
197 |         for idx in _range(self.__kcount[key]):
198 |             super(VDFDict, self).__delitem__((idx, key))
199 | 
200 |         self.__omap = list(filter(lambda x: x[1] != key, self.__omap))
201 | 
202 |         del self.__kcount[key]
203 | 
204 |     def has_duplicates(self):
205 |         """
206 |         Returns ``True`` if the dict contains keys with duplicates.
207 |         Recurses into any value that is a ``VDFDict`` or a plain ``dict``.
208 |         """
209 |         for n in getattr(self.__kcount, _iter_values)():
210 |             if n != 1:
211 |                 return True
212 | 
213 |         def dict_recurse(obj):
214 |             for v in getattr(obj, _iter_values)():
215 |                 if isinstance(v, VDFDict) and v.has_duplicates():
216 |                     return True
217 |                 elif isinstance(v, dict) and dict_recurse(v):
218 |                     return True
219 |             return False
220 | 
221 |         return dict_recurse(self)
222 | 
--------------------------------------------------------------------------------
/vdf2json/Makefile:
--------------------------------------------------------------------------------
1 | clean:
2 | 	rm -rf dist vdf2json.egg-info vdf2json/*.pyc
3 | 
4 | dist: clean
5 | 	python setup.py sdist
6 | 
7 | upload: dist
8 | 	python setup.py register -r pypi
9 | 	twine upload -r pypi dist/*
--------------------------------------------------------------------------------
/vdf2json/README.rst:
--------------------------------------------------------------------------------
1 | ..
code:: text 2 | 3 | usage: vdf2json [-h] [-p] [-ei encoding] [-eo encoding] [infile] [outfile] 4 | 5 | positional arguments: 6 | infile VDF 7 | outfile JSON (utf8) 8 | 9 | optional arguments: 10 | -h, --help show this help message and exit 11 | -p, --pretty pretty json output 12 | -ei encoding input encoding E.g.: utf8, utf-16le, etc 13 | -eo encoding output encoding E.g.: utf8, utf-16le, etc 14 | -------------------------------------------------------------------------------- /vdf2json/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | from codecs import open 5 | from os import path 6 | 7 | here = path.abspath(path.dirname(__file__)) 8 | with open(path.join(here, 'README.rst'), encoding='utf-8') as f: 9 | long_description = f.read() 10 | 11 | setup( 12 | name='vdf2json', 13 | version='1.1', 14 | description='command line tool for converting VDF to JSON', 15 | long_description=long_description, 16 | url='https://github.com/rossengeorgiev/vdf-python', 17 | author='Rossen Georgiev', 18 | author_email='hello@rgp.io', 19 | license='MIT', 20 | classifiers=[ 21 | 'Development Status :: 5 - Production/Stable', 22 | 'License :: OSI Approved :: MIT License', 23 | 'Topic :: Text Processing ', 24 | 'Natural Language :: English', 25 | 'Operating System :: OS Independent', 26 | 'Environment :: Console', 27 | 'Programming Language :: Python :: 2.6', 28 | 'Programming Language :: Python :: 2.7', 29 | 'Programming Language :: Python :: 3', 30 | 'Programming Language :: Python :: 3.0', 31 | 'Programming Language :: Python :: 3.2', 32 | 'Programming Language :: Python :: 3.3', 33 | 'Programming Language :: Python :: 3.4', 34 | ], 35 | keywords='valve keyvalue vdf tf2 dota2 csgo cli commandline json', 36 | install_requires=['vdf>=1.4'], 37 | packages=['vdf2json'], 38 | entry_points={ 39 | 'console_scripts': [ 40 | 'vdf2json = vdf2json.cli:main' 41 | ] 42 | }, 43 | zip_safe=True, 44 | ) 45 | -------------------------------------------------------------------------------- /vdf2json/vdf2json/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ValvePython/vdf/d76292623e326fb165fe3bdb684832cdf30959d4/vdf2json/vdf2json/__init__.py -------------------------------------------------------------------------------- /vdf2json/vdf2json/cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import sys 5 | import json 6 | import vdf 7 | import codecs 8 | from collections import OrderedDict 9 | 10 | 11 | def main(): 12 | p = argparse.ArgumentParser(prog='vdf2json') 13 | 14 | p.add_argument('infile', nargs='?', type=argparse.FileType('r'), 15 | default=sys.stdin, help="VDF") 16 | p.add_argument('outfile', nargs='?', type=argparse.FileType('w'), 17 | default=sys.stdout, help="JSON (utf8)") 18 | p.add_argument('-p', '--pretty', help='pretty json output', action='store_true') 19 | p.add_argument('-ei', default='utf-8', type=str, metavar='encoding', 20 | help='input encoding E.g.: utf8, utf-16le, etc') 21 | p.add_argument('-eo', default='utf-8', type=str, metavar='encoding', 22 | help='output encoding E.g.: utf8, utf-16le, etc') 23 | 24 | args = p.parse_args() 25 | 26 | # unicode pain 27 | if args.infile is not sys.stdin: 28 | args.infile.close() 29 | args.infile = codecs.open(args.infile.name, 'r', encoding=args.ei) 30 | else: 31 | args.infile = 
codecs.getreader(args.ei)(sys.stdin) 32 | 33 | if args.outfile is not sys.stdout: 34 | args.outfile.close() 35 | args.outfile = codecs.open(args.outfile.name, 'w', encoding=args.eo) 36 | else: 37 | args.outfile = codecs.getwriter(args.eo)(sys.stdout) 38 | 39 | data = vdf.loads(args.infile.read(), mapper=OrderedDict) 40 | 41 | json.dump(data, args.outfile, indent=4 if args.pretty else 0, ensure_ascii=False) 42 | 43 | 44 | if __name__ == '__main__': 45 | main() 46 | --------------------------------------------------------------------------------
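A short end-to-end sketch tying the two packages in this dump together: the ``vdf`` module parses KeyValues text, and ``vdf2json/cli.py`` above feeds the result to ``json``. The snippet below mirrors that flow in plain Python 3; the input file name ``example.vdf`` and the sample duplicate-key data are illustrative assumptions, not files or values from this repository.

    import json
    from collections import OrderedDict

    import vdf
    from vdf.vdict import VDFDict

    # Text VDF -> JSON, the same conversion vdf2json's CLI performs.
    with open('example.vdf', 'r', encoding='utf-8') as f:
        data = vdf.loads(f.read(), mapper=OrderedDict)
    print(json.dumps(data, indent=4, ensure_ascii=False))

    # VDFDict keeps duplicate keys: assigning with a plain string key appends
    # a new duplicate, while an (index, key) tuple addresses a specific one.
    d = VDFDict([('key', 'a'), ('key', 'b')])
    d[(1, 'key')] = 'B'
    print(d.get_all_for('key'))   # ['a', 'B']
    print(d.has_duplicates())     # True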