├── .github ├── FEATURE_REQUEST.md ├── ISSUE_TEMPLATE.md ├── release-drafter.yml └── workflows │ ├── coverage.yml │ ├── docs.yml │ ├── main.yml │ ├── pip-publish.yml │ └── release-drafter.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── Makefile ├── README.md ├── docs ├── cryptography.md ├── index.md ├── installation.md ├── intended_usage.md ├── introduction.md ├── requirements.txt └── under_the_hood.md ├── lattice_algebra ├── __init__.py └── main.py ├── mkdocs.yml ├── requirements-dev.txt ├── setup.py ├── tests └── test_lattices.py └── tox.ini /.github/FEATURE_REQUEST.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request Template 3 | about: "For feature requests. Please search for existing issues first. Also see CONTRIBUTING." 4 | 5 | --- 6 | 7 | **Please Describe The Problem To Be Solved** 8 | (Replace This Text: Please present a concise description of the problem to be addressed by this feature request. Please be clear what parts of the problem are considered to be in-scope and out-of-scope.) 9 | 10 | **(Optional): Suggest A Solution** 11 | (Replace This Text: A concise description of your preferred solution. Things to address include: 12 | * Details of the technical implementation 13 | * Tradeoffs made in design decisions 14 | * Caveats and considerations for the future 15 | 16 | If there are multiple solutions, please present each one separately. Save comparisons for the very end.) -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | * rsis version: [latest] 2 | * Python version: [3.x] 3 | * Operating System: [linux] 4 | 5 | ### Description: 6 | 7 | // REPLACE ME: What are you trying to get done, what has happened, what went wrong, and what did you expect? 
8 | 9 | ### What I've run: 10 | 11 | ``` 12 | // REPLACE ME: Paste a log of command(s) you ran and rsis's output, tracebacks, etc, here 13 | ``` 14 | 15 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | categories: 2 | - title: 'Breaking Changes' 3 | labels: 4 | - 'breaking-change' 5 | - title: 'Non-Breaking Changes' 6 | labels: 7 | - 'major' 8 | - title: 'Minor Changes' 9 | labels: 10 | - 'feature' 11 | - 'enhancement' 12 | - title: 'CI/CD and QA changes' 13 | labels: 14 | - 'CI/CD' 15 | - 'tests' 16 | - 'code style' 17 | - title: 'Documentation updates' 18 | labels: 19 | - 'documentation' 20 | - title: 'Bugfixes' 21 | labels: 22 | - 'bug' 23 | - title: 'Deprecations' 24 | labels: 25 | - 'deprecated' 26 | exclude-labels: 27 | - 'skip-changelog' 28 | template: | 29 | ## Changes 30 | 31 | $CHANGES 32 | 33 | ## This release is made by wonderfull contributors: 34 | 35 | $CONTRIBUTORS 36 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | #name: coverage-ci 2 | # 3 | #on: 4 | # push: 5 | # branches: 6 | # - main 7 | # - staging 8 | # tags: 9 | # - "*" 10 | # pull_request: 11 | # branches: 12 | # - "*" 13 | # 14 | #jobs: 15 | # build: 16 | # runs-on: ubuntu-latest 17 | # steps: 18 | # - uses: actions/checkout@v2 19 | # 20 | # - name: Set up Python 3.8 21 | # uses: actions/setup-python@v1 22 | # with: 23 | # python-version: "3.8" 24 | # 25 | # - name: Install dependencies 26 | # run: | 27 | # python -m pip install --upgrade pip 28 | # pip install tox virtualenv 29 | # 30 | # - name: Test build with coverage 31 | # run: "tox -e cov-report" 32 | # 33 | # - name: Send coverage report to codecov 34 | # uses: codecov/codecov-action@v1 35 | # with: 36 | # file: ./coverage.xml 37 | # token: ${{ secrets.CODECOV_TOKEN }} 38 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: documentation-ci 2 | on: 3 | pull_request: 4 | branches: 5 | - "*" 6 | push: 7 | branches: 8 | - main 9 | 10 | jobs: 11 | docs: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - uses: actions/setup-python@v1 17 | with: 18 | python-version: "3.8" 19 | 20 | - name: Install dependencies 21 | run: | 22 | python -m pip install --upgrade pip 23 | pip install -e . 
24 | pip install -r requirements-dev.txt 25 | pip install -r docs/requirements.txt 26 | 27 | - name: Build docs 28 | run: | 29 | mkdocs build 30 | 31 | - uses: actions/upload-artifact@v1 32 | with: 33 | name: DocumentationHTML 34 | path: site/ 35 | 36 | - name: Push changes gh-pages 37 | run: mkdocs gh-deploy --force 38 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: ci-tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - staging 8 | tags: 9 | - "*" 10 | pull_request: 11 | branches: 12 | - "*" 13 | 14 | jobs: 15 | build: 16 | runs-on: ${{ matrix.os }} 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | name: 21 | - "linting" 22 | - "ubuntu-py36" 23 | - "ubuntu-py37" 24 | - "ubuntu-py38" 25 | - "ubuntu-py39" 26 | - "ubuntu-py310" 27 | - "ubuntu-pypy3" 28 | - "macos-py37" 29 | - "macos-py38" 30 | - "macos-py39" 31 | 32 | include: 33 | - name: "linting" 34 | python: "3.8" 35 | os: ubuntu-latest 36 | tox_env: "lint" 37 | - name: "ubuntu-py36" 38 | python: "3.6" 39 | os: ubuntu-latest 40 | tox_env: "py36" 41 | - name: "ubuntu-py37" 42 | python: "3.7" 43 | os: ubuntu-latest 44 | tox_env: "py37" 45 | - name: "ubuntu-py38" 46 | python: "3.8" 47 | os: ubuntu-latest 48 | tox_env: "py38" 49 | - name: "ubuntu-py39" 50 | python: "3.9" 51 | os: ubuntu-latest 52 | tox_env: "py39" 53 | - name: "ubuntu-py310" 54 | python: "3.10" 55 | os: ubuntu-latest 56 | tox_env: "py310" 57 | - name: "ubuntu-pypy3" 58 | python: "pypy3" 59 | os: ubuntu-latest 60 | tox_env: "pypy3" 61 | - name: "macos-py37" 62 | python: "3.7" 63 | os: macos-latest 64 | tox_env: "py37" 65 | - name: "macos-py38" 66 | python: "3.8" 67 | os: macos-latest 68 | tox_env: "py38" 69 | - name: "macos-py39" 70 | python: "3.8" 71 | os: macos-latest 72 | tox_env: "py38" 73 | 74 | steps: 75 | - uses: actions/checkout@v2 76 | - name: Set up Python ${{ matrix.python }} 77 | uses: actions/setup-python@v1 78 | with: 79 | python-version: ${{ matrix.python }} 80 | - name: Install dependencies 81 | run: | 82 | python -m pip install --upgrade pip 83 | pip install tox virtualenv 84 | - name: Test build 85 | run: "tox -e ${{ matrix.tox_env }}" 86 | -------------------------------------------------------------------------------- /.github/workflows/pip-publish.yml: -------------------------------------------------------------------------------- 1 | name: upload-pypi-ci 2 | 3 | on: 4 | release: 5 | types: [ published ] 6 | 7 | jobs: 8 | upload: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v2 12 | 13 | - name: Set up Python 14 | uses: actions/setup-python@v1 15 | with: 16 | python-version: "3.10" 17 | 18 | - name: Install dependencies 19 | run: | 20 | python -m pip install --upgrade pip 21 | python -m pip install setuptools wheel twine 22 | 23 | - name: Package project 24 | run: python setup.py sdist bdist_wheel 25 | 26 | - name: Upload distributions 27 | env: 28 | TWINE_USERNAME: __token__ 29 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} 30 | run: twine upload dist/* 31 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: release-drafter-ci 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | update_release_draft: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: release-drafter/release-drafter@v5 13 | env: 14 
| GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # IDEs 7 | .idea 8 | .vscode 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | pip-wheel-metadata/ 28 | share/python-wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .nox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | *.py,cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 96 | #Pipfile.lock 97 | 98 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 99 | __pypackages__/ 100 | 101 | # Celery stuff 102 | celerybeat-schedule 103 | celerybeat.pid 104 | 105 | # SageMath parsed files 106 | *.sage.py 107 | 108 | # Environments 109 | .env 110 | .venv 111 | env/ 112 | venv/ 113 | ENV/ 114 | env.bak/ 115 | venv.bak/ 116 | 117 | # Spyder project settings 118 | .spyderproject 119 | .spyproject 120 | 121 | # Rope project settings 122 | .ropeproject 123 | 124 | # mkdocs documentation 125 | /site 126 | 127 | # mypy 128 | .mypy_cache/ 129 | .dmypy.json 130 | dmypy.json 131 | 132 | # Pyre type checker 133 | .pyre/ 134 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://github.com/compilerla/conventional-pre-commit 4 | rev: v1.2.0 5 | hooks: 6 | - id: conventional-pre-commit 7 | stages: [commit-msg] 8 | args: [] # optional: list of Conventional Commits types to allow 9 | 10 | - repo: https://github.com/python/black.git 11 | rev: 21.4b2 12 | hooks: 13 | - id: black 14 | language_version: python3 15 | 16 | - repo: https://github.com/pre-commit/pre-commit-hooks 17 | rev: v3.4.0 18 | hooks: 19 | - id: trailing-whitespace 20 | args: [ --markdown-linebreak-ext=md ] 21 | - id: mixed-line-ending 22 | - id: check-byte-order-marker 23 | - id: check-executables-have-shebangs 24 | - id: check-merge-conflict 25 | - id: check-symlinks 26 | - repo: https://gitlab.com/pycqa/flake8 27 | rev: 3.9.1 28 | hooks: 29 | - id: flake8 30 | args: 31 | - '--ignore=D106,D102,D107,E501,W503,D400,D205,E203,BLK999' 32 | - '--exclude=.git,__pycache__,docs,build,dist' 33 | additional_dependencies: 34 | - flake8-absolute-import 35 | - flake8-black 36 | - flake8-docstrings -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Geometry Labs, Inc. Funded by The QRL Foundation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PYPI_SERVER = pypitest 2 | 3 | .DEFAULT_GOAL := help 4 | 5 | .PHONY: clean-tox 6 | clean-tox: ## Remove tox testing artifacts 7 | @echo "+ $@" 8 | @rm -rf .tox/ 9 | 10 | .PHONY: clean-build 11 | clean-build: ## Remove build artifacts 12 | @echo "+ $@" 13 | @rm -fr build/ 14 | @rm -fr dist/ 15 | @rm -fr *.egg-info 16 | 17 | .PHONY: clean-pyc 18 | clean-pyc: ## Remove Python file artifacts 19 | @echo "+ $@" 20 | @find . -type d -name '__pycache__' -exec rm -rf {} + 21 | @find . -type f -name '*.py[co]' -exec rm -f {} + 22 | 23 | .PHONY: clean 24 | clean: clean-tox clean-build clean-pyc ## Remove all file artifacts 25 | 26 | .PHONY: lint 27 | lint: ## Check code style with flake8 28 | @echo "+ $@" 29 | @tox -e lint 30 | 31 | .PHONY: test 32 | test: ## Run tests quickly with the default Python 33 | @echo "+ $@" 34 | @tox -e py 35 | 36 | .PHONY: test-all 37 | test-all: ## Run tests on every Python version with tox 38 | @echo "+ $@" 39 | @tox 40 | 41 | .PHONY: coverage 42 | coverage: ## Check code coverage quickly with the default Python 43 | @echo "+ $@" 44 | @tox -e cov-report 45 | @$(BROWSER) htmlcov/index.html 46 | 47 | .PHONY: release 48 | release: clean ## Package and upload release - Done in CI normally 49 | @echo "+ $@" 50 | @python setup.py sdist bdist_wheel 51 | @twine upload -r $(PYPI_SERVER) dist/* 52 | 53 | .PHONY: help 54 | help: 55 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-16s\033[0m %s\n", $$1, $$2}' 56 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `lattice-algebra` 2 | 3 | [![pypi](https://img.shields.io/pypi/v/lattice-algebra.svg)](https://pypi.python.org/pypi/lattice-algebra) 4 | [![python](https://img.shields.io/pypi/pyversions/lattice-algebra.svg)](https://pypi.python.org/pypi/lattice-algebra) 5 | [![codecov](https://codecov.io/gh/geometry-labs/lattice-algebra/branch/main/graphs/badge.svg?branch=main)](https://codecov.io/github/geometry-labs/lattice-algebra?branch=main) 6 | [![main-tests](https://github.com/geometry-labs/lattice-algebra/actions/workflows/main.yml/badge.svg)](https://github.com/geometry-labs/lattice-algebra/actions) 7 | 8 | This library is a fundamental infrastructure package for building lattice-based cryptography. 9 | 10 | + Installation: `pip install lattice-algebra` 11 | + Documentation: https://geometry-labs.github.io/lattice-algebra/ 12 | 13 | ## Introduction 14 | 15 | The mathematical objects and calculations handled by this package are foundational for lattice algebra, with a variety of applications ranging from signature aggregation to zero-knowledge proofs. The module highly prioritizes developer experience for researchers and engineers, by allowing them to work with a few high level objects (e.g. polynomials, polynomial vectors) that contain built-in methods to abstractly handle the ways that they interact with each other. 
**The goal is to lower the barrier for creating lattice cryptography primitives and applications by allowing the developers to focus on securely building the higher-level constructs without having to worry about implementing the underlying algebra as well.** 16 | 17 | The module is specifically designed for building cryptographic schemes in the Ring/Module/Ideal Short Integer Solution setting with secrets uniformly distributed with respect to the infinity-norm and one-norm; it can also be used to implement schemes in the Ring/Module/Ideal Learning With Errors setting. **The library’s lead author Brandon Goodell explained how the high level objects are efficiently implemented under the hood, “*to manipulate equations of polynomials, we carry out the computations with vectors and matrices, with highly optimized algebraic operations.*”** 18 | 19 | ## Features for cryptography developers 20 | 21 | The library is designed to make it **easy for developers to write clean code that securely implements lattice-based cryptography** for protocols and applications. The package is optimized to use the Number Theoretic Transform (NTT) to multiply polynomials in time ```O(2dlog(2d))```, and uses **constant-time modular arithmetic to avoid timing attacks**. For convenience, we included tools for both *hashing to* and *sampling from* these "suitably small" polynomials and vectors. Both the hashing and sampling are carried out such that the bias of the resulting distribution is negligibly different from uniform. 22 | 23 | One way that the `lattice_algebra` toolkit helps developers write succinct code is by leveraging Python's **magic methods for arithmetic with elements from ```R``` and ```R^l```**. For example, suppose we have two polynomials ```f``` and ```g```. Simple expressions such as ```f + g```, ```f - g```, and ```f * g``` carry out **constant-time polynomial arithmetic** such as addition, subtraction, and multiplication (respectively). Likewise, if we have two vectors of polynomials ```x``` and ```y```, several vector arithmetic methods are at our disposal: we can add them like ```x + y```, or calculate the dot product as ```x * y```. Additionally, ```x ** f``` scales a vector ```x``` by the polynomial ```f```, which is useful for constructing digital signatures. 24 | 25 | ## Contributors 26 | 27 | Brandon Goodell (lead author), Mitchell "Isthmus" Krawiec-Thayer, Rob Cannon. 28 | 29 | Built by [Geometry Labs](https://www.geometrylabs.io) with funding from [The QRL Foundation](https://qrl.foundation/). 30 | 31 | ## Running Tests 32 | 33 | Use ```pytest test_lattices.py```. Basic tests cover almost every function and are generally short and correct; however, test robustness can be greatly improved. For example, we implement the Number Theoretic Transform function ```ntt``` that calls (or uses data from) ```bit_rev``` and ```make_zetas_and_invs```, among other functions, so we test all three of these with ```test_ntt```, ```test_bit_rev```, and ```test_make_zetas_and_invs```... but in all three of these tests, we only test a single example with small parameters. 34 | 35 | Our tests do not have full coverage; we have not mocked any hash functions to test ```hash2bddpoly``` and ```hash2bddpolyvec```. Interestingly, one can look at our tests as a Rosetta stone for our encoding and decoding of polynomials from binary strings, which is used in our hash functions.
A keen-eyed reader can compare ```decode2coef``` in main with ```test_decode2coef``` in ```test_lattices.py```, for example, to see where the test comes from and how the decoding scheme works. See also ```test_decode2indices``` and ```test_decode2polycoefs```. 36 | 37 | ## Building Docs 38 | 39 | Docs are built with mkdocs. Run the following and navigate to [http://127.0.0.1:8000/rsis/](http://127.0.0.1:8000/rsis/) which should update automatically as you write the docs. 40 | 41 | ```shell 42 | pip install -r docs/requirements.txt 43 | mkdocs serve 44 | ``` 45 | 46 | ## License 47 | 48 | This library is released as free and open-source software under the MIT License, see LICENSE file for details. 49 | 50 | 51 | -------------------------------------------------------------------------------- /docs/cryptography.md: -------------------------------------------------------------------------------- 1 | 2 | # Cryptography 3 | 4 | For certain choices of ```d```, ```q```, and ```l```, it is thought to be hard to find any vector (or matrix) ```x``` that is small enough (in terms of one or more norms on the ring ```R```) such that some matrix equation ```A * x = 0``` is satisfied, where ```A``` is a suitably random challenge from ```V```. From this hardness assumption, the map carrying suitably small vectors (or matrices) ```x``` to their images ```A * x``` is a one-way function. If no additional information is leaked about a small secret vector (such as how long it takes to perform arithmetic operations), then this can be used to build secure cryptographic schemes. 5 | 6 | Simulation-based security proofs in the lattice setting are based on extracting a suitably small vector or matrix (called a *witness*) that satisfies some system of linear equations. Overall security of the scheme is based on how small the adversary can make this witness in terms of the norm. The infinity-norm and the one-norm are of particular interest: the infinity-norm of a polynomial is the absolute maximum coefficient, and the one-norm is the absolute sum of coefficients. We can extend this definition to vectors by taking the maximum norm of the entries of the vector. We note that if we count only the weight of a polynomial, in terms of the number of non-zero coefficients, then we have that ```one_norm <= infinity_norm * weight```. Consequently, bounding the infinity norm and the weight of a polynomial also has the effect of bounding the infinity norm and the one-norm. Taking into account both the infinity norm and the weight of the polynomial (number of non-zero entries) enables tighter inequalities that lead to smaller witnesses. This means we can achieve the **same security level with smaller parameters** (the CRYSTALS-Dilithium scheme is an exemplary implementation of this technique). 7 | 8 | Nothing in `lattice-algebra` limits which hardness assumptions are underlying the cryptographic scheme being constructed. Since the library merely handles polynomials from ```R``` and vectors from ```V=R^l```, **schemes based on other hardness assumptions (such as the Ring Learning With Errors assumption) that take place over the same ring can be securely implemented as well**. 9 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # `lattice-algebra` 2 | 3 | Infrastructure package for lattice-based crypto. 
4 | -------------------------------------------------------------------------------- /docs/installation.md: -------------------------------------------------------------------------------- 1 | 2 | # Installation and Building Docs 3 | 4 | To install this library, run: 5 | 6 | ```shell 7 | pip install lattice-algebra 8 | ``` 9 | 10 | Docs are built with mkdocs; first use `pip install -r docs/requirements.txt`, then use `mkdocs serve`, then navigate to [http://127.0.0.1:8000/lattice-algebra/](http://127.0.0.1:8000/lattice-algebra/). 11 | -------------------------------------------------------------------------------- /docs/intended_usage.md: -------------------------------------------------------------------------------- 1 | 2 | # Intended Usage (and Some Background) 3 | 4 | ## Arithmetic 5 | 6 | For some TLDR code snippets, proceed to the Example below. 7 | 8 | In many lattice-based cryptographic schemes, primitives are constructed from polynomials in the ring ```R = Zq[X]/(X^d + 1)``` where we denote the integers modulo a prime ```q``` with ```Zq```, with a degree```d``` that is a power of two such that ```(q-1) % (2*d) = 0```. Keys are often vectors from the vector space ```V=R^l``` or matrices with entries from ```V = R^(k * l)``` for dimensions ```k```, ```l```. For example, the CRYSTALS-Dilithium scheme sets ```q = 2^23 - 2^13 + 1```, ```d = 256```, and uses ```4x4```, ```6x5```, and ```8x7``` matrices, depending on security level. 9 | 10 | We encode the vector space ```V``` using the ```LatticeParameters``` object which holds the ```degree```, ```modulus```, and ```length``` attributes. 11 | 12 | We create polynomials in ```R``` by using the ```Polynomial``` object, which has a ```LatticeParameters``` attribute called ```lp```. When we instantiate a Polynomial, we pass in the lattice parameters and a *coefficient representation* of that polynomial as a dictionary. The keys determine the power on the monomial and the value determines the coefficient. 13 | 14 | For example, if ```degree = 8``` and ```modulus = 257```, then the polynomial ```f(X) = 1 + 256 * X + 12 * X**2``` can be created with either ```{0: 1, 1: 256, 2: 12}``` or ```{0: 1, 1: -1, 2: 12}``` as the input coefficient representation, since ```(256 - (-1)) % 257 == 0```. 15 | 16 | We create polynomial vectors in ```V``` by using the ```PolynomialVector``` object, which has a ```LatticeParameters``` attribute and another attribute called ```entries``` which is just a list of ```Polynomial``` objects. 17 | 18 | For example, if ```f``` and ```g``` are ```Polynomial``` and have the same lattice parameters, which has ```l = 2```, then we can instantiate the vector ```v = [f, g]``` by passing in ```entries=[f, g]```. 19 | 20 | From a ```Polynomial```, we can access the NTT representation at any time for almost no cost with the ```ntt_representation``` attribute. However, regaining the coefficient representation requires computing the inverse NTT, which is costly. Checking norms and weights of a ```Polynomial``` or a ```PolynomialVector``` requires the coefficient representation. We re-emphasize that this representation is costly to compute. Thus, it should only be computed once (and norms and weights should only be checked at the end of algorithms). We regain the coefficient representation of a ```Polynomial``` or a ```PolynomialVector``` object by calling the ```get_coef_rep``` function. 
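As a quick illustration of the coefficient representation described above (a minimal sketch, assuming the constructor signature and ```get_coef_rep``` behaviour used in the example below), the two equivalent coefficient dictionaries given earlier for ```f(X) = 1 + 256 * X + 12 * X**2``` produce the same polynomial:

```
from lattice_algebra import LatticeParameters, Polynomial

lp = LatticeParameters(degree=8, length=2, modulus=257)

# Two dictionaries describing the same polynomial, since (256 - (-1)) % 257 == 0.
f_pos = Polynomial(lp=lp, coefs={0: 1, 1: 256, 2: 12})
f_neg = Polynomial(lp=lp, coefs={0: 1, 1: -1, 2: 12})

# Both should yield the same centered coefficient representation, norm, and weight.
assert f_pos.get_coef_rep() == f_neg.get_coef_rep()
```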
21 | 22 | ### Example of Arithmetic 23 | 24 | Consider the vector space ```V``` defined with ```degree = 8```, ```modulus = 257```, and ```length = 3```, and the following 8 polynomials (which are proportional to the first eight Legendre polynomials, for a convenient example). 25 | 26 | 1. ```a(X) = 1``` 27 | 2. ```b(X) = X``` 28 | 3. ```c(X) = -1 + 3 * X**2``` 29 | 4. ```d(X) = -3 * X + 5 * X ** 3``` 30 | 5. ```e(X) = 3 - 30 * X ** 2 + 35 * X ** 4``` 31 | 6. ```f(X) = 15 * X - 70 * X ** 3 + 63 * X ** 5``` 32 | 7. ```g(X) = -5 + 105 * X ** 2 - 315 * X ** 4 + 231 * X ** 6``` 33 | 8. ```h(X) = -35 * X + 315 * X ** 3 - 693 * X ** 5 + 429 * X ** 7``` 34 | 35 | In the following code, we instantiate the vector space ```V = R^3```, we instantiate these polynomials, we compute a few of their sums and products, we create two vectors of polynomials, ```v = [a, b, c]``` and ```u = [d, e, f]```, we compute the dot product ```v * u``` of these two vectors, we compare it to the sums and products we just computed by calling the ```get_coef_rep``` function, we scale ```v``` by ```g(X)``` and we scale ```u``` by ```h(X)```, we compute the resulting linear combination of ```v``` and ```u```, and we print the coefficient representation, norm, and weight. 36 | 37 | ``` 38 | from lattice_algebra import LatticeParameters, Polynomial, PolynomialVector 39 | 40 | lp = LatticeParameters(degree=8, length=3, modulus=257) # make V 41 | 42 | a = Polynomial(lp = lp, coefs = {0: 1}) # Make 8 polynomials proportional to the first 8 Legendre polys 43 | b = Polynomial(lp = lp, coefs = {1: 1}) 44 | c = Polynomial(lp = lp, coefs = {0: -1, 2: 3}) 45 | d = Polynomial(lp = lp, coefs = {1: -3, 3: 5}) 46 | e = Polynomial(lp = lp, coefs = {0: 3, 2: -30, 4: 35}) 47 | f = Polynomial(lp = lp, coefs = {1: 15, 3: -70, 5: 63}) 48 | g = Polynomial(lp = lp, coefs = {0: -5, 2: 105, 4: -315, 6: 231}) 49 | h = Polynomial(lp = lp, coefs = {1: -35, 3: 315, 5: -693, 7: 429}) 50 | 51 | prods = [a * d, b * e, c * f] # We can add, subtract, multiply, and use Python's built-in sum() 52 | sum_of_these = sum(prods) 53 | coef_rep_of_sum, n_sum, w_sum = sum_of_these.get_coef_rep() 54 | 55 | v = PolynomialVector(lp = lp, entries = [a, b, c]) # Make some polynomial vectors 56 | u = PolynomialVector(lp = lp, entries = [d, e, f]) 57 | 58 | dot_product = v * u # We can compute the dot product, which should match the sum above 59 | 60 | coef_rep_of_dot_prod, n_dot, w_dot = dot_product.get_coef_rep() 61 | assert n_sum == n_dot 62 | assert w_sum == w_dot 63 | assert list(coef_rep_of_dot_prod.keys()) == list(coef_rep_of_sum.keys()) 64 | for next_monomial in coef_rep_of_dot_prod: 65 | assert (coef_rep_of_dot_prod[next_monomial] - coef_rep_of_sum[next_monomial]) % lp.modulus == 0 66 | 67 | scaled_v = v ** g # We can also scale a vector by a polynomial with __pow__ 68 | scaled_u = u ** h 69 | lin_combo = scaled_v + scaled_u # We can add vectors (and subtract!)
70 | also_lin_combo = sum([i ** j for i, j in zip([v, u], [g, h])]) # more pythonically 71 | assert also_lin_combo == lin_combo 72 | 73 | # Lastly, let's print the coefficient representation, norm, and weight of this lin combo 74 | coef_rep, n, w = lin_combo.get_coef_rep() 75 | print(f"Coefficient representation of linear combination = {coef_rep}") 76 | print(f"Norm of linear combination = {n}") 77 | print(f"Weight of linear combination = {w}") 78 | ``` 79 | 80 | ## Randomness and Hashing 81 | 82 | The library also contains functions ```random_polynomial```, ```hash2bddpoly```, ```random_polynomialvector```, and ```hash2bddpolyvec``` for generating random ```Polynomial``` and ```PolynomialVector``` objects, either with system randomness or by hashing a message. The output of these functions are uniformly random (at least up to a negligible difference) among the ```Polynomial``` and ```PolynomialVector``` objects with a specified infinity norm bound and Hamming weight. Randomness is generated using the ```secrets``` module. 83 | 84 | ### Example of Randomness and Hashing. 85 | 86 | In the following code, we first use the salt ```'SOME_SALT'``` to hash the string ```hello world``` to an instance of the ```Polynomial``` class, say ```x```, and an instance of the ```PolynomialVector``` class, say ```v```. In both cases, the polynomials in the hash output should have at most ```4``` non-zero coefficients since ```wt = 4```, and all of those should be in the list ```[-1, 0, 1]``` since ```bd = 1```. Then, we sample a new random ```Polynomial```, say ```y```, and a new random ```PolynomialVector```, say ```u```, using ```random_polynomial``` and ```random_polynomialvector```, respectively. Note that there are around ```2 ** 12``` possible outputs of ```random_polynomial``` and ```hash2bddpoly``` using these parameters, and around ```2 ** 36``` possible outputs of ```random_polynomialvector``` and ```hash2bddpolyvec```. In particular, the chance that we obtain ```x == y``` and ```v == u``` under these conditions is around ```2 ** -48```. While this is not cryptographically small, it is pretty durned small, so the following code should pass assertions. 
87 | 88 | ``` 89 | from lattice_algebra import LatticeParameters, hash2bddpoly, hash2bddpolyvec, random_polynomial, random_polynomialvector 90 | secpar = 128 # example security parameter 91 | lp = LatticeParameters(degree=8, length=3, modulus=257) # make V 92 | 93 | x = hash2bddpoly(secpar = secpar, lp = lp, bd = 1, wt = 4, salt = 'SOME_SALT', m='hello world') 94 | coef_rep, n, w = x.get_coef_rep() 95 | assert n <= 1 # should always pass 96 | assert len(coef_rep) <= w <= 4 # should always pass 97 | 98 | v = hash2bddpolyvec(secpar = secpar, lp = lp, bd = 1, wt = 4, salt = 'SOME_SALT', m='hello world') 99 | coef_rep, n, w = v.get_coef_rep() 100 | assert n <= 1 # should always pass 101 | assert len(coef_rep) <= w <= 4 # should always pass 102 | 103 | y = random_polynomial(secpar = secpar, lp = lp, bd = 1, wt = 4) 104 | coef_rep, n, w = y.get_coef_rep() 105 | assert n <= 1 # should always pass 106 | assert len(coef_rep) <= w <= 4 # should always pass 107 | 108 | u = random_polynomialvector(secpar = secpar, lp = lp, bd = 1, wt = 4) 109 | coef_rep, n, w = u.get_coef_rep() 110 | assert n <= 1 # should always pass 111 | assert len(coef_rep) <= w <= 4 # should always pass 112 | 113 | assert x != y or v != u # should pass with probability 1 - 2 ** - 48 114 | ``` 115 | 116 | In order for the hash functions to work, we need to decode bitstrings of certain lengths to ```Polynomial``` and ```PolynomialVector``` objects in a way that keeps the output uniformly random (or at least with a negligible difference from uniform). These are the functions ```decode2coef```, ```decode2coefs```, ```decode2indices```, and ```decode2polycoefs```. 117 | 118 | -------------------------------------------------------------------------------- /docs/introduction.md: -------------------------------------------------------------------------------- 1 | 2 | # Introduction 3 | 4 | The math handled by this package is foundational for lattice algebra, with a variety of applications ranging from signature aggregation to zero-knowledge proofs. The module highly prioritizes developer experience for researchers and engineers, by allowing them to work with a few high level objects (e.g. polynomials, polynomial vectors) that contain built-in methods to abstractly handle the ways that they interact with each other. **The goal is to lower the barrier for creating lattice cryptography primitives and applications by allowing the developers to focus on securely building the higher-level constructs without having to worry about implementing the underlying algebra as well.** 5 | 6 | The module is specifically designed for building cryptographic schemes in the Ring/Module/Ideal Short Integer Solution setting with secrets uniformly distributed with respect to the infinity-norm and one-norm; it can also be used to implement schemes in the Ring/Module/Ideal Learning With Errors setting. High level objects are efficiently implemented under the hood: to manipulate equations of polynomials, we carry out the computations with vectors and matrices, with optimized algebraic operations.
7 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | mkdocs>="1.2.1" 2 | mkdocs-material>="8.1.4" 3 | mdx-include>="1.4.1" -------------------------------------------------------------------------------- /docs/under_the_hood.md: -------------------------------------------------------------------------------- 1 | 2 | # Under the Hood 3 | 4 | The library is designed to make it **easy for developers to write clean code that securely implements lattice-based cryptography** for protocols and applications. The package is optimized to use the Number Theoretic Transform (NTT) to multiply polynomials in time ```O(2dlog(2d))```, and uses **constant-time modular arithmetic to avoid timing attacks**. For convenience, we included tools for both *hashing to* and *sampling from* these "suitably small" polynomials and vectors. Both the hashing and sampling are carried out such that the bias of the resulting distribution is negligibly different from uniform. 5 | 6 | One way that the `lattice-algebra` toolkit helps developers write succinct code is by leveraging python's **magic methods for arithmetic with elements from ```R``` and ```R^l```**. For example, suppose we have two polynomials ```f``` and ```g```. Simple expressions such as ```f + g```, ```f - g```, and ```f * g``` carry out **constant-time polynomial arithmetic** such as addition, subtraction, and multiplication (respectively). Likewise if we have two vectors of polynomials ```x``` and ``` y```, several vector arithmetic methods are at our disposal: we can add them like ```x + y```, or calculate the dot product as ```x * y```. Additionally, ```x ** f``` scales a vector ```x``` by the polynomial ```f```, which is useful for constructing digital signatures. 7 | 8 | ## Class Details 9 | 10 | This package handles three fundamental objects: LatticeParameters, Polynomial, and PolynomialVector. The Polynomial and PolynomialVector objects have a LatticeParameters attribute, and the package handles computations with Polynomial and PolynomialVector objects with matching LatticeParameters. 11 | 12 | ### LatticeParameters 13 | 14 | The LatticeParameters class contains attributes describing the ring ```R```, namely the degree ```d```, the module length ```l```, and the modulus ```q```. From these, additional data are pre-computed for use in various algorithms later. We instantiate a LatticeParameters object by specifying the degree, length, and modulus in the following way. 15 | 16 | ```lp = LatticeParameters(degree=2**10, length=2**4, modulus=12289)``` 17 | 18 | We must instantiate LatticeParameters objects by passing in degree, length, and modulus. These must all be positive integers such that the degree is a power of two and ```(modulus - 1) % (2 * degree) == 0``` otherwise a ValueError is raised. 19 | 20 | ### Polynomial 21 | 22 | #### Attributes and Instantiation 23 | 24 | The Polynomial and PolynomialVector objects have a LatticeParameters attribute, ```lp```, and the package handles computations with Polynomial and PolynomialVector objects with matching LatticeParameters. 25 | 26 | Other than the LatticeParameters object attached to each Polynomial, the Polynomial object also has an ```ntt_representation``` attribute, which is a list of integers. 
To instantiate a Polynomial, we pass in the coefficient representation of the polynomial as a dictionary of key-value pairs, where the keys are integers in the set ```[0, 1, ..., degree - 1]``` and the value associated with a key is the coefficient of the associated monomial, which is assumed to be a representative of an equivalence class of integers modulo ```modulus```. The coefficients are centralized to be in the list ```[-(modulus//2), -(modulus//2)+1, ..., modulus//2 - 1, modulus//2]``` with constant-time modular arithmetic. 27 | 28 | For example, if ```modulus = 61``` and we want to represent ```3 * X**2 + 9 * X + 17```, we see the coefficient on the monomial ```X**0 = 1``` is ```17```, the coefficient on the monomial ```X``` is ```9```, and the coefficient on the monomial ```X**2``` is ```3```. So we can pack the coefficient representation of this polynomial into a dictionary like ```{0: 17, 1: 9, 2: 3}```. So, to create a Polynomial object representing this polynomial, we use the following. 29 | 30 | ```f = Polynomial(pars=lp, coefs={0: 17, 1: 9, 2: 3})``` 31 | 32 | #### Arithmetic 33 | 34 | Polynomials support ```__add__```, ```__radd__```, ```__sub__```, ```__mul__```, and ```__rmul__```. Thus, for two polynomials, say ```f``` and ```g```, we simply use ```f + g```, ```f - g```, and ```f*g``` for addition, subtraction, and multiplication. Arithmetic for these operations take place coordinate-wise with the ```ntt_representation``` list, so they are very fast. 35 | 36 | #### Polynomial Norm, Weight, and String Representation 37 | 38 | Polynomials have a ```cooefficient_representation_and_norm_and_weight``` method, which inverts the ```ntt_representation``` list to obtain the coefficient representation of the polynomial, and returns this coefficient representation together with the infinity norm and the Hamming weight of the polynomial. 39 | 40 | The package uses ```__repr___``` to cast the output of ```get_coef_rep``` as a string. 41 | 42 | _WARNING_: Computing the ```ntt_representation``` requires computing the NTT of the polynomial, and calling ```get_coef_rep``` requires computing the inverse NTT of the polynomial. These are relatively expensive operations compared to arithmetic. Hence, _creating polynomials_, _printing them to strings_, and _computing the norm and weight_ of polynomials should be done once, after all other computations are complete. 43 | 44 | ### PolynomialVector 45 | 46 | #### PolynomialVector Attributes and Instantiation 47 | 48 | The Polynomial and PolynomialVector objects have a LatticeParameters attribute, ```par```, and the package handles computations with Polynomial and PolynomialVector objects with matching LatticeParameters. 49 | 50 | Other than the LatticeParameters object attached to each PolynomialVector has an ```entries``` attribute, which is just a list of Polynomial objects. To instantiate a PolynomialVector, we pass in a list of Polynomial objects as the entries. 51 | 52 | For example, if ```f``` is the Polynomial from the previous section and ```g(X) = -17 + 12 * X ** 2```, we can create ```g``` and create a PolynomialVector object in the following way. 53 | 54 | ``` 55 | g = Polynomial(pars=lp, coefs={0: -17, 2: 12}) 56 | v = PolynomialVector(pars=lp, entries=[f, g]) 57 | ``` 58 | 59 | Each Polynomial in ```entries``` must have the same LatticeParameters object as ```v``` and we must have ```len(entries) == lp.length```. 
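Before moving on to arithmetic, here is a small, library-independent sketch of the coefficient centering and the norm/weight bookkeeping described above (plain Python for illustration only; the package's own implementation uses constant-time arithmetic):

```
modulus = 61

def centered(value: int) -> int:
    # Map an integer to its representative in [-(modulus // 2), ..., modulus // 2].
    reduced = value % modulus
    return reduced - modulus if reduced > modulus // 2 else reduced

# Coefficient dictionary for 3 * X**2 + 9 * X + 17, as in the Polynomial example above.
coefs = {0: 17, 1: 9, 2: 3}
centered_coefs = {power: centered(value) for power, value in coefs.items()}

infinity_norm = max(abs(value) for value in centered_coefs.values())        # 17
hamming_weight = sum(1 for value in centered_coefs.values() if value != 0)  # 3
```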
60 | 61 | #### PolynomialVector Addition, Subtraction, Scaling, and Dot Products 62 | 63 | PolynomialVector objects support ```__add__```, ```__radd__```, and ```__sub__``` to define addition and subtraction between PolynomialVector objects. This way, for two PolynomialVector objects, say ```v``` and ```w```, we can just use ```v + w``` and ```v - w``` to compute the sum and difference, respectively. 64 | 65 | The package uses ```__mul__``` and ```__rmul__``` to define the **dot product** between two PolynomialVector objects. The dot product outputs a Polynomial object. For example, if ```v.entries == [f, g]``` and ```w.entries == [a, b]```, then ```v * w``` returns ```f * a + g * b```. 66 | 67 | The package repurposes ```__pow__``` to scale a PolynomialVector by a Polynomial. For example, if ```v.entries = [f, g]``` and ```a``` is some Polynomial object, then ```v ** a = [a * f, a * g]```. This is **not** exponentiation, although we use the notation for exponentiation. 68 | 69 | Hence, to compute a linear combination of PolynomialVectors whose coefficients are Polynomials, we compute the sum of "exponents" with something like this: ```sum(f ** a for f, a in zip(some_polynomial_vectors, some_polynomials))```. As before, arithmetic operations are done using the ```ntt_representation``` of the involved polynomials, and are thus quite fast. 70 | 71 | #### PolynomialVector Norm, Weight, and String Representation. 72 | 73 | The string representation of a PolynomialVector, defined in ```__repr__``` is merely ```str(entries)```. 74 | 75 | _WARNING_: Like for ```Polynomial```, instantiation requires computing the NTT of polynomials. So our previous warning about the cost of computing the NTT and the inverse NTT applies here, but with the added curse of dimensionality. 76 | 77 | 78 | -------------------------------------------------------------------------------- /lattice_algebra/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "v0.1.1" 2 | 3 | from lattice_algebra.main import * 4 | -------------------------------------------------------------------------------- /lattice_algebra/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | The lattice_algebra module is an (unaudited) prototype containing basic algebraic infrastructure for employing lattice- 3 | based crypto schemes in the Ring (aka Module or Ideal) Short Integer Solution and Ring (Module, Ideal) Learning With 4 | Errors settings where secret/error vectors are a uniformly distributed from the subset of vectors with bounded infinity 5 | norm and constant weight. 6 | 7 | Todo 8 | 1. Modify decode2coefs, decode2indices, decode2polycoefs to be more pythonic if possible 9 | 2. Add const_time attribute to Polynomials, add support for non-const-time arithmetic 10 | 11 | Documentation 12 | ------------- 13 | 14 | Documentation is hosted at [https://geometry-labs.github.io/lattice-algebra/](https://geometry-labs.github.io/lattice-algebra/) 15 | 16 | Hosting 17 | ------- 18 | 19 | The repository for this project can be found at [https://www.github.com/geometry-labs/lattice-algebra](https://www.github.com/geometry-labs/lattice-algebra). 20 | 21 | License 22 | ------- 23 | 24 | Released under the MIT License, see LICENSE file for details. 25 | 26 | Copyright 27 | --------- 28 | 29 | Copyright (c) 2022 Geometry Labs, Inc. 30 | Funded by The QRL Foundation. 
31 | 32 | Contributors 33 | ------------ 34 | Brandon Goodell (lead author), Mitchell Krawiec-Thayer, Rob Cannon. 35 | 36 | """ 37 | from math import ceil, sqrt, log2 38 | from copy import deepcopy 39 | from secrets import randbits 40 | from hashlib import shake_256 as shake 41 | from typing import List, Dict, Tuple, Union 42 | 43 | 44 | def bits_to_indices(secpar: int, degree: int, wt: int) -> int: 45 | return ceil(log2(degree)) + (wt - 1) * (ceil(log2(degree)) + secpar) 46 | 47 | 48 | def bits_to_decode(secpar: int, bd: int) -> int: 49 | return ceil(log2(bd)) + 1 + secpar 50 | 51 | 52 | def is_prime(val: int) -> bool: 53 | """ 54 | Test whether input integer is prime with the Sieve of Eratosthenes. 55 | 56 | :param val: Input value 57 | :type val: int 58 | 59 | :return: Indicate whether q is prime. 60 | :rtype: bool 61 | """ 62 | return all([val % i != 0 for i in range(2, ceil(sqrt(val)) + 1)]) 63 | 64 | 65 | def is_pow_two(val: int) -> bool: 66 | """ 67 | Test if input integer is power of two by summing the bits in its binary expansion and testing for 1. 68 | 69 | :param val: Input value 70 | :type val: int 71 | 72 | :return: Indicate whether x is a power-of-two. 73 | :rtype: bool 74 | """ 75 | return val > 0 and not (val & (val - 1)) 76 | 77 | 78 | def has_prim_rou(modulus: int, degree: int) -> bool: 79 | """ 80 | Test whether Z/qZ has a primitive 2d-th root of unity. 81 | """ 82 | return modulus % (2 * degree) == 1 83 | 84 | 85 | def is_ntt_friendly_prime(modulus: int, degree: int) -> bool: 86 | """ 87 | Test whether input integer pair can be used to construct an NTT-friendly ring. 88 | 89 | :param modulus: Input modulus 90 | :type modulus: int 91 | :param degree: Input degree 92 | :type degree: int 93 | 94 | :return: Indicate whether q is prime and q-1 == 0 mod 2d. 95 | :rtype: bool 96 | """ 97 | return is_prime(modulus) and is_pow_two(degree) and has_prim_rou(modulus=modulus, degree=degree) 98 | 99 | 100 | def is_prim_rou(modulus: int, degree: int, val: int) -> bool: 101 | """ 102 | Test whether input x is a primitive 2d^th root of unity in the integers modulo q. Does not require q and d to be 103 | NTT-friendly pair. 104 | 105 | :param modulus: Input modulus 106 | :type modulus: int 107 | :param degree: Input degree 108 | :type degree: int 109 | :param val: Purported root of unity 110 | :type val: int 111 | 112 | :return: Boolean indicating x**(2d) == 1 and x**i != 1 for 1 <= i < 2d. 113 | :rtype: bool 114 | """ 115 | return all(val ** k % modulus != 1 for k in range(1, 2 * degree)) and val ** (2 * degree) % modulus == 1 116 | 117 | 118 | def get_prim_rou_and_rou_inv(modulus: int, degree: int) -> Union[None, Tuple[int, int]]: 119 | """ 120 | Compute a primitive 2d-th root of unity modulo q and its inverse. Raises a ValueError if (d, q) are not NTT- 121 | friendly pair. Works by finding the first (in natural number order) primitive root of unity and its inverse. 122 | 123 | :param modulus: Input modulus 124 | :type modulus: int 125 | :param degree: Input degree 126 | :type degree: int 127 | 128 | :return: Root of unity and its inverse in a tuple. 129 | :rtype: Tuple[int, int] 130 | """ 131 | if not (is_ntt_friendly_prime(modulus, degree)): 132 | raise ValueError('Input q and d are not ntt-friendly prime and degree.') 133 | # If we do not raise a ValueError, then there exists a primitive root of unity 2 <= x < q. 
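    # The search below is brute force over candidates x = 2, 3, ...; the inverse returned alongside x is
    # x ** (2 * degree - 1) % modulus, which is the multiplicative inverse of x since x ** (2 * degree) == 1 mod modulus.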
134 | x: int = 2 135 | while x < modulus: 136 | if is_prim_rou(modulus, degree, x): 137 | break 138 | x += 1 139 | return x, ((x ** (2 * degree - 1)) % modulus) 140 | 141 | 142 | def is_bitstring(val: str) -> bool: 143 | """ 144 | Ensure that an ostensible bitstring does not contain any characters besides 0 and 1 (without type conversion) 145 | 146 | :param val: Input to be validated as a bitstring 147 | :type val: str 148 | 149 | :return: True if the input is a bitstring (i.e. contains only 0's and 1's) and False otherwise. 150 | :rtype: bool 151 | """ 152 | if isinstance(val, str): 153 | return ''.join(sorted(list(set(val)))) in '01' 154 | return False 155 | 156 | 157 | touched_bit_rev: Dict[Tuple[int, int], int] = dict() 158 | 159 | 160 | def bit_rev(num_bits: int, val: int) -> int: 161 | """ 162 | Reverse the bits in the binary expansion of x and interpret that as an integer. 163 | 164 | :param num_bits: Input length in bits 165 | :type num_bits: int 166 | :param val: Input value 167 | :type val: int 168 | 169 | :return: Output the bit-reversed value of x 170 | :rtype: int 171 | """ 172 | if (num_bits, val) not in touched_bit_rev: 173 | for x in range(2 ** num_bits): 174 | if (num_bits, x) not in touched_bit_rev: 175 | x_in_bin: str = bin(x)[2:].zfill(num_bits) 176 | touched_bit_rev[(num_bits, x)] = int(x_in_bin[::-1], 2) 177 | return touched_bit_rev[(num_bits, val)] 178 | 179 | 180 | def bit_rev_cp(val: List[int], n: int) -> List[int]: 181 | """ 182 | Permute the indices of the input list x to bit-reversed order. Note: does not compute the bit-reverse of the values 183 | in the input list, just shuffles the input list around. 184 | 185 | :param val: Input values 186 | :type val: List[int] 187 | :param n: Length of bit string before copying (the code pre-pends with zeros to get to this length). 188 | :type n: int 189 | 190 | :return: Output permuted list 191 | :rtype: List[int] 192 | """ 193 | if not is_pow_two(len(val)): 194 | raise ValueError("Can only bit-reverse-copy arrays with power-of-two lengths.") 195 | return [val[bit_rev(n, i)] for i in range(len(val))] 196 | 197 | 198 | def cent(q: int, halfmod: int, logmod: int, val: int) -> int: 199 | """ 200 | Constant-time remainder. 201 | 202 | :param q: Input modulus 203 | :type q: int 204 | :param val: Input value 205 | :type val: int 206 | :param halfmod: q//2 207 | :type halfmod: int 208 | :param logmod: ceil(log2(q)) 209 | :type logmod: int 210 | 211 | :return: Value in the set [-(q//2), ..., q//2] equivalent to x mod q. 212 | :rtype: int 213 | """ 214 | y: int = val % q 215 | intermediate_value: int = y - halfmod - 1 216 | return y - (1 + (intermediate_value >> logmod)) * q 217 | 218 | 219 | def make_zetas_and_invs(q: int, d: int, n: int, lgn: int) -> Tuple[List[int], List[int]]: 220 | """ 221 | Compute powers of primitive root of unity and its inverse for use in the NTT function. Finds the root of unity, say 222 | zeta, and its inverse, say zeta_inv, then outputs [zeta ** ((2d) // (2**(s+1))) for s in range(log2(2d))] and 223 | [zeta_inv ** ((2d) // (2**(s+1))) for s in range(log2(2d))], modulo q. 
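    For example, with small illustrative parameters q = 257 and d = 4 (so n = 8 and lgn = 3), the powers are
    [4, 2, 1], the first primitive 8-th root of unity is zeta = 4 with zeta_inv = 193, and the two output lists are
    [-1, 16, 4] and [-1, -16, -64] (as centered representatives modulo 257).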
224 | 225 | :param q: Input modulus 226 | :type q: int 227 | :param d: Input degree 228 | :type d: int 229 | :param n: 2*d 230 | :type n: int 231 | :param lgn: ceil(log2(n)) 232 | :type lgn: int 233 | 234 | :return: Return the powers of zeta and zeta_inv for use in NTT, two lists of integers, in a tuple 235 | :rtype: Tuple[List[int], List[int]] 236 | """ 237 | powers: List[int] = [n // (2 ** (s + 1)) for s in range(lgn)] 238 | zeta, zeta_inv = get_prim_rou_and_rou_inv(q, d) 239 | left: List[int] = [int(zeta ** i) % q for i in powers] 240 | left = [i if i <= q // 2 else i - q for i in left] 241 | right: List[int] = [int(zeta_inv ** i) % q for i in powers] 242 | right = [i if i <= q // 2 else i - q for i in right] 243 | return left, right 244 | 245 | 246 | def ntt(q: int, zetas: List[int], zetas_inv: List[int], inv_flag: bool, halfmod: int, logmod: int, n: int, lgn: int, 247 | val: List[int], const_time_flag: bool = True) -> List[int]: 248 | """ 249 | Compute the NTT of the input list of integers. Implements the algorithm from Cormen, T. H., Leiserson, C. E., 250 | Rivest, R. L., & Stein, C, (2009), "Introduction to algorithms," but replacing exp(-i*2*pi/n) with zeta. 251 | 252 | :param q: Input modulus 253 | :type q: int 254 | :param zetas: Input powers of the root of unity 255 | :type zetas: List[int] 256 | :param zetas_inv: Input powers of the root of unity 257 | :type zetas_inv: List[int] 258 | :param val: Input values 259 | :type val: List[int] 260 | :param inv_flag: Indicates whether we are performing forward NTT or inverse NTT 261 | :type inv_flag: bool 262 | :param halfmod: q//2 263 | :type halfmod: int 264 | :param logmod: ceil(log2(q)) 265 | :type logmod: int 266 | :param n: 2*d 267 | :type n: int 268 | :param lgn: ceil(log2(n)) 269 | :type lgn: int 270 | :param const_time_flag: Indicates whether arithmetic should be constant-time. 271 | :type const_time_flag: bool 272 | 273 | :return: Return the NTT (or inverse) of the inputs x. 
274 | :rtype: List[int] 275 | """ 276 | if sum(int(i) for i in bin(len(val))[2:]) != 1: 277 | raise ValueError("Can only NTT arrays with lengths that are powers of two.") 278 | bit_rev_x: List[int] = bit_rev_cp(val=val, n=ceil(log2(len(val)))) 279 | m: int = 1 280 | for s in range(1, lgn + 1): 281 | m *= 2 282 | if inv_flag: 283 | this_zeta: int = zetas_inv[s - 1] 284 | else: 285 | this_zeta: int = zetas[s - 1] 286 | for k in range(0, n, m): 287 | w: int = 1 288 | for j in range(m // 2): 289 | t: int = w * bit_rev_x[k + j + m // 2] 290 | u: int = bit_rev_x[k + j] 291 | if const_time_flag: 292 | bit_rev_x[k + j]: int = cent(q=q, halfmod=halfmod, logmod=logmod, val=u + t) 293 | bit_rev_x[k + j + m // 2]: int = cent(q=q, halfmod=halfmod, logmod=logmod, val=u - t) 294 | else: 295 | bit_rev_x[k + j]: int = (u + t) % q 296 | if bit_rev_x[k + j] > q // 2: 297 | bit_rev_x[k + j] = bit_rev_x[k + j] - q 298 | bit_rev_x[k + j + m // 2]: int = (u - t) % q 299 | if bit_rev_x[k + j + m // 2] > q // 2: 300 | bit_rev_x[k + j + m // 2] = bit_rev_x[k + j + m // 2] - q 301 | w *= this_zeta 302 | if inv_flag: 303 | n_inv: int = 1 304 | while (n_inv * n) % q != 1: 305 | n_inv += 1 306 | if const_time_flag: 307 | bit_rev_x: List[int] = [cent(q=q, halfmod=halfmod, logmod=logmod, val=(n_inv * i)) for i in bit_rev_x] 308 | else: 309 | bit_rev_x: List[int] = [(n_inv * i) % q for i in bit_rev_x] 310 | bit_rev_x = [i if i <= q // 2 else i - q for i in bit_rev_x] 311 | return bit_rev_x 312 | 313 | 314 | def binary_digest(msg: str, num_bytes: int, salt: str) -> str: 315 | """ 316 | Compute input num_bytes bytes from SHAKE256 using the input salt and message. 317 | 318 | :param msg: Input message 319 | :type msg: str 320 | :param num_bytes: Input number of bits 321 | :type num_bytes: int 322 | :param salt: Input salt 323 | :type salt: str 324 | 325 | :return: Return the digest of num_bytes bits in binary. 326 | :rtype: str 327 | """ 328 | m = shake() 329 | m.update(salt.encode() + msg.encode()) 330 | return bin(int(m.hexdigest(num_bytes), 16))[2:].zfill(8 * num_bytes) 331 | 332 | 333 | class LatticeParameters(object): 334 | """ 335 | Class for handling lattice parameters. 336 | 337 | Attributes 338 | ---------- 339 | degree: int 340 | Degree bound for all polynomials. 341 | length: int 342 | Length of vectors of polynomials. 
343 | modulus: int 344 | Modulus for all coefficients 345 | halfmod: int 346 | Modulus // 2 347 | logmod: int 348 | log2(modulus) 349 | n: int 350 | 2 * degree 351 | rou: int 352 | Primitive 2*degree-th root of unity modulo modulus 353 | rou_inv: int 354 | Inverse of rou modulo modulus 355 | zetas: List[int] 356 | Powers of rou for use in computing NTT/inverse 357 | zetas_invs: List[int] 358 | Powers of rou for use in computing NTT/inverse 359 | 360 | Methods 361 | ------- 362 | __init__(self) 363 | Initialize 364 | __eq__(self, other) 365 | Check for equality 366 | __repr__(self) 367 | String representation of attributes 368 | """ 369 | degree: int 370 | length: int 371 | modulus: int 372 | halfmod: int 373 | logmod: int 374 | n: int 375 | lgn: int 376 | rou: int 377 | rou_inv: int 378 | zetas: List[int] 379 | zetas_invs: List[int] 380 | 381 | def __init__(self, degree: int, length: int, modulus: int): 382 | """ 383 | Create a new LatticeParameters object with input pars, compute the rou, rou_inv, zetas, and zetas_inv 384 | 385 | :param degree: Polynomial degree 386 | :type degree: int 387 | :param length: Vector length 388 | :type length: int 389 | :param modulus: Prime modulus 390 | :type length: int 391 | """ 392 | if degree < 2 or not is_pow_two(val=degree): 393 | raise ValueError('LatticeParameters requires power-of-two integer degree.') 394 | elif length < 1: 395 | raise ValueError('LatticeParameters requires positive integer length.') 396 | elif modulus < 3 or not is_ntt_friendly_prime(modulus=modulus, degree=degree): 397 | raise ValueError('LatticeParameters requires NTT-friendly prime modulus-degree pair.') 398 | 399 | self.degree = degree 400 | self.length = length 401 | self.modulus = modulus 402 | self.halfmod = self.modulus // 2 403 | self.logmod = ceil(log2(self.modulus)) 404 | self.n: int = 2 * self.degree 405 | self.lgn: int = ceil(log2(self.n)) 406 | self.rou, self.rou_inv = get_prim_rou_and_rou_inv(modulus=self.modulus, degree=self.degree) 407 | self.zetas, self.zetas_invs = make_zetas_and_invs(q=self.modulus, d=self.degree, n=self.n, lgn=self.lgn) 408 | 409 | def __eq__(self, other) -> bool: 410 | """ 411 | Compare two LatticeParameters objects for equality. We only check degree, length, and modulus. This is due to 412 | the fact that all the other parameters are derived from these three. 413 | 414 | :param other: Another LatticeParameters object 415 | :type other: LatticeParameters 416 | 417 | :return: Equality boolean. 418 | :rtype: bool 419 | """ 420 | return self.degree == other.degree and self.length == other.length and self.modulus == other.modulus 421 | 422 | def __repr__(self) -> str: 423 | """ 424 | Output a canonical representation of the LatticeParameters object as a string. 425 | 426 | :return: Tuple containing degree, length, and modulus, cast as a string. 
427 | :rtype: str 428 | """ 429 | return str((self.degree, self.length, self.modulus)) 430 | 431 | 432 | UNIFORM_INFINITY_WEIGHT: str = 'inf,wt,unif' 433 | 434 | 435 | def decode2coef_inf_unif(secpar: int, lp: LatticeParameters, val: str, btd: int, 436 | dist_pars: Dict[str, int]) -> int: 437 | if btd < 1: 438 | raise ValueError('Cannot decode2coef_inf_unif without a positive integer number of bits required to decode.') 439 | elif 'bd' not in dist_pars or not isinstance(dist_pars['bd'], int) or not (1 <= dist_pars['bd'] <= lp.modulus // 2): 440 | raise ValueError('Cannot decode2coef_inf_unif without an integer bound 0 <= bd <= modulus//2.') 441 | elif btd < ceil(log2(dist_pars['bd'])) + 1 + secpar: 442 | b = dist_pars['bd'] 443 | raise ValueError( 444 | f'Cannot decode2coef_inf_unif with secpar = {secpar}, bd = {b} without requiring ' + 445 | f'at least {ceil(log2(b)) + 1 + secpar} bits.' 446 | ) 447 | elif not is_bitstring(val): 448 | raise ValueError('Cannot decode2coef_inf_unif without bitstring val.') 449 | elif len(val) < btd: 450 | raise ValueError(f'Cannot decode2coef_inf_unif without at least {btd} bits.') 451 | signum_bit: str = val[0] 452 | magnitude_minus_one_bits: str = val[1:] 453 | sign: int = 2 * int(signum_bit) - 1 454 | big_bd_flag = int(dist_pars['bd'] > 1) 455 | magnitude_minus_one: int = int(magnitude_minus_one_bits, 2) 456 | mag_minus_one_mod_bd: int = magnitude_minus_one % dist_pars['bd'] 457 | magnitude: int = 1 + big_bd_flag * mag_minus_one_mod_bd 458 | return sign * magnitude 459 | 460 | 461 | def decode2coef(secpar: int, lp: LatticeParameters, val: str, distribution: str, dist_pars: Dict[str, int], 462 | btd: int) -> int: 463 | """ 464 | Decode an input string x to a coefficient in [-bd, -bd+1, ...,-2, -1, 1, 2, ..., bd-1, bd] with bias O(2**-secpar), 465 | if possible, and raise a ValueError if not possible. If bd = 1, this set is [-1, 1] and we only need 1 bit to 466 | sample from exactly the uniform distribution. On the other hand, if bd > 1, then we use the first bit of x as a sign 467 | bit, and we use the rest as the binary expansion of an integer. We mod this integer out by bd, add 1 to the result 468 | to get an integer in the set [1, 2, ..., bd], and then we multiply by +1 if the sign bit is 1 and -1 if the sign bit 469 | is 0 to get an integer in the set [-bd, -bd+1, ..., -2, -1, 1, 2, ..., bd - 1, bd]. 470 | 471 | We require len(x) = ceil(log2(bd)) + 1 + secpar. This way, we have the ceil(log2(bd)) + secpar bits to determine the 472 | binary expansion of the integer, and an additional sign bit. 473 | 474 | The information-theoretic minimum of the number of bits required to uniquely determine an integer modulo bd is 475 | exactly ceil(log2(bd)). However, if x is a uniformly sampled ceil(log2(bd)) bit integer, then unless bd is a power 476 | of two, x % bd is not a uniformly distributed element of the integers modulo bd. If x is a uniformly 477 | sampled ceil(log2(bd))+k bit integer for some integer k, then the bias of x % bd away from the uniform distribution 478 | is O(2**-k). So to keep the bias negligible, we use secpar additional bits. 
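As a purely illustrative, hypothetical example (these parameter values are assumptions for this example, not defaults of the library): with bd = 2 and secpar = 8 we need ceil(log2(2)) + 1 + 8 = 10 bits, and decode2coef(secpar=8, lp=lp, val='1000000011', distribution=UNIFORM_INFINITY_WEIGHT, dist_pars={'bd': 2, 'wt': 2}, btd=10) should return +2 for any valid LatticeParameters object lp: the leading '1' selects the sign +1, the remaining nine bits encode the integer 3, 3 % 2 = 1, and 1 + 1 = 2. The same bitstring with a leading '0' would decode to -2.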
479 | 480 | :param secpar: Input security parameter 481 | :type secpar: int 482 | :param lp: Lattice parameters 483 | :type lp: LatticeParameters 484 | :param val: Input bitstring 485 | :type val: str 486 | :param distribution: String code indicating which distribution to use 487 | :type distribution: str 488 | :param dist_pars: Distribution parameters 489 | :type dist_pars: Dict[str, int] 490 | :param btd: Bits necessary to decode to an unbiased sample an integer modulo the modulus. 491 | :type btd: int 492 | 493 | :return: Return an integer uniformly selected from [-bd, 1-bd, ..., bd-1, bd] (or raise a ValueError). 494 | :rtype: int 495 | """ 496 | if not is_bitstring(val): 497 | raise ValueError('Cannot decode2coef without a non-empty bitstring val.') 498 | elif not isinstance(distribution, str): 499 | raise ValueError('Cannot decode2coef without a string code indicating the distribution.') 500 | elif distribution == UNIFORM_INFINITY_WEIGHT: 501 | return decode2coef_inf_unif(secpar=secpar, lp=lp, val=val, dist_pars=dist_pars, btd=btd) 502 | raise ValueError('Tried to decode2coef with a distribution that has not yet been implemented.') 503 | 504 | 505 | def decode2coefs( 506 | secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], 507 | val: str, num_coefs: int, btd: int 508 | ) -> List[int]: 509 | """ 510 | Decode an input string x to a list of integer coefficients. In general, breaks the input string into blocks of 511 | 1 + ceil(log2(bd)) + secpar bits each, and then merely calls decode2coef on each block. We do handle a weird edge 512 | case, when the bound is 1 (see decode2coef for more info on that). If bd == 1, we need wt bits, and otherwise we 513 | need wt * (ceil(log2(bd)) + 1 + secpar) bits. 514 | 515 | :param secpar: Security parameter 516 | :type secpar: int 517 | :param lp: Lattice parameters 518 | :type lp: LatticeParameters 519 | :param distribution: String code describing the distribution 520 | :type distribution: str 521 | :param dist_pars: Dictionary containing distribution parameters 522 | :type dist_pars: Dict[str, int] 523 | :param val: Input value/bitstring to decode. 524 | :type val: str 525 | :param num_coefs: Number of coefficients to sample. 526 | :type num_coefs: int 527 | :param btd: Number of bits required to decode a string to an unbiased sample of an integer modulo the modulus in lp. 528 | :type btd: int 529 | 530 | :return: Return a list of integers uniformly selected from [-bd, 1-bd, ..., bd-1, bd] (or raise a ValueError). 
531 | :rtype: List[int] 532 | """ 533 | if not isinstance(distribution, str): 534 | raise ValueError('Cannot decode2coefs without a string code describing the distribution.') 535 | elif num_coefs < 1: 536 | raise ValueError('Cannot decode2coefs without a number of coefficients to which we want to decode.') 537 | elif btd < 1: 538 | raise ValueError('Cannot decode2coefs without bits_to_decode, used to decode a single coefficient.') 539 | elif len(val) < num_coefs * btd: 540 | raise ValueError(f'Cannot decode2coefs without val with length at least {num_coefs * btd} but had {len(val)}') 541 | result = [] 542 | for i in range(num_coefs): 543 | result += [decode2coef( 544 | secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, 545 | btd=btd, val=val[i * btd: (i + 1) * btd] 546 | )] 547 | return result 548 | 549 | 550 | def decode2indices(secpar: int, lp: LatticeParameters, num_coefs: int, val: str, bti: int) -> List[int]: 551 | """ 552 | Decode input string x to a list of distinct, uniformly and independently sampled integer indices in [0, 1, ..., d-1] 553 | with constant weight equal to the input weight, wt, and with bias O(2**-secpar), if possible, and raise a ValueError 554 | if not possible. Requires ceil(log2(d)) + (wt - 1) * (ceil(log2(d)) + secpar) bits as input. 555 | 556 | First, all possible indices are stored into a possible_indices list, which is just list(range(d)). 557 | 558 | Next, the first ceil(log2(d)) bits of the input x are used directly to describe an integer modulo d with no bias, 559 | which we can call i for the purpose of this docstring. We pop possible_indices[i] out of the list (decreasing the 560 | length of the possible_indices list by 1) and store it in our result. 561 | 562 | Next, the remaining bits are split up into blocks of ceil(log2(d)) + secpar bits. Each block is cast as an integer 563 | with ceil(log2(d)) + secpar bits, and then modded out by len(possible_indices), resulting in another index i which 564 | has a distribution that is within O(2**-secpar) of uniform. We pop possible_indices[i] out again, and then move onto 565 | the next block until no blocks remain. 566 | 567 | :param secpar: Input security parameter 568 | :type secpar: int 569 | :param lp: Lattice parameters 570 | :type lp: LatticeParameters 571 | :param num_coefs: Number of coefficients to generate 572 | :type num_coefs: int 573 | :param val: Input bitstring 574 | :type val: str 575 | :param bti: Number of bits required to sample num_coefs worth of indices unbiasedly without replacement. 576 | :type bti: int 577 | 578 | :return: Return a list of length num_coefs, where each entry is a distinct integer in [0, 1, ..., d-1]. 
579 | :rtype: List[int] 580 | """ 581 | if secpar < 1: 582 | raise ValueError('Cannot decode2indices without a positive integer security parameter.') 583 | elif not is_bitstring(val): 584 | raise ValueError('Cannot decode2indices without a bitstring val.') 585 | elif num_coefs < 1: 586 | raise ValueError('Cannot decode2indices with a sample size that is not a positive integer.') 587 | elif bti < ceil(log2(lp.degree)) + (num_coefs - 1) * (ceil(log2(lp.degree)) + secpar): 588 | a = ceil(log2(lp.degree)) 589 | b = ceil(log2(lp.degree)) + secpar 590 | c = num_coefs - 1 591 | k = a + c * b 592 | raise ValueError( 593 | f'Cannot decode2indices without requiring at least {k} bits, but had {bti}.') 594 | elif len(val) < bti: 595 | raise ValueError( 596 | f'Cannot decode2indices without an input bitstring val with at least {bti} bits, ' + 597 | 'but the input is only of length {len(val)}.' 598 | ) 599 | possible_indices: List[int] = list(range(lp.degree)) 600 | k: int = ceil(log2(len(possible_indices))) 601 | first_coef_bits: str = val[:k] 602 | first_coef: int = int(first_coef_bits, 2) 603 | result: list = list([possible_indices.pop(first_coef)]) 604 | z: str = val[k:] 605 | k = ceil(log2(len(possible_indices))) 606 | j: int = k + secpar 607 | z: List[str] = [z[i * j: (i + 1) * j] for i in range(num_coefs - 1)] 608 | for next_z in z: 609 | next_z_as_int = int(next_z, 2) 610 | modded = next_z_as_int % len(possible_indices) 611 | result += [possible_indices.pop(modded)] 612 | return result 613 | 614 | 615 | def decode2polycoefs(secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], val: str, 616 | num_coefs: int, bti: int, btd: int) -> Dict[int, int]: 617 | """ 618 | Decode an input string x to a dictionary with integer keys and values, suitable for use in creating a Polynomial 619 | object with norm bound bd and weight wt. We use the first ceil(log2(d)) + (wt-1) + (ceil(log2(d)) + secpar) bits to 620 | uniquely determine the index set, which we use decode2indices to compute. We use the rest of the bit string to 621 | determine the coefficients, which we use decode2coefs to compute. We always require at least 622 | ceil(log2(d)) + (wt - 1) * (ceil(log2(d)) + secpar) + wt bits, but when bd > 1, we also require an additional 623 | wt * (ceil(log2(bd)) + secpar) bits. 624 | 625 | :param secpar: Input security parameter 626 | :type secpar: int 627 | :param lp: Lattice parameters 628 | :type lp: LatticeParameters 629 | :param distribution: String code describing which distribution to use 630 | :type distribution: str 631 | :param dist_pars: Distribution parameters 632 | :type dist_pars: dict 633 | :param val: Input bitstring 634 | :type val: str 635 | :param num_coefs: Number of coefficients to generate 636 | :type num_coefs: int 637 | :param bti: Number of bits required to unbiasedly sample indices without replacement. 
638 | :type bti: int 639 | :param btd: Number of bits required to unbiasedly sample an integer modulo the modulus in lp 640 | :type btd: int 641 | 642 | :return: Return a dict with integer keys and values, with wt distinct keys and all values in [-bd, ..., bd] 643 | :rtype: Dict[int, int] 644 | """ 645 | if secpar < 1: 646 | raise ValueError('Cannot decode2polycoefs without an integer security parameter.') 647 | elif num_coefs < 1: 648 | raise ValueError('Cannot decode2polycoefs without an integer number of coefficients.') 649 | elif not is_bitstring(val): 650 | raise ValueError('Cannot decode2polycoefs without a bitstring val.') 651 | elif len(val) < 8 * get_gen_bytes_per_poly( 652 | secpar=secpar, 653 | lp=lp, 654 | distribution=distribution, 655 | dist_pars=dist_pars, 656 | num_coefs=dist_pars['wt'], 657 | btd=btd, 658 | bti=bti): 659 | raise ValueError('Cannot decode2polycoefs without a sufficiently long bitstring val.') 660 | elif bti != bits_to_indices(secpar=secpar, degree=lp.degree, wt=dist_pars['wt']): 661 | x = bits_to_indices(secpar=secpar, degree=lp.degree, wt=dist_pars['wt']) 662 | raise ValueError(f'Cannot decode2polycoefs without bits_to_ind == bits_to_indices, but had {bti} != {x}.') 663 | elif btd != bits_to_decode(secpar=secpar, bd=dist_pars['bd']): 664 | x = bits_to_decode(secpar=secpar, bd=dist_pars['bd']) 665 | raise ValueError(f'Cannot decode2polycoefs without bits_to_coef == bits_to_decode, but had {btd} != {x}.') 666 | x_for_indices: str = val[:bti] 667 | x_for_coefficients: str = val[bti:] 668 | indices: List[int] = decode2indices( 669 | secpar=secpar, lp=lp, bti=bti, num_coefs=num_coefs, val=x_for_indices 670 | ) 671 | coefs: List[int] = decode2coefs( 672 | secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, 673 | btd=btd, val=x_for_coefficients, num_coefs=num_coefs 674 | ) 675 | return {index: coefficient for index, coefficient in zip(indices, coefs)} 676 | 677 | 678 | def get_gen_bytes_per_poly_inf_wt_unif( 679 | secpar: int, lp: LatticeParameters, dist_pars: Dict[str, int], num_coefs: int 680 | ) -> int: 681 | if secpar < 1: 682 | raise ValueError('Cannot get_gen_bytes_per_poly_inf_wt_unif without an integer security parameter.') 683 | elif 'bd' not in dist_pars: 684 | raise ValueError('Cannot get_gen_bytes_per_poly_inf_wt_unif without a bound in dist_pars.') 685 | elif not isinstance(dist_pars['bd'], int): 686 | raise ValueError('Cannot get_gen_bytes_per_poly_inf_wt_unif without an integer bound in dist_pars') 687 | elif dist_pars['bd'] < 1 or dist_pars['bd'] > lp.modulus // 2: 688 | raise ValueError( 689 | 'Cannot get_gen_bytes_per_poly_inf_wt_unif without an integer bound on [1, 2, ..., lp.modulus // 2].') 690 | elif 'wt' not in dist_pars: 691 | raise ValueError('Cannot get_gen_bytes_per_poly_inf_wt_unif without a weight in dist_pars.') 692 | elif not isinstance(dist_pars['wt'], int): 693 | raise ValueError('Cannot get_gen_bytes_per_poly_inf_wt_unif without an integer weight in dist_pars.') 694 | elif dist_pars['wt'] < 1 or dist_pars['wt'] > lp.degree: 695 | raise ValueError( 696 | 'Cannot get_gen_bytes_per_poly_inf_wt_unif without an integer weight on [1, 2, .., lp.degree - 1].') 697 | elif dist_pars['wt'] != num_coefs: 698 | raise ValueError('Cannot get_gen_bytes_per_poly_inf_wt_unif with num_coefs != weight.') 699 | btd: int = bits_to_decode(secpar=secpar, bd=dist_pars['bd']) 700 | bti: int = bits_to_indices(secpar=secpar, degree=lp.degree, wt=num_coefs) 701 | return ceil((num_coefs * btd + bti)/8) 702 | 703 | 704 | def 
get_gen_bytes_per_poly(secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], 705 | num_coefs: int, bti: int, btd: int) -> int: 706 | """ 707 | Compute the number of bytes required to decode a random bitstring to a polynomial of a certain weight and bound given some lattice 708 | parameters with a negligible bias away from uniformity. Note that this is not the same as the number of bytes 709 | required to store a polynomial from the lattice! We require many more bytes to sample, in order to ensure the 710 | distribution is negligibly different from uniform. 711 | 712 | Assumes a polynomial (which is a sum of monomials) is represented only by the coefficients and exponents of the 713 | nonzero summands. For example, to describe f(X) = 1 + 2 * X ** 7 + 5 * X ** 15, we can just store (0, 1), (7, 2), 714 | and (15, 5). If the polynomial has weight wt, then we store wt pairs. Each pair consists of an exponent from 715 | 0, 1, ..., d-1, and each coefficient is a nonzero integer from [-bd, ..., -1, 1, ..., bd]. In fact, to sample the first 716 | monomial exponent from [0, 1, ..., d - 1] with zero bias requires only ceil(log2(d)) bits, but sampling each 717 | subsequent monomial exponent requires an additional secpar bits. And, to sample the coefficient with negligible bias 718 | from uniform requires 1 + ceil(log2(bd)) + secpar bits. 719 | 720 | :param secpar: Input security parameter 721 | :type secpar: int 722 | :param lp: Lattice parameters 723 | :type lp: LatticeParameters 724 | :param distribution: String code describing which distribution to use 725 | :type distribution: str 726 | :param dist_pars: Distribution parameters 727 | :type dist_pars: dict 728 | :param num_coefs: Number of coefficients to generate 729 | :type num_coefs: int 730 | :param bti: Number of bits required to unbiasedly sample indices without replacement. 731 | :type bti: int 732 | :param btd: Number of bits required to unbiasedly sample an integer modulo the modulus in lp 733 | :type btd: int 734 | 735 | :return: Bytes required to decode to a polynomial without bias. 736 | :rtype: int 737 | """ 738 | if secpar < 1: 739 | raise ValueError('Cannot get_gen_bytes_per_poly without an integer security parameter.') 740 | elif num_coefs < 1 or bti < 1 or btd < 1: 741 | raise ValueError( 742 | 'Cannot get_gen_bytes_per_poly without positive integer number of coefficients to generate and an ' + 743 | 'integer number of indices to generate.' 744 | ) 745 | elif distribution == UNIFORM_INFINITY_WEIGHT: 746 | return get_gen_bytes_per_poly_inf_wt_unif(secpar=secpar, lp=lp, dist_pars=dist_pars, num_coefs=num_coefs) 747 | raise ValueError( 748 | 'We tried to compute the number of bytes required to generate a polynomial for a distribution ' + 749 | 'that is not supported.' 750 | ) 751 | 752 | 753 | class Polynomial(object): 754 | """ 755 | Hybrid object with some container, some callable, and some numeric properties. Instantiated with a coefficient 756 | representation of a polynomial using the representation described in the comments of get_gen_bytes_per_poly. On 757 | instantiation, the NTT value representation of the coefficient representation is computed and stored in self.ntt_representation 758 | and the coefficients are forgotten; all arithmetic is done with the NTT values. 759 | 760 | Attributes 761 | ---------- 762 | lp: LatticeParameters 763 | For use in all arithmetic 764 | const_time_flag: bool 765 | Determines whether arithmetic should be done in constant time. 766 | ntt_representation: List[int] 767 | The NTT of the polynomial. 
768 | 769 | Methods 770 | ------- 771 | __init__(self) 772 | Initialize 773 | __eq__(self, other) 774 | Check for equality of polynomials 775 | __add__(self, other) 776 | Add two polynomials 777 | __radd__(self, other) 778 | Add two polynomials (or zero and a polynomial) 779 | __sub__(self, other) 780 | Subtract two polynomials 781 | __mul__(self, other) 782 | Multiply two polynomials 783 | __repr__(self) 784 | String representation of the polynomial 785 | reset_vals(self, coefs: Dict[int, int]) 786 | Computes and stores ntt_values from coefs in self.vals, over-writing old value. 787 | get_coef_rep() 788 | Recompute and output the coefs from ntt_values, the infinity norm, and the weight 789 | to_bits() 790 | Convert to a bitstring 791 | to_bytes() 792 | Convert to a bytes object 793 | """ 794 | lp: LatticeParameters 795 | const_time_flag: bool 796 | ntt_representation: List[int] 797 | 798 | def __init__(self, lp: LatticeParameters, coefs: Dict[int, int], const_time_flag: bool = True): 799 | """ 800 | Initialize a polynomial object by passing in a LatticeParameters object and coefs, which is a Dict[int, int] 801 | where keys are monomial exponents and values are coefficients. 802 | 803 | :param lp: Input LatticeParameters 804 | :type lp: LatticeParameters 805 | :param coefs: Coefficient dictionary 806 | :type coefs: Dict[int, int] 807 | """ 808 | if len(coefs) > lp.degree: 809 | raise ValueError('Cannot create polynomial with too many coefficients specified.') 810 | elif not all(0 <= i < lp.degree for i in coefs): 811 | raise ValueError( 812 | f'Cannot create a polynomial with monomial exponents outside of [0, 1, ..., {lp.degree - 1}]' 813 | ) 814 | self.lp = lp 815 | self.const_time_flag = const_time_flag 816 | self._reset_vals(coefs=coefs) # set the ntt_representation 817 | 818 | def __eq__(self, other) -> bool: 819 | """ 820 | Check if self and other have the same lattice parameters and generate the same coefficients. 821 | 822 | :param other: Another Polynomial. 823 | :type other: Polynomial 824 | 825 | :return: Boolean indicating Polynomial equality 826 | :rtype: bool 827 | """ 828 | if self.lp != other.lp: 829 | return False 830 | x = self.get_coef_rep() 831 | y = other.get_coef_rep() 832 | return x == y 833 | 834 | def __add__(self, other): 835 | """ 836 | Add two polynomials. 837 | 838 | :param other: Another Polynomial 839 | :type other: Polynomial 840 | :return: Sum of self and other. 841 | :rtype: Polynomial 842 | """ 843 | if isinstance(other, int) and other == 0: 844 | return self 845 | result = deepcopy(self) 846 | if self.const_time_flag: 847 | result.ntt_representation = [ 848 | cent(q=self.lp.modulus, halfmod=self.lp.halfmod, logmod=self.lp.logmod, val=x + y) 849 | for x, y in zip(result.ntt_representation, other.ntt_representation) 850 | ] 851 | else: 852 | result.ntt_representation = [ 853 | (x + y) % self.lp.modulus for x, y in zip(result.ntt_representation, other.ntt_representation) 854 | ] 855 | result.ntt_representation = [ 856 | x if x <= self.lp.modulus // 2 else x - self.lp.modulus for x in result.ntt_representation 857 | ] 858 | return result 859 | 860 | def __radd__(self, other): 861 | """ 862 | Add two polynomials. 863 | 864 | :param other: Another Polynomial 865 | :type other: Polynomial 866 | :return: Sum of self and other. 867 | :rtype: Polynomial 868 | """ 869 | if isinstance(other, int) and other == 0: 870 | return self 871 | return self.__add__(other) 872 | 873 | def __sub__(self, other): 874 | """ 875 | Subtract two polynomials. 
876 | 877 | :param other: Another Polynomial 878 | :type other: Polynomial 879 | :return: Sum of self and other. 880 | :rtype: Polynomial 881 | """ 882 | if isinstance(other, int) and other == 0: 883 | return self 884 | result = deepcopy(self) 885 | if self.const_time_flag: 886 | result.ntt_representation = [ 887 | cent(q=self.lp.modulus, halfmod=self.lp.halfmod, logmod=self.lp.logmod, val=x - y) 888 | for x, y in zip(result.ntt_representation, other.ntt_representation) 889 | ] 890 | else: 891 | result.ntt_representation = [ 892 | (x - y) % self.lp.modulus for x, y in zip(result.ntt_representation, other.ntt_representation) 893 | ] 894 | result.ntt_representation = [ 895 | x if x <= self.lp.modulus // 2 else x - self.lp.modulus for x in result.ntt_representation 896 | ] 897 | return result 898 | 899 | def __mul__(self, other): 900 | """ 901 | Multiply two polynomials 902 | 903 | :param other: Other Polynomial 904 | :type other: Polynomial 905 | :return: 906 | :rtype: Polynomial 907 | """ 908 | if isinstance(other, int) and other == 0: 909 | return 0 910 | result = deepcopy(self) 911 | if self.const_time_flag: 912 | result.ntt_representation = [ 913 | cent(q=self.lp.modulus, halfmod=self.lp.halfmod, logmod=self.lp.logmod, val=x * y) 914 | for x, y in zip(result.ntt_representation, other.ntt_representation) 915 | ] 916 | else: 917 | result.ntt_representation = [ 918 | (x * y) % self.lp.modulus for x, y in zip(result.ntt_representation, other.ntt_representation) 919 | ] 920 | result.ntt_representation = [ 921 | x if x <= self.lp.modulus // 2 else x - self.lp.modulus for x in result.ntt_representation 922 | ] 923 | return result 924 | 925 | def __rmul__(self, other): 926 | """ Use NTT representation to point-wise multiply polynomial ntt_values. 927 | 928 | :param other: Other Polynomial 929 | :type other: Polynomial 930 | :return: 931 | :rtype: Polynomial 932 | """ 933 | if isinstance(other, int) and other == 0: 934 | return 0 935 | return self.__mul__(other) 936 | 937 | def __repr__(self) -> str: 938 | """ 939 | Return a canonical string representation of the Polynomial; WARNING: calls get_coefs. 940 | 941 | :return: 942 | :rtype: str 943 | """ 944 | coef_rep, norm, wt = self.get_coef_rep() 945 | sorted_keys = sorted(list(coef_rep.keys())) 946 | sorted_coefs = [(i, coef_rep[i]) for i in sorted_keys] 947 | return str((sorted_coefs, norm, wt)) 948 | 949 | def _ntt(self, inv_flag: bool, val: List[int]) -> List[int]: 950 | """ 951 | Very thin wrapper that attaches ntt method to the Polynomial object 952 | 953 | :param inv_flag: Indicates whether we are performing forward NTT or inverse NTT 954 | :type inv_flag: bool 955 | :return: Return the NTT (or inverse) of the inputs x. 956 | :rtype: List[int] 957 | """ 958 | return ntt( 959 | q=self.lp.modulus, zetas=self.lp.zetas, zetas_inv=self.lp.zetas_invs, val=val, 960 | halfmod=self.lp.halfmod, logmod=self.lp.logmod, n=self.lp.n, lgn=self.lp.lgn, inv_flag=inv_flag, 961 | const_time_flag=self.const_time_flag 962 | ) 963 | 964 | def _reset_vals(self, coefs: Dict[int, int]) -> None: 965 | """ Input a Dict[int, int] of input coefficients (keys are exponents, values are coefficients), compute the NTT 966 | of the result, then overwrite self.vals with the new NTT data. 
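For instance (illustrative values only), with lp.degree = 8, so lp.n = 16, and coefs = {0: 1, 7: -1}, the padded list computed below is [1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0], and the forward NTT of that list becomes the new ntt_representation.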
967 | 968 | :param coefs: Input coefficients 969 | :type coefs: Dict[int, int] 970 | """ 971 | tmp: List[int] = [0 for _ in range(self.lp.n)] 972 | for i in range(self.lp.n): 973 | if i in coefs: 974 | tmp[i] += coefs[i] 975 | self.ntt_representation = self._ntt(inv_flag=False, val=tmp) 976 | 977 | def get_coef_rep(self) -> Tuple[Dict[int, int], int, int]: 978 | """ 979 | Compute the coefficient representation of the polynomial by performing the inverse NTT on self.ntt_representation, compute the 980 | norm and the weight, and return all these. 981 | 982 | :return: Coefficient representation of the Polynomial, norm, and weight. 983 | :rtype: Tuple[Dict[int, int], int, int] 984 | """ 985 | tmp: List[int] = self._ntt(inv_flag=True, val=self.ntt_representation) 986 | left: List[int] = tmp[:self.lp.degree] 987 | right: List[int] = tmp[self.lp.degree:] 988 | if self.const_time_flag: 989 | coefs: List[int] = [ 990 | cent(q=self.lp.modulus, halfmod=self.lp.halfmod, logmod=self.lp.logmod, val=x - y) 991 | for x, y in zip(left, right) 992 | ] 993 | else: 994 | coefs: List[int] = [(x - y) % self.lp.modulus for x, y in zip(left, right)] 995 | coefs = [x if x <= self.lp.modulus // 2 else x - self.lp.modulus for x in coefs] 996 | coefs_dict: Dict[int, int] = {index: value for index, value in enumerate(coefs) if value != 0} 997 | if not coefs_dict: 998 | return coefs_dict, 0, 0 999 | return coefs_dict, max(abs(coefs_dict[value]) for value in coefs_dict), len(coefs_dict) 1000 | 1001 | def to_bytes(self) -> bytearray: 1002 | return bytearray(b''.join((i % self.lp.modulus).to_bytes((self.lp.logmod + 7) // 8, byteorder='little') for i in self.ntt_representation))  # fixed-width little-endian encoding of each NTT value; bytearray(list) would reject negative or multi-byte values 1003 | 1004 | def to_bits(self) -> str: 1005 | return ''.join(bin(i % self.lp.modulus)[2:].zfill(self.lp.logmod) for i in self.ntt_representation)  # join fixed-width binary strings; sum() cannot concatenate str 1006 | 1007 | 1008 | def decode2poly( 1009 | secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], val: str, 1010 | num_coefs: int, bti: int, btd: int, const_time_flag: bool = True 1011 | ) -> Polynomial: 1012 | return Polynomial( 1013 | lp=lp, 1014 | coefs=decode2polycoefs(secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, val=val, 1015 | num_coefs=num_coefs, bti=bti, btd=btd), 1016 | const_time_flag=const_time_flag, 1017 | ) 1018 | 1019 | 1020 | def hash2polynomial(secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], salt: str, 1021 | msg: str, num_coefs: int, bti: int, btd: int, 1022 | const_time_flag: bool = True) -> Polynomial: 1023 | """ 1024 | Hash an input message msg and salt to a polynomial with norm bound at most bd and weight at most wt. 1025 | 1026 | :param secpar: Input security parameter 1027 | :type secpar: int 1028 | :param lp: Lattice parameters 1029 | :type lp: LatticeParameters 1030 | :param distribution: String code describing which distribution to use 1031 | :type distribution: str 1032 | :param dist_pars: Distribution parameters 1033 | :type dist_pars: dict 1034 | :param salt: Salt 1035 | :type salt: str 1036 | :param msg: Message being hashed 1037 | :type msg: str 1038 | :param num_coefs: Number of coefficients to generate 1039 | :type num_coefs: int 1040 | :param bti: Number of bits required to unbiasedly sample indices without replacement. 1041 | :type bti: int 1042 | :param btd: Number of bits required to unbiasedly sample an integer modulo the modulus in lp 1043 | :type btd: int 1044 | :param const_time_flag: Boolean indicating whether arithmetic should be const-time. 
1045 | :type const_time_flag: bool 1046 | 1047 | :return: 1048 | :rtype: Polynomial 1049 | """ 1050 | num_bytes_for_hashing: int = get_gen_bytes_per_poly( 1051 | secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, num_coefs=num_coefs, bti=bti, btd=btd 1052 | ) 1053 | val: str = binary_digest(msg, num_bytes_for_hashing, salt) 1054 | coefs: Dict[int, int] = decode2polycoefs(secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, 1055 | val=val, num_coefs=num_coefs, bti=bti, btd=btd) 1056 | return Polynomial(lp=lp, coefs=coefs, const_time_flag=const_time_flag) 1057 | 1058 | 1059 | def random_polynomial( 1060 | secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], num_coefs: int, 1061 | bti: int, btd: int, const_time_flag: bool = True 1062 | ) -> Polynomial: 1063 | """ 1064 | Generate a random polynomial with norm bounded by bd and weight bounded by wt. Relies on randbits from 1065 | the secrets package to generate data. Since the secrets package is thought to be secure for cryptographic use, the 1066 | results should have a negligible bias away from uniformity. 1067 | 1068 | :param secpar: Input security parameter 1069 | :type secpar: int 1070 | :param lp: Lattice parameters 1071 | :type lp: LatticeParameters 1072 | :param distribution: String code describing which distribution to use 1073 | :type distribution: str 1074 | :param dist_pars: Distribution parameters 1075 | :type dist_pars: dict 1076 | :param num_coefs: Number of coefficients to generate 1077 | :type num_coefs: int 1078 | :param bti: Number of bits required to unbiasedly sample indices without replacement. 1079 | :type bti: int 1080 | :param btd: Number of bits required to decode to an unbiased sample an integer modulo the modulus in lp 1081 | :type btd: int 1082 | :param const_time_flag: Indicates whether arithmetic should be constant time. 1083 | :type const_time_flag: bool 1084 | 1085 | :return: 1086 | :rtype: Polynomial 1087 | """ 1088 | num_bits_for_hashing: int = 8 * get_gen_bytes_per_poly( 1089 | secpar=secpar, 1090 | lp=lp, 1091 | distribution=distribution, 1092 | dist_pars=dist_pars, 1093 | num_coefs=num_coefs, 1094 | btd=btd, 1095 | bti=bti) 1096 | val: str = bin(randbits(num_bits_for_hashing))[2:].zfill(num_bits_for_hashing) 1097 | 1098 | return decode2poly( 1099 | secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, val=val, num_coefs=num_coefs, 1100 | bti=bti, btd=btd, const_time_flag=const_time_flag 1101 | ) 1102 | 1103 | 1104 | class PolynomialVector(object): 1105 | """ 1106 | Contains LatticeParameters and a list of polynomials. WARNING: We repurpose the pow notation for scaling by a poly. 1107 | 1108 | Attributes 1109 | ---------- 1110 | lp: LatticeParameters 1111 | For use in all arithmetic. 1112 | const_time_flag: bool 1113 | Indicates whether arithmetic should be constant time. 1114 | entries: List[Polynomial] 1115 | The "vector" of polynomials. 1116 | 1117 | Methods 1118 | ------- 1119 | __init__(self) 1120 | Initialize 1121 | __eq__(self, other) 1122 | Check for equality of PolynomialVectors 1123 | __add__(self, other) 1124 | Add two PolynomialVectors 1125 | __radd__(self, other) 1126 | Add two PolynomialVectors (or zero and a PolynomialVectors) 1127 | __sub__(self, other) 1128 | Subtract two PolynomialVectors 1129 | __mul__(self, other) 1130 | Compute the dot product of two polynomial vectors. 
1131 | __pow__(self, other: Polynomial) 1132 | Scale self 1133 | __repr__(self) 1134 | String representation of the polynomial 1135 | norm_and_weight() 1136 | Return a tuple with both norm and weight. 1137 | norm() 1138 | Return the maximum of the absolute value of the values in coefs 1139 | weight() 1140 | Return the length of coefs. 1141 | """ 1142 | lp: LatticeParameters 1143 | const_time_flag: bool 1144 | entries: List[Polynomial] 1145 | 1146 | def __init__(self, lp: LatticeParameters, entries: List[Polynomial], const_time_flag: bool = True): 1147 | """ 1148 | Instantiate with some input LatticeParameters and a list of Polynomial entries. 1149 | 1150 | :param lp: Input lattice parameters 1151 | :type lp: LatticeParameters 1152 | :param entries: Input polynomial entries 1153 | :type entries: List[Polynomial] 1154 | """ 1155 | if not all(i.lp == lp for i in entries): 1156 | raise ValueError('Can only create PolynomialVector with all common lattice parameters.') 1157 | elif not all(i.const_time_flag == const_time_flag for i in entries): 1158 | raise ValueError('The const_time_flag for each entry in the PolynomialVector must match ' + 1159 | 'the const_time_flag of the PolynomialVector.') 1160 | self.lp = lp 1161 | self.const_time_flag = const_time_flag 1162 | self.entries = entries 1163 | 1164 | def __eq__(self, other) -> bool: 1165 | """ 1166 | PolynomialVector equality is determined if parameters match and all entries match. 1167 | 1168 | :param other: Other PolynomialVector 1169 | :type other: PolynomialVector 1170 | :return: Boolean indicating whether parameters and all entries match 1171 | :rtype: bool 1172 | """ 1173 | return self.lp == other.lp and self.entries == other.entries 1174 | 1175 | def __add__(self, other): 1176 | """ 1177 | Add PolynomialVectors 1178 | 1179 | :param other: Other PolynomialVector 1180 | :type other: PolynomialVector 1181 | :return: self + other 1182 | :rtype: PolynomialVector 1183 | """ 1184 | result = deepcopy(self) 1185 | result.entries = [x + y for x, y in zip(result.entries, other.entries)] 1186 | return result 1187 | 1188 | def __radd__(self, other): 1189 | """ 1190 | Add PolynomialVectors 1191 | 1192 | :param other: Other PolynomialVector 1193 | :type other: PolynomialVector 1194 | :return: self + other 1195 | :rtype: PolynomialVector 1196 | """ 1197 | if other == 0: 1198 | return self 1199 | return self.__add__(other) 1200 | 1201 | def __sub__(self, other): 1202 | """ 1203 | Add PolynomialVectors 1204 | 1205 | :param other: Other PolynomialVector 1206 | :type other: PolynomialVector 1207 | :return: self - other 1208 | :rtype: PolynomialVector 1209 | """ 1210 | result = deepcopy(self) 1211 | result.entries = [x - y for x, y in zip(result.entries, other.entries)] 1212 | return result 1213 | 1214 | def __mul__(self, other) -> Polynomial: 1215 | """ 1216 | Dot product between two polynomial vectors. 1217 | 1218 | :param other: Other PolynomialVector 1219 | :type other: PolynomialVector 1220 | 1221 | :return: The dot product 1222 | :rtype: PolynomialVector 1223 | """ 1224 | if self.lp != other.lp: 1225 | raise ValueError('Can only compute dot products with polynomials with the same parameters.') 1226 | each_product = [x * y for x, y in zip(self.entries, other.entries)] 1227 | return sum(each_product) 1228 | 1229 | def __pow__(self, scalar: Polynomial): 1230 | """ 1231 | Scale a PolynomialVector by a Polynomial scalar. 
So if x is a PolynomialVector and y is a Polynomial, z = x ** y 1232 | gives us the PolynomialVector such that the ith entry is z.entries[i] == x.entries[i] * y. 1233 | 1234 | :param scalar: Input polynomial for scaling 1235 | :type scalar: Polynomial 1236 | :return: The PolynomialVector scaled entry-wise by the input Polynomial. 1237 | :rtype: PolynomialVector 1238 | """ 1239 | # Abuse ** operator to scale vectors: cv = v**c 1240 | if scalar.const_time_flag != self.const_time_flag: 1241 | raise ValueError('Cannot scale a PolynomialVector with a different const_time_flag than the scalar.') 1242 | result = deepcopy(self) 1243 | result.entries = [scalar * i for i in result.entries] 1244 | return result 1245 | 1246 | def __repr__(self) -> str: 1247 | """ 1248 | A canonical string representation of a PolynomialVector object: str(self.entries). 1249 | 1250 | :return: 1251 | :rtype: str 1252 | """ 1253 | return str(self.entries) 1254 | 1255 | def get_coef_rep(self) -> List[Tuple[Dict[int, int], int, int]]: 1256 | """ 1257 | Calls get_coef_rep for each entry. 1258 | 1259 | :return: 1260 | :rtype: List[Tuple[Dict[int, int], int, int]] 1261 | """ 1262 | return [val.get_coef_rep() for val in self.entries] 1263 | 1264 | def to_bytes(self) -> bytearray: 1265 | return bytearray(b'').join(i.to_bytes() for i in self.entries)  # join, not sum: sum() cannot concatenate bytearrays 1266 | 1267 | def to_bits(self) -> str: 1268 | return ''.join(i.to_bits() for i in self.entries)  # join, not sum: sum() cannot concatenate strings 1269 | 1270 | 1271 | def decode2polynomialvector( 1272 | secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], val: str, 1273 | num_coefs: int, bti: int, btd: int, const_time_flag: bool = True 1274 | ) -> PolynomialVector: 1275 | if not is_bitstring(val): 1276 | raise ValueError('Can only decode to a polynomial vector with an input bitstring val.') 1277 | k: int = 8 * get_gen_bytes_per_poly( 1278 | secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, num_coefs=num_coefs, 1279 | bti=bti, btd=btd 1280 | ) 1281 | if len(val) < k * lp.length: 1282 | raise ValueError( 1283 | f'Cannot decode2polynomialvector without an input bitstring val with length at ' + 1284 | f'least {k * lp.length} bits, but had length {len(val)}.' 1285 | ) 1286 | entries = [ 1287 | Polynomial( 1288 | lp=lp, 1289 | coefs=decode2polycoefs(secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, 1290 | val=val[i * k: (i + 1) * k], num_coefs=num_coefs, bti=bti, btd=btd), 1291 | const_time_flag=const_time_flag, 1292 | ) for i in range(lp.length) 1293 | ] 1294 | return PolynomialVector(lp=lp, entries=entries, const_time_flag=const_time_flag) 1295 | 1296 | 1297 | def random_polynomial_vector_inf_wt_unif( 1298 | secpar: int, lp: LatticeParameters, dist_pars: Dict[str, int], num_coefs: int, 1299 | bti: int, btd: int, const_time_flag: bool = True 1300 | ) -> PolynomialVector: 1301 | if 'bd' not in dist_pars or not isinstance(dist_pars['bd'], int) or \ 1302 | dist_pars['bd'] < 1 or dist_pars['bd'] > lp.modulus // 2: 1303 | raise ValueError( 1304 | 'Cannot random_polynomial_vector_inf_wt_unif without a positive integer bound at most half the modulus.' 
1305 | ) 1306 | elif 'wt' not in dist_pars or not isinstance(dist_pars['wt'], int) or \ 1307 | dist_pars['wt'] < 1 or dist_pars['wt'] > lp.degree: 1308 | raise ValueError('Cannot random_polynomial_vector_inf_wt_unif without positive integer weight.') 1309 | k = 8 * lp.length * get_gen_bytes_per_poly( 1310 | secpar=secpar, lp=lp, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=dist_pars, 1311 | num_coefs=num_coefs, bti=bti, btd=btd 1312 | ) 1313 | return decode2polynomialvector( 1314 | secpar=secpar, lp=lp, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=dist_pars, 1315 | num_coefs=num_coefs, bti=bti, btd=btd, val=bin(randbits(k))[2:].zfill(k), const_time_flag=const_time_flag 1316 | ) 1317 | 1318 | 1319 | def random_polynomialvector( 1320 | secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], num_coefs: int, 1321 | bti: int, btd: int, const_time_flag: bool = True 1322 | ) -> PolynomialVector: 1323 | """ 1324 | Generate a random PolynomialVector with bounded Polynomial entries. Essentially just instantiates a PolynomialVector 1325 | object with a list of random Polynomial objects as entries, which are in turn generated by random_polynomial 1326 | 1327 | :param secpar: Input security parameter 1328 | :type secpar: int 1329 | :param lp: Lattice parameters 1330 | :type lp: LatticeParameters 1331 | :param distribution: String code describing which distribution to use 1332 | :type distribution: str 1333 | :param dist_pars: Distribution parameters 1334 | :type dist_pars: dict 1335 | :param num_coefs: Number of coefficients to generate 1336 | :type num_coefs: int 1337 | :param bti: Number of bits required to unbiasedly sample indices without replacement. 1338 | :type bti: int 1339 | :param btd: Number of bits required to unbiasedly sample an integer modulo the modulus in lp 1340 | :type btd: int 1341 | :param const_time_flag: Indicates whether arithmetic should be constant time. 1342 | :type const_time_flag: bool 1343 | 1344 | :return: 1345 | :rtype: PolynomialVector 1346 | """ 1347 | if secpar < 1: 1348 | raise ValueError('Cannot random_polynomialvector without an integer security parameter.') 1349 | elif distribution == UNIFORM_INFINITY_WEIGHT: 1350 | return random_polynomial_vector_inf_wt_unif( 1351 | secpar=secpar, lp=lp, dist_pars=dist_pars, num_coefs=num_coefs, 1352 | bti=bti, btd=btd, const_time_flag=const_time_flag 1353 | ) 1354 | raise ValueError('Tried to random_polynomialvector with a distribution that is not supported.') 1355 | 1356 | 1357 | def hash2polynomialvector( 1358 | secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], num_coefs: int, 1359 | bti: int, btd: int, msg: str, salt: str, const_time_flag: bool = True 1360 | ) -> PolynomialVector: 1361 | """ 1362 | Hash an input message msg and salt to a polynomial vector with norm bound at most bd and weight at most wt. Just 1363 | calls decode2polycoefs repeatedly. 
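As a rough usage sketch (the parameter values here are assumptions for illustration, not recommended settings): with lp = LatticeParameters(degree=8, length=3, modulus=17), distribution = UNIFORM_INFINITY_WEIGHT, and dist_pars = {'bd': 1, 'wt': 2}, this function requests lp.length * get_gen_bytes_per_poly(...) bytes of SHAKE256 output via binary_digest and passes the resulting bitstring to decode2polynomialvector, which decodes one block per entry into a PolynomialVector of length 3.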
1364 | 1365 | :param secpar: Input security parameter 1366 | :type secpar: int 1367 | :param lp: Lattice parameters 1368 | :type lp: LatticeParameters 1369 | :param distribution: String code describing which distribution to use 1370 | :type distribution: str 1371 | :param dist_pars: Distribution parameters 1372 | :type dist_pars: dict 1373 | :param salt: Salt 1374 | :type salt: str 1375 | :param msg: Message being hashed 1376 | :type msg: str 1377 | :param num_coefs: Number of coefficients to generate 1378 | :type num_coefs: int 1379 | :param bti: Number of bits required to unbiasedly sample indices without replacement. 1380 | :type bti: int 1381 | :param btd: Number of bits required to unbiasedly sample an integer modulo the modulus in lp 1382 | :type btd: int 1383 | :param const_time_flag: Indicates whether arithmetic should be constant time. 1384 | :type const_time_flag: bool 1385 | 1386 | :return: Call decode2polycoefs for length, create Polynomial for each, return a PolynomialVector with these entries 1387 | :rtype: PolynomialVector 1388 | """ 1389 | k: int = lp.length * get_gen_bytes_per_poly( 1390 | secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, num_coefs=num_coefs, bti=bti, btd=btd 1391 | ) 1392 | val: str = binary_digest(msg=msg, num_bytes=k, salt=salt) 1393 | return decode2polynomialvector( 1394 | secpar=secpar, lp=lp, distribution=distribution, dist_pars=dist_pars, val=val, 1395 | num_coefs=num_coefs, bti=bti, btd=btd, const_time_flag=const_time_flag 1396 | ) 1397 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: lattice-algebra 2 | site_description: 3 | site_url: https://geometry-labs.github.io/lattice-algebra 4 | theme: 5 | name: material 6 | palette: 7 | - scheme: default 8 | primary: deep purple 9 | accent: amber 10 | toggle: 11 | icon: material/lightbulb 12 | name: Switch to dark mode 13 | - scheme: slate 14 | primary: deep purple 15 | accent: amber 16 | toggle: 17 | icon: material/lightbulb-outline 18 | name: Switch to light mode 19 | features: 20 | - search.suggest 21 | - search.highlight 22 | - content.tabs.link 23 | icon: 24 | repo: fontawesome/brands/github-alt 25 | language: en 26 | repo_name: geometry-labs/lattice-algebra 27 | repo_url: https://github.com/geometry-labs/lattice-algebra 28 | edit_uri: '' 29 | plugins: 30 | - search 31 | 32 | nav: 33 | - Index: index.md 34 | - Introduction: introduction.md 35 | - Cryptography: cryptography.md 36 | - Under The Hood: under_the_hood.md 37 | - Installation: installation.md 38 | - Intended Usage: intended_usage.md 39 | 40 | markdown_extensions: 41 | - toc: 42 | permalink: true 43 | - markdown.extensions.codehilite: 44 | guess_lang: false 45 | - admonition 46 | - codehilite 47 | - extra 48 | - pymdownx.superfences: 49 | custom_fences: 50 | - name: mermaid 51 | class: mermaid 52 | format: !!python/name:pymdownx.superfences.fence_code_format '' 53 | - pymdownx.tabbed: 54 | alternate_style: true 55 | - mdx_include 56 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | pytest-cov 3 | pytest-mock -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """lattice-algebra 
distutils configuration.""" 3 | import os 4 | import codecs 5 | 6 | from setuptools import setup, find_packages 7 | 8 | 9 | with open("README.md", encoding="utf-8") as readme_file: 10 | readme = readme_file.read() 11 | 12 | 13 | def _read(rel_path): 14 | here = os.path.abspath(os.path.dirname(__file__)) 15 | with codecs.open(os.path.join(here, rel_path), "r") as fp: 16 | return fp.read() 17 | 18 | 19 | def get_version(rel_path): 20 | """Get the version from the __version__ file in the lattice_algebra dir.""" 21 | for line in _read(rel_path).splitlines(): 22 | if line.startswith("__version__"): 23 | delim = '"' if '"' in line else "'" 24 | return line.split(delim)[1] 25 | else: 26 | raise RuntimeError("Unable to find version string.") 27 | 28 | 29 | setup( 30 | name="lattice_algebra", 31 | version=get_version(os.path.join("lattice_algebra", "__init__.py")), 32 | description=( 33 | "Algebraic infrastructure for cryptographic schemes in lattice settings." 34 | ), 35 | long_description=readme, 36 | long_description_content_type="text/markdown", 37 | author="Geometry Labs", 38 | author_email="info@geometrylabs.io", 39 | url="https://github.com/geometry-labs/lattice-algebra", 40 | packages=find_packages(exclude=["tests*", "docs*", ".github*"]), 41 | package_dir={"lattice_algebra": "lattice_algebra"}, 42 | include_package_data=True, 43 | python_requires=">=3.6", 44 | license="MIT", 45 | zip_safe=False, 46 | classifiers=[ 47 | "Development Status :: 5 - Production/Stable", 48 | "Environment :: Console", 49 | "Intended Audience :: Developers", 50 | "Natural Language :: English", 51 | "License :: OSI Approved :: BSD License", 52 | "Programming Language :: Python :: 3 :: Only", 53 | "Programming Language :: Python :: 3", 54 | "Programming Language :: Python :: 3.6", 55 | "Programming Language :: Python :: 3.7", 56 | "Programming Language :: Python :: 3.8", 57 | "Programming Language :: Python :: 3.9", 58 | "Programming Language :: Python :: 3.10", 59 | "Programming Language :: Python :: Implementation :: CPython", 60 | "Programming Language :: Python :: Implementation :: PyPy", 61 | "Programming Language :: Python", 62 | "Topic :: Software Development", 63 | ], 64 | keywords=[ 65 | "lattice", 66 | "cryptography", 67 | "crypto", 68 | ], 69 | ) 70 | -------------------------------------------------------------------------------- /tests/test_lattices.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests the lattices package. 3 | 4 | Todo list: 5 | 1. Test to_bits and to_bytes for both Polynomial and PolynomialVector 6 | 2. 
Test hash functions 7 | """ 8 | import pytest 9 | from lattice_algebra.main import * 10 | from copy import deepcopy 11 | from secrets import randbelow, randbits 12 | from typing import List, Tuple, Dict 13 | 14 | sample_size_for_random_tests: int = 2 ** 3 15 | 16 | small_q_for_testing: int = 17 17 | small_non_prime_for_testing: int = 16 18 | small_d_for_testing: int = 8 19 | small_non_ntt_prime_for_testing: int = 11 20 | allowed_primitive_roots_of_unity_for_small_q_and_small_d: List[int] = [3, 5, 6, 7, 10, 11, 12, 14] 21 | small_halfmod_for_testing: int = small_q_for_testing // 2 22 | small_logmod_for_testing: int = ceil(log2(small_q_for_testing)) 23 | small_n_for_testing: int = 2 * small_d_for_testing 24 | small_lgn_for_testing: int = ceil(log2(small_n_for_testing)) 25 | 26 | modulus_for_testing: int = 40961 27 | degree_for_testing: int = 4096 28 | length_for_testing: int = 3 29 | norm_for_testing: int = 1 30 | weight_for_testing: int = 2 31 | halfmod_for_testing: int = modulus_for_testing // 2 32 | logmod_for_testing: int = ceil(log2(modulus_for_testing)) 33 | n_for_testing: int = 2 * degree_for_testing 34 | lgn_for_testing: int = ceil(log2(n_for_testing)) 35 | 36 | pars_for_testing: dict = { 37 | 'modulus': modulus_for_testing, 'degree': degree_for_testing, 'length': length_for_testing, 38 | 'halfmod': halfmod_for_testing, 'logmod': logmod_for_testing, 'n': n_for_testing, 39 | 'lgn': lgn_for_testing 40 | } 41 | lp_for_testing = LatticeParameters( 42 | degree=pars_for_testing['degree'], length=pars_for_testing['length'], modulus=pars_for_testing['modulus'] 43 | ) 44 | secpar4testing = 8 45 | 46 | IS_PRIME_CASES = [ 47 | (17, True), 48 | (8675309, True), 49 | (16, False), 50 | (small_q_for_testing, True), 51 | (small_q_for_testing + 1, False), 52 | (modulus_for_testing, True), 53 | (modulus_for_testing - 1, False) 54 | ] 55 | 56 | 57 | # @pytest.mark.skip 58 | @pytest.mark.parametrize("q,expected_output", IS_PRIME_CASES) 59 | def test_is_prime(q, expected_output): 60 | assert is_prime(val=q) == expected_output 61 | 62 | 63 | IS_POW_TWO_CASES = [ 64 | (2, True), 65 | (4, True), 66 | (8, True), 67 | (16, True), 68 | (3, False), 69 | (5, False), 70 | (9, False), 71 | (17, False) 72 | ] 73 | 74 | 75 | @pytest.mark.parametrize("d,expected_output", IS_POW_TWO_CASES) 76 | def test_is_pow_two(d, expected_output): 77 | assert is_pow_two(val=d) == expected_output 78 | 79 | 80 | HAS_PRIM_ROU_CASES = [ 81 | (17, 8, True), 82 | (18, 8, False), 83 | (33, 8, True), 84 | (34, 8, False), 85 | (257, 64, True), # SWIFFT parameters 86 | (258, 64, False), 87 | (8380417, 256, True), # CRYSTALS-Dilithium 88 | (8380418, 256, False), 89 | (201, 25, True), # We don't need the primality of q or power-of-two d 90 | ] 91 | 92 | 93 | @pytest.mark.parametrize("q,d,expected_output", HAS_PRIM_ROU_CASES) 94 | def test_has_prim_rou(q, d, expected_output): 95 | assert has_prim_rou(modulus=q, degree=d) == expected_output 96 | 97 | 98 | NTT_FRIENDLY_CASES = [ 99 | (True, True, True, True), 100 | (True, True, False, False), 101 | (True, False, True, False), 102 | (False, True, True, False), 103 | (True, False, False, False), 104 | (False, True, False, False), 105 | (False, False, True, False), 106 | (False, False, False, False), 107 | ] 108 | 109 | 110 | @pytest.mark.parametrize("foo,bar,baz,expected_output", NTT_FRIENDLY_CASES) 111 | def test_is_ntt_friendly_prime_with_mock(mocker, foo, bar, baz, expected_output): 112 | mocker.patch('lattice_algebra.main.is_prime', return_value=foo) 113 | 
mocker.patch('lattice_algebra.main.is_pow_two', return_value=bar) 114 | mocker.patch('lattice_algebra.main.has_prim_rou', return_value=baz) 115 | assert is_ntt_friendly_prime(modulus=1, degree=1) == expected_output # the actual input doesn't matter 116 | 117 | 118 | IS_PRIM_ROU_CASES = [ 119 | (17, 8, 3, True), 120 | (17, 8, 4, False), 121 | (17, 8, 5, True), 122 | (17, 8, 6, True), 123 | (17, 8, 7, True), 124 | (17, 8, 8, False), 125 | (17, 8, 9, False), 126 | (17, 8, 10, True), 127 | (17, 8, 11, True), 128 | (17, 8, 12, True), 129 | (17, 8, 13, False), 130 | (17, 8, 14, True), 131 | (17, 8, 15, False), 132 | (17, 8, 16, False), 133 | (8380417, 256, 1753, True), # CRYSTALS-Dilithium 134 | (8380417, 257, 1753, False), 135 | (8380418, 256, 1753, False), 136 | (257, 64, 42, True), # SWIFFT 137 | (257, 65, 42, False), 138 | (258, 64, 42, False), 139 | ] 140 | 141 | 142 | @pytest.mark.parametrize("q,d,i,expected_output", IS_PRIM_ROU_CASES) 143 | def test_is_prim_rou(q, d, i, expected_output): 144 | assert is_prim_rou(modulus=q, degree=d, val=i) == expected_output 145 | 146 | 147 | def test_get_prim_rou_and_rou_inv_value_errors(): 148 | with pytest.raises(ValueError): 149 | get_prim_rou_and_rou_inv(modulus=small_non_prime_for_testing, degree=small_d_for_testing) 150 | with pytest.raises(ValueError): 151 | get_prim_rou_and_rou_inv(modulus=small_non_ntt_prime_for_testing, degree=small_d_for_testing) 152 | 153 | 154 | GET_PRIM_ROU_AND_ROU_INV_CASES = [ 155 | (257, 64, (9, 200)), 156 | (8380417, 256, (1753, 731434)), 157 | (12289, 2, (1479, 10810)), 158 | (12289, 4, (4043, 5146)), 159 | (12289, 8, (722, 6553)), 160 | (12289, 16, (1212, 2545)), 161 | (12289, 32, (563, 5828)), 162 | (12289, 64, (81, 11227)), 163 | (12289, 128, (9, 2731)), 164 | (12289, 256, (3, 8193)), 165 | (12289, 512, (49, 1254)), 166 | (12289, 1024, (7, 8778)), 167 | (12289, 2048, (41, 4496)), 168 | ] 169 | 170 | 171 | @pytest.mark.parametrize("q,d,expected_output", GET_PRIM_ROU_AND_ROU_INV_CASES) 172 | def test_get_prim_rou_and_rou_inv(q, d, expected_output): 173 | x, y = get_prim_rou_and_rou_inv(modulus=q, degree=d) 174 | assert x, y == expected_output 175 | assert x != 0 176 | assert y != 0 177 | assert (x * y) % q == 1 178 | assert all(x ** k % q != 0 for k in range(2, 2 * d)) 179 | 180 | 181 | IS_BITSTRING_CASES = [ 182 | ('0101100101', True), 183 | ('1010011010', True), 184 | (8675309, False), 185 | ('hello world', False) 186 | ] 187 | 188 | 189 | @pytest.mark.parametrize("x,expected_output", IS_BITSTRING_CASES) 190 | def test_is_bitstring(x, expected_output): 191 | assert is_bitstring(val=x) == expected_output 192 | 193 | 194 | BIT_REV_CASES = [ 195 | (2, 0, 0), 196 | (2, 1, 2), 197 | (2, 2, 1), 198 | (2, 3, 3), 199 | (3, 0, 0), 200 | (3, 1, 4), 201 | (3, 2, 2), 202 | (3, 3, 6), 203 | (3, 4, 1), 204 | (3, 5, 5), 205 | (3, 6, 3), 206 | (3, 7, 7), 207 | (4, 0, 0), 208 | (4, 1, 8), 209 | (4, 2, 4), 210 | (4, 3, 12), 211 | (4, 4, 2), 212 | (4, 5, 10), 213 | (4, 6, 6), 214 | (4, 7, 14), 215 | (4, 8, 1), 216 | (4, 9, 9), 217 | (4, 10, 5), 218 | (4, 11, 13), 219 | (4, 12, 3), 220 | (4, 13, 11), 221 | (4, 14, 7), 222 | (4, 15, 15), 223 | ] 224 | 225 | 226 | @pytest.mark.parametrize("n,val,expected_output", BIT_REV_CASES) 227 | def test_bit_rev(n, val, expected_output): 228 | assert bit_rev(num_bits=n, val=val) == expected_output 229 | 230 | 231 | BIT_REV_CP_CASES = [ 232 | ([0, 1], [0, 1]), 233 | ([0, 1, 2, 3], [0, 2, 1, 3]), 234 | ([0, 1, 2, 3, 4, 5, 6, 7], [0, 4, 2, 6, 1, 5, 3, 7]), 235 | ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15], [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]) 236 | ] 237 | 238 | 239 | @pytest.mark.parametrize("x,expected_output", BIT_REV_CP_CASES) 240 | def test_bit_rev_cp(x, expected_output): 241 | assert bit_rev_cp(x, ceil(log2(len(x)))) == expected_output 242 | 243 | 244 | CENT_CASES = [ 245 | (17, 8, 5, 0, 0), # (q: int, halfmod: int, logmod: int, val: int) 246 | (17, 8, 5, 1, 1), 247 | (17, 8, 5, 2, 2), 248 | (17, 8, 5, 3, 3), 249 | (17, 8, 5, 4, 4), 250 | (17, 8, 5, 5, 5), 251 | (17, 8, 5, 6, 6), 252 | (17, 8, 5, 7, 7), 253 | (17, 8, 5, 8, 8), 254 | (17, 8, 5, 9, -8), 255 | (17, 8, 5, 10, -7), 256 | (17, 8, 5, 11, -6), 257 | (17, 8, 5, 12, -5), 258 | (17, 8, 5, 13, -4), 259 | (17, 8, 5, 14, -3), 260 | (17, 8, 5, 15, -2), 261 | (17, 8, 5, 16, -1), 262 | ] 263 | 264 | 265 | @pytest.mark.parametrize("q,halfmod,logmod,val,expected_output", CENT_CASES) 266 | def test_cent(q, halfmod, logmod, val, expected_output): 267 | assert cent(q=q, halfmod=halfmod, logmod=logmod, val=val) == expected_output 268 | 269 | 270 | ZETAS_AND_INVS_CASES = [ 271 | (17, 8, 8, 5, 16, 4, ([-1, -4, -8, 3], [-1, 4, 2, 6])) 272 | ] 273 | 274 | 275 | @pytest.mark.parametrize("q,d,halfmod,logmod,n,lgn,expected_output", ZETAS_AND_INVS_CASES) 276 | def test_make_zetas_and_invs(q, d, halfmod, logmod, n, lgn, expected_output): 277 | assert make_zetas_and_invs(q=q, d=d, n=n, lgn=lgn) == expected_output 278 | 279 | 280 | ZETAS = [ 281 | (17, 8, 8, 5, 16, 4, 3, 6, ([-1, -4, -8, 3], [-1, 4, 2, 6])), # (q, d, halfmod, logmod, n, lgn, expected_output) 282 | (17, 8, 8, 5, 16, 4, 1, 1, ([1, 1, 1, 1], [1, 1, 1, 1])), 283 | (17, 8, 8, 5, 16, 4, 1, 2, ([1, 1, 1, 1], [1, -1, 4, 2])), 284 | (17, 8, 8, 5, 16, 4, 1, 3, ([1, 1, 1, 1], [-1, -4, -8, 3])), 285 | ] 286 | 287 | 288 | @pytest.mark.parametrize("q,d,halfmod,logmod,n,lgn,rou,rou_inv,expected_output", ZETAS) 289 | def test_make_zetas(mocker, q, d, halfmod, logmod, n, lgn, rou, rou_inv, expected_output): 290 | mocker.patch('lattice_algebra.main.get_prim_rou_and_rou_inv', return_value=(rou, rou_inv)) 291 | assert make_zetas_and_invs(q=q, d=d, n=n, lgn=lgn) == expected_output 292 | 293 | 294 | # We only test the NTT and inverse of constant polynomials here; more thorough tests are advisable 295 | NTT_CASES = [ 296 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [1] + [0] * 15, [1] * 16), 297 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [2] + [0] * 15, [2] * 16), 298 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [3] + [0] * 15, [3] * 16), 299 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [4] + [0] * 15, [4] * 16), 300 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [5] + [0] * 15, [5] * 16), 301 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [6] + [0] * 15, [6] * 16), 302 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [7] + [0] * 15, [7] * 16), 303 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [8] + [0] * 15, [8] * 16), 304 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [-8] + [0] * 15, [-8] * 16), 305 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [-7] + [0] * 15, [-7] * 16), 306 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [-6] + [0] * 15, [-6] * 16), 307 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [-5] + [0] * 15, [-5] * 16), 308 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [-4] + [0] * 15, [-4] * 16), 309 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [-3] + [0] * 15, [-3] * 16), 310 | 
(17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [-2] + [0] * 15, [-2] * 16), 311 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], False, 8, 5, 16, 4, [-1] + [0] * 15, [-1] * 16), 312 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [1] * 16, [1] + [0] * 15), 313 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [2] * 16, [2] + [0] * 15), 314 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [3] * 16, [3] + [0] * 15), 315 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [4] * 16, [4] + [0] * 15), 316 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [5] * 16, [5] + [0] * 15), 317 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [6] * 16, [6] + [0] * 15), 318 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [7] * 16, [7] + [0] * 15), 319 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [8] * 16, [8] + [0] * 15), 320 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [-8] * 16, [-8] + [0] * 15), 321 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [-7] * 16, [-7] + [0] * 15), 322 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [-6] * 16, [-6] + [0] * 15), 323 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [-5] * 16, [-5] + [0] * 15), 324 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [-4] * 16, [-4] + [0] * 15), 325 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [-3] * 16, [-3] + [0] * 15), 326 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [-2] * 16, [-2] + [0] * 15), 327 | (17, [-1, -4, -8, 3], [-1, 4, 2, 6], True, 8, 5, 16, 4, [-1] * 16, [-1] + [0] * 15), 328 | ] 329 | 330 | 331 | @pytest.mark.parametrize("q,zetas,zetas_inv,inv_flag,halfmod,logmod,n,lgn,val,expected_output", NTT_CASES) 332 | def test_ntt(q, zetas, zetas_inv, inv_flag, halfmod, logmod, n, lgn, val, expected_output): 333 | assert expected_output == ntt( 334 | q=q, zetas=zetas, zetas_inv=zetas_inv, inv_flag=inv_flag, halfmod=halfmod, logmod=logmod, n=n, lgn=lgn, val=val 335 | ) 336 | 337 | 338 | small_bd_for_testing: int = 2 339 | small_wt_for_testing: int = 2 340 | small_dist_pars: Dict[str, int] = {'bd': small_bd_for_testing, 'wt': small_wt_for_testing} 341 | bits_to_decode_for_testing: int = ceil(log2(small_bd_for_testing)) + 1 + secpar4testing 342 | bits_to_indices_for_testing: int = ceil(log2(degree_for_testing)) 343 | bits_to_indices_for_testing += (small_wt_for_testing - 1) * (ceil(log2(degree_for_testing)) + secpar4testing) 344 | 345 | DECODE2COEF_CASES = [ 346 | ( 347 | secpar4testing, 348 | UNIFORM_INFINITY_WEIGHT, 349 | lp_for_testing, 350 | small_dist_pars, 351 | '0' + bin(i)[2:].zfill(bits_to_decode_for_testing - 1), 352 | - (i % small_bd_for_testing) - 1 353 | ) for i in range(2 ** 7) 354 | ] + [ 355 | ( 356 | secpar4testing, 357 | UNIFORM_INFINITY_WEIGHT, 358 | lp_for_testing, 359 | small_dist_pars, 360 | '1' + bin(i)[2:].zfill(bits_to_decode_for_testing - 1), 361 | (i % small_bd_for_testing) + 1 362 | ) for i in range(2 ** 7) 363 | ] 364 | 365 | 366 | @pytest.mark.parametrize("secpar, dist, lp, dist_pars, val, expected_output", DECODE2COEF_CASES) 367 | def test_decode2coef(secpar, dist, lp, dist_pars, val, expected_output): 368 | k = ceil(log2(dist_pars['bd'])) + 1 + secpar 369 | assert expected_output == decode2coef( 370 | secpar=secpar, lp=lp, distribution=dist, dist_pars=dist_pars, val=val, btd=k 371 | ) 372 | 373 | 374 | DECODE2COEFS_CASES = [ 375 | ( 376 | secpar4testing, 377 | UNIFORM_INFINITY_WEIGHT, 378 | lp_for_testing, 379 | small_dist_pars, 380 | bin(randbits(2 * 
bits_to_decode_for_testing * small_dist_pars['wt'])), 381 | [(1 + (i % (lp_for_testing.modulus // 2))) * sign_i, (1 + (j % (lp_for_testing.modulus // 2))) * sign_j], 382 | [(1 + (i % (lp_for_testing.modulus // 2))) * sign_i, (1 + (j % (lp_for_testing.modulus // 2))) * sign_j], 383 | ) for i in range(2 ** 7) for j in range(2 ** 3) for sign_i in [-1, 1] for sign_j in [-1, 1] 384 | ] 385 | 386 | 387 | @pytest.mark.parametrize("secpar,dist,lp,dist_pars,val,responses,expected_output", DECODE2COEFS_CASES) 388 | def test_decode2coefs(mocker, secpar, dist, lp, dist_pars, val, responses, expected_output): 389 | assert responses == expected_output 390 | 391 | mocker.patch('lattice_algebra.main.decode2coef', side_effect=responses) 392 | assert dist == UNIFORM_INFINITY_WEIGHT 393 | k = ceil(log2(dist_pars['bd'])) + 1 + secpar 394 | observed_output = decode2coefs( 395 | secpar=secpar, lp=lp, distribution=dist, dist_pars=dist_pars, val=val, 396 | num_coefs=dist_pars['wt'], btd=k 397 | ) 398 | assert observed_output == expected_output 399 | 400 | 401 | def int2bin(x: int, n: int): 402 | return bin(x)[2:].zfill(n) 403 | 404 | 405 | def logdeg(d: int): 406 | return ceil(log2(d)) 407 | 408 | 409 | DECODE2INDICES_CASES = [ 410 | ( 411 | secpar4testing, 412 | UNIFORM_INFINITY_WEIGHT, 413 | lp_for_testing, 414 | small_dist_pars, 415 | int2bin(x=a, n=logdeg(lp_for_testing.degree)) + int2bin(x=b, n=logdeg(lp_for_testing.degree) + secpar4testing), 416 | small_wt_for_testing, 417 | [a, b] if b < a else [a, b + 1], 418 | ) for a in range(2 ** 5) for b in range(2 ** 5) 419 | ] 420 | 421 | 422 | @pytest.mark.parametrize("secpar,dist,lp,dist_pars,val,num_coefs,expected_output", DECODE2INDICES_CASES) 423 | def test_decode2indices(secpar, dist, lp, dist_pars, val, num_coefs, expected_output): 424 | observed_output = decode2indices(secpar=secpar, lp=lp, num_coefs=dist_pars['wt'], val=val, bti=bits_to_indices_for_testing) 425 | assert observed_output == expected_output 426 | 427 | 428 | DECODE2POLYCOEFS_CASES = [ 429 | ( 430 | secpar4testing, 431 | lp_for_testing, 432 | UNIFORM_INFINITY_WEIGHT, 433 | {'wt': small_wt_for_testing, 'bd': small_bd_for_testing}, 434 | bin(randbits(8*get_gen_bytes_per_poly( 435 | secpar=secpar4testing, 436 | lp=lp_for_testing, 437 | distribution=UNIFORM_INFINITY_WEIGHT, 438 | dist_pars={'wt': small_wt_for_testing, 'bd': small_bd_for_testing}, 439 | num_coefs=small_wt_for_testing, 440 | bti=bits_to_indices_for_testing, 441 | btd=bits_to_decode_for_testing)))[2:].zfill(8*get_gen_bytes_per_poly( 442 | secpar=secpar4testing, 443 | lp=lp_for_testing, 444 | distribution=UNIFORM_INFINITY_WEIGHT, 445 | dist_pars={'wt': small_wt_for_testing, 'bd': small_bd_for_testing}, 446 | num_coefs=small_wt_for_testing, 447 | bti=bits_to_indices_for_testing, 448 | btd=bits_to_decode_for_testing)), 449 | small_wt_for_testing, 450 | bits_to_indices_for_testing, 451 | bits_to_decode_for_testing, 452 | [(i + j) % lp_for_testing.degree for j in range(small_wt_for_testing)], 453 | [2 ** j % lp_for_testing.modulus for j in range(small_wt_for_testing)], 454 | {(i + j) % lp_for_testing.degree: 2 ** j % lp_for_testing.modulus for j in range(small_wt_for_testing)} 455 | ) 456 | for i in range(2 ** 10) 457 | ] 458 | 459 | 460 | @pytest.mark.parametrize("secpar,lp,dist,dist_pars,val,num_coefs,bits_to_indices,bits_to_decode,expected_indices,expected_coefs,expected_output", DECODE2POLYCOEFS_CASES) 461 | def test_decode2polycoefs( 462 | mocker, secpar, lp, dist, dist_pars, val, num_coefs, bits_to_indices, bits_to_decode, 463 | 
expected_indices, expected_coefs, expected_output 464 | ): 465 | mocker.patch("lattice_algebra.main.decode2indices", return_value=expected_indices) 466 | mocker.patch("lattice_algebra.main.decode2coefs", return_value=expected_coefs) 467 | assert expected_output == decode2polycoefs(secpar=secpar, lp=lp, distribution=dist, dist_pars=dist_pars, val=val, 468 | num_coefs=num_coefs, bti=bits_to_indices, 469 | btd=bits_to_decode) 470 | 471 | 472 | exp_bits_per_poly_for_testing: int = small_dist_pars['wt'] 473 | exp_bits_per_poly_for_testing *= bits_to_decode(secpar=secpar4testing, bd=small_dist_pars['bd']) 474 | exp_bits_per_poly_for_testing += bits_to_indices( 475 | secpar=secpar4testing, degree=lp_for_testing.degree, wt=small_dist_pars['wt'] 476 | ) 477 | exp_bytes_per_poly_for_testing = ceil(exp_bits_per_poly_for_testing / 8) 478 | GET_GEN_BYTES_PER_POLY_CASES = [ 479 | ( 480 | secpar4testing, 481 | lp_for_testing, 482 | UNIFORM_INFINITY_WEIGHT, 483 | small_dist_pars, 484 | small_wt_for_testing, 485 | bits_to_indices_for_testing, 486 | bits_to_decode_for_testing, 487 | exp_bytes_per_poly_for_testing 488 | ) 489 | ] 490 | 491 | 492 | @pytest.mark.parametrize( 493 | "secpar,lp,dist,dist_pars,num_coefs, bits_to_indices, bits_to_decode,expected_output", 494 | GET_GEN_BYTES_PER_POLY_CASES 495 | ) 496 | def test_get_gen_bits(secpar, lp, dist, dist_pars, num_coefs, bits_to_indices, bits_to_decode, expected_output): 497 | observed_output = get_gen_bytes_per_poly( 498 | secpar=secpar, lp=lp, distribution=dist, dist_pars=dist_pars, num_coefs=num_coefs, 499 | bti=bits_to_indices, btd=bits_to_decode 500 | ) 501 | assert observed_output == expected_output 502 | 503 | 504 | # TODO: Pick up parameterizing from here. 505 | @pytest.fixture 506 | def one_with_const_time() -> Polynomial: 507 | return Polynomial(lp=lp_for_testing, coefs={0: 1}) 508 | 509 | 510 | @pytest.fixture 511 | def one_without_const_time() -> Polynomial: 512 | return Polynomial(lp=lp_for_testing, coefs={0: 1}, const_time_flag=False) 513 | 514 | 515 | @pytest.fixture 516 | def some_ran_lin_polys_with_const_time() -> List[Tuple[int, int, Polynomial]]: 517 | result = [] 518 | for i in range(2 * sample_size_for_random_tests): 519 | a = (2 * randbits(1) - 1) * (randbelow(lp_for_testing.halfmod) + 1) 520 | b = (2 * randbits(1) - 1) * (randbelow(lp_for_testing.halfmod) + 1) 521 | result += [(a, b, Polynomial(lp=lp_for_testing, coefs={0: a, 1: b}))] 522 | return result 523 | 524 | 525 | @pytest.fixture 526 | def some_ran_lin_polys_without_const_time() -> List[Tuple[int, int, Polynomial]]: 527 | result = [] 528 | for i in range(2 * sample_size_for_random_tests): 529 | a = (2 * randbits(1) - 1) * (randbelow(lp_for_testing.halfmod) + 1) 530 | b = (2 * randbits(1) - 1) * (randbelow(lp_for_testing.halfmod) + 1) 531 | result += [(a, b, Polynomial(lp=lp_for_testing, coefs={0: a, 1: b}, const_time_flag=False))] 532 | return result 533 | 534 | 535 | # @pytest.mark.skip 536 | def test_polynomial_init_with_const_time(one_with_const_time, some_ran_lin_polys_with_const_time): 537 | lp: LatticeParameters = lp_for_testing 538 | 539 | # First, let's mess with the identity polynomial 540 | assert one_with_const_time.ntt_representation == [1 for _ in range(n_for_testing)] 541 | 542 | # Now let's make some random linear polynomials 543 | for next_tuple in some_ran_lin_polys_with_const_time: 544 | a, b, f = next_tuple 545 | assert -lp.halfmod <= a <= lp.halfmod 546 | assert -lp.halfmod <= b <= lp.halfmod 547 | assert isinstance(f, Polynomial) 548 | assert all((j - (a 
+ b * lp.rou ** k)) % lp.modulus == 0 for k, j in zip(range(lp.n), f.ntt_representation)) 549 | 550 | 551 | # @pytest.mark.skip 552 | def test_polynomial_init_without_const_time(one_without_const_time, some_ran_lin_polys_without_const_time): 553 | lp: LatticeParameters = lp_for_testing 554 | 555 | # First, let's mess with the identity polynomial 556 | assert one_without_const_time.ntt_representation == [1 for _ in range(n_for_testing)] 557 | 558 | # Now let's make some random linear polynomials 559 | for next_tuple in some_ran_lin_polys_without_const_time: 560 | a, b, f = next_tuple 561 | assert -lp.halfmod <= a <= lp.halfmod 562 | assert -lp.halfmod <= b <= lp.halfmod 563 | assert isinstance(f, Polynomial) 564 | assert all((j - (a + b * lp.rou ** k)) % lp.modulus == 0 for k, j in zip(range(lp.n), f.ntt_representation)) 565 | 566 | 567 | # @pytest.mark.skip 568 | def test_polynomial_eq_with_const_time(one_with_const_time, some_ran_lin_polys_with_const_time): 569 | lp: LatticeParameters = lp_for_testing 570 | 571 | # First, let's make two identity polynomials and check they are equal. 572 | another_one = Polynomial(lp=lp, coefs={0: 1}) 573 | assert one_with_const_time == another_one 574 | 575 | # Now check that if we change a single coefficient, the result changes 576 | not_one = Polynomial(lp=lp, coefs={0: -1}) 577 | assert one_with_const_time != not_one 578 | 579 | # Now let's do the same with some random linear polynomials 580 | for next_tuple in some_ran_lin_polys_with_const_time: 581 | a, b, next_poly = next_tuple 582 | another_poly = Polynomial(lp=lp, coefs={0: a, 1: b}) 583 | assert next_poly == another_poly 584 | 585 | # Now check that if we change a single coefficient, the result changes 586 | not_another_poly = Polynomial(lp=lp, coefs={0: a, 1: -b}) 587 | assert next_poly != not_another_poly 588 | 589 | 590 | # @pytest.mark.skip 591 | def test_polynomial_eq_without_const_time(one_without_const_time, some_ran_lin_polys_without_const_time): 592 | lp: LatticeParameters = lp_for_testing 593 | 594 | # First, let's make two identity polynomials and check they are equal. 
595 | another_one = Polynomial(lp=lp, coefs={0: 1}, const_time_flag=False) 596 | assert one_without_const_time == another_one 597 | 598 | # Now check that if we change a single coefficient, the result changes 599 | not_one = Polynomial(lp=lp, coefs={0: -1}, const_time_flag=False) 600 | assert one_without_const_time != not_one 601 | 602 | # Now let's do the same with some random linear polynomials 603 | for next_tuple in some_ran_lin_polys_without_const_time: 604 | a, b, next_poly = next_tuple 605 | another_poly = Polynomial(lp=lp, coefs={0: a, 1: b}, const_time_flag=False) 606 | assert next_poly == another_poly 607 | 608 | # Now check that if we change a single coefficient, the result changes 609 | not_another_poly = Polynomial(lp=lp, coefs={0: a, 1: -b}, const_time_flag=False) 610 | assert next_poly != not_another_poly 611 | 612 | 613 | @pytest.fixture 614 | def two_with_const_time() -> Polynomial: 615 | return Polynomial(lp=lp_for_testing, coefs={0: 2}) 616 | 617 | 618 | @pytest.fixture 619 | def two_without_const_time() -> Polynomial: 620 | return Polynomial(lp=lp_for_testing, coefs={0: 2}, const_time_flag=False) 621 | 622 | 623 | @pytest.fixture 624 | def pairs_ran_lin_poly_with_const_time(some_ran_lin_polys_with_const_time) -> List[ 625 | Tuple[Tuple[int, int, Polynomial], Tuple[int, int, Polynomial]]]: 626 | result = [] 627 | for i in range(0, sample_size_for_random_tests, 2): 628 | result += [(some_ran_lin_polys_with_const_time[i], some_ran_lin_polys_with_const_time[i + 1])] 629 | return result 630 | 631 | 632 | @pytest.fixture 633 | def pairs_ran_lin_poly_without_const_time(some_ran_lin_polys_without_const_time) -> List[ 634 | Tuple[Tuple[int, int, Polynomial], Tuple[int, int, Polynomial]]]: 635 | result = [] 636 | for i in range(0, sample_size_for_random_tests, 2): 637 | result += [(some_ran_lin_polys_without_const_time[i], some_ran_lin_polys_without_const_time[i + 1])] 638 | return result 639 | 640 | 641 | @pytest.fixture 642 | def pairs_of_random_polys_and_their_sums_with_const_time(pairs_ran_lin_poly_with_const_time) -> List[ 643 | Tuple[ 644 | Tuple[int, int, Polynomial], 645 | Tuple[int, int, Polynomial], 646 | Tuple[int, int, Polynomial, int, int], 647 | Tuple[dict, Polynomial, int, int] 648 | ] 649 | ]: 650 | result = [] 651 | for next_pair in pairs_ran_lin_poly_with_const_time: 652 | next_f, next_g = next_pair 653 | a_f, b_f, f = next_f 654 | a_g, b_g, g = next_g 655 | observed_h = f + g 656 | obs_h_coefs, obs_h_norm, obs_h_wt = observed_h.get_coef_rep() 657 | a_h = (a_f + a_g) % lp_for_testing.modulus 658 | if a_h > lp_for_testing.modulus // 2: 659 | a_h -= lp_for_testing.modulus 660 | b_h = (b_f + b_g) % lp_for_testing.modulus 661 | if b_h > lp_for_testing.modulus // 2: 662 | b_h -= lp_for_testing.modulus 663 | expected_h_coefs = {} 664 | if a_h != 0: 665 | expected_h_coefs[0] = a_h 666 | if b_h != 0: 667 | expected_h_coefs[1] = b_h 668 | expected_h = Polynomial(lp=lp_for_testing, coefs=expected_h_coefs) 669 | expected_h_norm = max(abs(a_h), abs(b_h)) 670 | expected_h_wt = len(expected_h_coefs) 671 | result += [ 672 | ( 673 | next_f, 674 | next_g, 675 | (a_h, b_h, expected_h, expected_h_norm, expected_h_wt), 676 | (obs_h_coefs, observed_h, obs_h_norm, obs_h_wt) 677 | ) 678 | ] 679 | return result 680 | 681 | 682 | @pytest.fixture 683 | def pairs_of_random_polys_and_their_sums_without_const_time(pairs_ran_lin_poly_without_const_time) -> List[ 684 | Tuple[ 685 | Tuple[int, int, Polynomial], 686 | Tuple[int, int, Polynomial], 687 | Tuple[int, int, Polynomial, int, int], 688 
| Tuple[dict, Polynomial, int, int] 689 | ] 690 | ]: 691 | result = [] 692 | for next_pair in pairs_ran_lin_poly_without_const_time: 693 | next_f, next_g = next_pair 694 | a_f, b_f, f = next_f 695 | a_g, b_g, g = next_g 696 | observed_h = f + g 697 | obs_h_coefs, obs_h_norm, obs_h_wt = observed_h.get_coef_rep() 698 | a_h = (a_f + a_g) % lp_for_testing.modulus 699 | if a_h > lp_for_testing.modulus // 2: 700 | a_h -= lp_for_testing.modulus 701 | b_h = (b_f + b_g) % lp_for_testing.modulus 702 | if b_h > lp_for_testing.modulus // 2: 703 | b_h -= lp_for_testing.modulus 704 | expected_h_coefs = {} 705 | if a_h != 0: 706 | expected_h_coefs[0] = a_h 707 | if b_h != 0: 708 | expected_h_coefs[1] = b_h 709 | expected_h = Polynomial(lp=lp_for_testing, coefs=expected_h_coefs) 710 | expected_h_norm = max(abs(a_h), abs(b_h)) 711 | expected_h_wt = len(expected_h_coefs) 712 | result += [ 713 | ( 714 | next_f, 715 | next_g, 716 | (a_h, b_h, expected_h, expected_h_norm, expected_h_wt), 717 | (obs_h_coefs, observed_h, obs_h_norm, obs_h_wt) 718 | ) 719 | ] 720 | return result 721 | 722 | 723 | # @pytest.mark.skip 724 | def test_polynomial_add_with_const_time(one_with_const_time, two_with_const_time, 725 | pairs_of_random_polys_and_their_sums_with_const_time): 726 | lp = lp_for_testing 727 | # First, let's make an identity polynomials and add it to itself 728 | assert one_with_const_time + one_with_const_time == two_with_const_time 729 | 730 | # Now let's do some addition with some random linear polynomials (AND the unity) 731 | for next_item in pairs_of_random_polys_and_their_sums_with_const_time: 732 | f_dat, g_dat, expected_h_dat, observed_h_dat = next_item 733 | a_f, b_f, f = f_dat 734 | a_g, b_g, g = g_dat 735 | a_h, b_h, exp_h, exp_h_norm, exp_h_wt = expected_h_dat 736 | obs_h_coefs, obs_h, obs_h_norm, obs_h_wt = observed_h_dat 737 | assert f + g == exp_h == obs_h 738 | assert len(obs_h_coefs) == 2 739 | assert 0 in obs_h_coefs 740 | assert 1 in obs_h_coefs 741 | assert (obs_h_coefs[0] - a_h) % lp.modulus == 0 742 | assert (obs_h_coefs[1] - b_h) % lp.modulus == 0 743 | assert (a_f + a_g - a_h) % lp.modulus == 0 744 | assert (b_f + b_g - b_h) % lp.modulus == 0 745 | 746 | 747 | # @pytest.mark.skip 748 | def test_polynomial_add_without_const_time(one_without_const_time, two_without_const_time, 749 | pairs_of_random_polys_and_their_sums_without_const_time): 750 | lp = lp_for_testing 751 | # First, let's make an identity polynomials and add it to itself 752 | assert one_without_const_time + one_without_const_time == two_without_const_time 753 | 754 | # Now let's do some addition with some random linear polynomials (AND the unity) 755 | for next_item in pairs_of_random_polys_and_their_sums_without_const_time: 756 | f_dat, g_dat, expected_h_dat, observed_h_dat = next_item 757 | a_f, b_f, f = f_dat 758 | a_g, b_g, g = g_dat 759 | a_h, b_h, exp_h, exp_h_norm, exp_h_wt = expected_h_dat 760 | obs_h_coefs, obs_h, obs_h_norm, obs_h_wt = observed_h_dat 761 | assert f + g == exp_h == obs_h 762 | assert len(obs_h_coefs) == 2 763 | assert 0 in obs_h_coefs 764 | assert 1 in obs_h_coefs 765 | assert (obs_h_coefs[0] - a_h) % lp.modulus == 0 766 | assert (obs_h_coefs[1] - b_h) % lp.modulus == 0 767 | assert (a_f + a_g - a_h) % lp.modulus == 0 768 | assert (b_f + b_g - b_h) % lp.modulus == 0 769 | 770 | 771 | @pytest.fixture 772 | def pairs_of_random_polys_and_their_diffs_with_const_time(pairs_ran_lin_poly_with_const_time) -> \ 773 | List[ 774 | Tuple[ 775 | Tuple[int, int, Polynomial], 776 | Tuple[int, int, Polynomial], 777 
| Tuple[int, int, Polynomial, int, int], 778 | Tuple[Dict[int, int], Polynomial, int, int]] 779 | ]: 780 | result = [] 781 | for next_pair in pairs_ran_lin_poly_with_const_time: 782 | next_f, next_g = next_pair 783 | a_f, b_f, f = next_f 784 | a_g, b_g, g = next_g 785 | a_h = (a_f - a_g) % lp_for_testing.modulus 786 | b_h = (b_f - b_g) % lp_for_testing.modulus 787 | expected_h_coefs: dict = {} 788 | if a_h != 0: 789 | expected_h_coefs[0] = a_h 790 | if b_h != 0: 791 | expected_h_coefs[1] = b_h 792 | expected_h_norm: int = max(abs(expected_h_coefs[i]) for i in expected_h_coefs) 793 | expected_h_wt: int = len(expected_h_coefs) 794 | 795 | observed_h = f - g 796 | obs_h_coefs, obs_h_norm, obs_h_wt = observed_h.get_coef_rep() 797 | expected_h = Polynomial(lp=lp_for_testing, coefs=expected_h_coefs) 798 | result += [ 799 | ( 800 | next_f, 801 | next_g, 802 | (a_h, b_h, expected_h, expected_h_norm, expected_h_wt), 803 | (obs_h_coefs, observed_h, obs_h_norm, obs_h_wt) 804 | ) 805 | ] 806 | return result 807 | 808 | 809 | @pytest.fixture 810 | def pairs_of_random_polys_and_their_diffs_without_const_time(pairs_ran_lin_poly_without_const_time) -> \ 811 | List[ 812 | Tuple[ 813 | Tuple[int, int, Polynomial], 814 | Tuple[int, int, Polynomial], 815 | Tuple[int, int, Polynomial, int, int], 816 | Tuple[Dict[int, int], Polynomial, int, int]] 817 | ]: 818 | result = [] 819 | for next_pair in pairs_ran_lin_poly_without_const_time: 820 | next_f, next_g = next_pair 821 | a_f, b_f, f = next_f 822 | a_g, b_g, g = next_g 823 | a_h = (a_f - a_g) % lp_for_testing.modulus 824 | b_h = (b_f - b_g) % lp_for_testing.modulus 825 | expected_h_coefs: dict = {} 826 | if a_h != 0: 827 | expected_h_coefs[0] = a_h 828 | if b_h != 0: 829 | expected_h_coefs[1] = b_h 830 | expected_h_norm: int = max(abs(expected_h_coefs[i]) for i in expected_h_coefs) 831 | expected_h_wt: int = len(expected_h_coefs) 832 | 833 | observed_h = f - g 834 | obs_h_coefs, obs_h_norm, obs_h_wt = observed_h.get_coef_rep() 835 | expected_h = Polynomial(lp=lp_for_testing, coefs=expected_h_coefs) 836 | result += [ 837 | ( 838 | next_f, 839 | next_g, 840 | (a_h, b_h, expected_h, expected_h_norm, expected_h_wt), 841 | (obs_h_coefs, observed_h, obs_h_norm, obs_h_wt) 842 | ) 843 | ] 844 | return result 845 | 846 | 847 | # @pytest.mark.skip 848 | def test_polynomial_sub_with_const_time(pairs_of_random_polys_and_their_diffs_with_const_time): 849 | lp: LatticeParameters = lp_for_testing 850 | # Now let's do some addition with some random linear polynomials (AND the unity) 851 | for next_item in pairs_of_random_polys_and_their_diffs_with_const_time: 852 | f_dat, g_dat, expected_h_dat, observed_h_dat = next_item 853 | a_f, b_f, f = f_dat 854 | a_g, b_g, g = g_dat 855 | a_h, b_h, exp_h, exp_h_norm, exp_h_wt = expected_h_dat 856 | obs_h_coefs, obs_h, obs_h_norm, obs_h_wt = observed_h_dat 857 | assert f - g == exp_h == obs_h 858 | assert len(obs_h_coefs) == 2 859 | assert 0 in obs_h_coefs 860 | assert 1 in obs_h_coefs 861 | assert (obs_h_coefs[0] - a_h) % lp.modulus == 0 862 | assert (obs_h_coefs[1] - b_h) % lp.modulus == 0 863 | assert (a_f - a_g - a_h) % lp.modulus == 0 864 | assert (b_f - b_g - b_h) % lp.modulus == 0 865 | 866 | 867 | # @pytest.mark.skip 868 | def test_polynomial_sub_without_const_time(pairs_of_random_polys_and_their_diffs_without_const_time): 869 | lp: LatticeParameters = lp_for_testing 870 | # Now let's do some addition with some random linear polynomials (AND the unity) 871 | for next_item in 
pairs_of_random_polys_and_their_diffs_without_const_time: 872 | f_dat, g_dat, expected_h_dat, observed_h_dat = next_item 873 | a_f, b_f, f = f_dat 874 | a_g, b_g, g = g_dat 875 | a_h, b_h, exp_h, exp_h_norm, exp_h_wt = expected_h_dat 876 | obs_h_coefs, obs_h, obs_h_norm, obs_h_wt = observed_h_dat 877 | assert f - g == exp_h == obs_h 878 | assert len(obs_h_coefs) == 2 879 | assert 0 in obs_h_coefs 880 | assert 1 in obs_h_coefs 881 | assert (obs_h_coefs[0] - a_h) % lp.modulus == 0 882 | assert (obs_h_coefs[1] - b_h) % lp.modulus == 0 883 | assert (a_f - a_g - a_h) % lp.modulus == 0 884 | assert (b_f - b_g - b_h) % lp.modulus == 0 885 | 886 | 887 | @pytest.fixture 888 | def pairs_of_random_polys_and_their_products_with_const_time(pairs_ran_lin_poly_with_const_time) -> \ 889 | List[ 890 | Tuple[ 891 | Tuple[int, int, Polynomial], 892 | Tuple[int, int, Polynomial], 893 | Tuple[int, int, int, Polynomial, int, int], 894 | Tuple[Dict[int, int], Polynomial, int, int] 895 | ] 896 | ]: 897 | result = [] 898 | for next_pair in pairs_ran_lin_poly_with_const_time: 899 | next_f, next_g = next_pair 900 | a_f, b_f, f = next_f 901 | a_g, b_g, g = next_g 902 | a_h = (a_f * a_g) % lp_for_testing.modulus 903 | b_h = (a_f * b_g + b_f * a_g) % lp_for_testing.modulus 904 | c_h = (b_f * b_g) % lp_for_testing.modulus 905 | exp_h_coefs = {} 906 | if a_h != 0: 907 | exp_h_coefs[0] = a_h 908 | if b_h != 0: 909 | exp_h_coefs[1] = b_h 910 | if c_h != 0: 911 | exp_h_coefs[2] = c_h 912 | exp_h_norm = max(abs(a_h), abs(b_h), abs(c_h)) 913 | exp_h_wt = len(exp_h_coefs) 914 | observed_h = f * g 915 | obs_h_coefs, obs_h_norm, obs_h_wt = observed_h.get_coef_rep() 916 | expected_h = Polynomial(lp=lp_for_testing, coefs=exp_h_coefs) 917 | result += [ 918 | ( 919 | next_f, 920 | next_g, 921 | (a_f * a_g, a_f * b_g + a_g * b_f, b_f * b_g, expected_h, exp_h_norm, exp_h_wt), 922 | (obs_h_coefs, observed_h, obs_h_norm, obs_h_wt) 923 | ) 924 | ] 925 | return result 926 | 927 | 928 | @pytest.fixture 929 | def pairs_of_random_polys_and_their_products_without_const_time(pairs_ran_lin_poly_without_const_time) -> \ 930 | List[ 931 | Tuple[ 932 | Tuple[int, int, Polynomial], 933 | Tuple[int, int, Polynomial], 934 | Tuple[int, int, int, Polynomial, int, int], 935 | Tuple[Dict[int, int], Polynomial, int, int] 936 | ] 937 | ]: 938 | result = [] 939 | for next_pair in pairs_ran_lin_poly_without_const_time: 940 | next_f, next_g = next_pair 941 | a_f, b_f, f = next_f 942 | a_g, b_g, g = next_g 943 | a_h = (a_f * a_g) % lp_for_testing.modulus 944 | b_h = (a_f * b_g + b_f * a_g) % lp_for_testing.modulus 945 | c_h = (b_f * b_g) % lp_for_testing.modulus 946 | exp_h_coefs = {} 947 | if a_h != 0: 948 | exp_h_coefs[0] = a_h 949 | if b_h != 0: 950 | exp_h_coefs[1] = b_h 951 | if c_h != 0: 952 | exp_h_coefs[2] = c_h 953 | exp_h_norm = max(abs(a_h), abs(b_h), abs(c_h)) 954 | exp_h_wt = len(exp_h_coefs) 955 | observed_h = f * g 956 | obs_h_coefs, obs_h_norm, obs_h_wt = observed_h.get_coef_rep() 957 | expected_h = Polynomial(lp=lp_for_testing, coefs=exp_h_coefs) 958 | result += [ 959 | ( 960 | next_f, 961 | next_g, 962 | (a_f * a_g, a_f * b_g + a_g * b_f, b_f * b_g, expected_h, exp_h_norm, exp_h_wt), 963 | (obs_h_coefs, observed_h, obs_h_norm, obs_h_wt) 964 | ) 965 | ] 966 | return result 967 | 968 | 969 | # @pytest.mark.skip 970 | def test_polynomial_mul_small_with_const_time(one_with_const_time, 971 | pairs_of_random_polys_and_their_products_with_const_time): 972 | lp = lp_for_testing 973 | # First, let's make an identity polynomials and add it to itself 
974 | assert one_with_const_time * one_with_const_time == one_with_const_time 975 | 976 | # Now let's do some multiplication with some random linear polynomials (AND the unity) 977 | for next_item in pairs_of_random_polys_and_their_products_with_const_time: 978 | f_dat, g_dat, expected_h_dat, observed_h_dat = next_item 979 | a_f, b_f, f = f_dat 980 | a_g, b_g, g = g_dat 981 | a_h, b_h, c_h, exp_h, exp_h_norm, exp_h_wt = expected_h_dat 982 | obs_h_coefs, obs_h, obs_h_norm, obs_h_wt = observed_h_dat 983 | assert one_with_const_time * f == f 984 | assert f * one_with_const_time == f 985 | assert one_with_const_time * g == g 986 | assert g * one_with_const_time == g 987 | assert f * g == exp_h == obs_h 988 | assert len(obs_h_coefs) == 3 989 | assert 0 in obs_h_coefs 990 | assert 1 in obs_h_coefs 991 | assert 2 in obs_h_coefs 992 | assert (obs_h_coefs[0] - a_h) % lp.modulus == 0 993 | assert (obs_h_coefs[1] - b_h) % lp.modulus == 0 994 | assert (obs_h_coefs[2] - c_h) % lp.modulus == 0 995 | assert (a_f * a_g - a_h) % lp.modulus == 0 996 | assert (a_f * b_g + b_f * a_g - b_h) % lp.modulus == 0 997 | assert (b_f * b_g - c_h) % lp.modulus == 0 998 | 999 | 1000 | # @pytest.mark.skip 1001 | def test_polynomial_mul_small_without_const_time(one_without_const_time, 1002 | pairs_of_random_polys_and_their_products_without_const_time): 1003 | lp = lp_for_testing 1004 | # First, let's make an identity polynomial and multiply it by itself 1005 | assert one_without_const_time * one_without_const_time == one_without_const_time 1006 | 1007 | # Now let's do some multiplication with some random linear polynomials (AND the unity) 1008 | for next_item in pairs_of_random_polys_and_their_products_without_const_time: 1009 | f_dat, g_dat, expected_h_dat, observed_h_dat = next_item 1010 | a_f, b_f, f = f_dat 1011 | a_g, b_g, g = g_dat 1012 | a_h, b_h, c_h, exp_h, exp_h_norm, exp_h_wt = expected_h_dat 1013 | obs_h_coefs, obs_h, obs_h_norm, obs_h_wt = observed_h_dat 1014 | assert one_without_const_time * f == f 1015 | assert f * one_without_const_time == f 1016 | assert one_without_const_time * g == g 1017 | assert g * one_without_const_time == g 1018 | assert f * g == exp_h == obs_h 1019 | assert len(obs_h_coefs) == 3 1020 | assert 0 in obs_h_coefs 1021 | assert 1 in obs_h_coefs 1022 | assert 2 in obs_h_coefs 1023 | assert (obs_h_coefs[0] - a_h) % lp.modulus == 0 1024 | assert (obs_h_coefs[1] - b_h) % lp.modulus == 0 1025 | assert (obs_h_coefs[2] - c_h) % lp.modulus == 0 1026 | assert (a_f * a_g - a_h) % lp.modulus == 0 1027 | assert (a_f * b_g + b_f * a_g - b_h) % lp.modulus == 0 1028 | assert (b_f * b_g - c_h) % lp.modulus == 0 1029 | 1030 | 1031 | # @pytest.mark.skip 1032 | def test_polynomial_repr_with_const_time(some_ran_lin_polys_with_const_time): 1033 | for next_item in some_ran_lin_polys_with_const_time: 1034 | a, b, f = next_item 1035 | coef_rep, norm, wt = f.get_coef_rep() 1036 | assert norm == max(abs(a), abs(b)) 1037 | if a != 0 and b != 0: 1038 | assert wt == 2 1039 | elif a != 0 or b != 0: 1040 | assert wt == 1 1041 | else: 1042 | assert wt == 0 1043 | 1044 | sorted_keys = sorted(list(coef_rep.keys())) 1045 | sorted_coefs = [(i, coef_rep[i]) for i in sorted_keys] 1046 | assert str(f) == str((sorted_coefs, norm, wt)) 1047 | 1048 | 1049 | # @pytest.mark.skip 1050 | def test_polynomial_repr_without_const_time(some_ran_lin_polys_without_const_time): 1051 | for next_item in some_ran_lin_polys_without_const_time: 1052 | a, b, f = next_item 1053 | coef_rep, norm, wt = f.get_coef_rep() 1054 | assert norm == max(abs(a), abs(b))
1055 | if a != 0 and b != 0: 1056 | assert wt == 2 1057 | elif a != 0 or b != 0: 1058 | assert wt == 1 1059 | else: 1060 | assert wt == 0 1061 | 1062 | sorted_keys = sorted(list(coef_rep.keys())) 1063 | sorted_coefs = [(i, coef_rep[i]) for i in sorted_keys] 1064 | assert str(f) == str((sorted_coefs, norm, wt)) 1065 | 1066 | 1067 | # @pytest.mark.skip 1068 | def test_polynomial_reset_vals_with_const_time(one_with_const_time): 1069 | x = deepcopy(one_with_const_time) 1070 | x._reset_vals(coefs={0: 2}) 1071 | assert x.ntt_representation == [2 for _ in range(lp_for_testing.n)] 1072 | x._reset_vals(coefs={0: 1}) 1073 | assert x == one_with_const_time 1074 | 1075 | 1076 | # @pytest.mark.skip 1077 | def test_polynomial_reset_vals_without_const_time(one_without_const_time): 1078 | x = deepcopy(one_without_const_time) 1079 | x._reset_vals(coefs={0: 2}) 1080 | assert x.ntt_representation == [2 for _ in range(lp_for_testing.n)] 1081 | x._reset_vals(coefs={0: 1}) 1082 | assert x == one_without_const_time 1083 | 1084 | 1085 | # @pytest.mark.skip 1086 | def test_polynomial_get_coefs_with_const_time(one_with_const_time): 1087 | x = Polynomial(lp=lp_for_testing, coefs={1: 1}) 1088 | f = one_with_const_time + x 1089 | assert f.get_coef_rep() == ({0: 1, 1: 1}, 1, 2) 1090 | 1091 | 1092 | # @pytest.mark.skip 1093 | def test_polynomial_get_coefs_without_const_time(one_without_const_time): 1094 | x = Polynomial(lp=lp_for_testing, coefs={1: 1}) 1095 | f = one_without_const_time + x 1096 | assert f.get_coef_rep() == ({0: 1, 1: 1}, 1, 2) 1097 | 1098 | 1099 | # @pytest.mark.skip 1100 | def test_polynomial_norm_and_weight_with_const_time(some_ran_lin_polys_with_const_time): 1101 | for next_item in some_ran_lin_polys_with_const_time: 1102 | a, b, f = next_item 1103 | f_coefs, n, w = f.get_coef_rep() 1104 | assert n == max(abs(a), abs(b)) 1105 | if a != 0 and b != 0: 1106 | assert w == 2 1107 | elif (a == 0 and b != 0) or (a != 0 and b == 0): 1108 | assert w == 1 1109 | else: 1110 | assert w == 0 1111 | 1112 | 1113 | # @pytest.mark.skip 1114 | def test_polynomial_norm_and_weight_without_const_time(some_ran_lin_polys_without_const_time): 1115 | for next_item in some_ran_lin_polys_without_const_time: 1116 | a, b, f = next_item 1117 | f_coefs, n, w = f.get_coef_rep() 1118 | assert n == max(abs(a), abs(b)) 1119 | if a != 0 and b != 0: 1120 | assert w == 2 1121 | elif (a == 0 and b != 0) or (a != 0 and b == 0): 1122 | assert w == 1 1123 | else: 1124 | assert w == 0 1125 | 1126 | 1127 | # @pytest.mark.skip 1128 | def test_rand_poly_with_const_time(): 1129 | f = random_polynomial( 1130 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, 1131 | dist_pars=small_dist_pars, num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1132 | btd=bits_to_decode_for_testing 1133 | ) 1134 | assert isinstance(f, Polynomial) 1135 | f_coefs, n, w = f.get_coef_rep() 1136 | assert n <= small_dist_pars['bd'] and w <= small_dist_pars['bd'] 1137 | assert max(abs(f_coefs[i]) for i in f_coefs) <= n 1138 | assert len(f_coefs) <= w 1139 | 1140 | 1141 | # @pytest.mark.skip 1142 | def test_rand_poly_without_const_time(): 1143 | f = random_polynomial( 1144 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, 1145 | dist_pars=small_dist_pars, num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1146 | btd=bits_to_decode_for_testing, const_time_flag=False 1147 | ) 1148 | assert isinstance(f, Polynomial) 1149 | f_coefs, n, w = f.get_coef_rep() 1150 | assert n <= 
small_dist_pars['bd'] and w <= small_dist_pars['bd'] 1151 | assert max(abs(f_coefs[i]) for i in f_coefs) <= n 1152 | assert len(f_coefs) <= w 1153 | 1154 | 1155 | @pytest.fixture 1156 | def some_random_polys_for_a_vector_with_const_time() -> List[Polynomial]: 1157 | lp: LatticeParameters = lp_for_testing 1158 | return [random_polynomial( 1159 | secpar=secpar4testing, lp=lp, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1160 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1161 | btd=bits_to_decode_for_testing 1162 | ) for _ in range(lp.length)] 1163 | 1164 | 1165 | @pytest.fixture 1166 | def some_random_polys_for_a_vector_without_const_time() -> List[Polynomial]: 1167 | lp: LatticeParameters = lp_for_testing 1168 | return [random_polynomial( 1169 | secpar=secpar4testing, lp=lp, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1170 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1171 | btd=bits_to_decode_for_testing, const_time_flag=False 1172 | ) for _ in range(lp.length)] 1173 | 1174 | 1175 | # @pytest.mark.skip 1176 | def test_polynomial_vector_init_with_const_time(some_random_polys_for_a_vector_with_const_time): 1177 | lp: LatticeParameters = lp_for_testing 1178 | assert PolynomialVector(lp=lp, entries=some_random_polys_for_a_vector_with_const_time) 1179 | v = PolynomialVector(lp=lp, entries=some_random_polys_for_a_vector_with_const_time) 1180 | tmp = v.get_coef_rep() 1181 | assert max(i[1] for i in tmp) <= lp.halfmod // 2 1182 | assert max(i[2] for i in tmp) <= lp.degree 1183 | assert random_polynomialvector( 1184 | secpar=secpar4testing, lp=lp, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1185 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1186 | btd=bits_to_decode_for_testing 1187 | ) 1188 | 1189 | 1190 | # @pytest.mark.skip 1191 | def test_polynomial_vector_init_without_const_time(some_random_polys_for_a_vector_without_const_time): 1192 | lp: LatticeParameters = lp_for_testing 1193 | assert PolynomialVector(lp=lp, entries=some_random_polys_for_a_vector_without_const_time, const_time_flag=False) 1194 | v = PolynomialVector(lp=lp, entries=some_random_polys_for_a_vector_without_const_time, const_time_flag=False) 1195 | tmp = v.get_coef_rep() 1196 | assert max(i[1] for i in tmp) <= lp.halfmod // 2 1197 | assert max(i[2] for i in tmp) <= lp.degree 1198 | assert random_polynomialvector( 1199 | secpar=secpar4testing, lp=lp, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1200 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1201 | btd=bits_to_decode_for_testing, const_time_flag=False 1202 | ) 1203 | 1204 | 1205 | @pytest.fixture 1206 | def some_random_polynomialvector_with_const_time(some_random_polys_for_a_vector_with_const_time) -> PolynomialVector: 1207 | return PolynomialVector(lp=lp_for_testing, entries=some_random_polys_for_a_vector_with_const_time) 1208 | 1209 | 1210 | @pytest.fixture 1211 | def some_random_polynomialvector_without_const_time( 1212 | some_random_polys_for_a_vector_without_const_time) -> PolynomialVector: 1213 | return PolynomialVector(lp=lp_for_testing, entries=some_random_polys_for_a_vector_without_const_time, 1214 | const_time_flag=False) 1215 | 1216 | 1217 | @pytest.fixture 1218 | def some_random_polynomialvectors_with_const_time(some_random_polys_for_a_vector_with_const_time) -> List[ 1219 | PolynomialVector]: 1220 | return [PolynomialVector(lp=lp_for_testing, 
entries=some_random_polys_for_a_vector_with_const_time) for _ in 1221 | range(sample_size_for_random_tests)] 1222 | 1223 | 1224 | @pytest.fixture 1225 | def some_random_polynomialvectors_without_const_time(some_random_polys_for_a_vector_without_const_time) -> List[ 1226 | PolynomialVector]: 1227 | return [PolynomialVector(lp=lp_for_testing, entries=some_random_polys_for_a_vector_without_const_time, 1228 | const_time_flag=False) for _ in 1229 | range(sample_size_for_random_tests)] 1230 | 1231 | 1232 | # @pytest.mark.skip 1233 | def test_polynomial_vector_eq_with_const_time(some_random_polys_for_a_vector_with_const_time, 1234 | some_random_polynomialvector_with_const_time): 1235 | lp: LatticeParameters = lp_for_testing 1236 | v = PolynomialVector(lp=lp, entries=deepcopy(some_random_polys_for_a_vector_with_const_time)) 1237 | assert v == some_random_polynomialvector_with_const_time 1238 | 1239 | 1240 | # @pytest.mark.skip 1241 | def test_polynomial_vector_eq_without_const_time(some_random_polys_for_a_vector_without_const_time, 1242 | some_random_polynomialvector_without_const_time): 1243 | lp: LatticeParameters = lp_for_testing 1244 | v = PolynomialVector(lp=lp, entries=deepcopy(some_random_polys_for_a_vector_without_const_time), 1245 | const_time_flag=False) 1246 | assert v == some_random_polynomialvector_without_const_time 1247 | 1248 | 1249 | @pytest.fixture 1250 | def some_random_polynomialvector_pairs_sums_with_const_time() -> List[ 1251 | Tuple[PolynomialVector, PolynomialVector, PolynomialVector]]: 1252 | result = [] 1253 | while len(result) < sample_size_for_random_tests: 1254 | f = random_polynomialvector( 1255 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1256 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1257 | btd=bits_to_decode_for_testing 1258 | ) 1259 | g = random_polynomialvector( 1260 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1261 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1262 | btd=bits_to_decode_for_testing 1263 | ) 1264 | h = f + g 1265 | result += [(f, g, h)] 1266 | return result 1267 | 1268 | 1269 | @pytest.fixture 1270 | def some_random_polynomialvector_pairs_sums_without_const_time() -> List[ 1271 | Tuple[PolynomialVector, PolynomialVector, PolynomialVector]]: 1272 | result = [] 1273 | while len(result) < sample_size_for_random_tests: 1274 | f = random_polynomialvector( 1275 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1276 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1277 | btd=bits_to_decode_for_testing, const_time_flag=False 1278 | ) 1279 | g = random_polynomialvector( 1280 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1281 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1282 | btd=bits_to_decode_for_testing, const_time_flag=False 1283 | ) 1284 | h = f + g 1285 | result += [(f, g, h)] 1286 | return result 1287 | 1288 | 1289 | @pytest.fixture 1290 | def some_random_polynomialvector_pairs_diffs_with_const_time() -> List[ 1291 | Tuple[PolynomialVector, PolynomialVector, PolynomialVector]]: 1292 | result = [] 1293 | while len(result) < sample_size_for_random_tests: 1294 | f = random_polynomialvector( 1295 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1296 | 
num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1297 | btd=bits_to_decode_for_testing 1298 | ) 1299 | g = random_polynomialvector( 1300 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1301 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1302 | btd=bits_to_decode_for_testing 1303 | ) 1304 | h = f - g 1305 | result += [(f, g, h)] 1306 | return result 1307 | 1308 | 1309 | @pytest.fixture 1310 | def some_random_polynomialvector_pairs_diffs_without_const_time() -> List[ 1311 | Tuple[PolynomialVector, PolynomialVector, PolynomialVector]]: 1312 | result = [] 1313 | while len(result) < sample_size_for_random_tests: 1314 | f = random_polynomialvector( 1315 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1316 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1317 | btd=bits_to_decode_for_testing, const_time_flag=False 1318 | ) 1319 | g = random_polynomialvector( 1320 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1321 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1322 | btd=bits_to_decode_for_testing, const_time_flag=False 1323 | ) 1324 | h = f - g 1325 | result += [(f, g, h)] 1326 | return result 1327 | 1328 | 1329 | # @pytest.mark.skip 1330 | def test_polynomial_vector_add_with_const_time(some_random_polynomialvector_pairs_sums_with_const_time): 1331 | # TODO: Rewrite 1332 | lp: LatticeParameters = lp_for_testing 1333 | for next_item in some_random_polynomialvector_pairs_sums_with_const_time: 1334 | f, g, observed_h = next_item 1335 | for i, val in enumerate(zip(f.entries, g.entries, observed_h.entries)): 1336 | ff, gg, hh = val 1337 | diff = [ff.ntt_representation[j] + gg.ntt_representation[j] for j in range(lp.degree)] 1338 | diff = [diff[j] - hh.ntt_representation[j] for j in range(lp.degree)] 1339 | assert all( 1340 | cent(q=lp.modulus, halfmod=lp.halfmod, logmod=lp.logmod, val=diff[j]) == 0 for j in range(lp.degree) 1341 | ) 1342 | observed_h_coefs_and_norms_and_wts = observed_h.get_coef_rep() 1343 | f_coefs_and_norms_and_wts = f.get_coef_rep() 1344 | g_coefs_and_norms_and_wts = g.get_coef_rep() 1345 | for i, val in enumerate(zip( 1346 | f_coefs_and_norms_and_wts, g_coefs_and_norms_and_wts, observed_h_coefs_and_norms_and_wts 1347 | )): 1348 | f_dat, g_dat, obs_h_dat = val 1349 | f_coefs, f_norm, f_wt = f_dat 1350 | g_coefs, g_norm, g_wt = g_dat 1351 | expected_h_coefs: Dict[int, int] = deepcopy(f_coefs) 1352 | obs_h_coefs, obs_h_norm, obs_h_wt = obs_h_dat 1353 | for j in g_coefs: 1354 | if j in expected_h_coefs: 1355 | diff = expected_h_coefs[j] + g_coefs[j] 1356 | expected_h_coefs[j] = cent( 1357 | q=lp_for_testing.modulus, 1358 | val=diff, halfmod=lp_for_testing.halfmod, 1359 | logmod=lp_for_testing.logmod 1360 | ) 1361 | else: 1362 | expected_h_coefs[j] = g_coefs[j] 1363 | expected_h_coefs = {i: expected_h_coefs[i] for i in expected_h_coefs if expected_h_coefs[i] != 0} 1364 | expected_h_norm: int = max(abs(expected_h_coefs[i]) for i in expected_h_coefs) 1365 | expected_h_wt: int = len(expected_h_coefs) 1366 | assert expected_h_wt == obs_h_wt 1367 | assert expected_h_norm == obs_h_norm 1368 | assert sorted(list(expected_h_coefs.keys())) == sorted(list(obs_h_coefs.keys())) 1369 | assert all( 1370 | 0 == cent( 1371 | q=lp_for_testing.modulus, 1372 | val=expected_h_coefs[i] - obs_h_coefs[i], 1373 | halfmod=lp_for_testing.halfmod, 1374 | 
logmod=lp_for_testing.logmod 1375 | ) for i in expected_h_coefs 1376 | ) 1377 | 1378 | 1379 | # @pytest.mark.skip 1380 | def test_polynomial_vector_add_without_const_time(some_random_polynomialvector_pairs_sums_without_const_time): 1381 | # TODO: Rewrite 1382 | lp: LatticeParameters = lp_for_testing 1383 | for next_item in some_random_polynomialvector_pairs_sums_without_const_time: 1384 | f, g, observed_h = next_item 1385 | for i, val in enumerate(zip(f.entries, g.entries, observed_h.entries)): 1386 | ff, gg, hh = val 1387 | diff = [ff.ntt_representation[j] + gg.ntt_representation[j] for j in range(lp.degree)] 1388 | diff = [diff[j] - hh.ntt_representation[j] for j in range(lp.degree)] 1389 | assert all( 1390 | cent(q=lp.modulus, halfmod=lp.halfmod, logmod=lp.logmod, val=diff[j]) == 0 for j in range(lp.degree) 1391 | ) 1392 | observed_h_coefs_and_norms_and_wts = observed_h.get_coef_rep() 1393 | f_coefs_and_norms_and_wts = f.get_coef_rep() 1394 | g_coefs_and_norms_and_wts = g.get_coef_rep() 1395 | for i, val in enumerate(zip( 1396 | f_coefs_and_norms_and_wts, g_coefs_and_norms_and_wts, observed_h_coefs_and_norms_and_wts 1397 | )): 1398 | f_dat, g_dat, obs_h_dat = val 1399 | f_coefs, f_norm, f_wt = f_dat 1400 | g_coefs, g_norm, g_wt = g_dat 1401 | expected_h_coefs: Dict[int, int] = deepcopy(f_coefs) 1402 | obs_h_coefs, obs_h_norm, obs_h_wt = obs_h_dat 1403 | for j in g_coefs: 1404 | if j in expected_h_coefs: 1405 | diff = expected_h_coefs[j] + g_coefs[j] 1406 | expected_h_coefs[j] = cent( 1407 | q=lp_for_testing.modulus, 1408 | val=diff, halfmod=lp_for_testing.halfmod, 1409 | logmod=lp_for_testing.logmod 1410 | ) 1411 | else: 1412 | expected_h_coefs[j] = g_coefs[j] 1413 | expected_h_coefs = {i: expected_h_coefs[i] for i in expected_h_coefs if expected_h_coefs[i] != 0} 1414 | expected_h_norm: int = max(abs(expected_h_coefs[i]) for i in expected_h_coefs) 1415 | expected_h_wt: int = len(expected_h_coefs) 1416 | assert expected_h_wt == obs_h_wt 1417 | assert expected_h_norm == obs_h_norm 1418 | assert sorted(list(expected_h_coefs.keys())) == sorted(list(obs_h_coefs.keys())) 1419 | assert all( 1420 | 0 == cent( 1421 | q=lp_for_testing.modulus, 1422 | val=expected_h_coefs[i] - obs_h_coefs[i], 1423 | halfmod=lp_for_testing.halfmod, 1424 | logmod=lp_for_testing.logmod 1425 | ) for i in expected_h_coefs 1426 | ) 1427 | 1428 | 1429 | # @pytest.mark.skip 1430 | def test_polynomial_vector_sub_with_const_time(some_random_polynomialvector_pairs_diffs_with_const_time): 1431 | lp: LatticeParameters = lp_for_testing 1432 | for next_item in some_random_polynomialvector_pairs_diffs_with_const_time: 1433 | f, g, observed_h = next_item 1434 | observed_h_coefs_and_norms_and_weights = [ 1435 | i.get_coef_rep() for i in observed_h.entries 1436 | ] 1437 | for i in range(lp.length): 1438 | f_coefs, f_norm, f_wt = f.entries[i].get_coef_rep() 1439 | g_coefs, g_norm, g_wt = g.entries[i].get_coef_rep() 1440 | obs_h_coefs, obs_h_norm, obs_h_wt = observed_h_coefs_and_norms_and_weights[i] 1441 | exp_h_coefs = deepcopy(f_coefs) 1442 | for j in g_coefs: 1443 | if j in exp_h_coefs: 1444 | diff = exp_h_coefs[j] - g_coefs[j] 1445 | exp_h_coefs[j] = cent( 1446 | q=lp_for_testing.modulus, 1447 | val=diff, halfmod=lp_for_testing.halfmod, 1448 | logmod=lp_for_testing.logmod 1449 | ) 1450 | else: 1451 | exp_h_coefs[j] = -g_coefs[j] 1452 | exp_h_coefs = {i: exp_h_coefs[i] for i in exp_h_coefs if exp_h_coefs[i] != 0} 1453 | exp_h_norm = max(abs(exp_h_coefs[i]) for i in exp_h_coefs) 1454 | exp_h_wt = len(exp_h_coefs) 1455 | 1456 | 
assert obs_h_norm == exp_h_norm 1457 | assert obs_h_wt == exp_h_wt 1458 | assert sorted(list(obs_h_coefs.keys())) == sorted(list(exp_h_coefs.keys())) 1459 | for j in obs_h_coefs: 1460 | diff = obs_h_coefs[j] - exp_h_coefs[j] 1461 | assert 0 == cent( 1462 | q=lp_for_testing.modulus, val=diff, halfmod=lp_for_testing.halfmod, logmod=lp_for_testing.logmod 1463 | ) 1464 | 1465 | 1466 | # @pytest.mark.skip 1467 | def test_polynomial_vector_sub_without_const_time(some_random_polynomialvector_pairs_diffs_without_const_time): 1468 | lp: LatticeParameters = lp_for_testing 1469 | for next_item in some_random_polynomialvector_pairs_diffs_without_const_time: 1470 | f, g, observed_h = next_item 1471 | observed_h_coefs_and_norms_and_weights = [ 1472 | i.get_coef_rep() for i in observed_h.entries 1473 | ] 1474 | for i in range(lp.length): 1475 | f_coefs, f_norm, f_wt = f.entries[i].get_coef_rep() 1476 | g_coefs, g_norm, g_wt = g.entries[i].get_coef_rep() 1477 | obs_h_coefs, obs_h_norm, obs_h_wt = observed_h_coefs_and_norms_and_weights[i] 1478 | exp_h_coefs = deepcopy(f_coefs) 1479 | for j in g_coefs: 1480 | if j in exp_h_coefs: 1481 | diff = exp_h_coefs[j] - g_coefs[j] 1482 | exp_h_coefs[j] = cent( 1483 | q=lp_for_testing.modulus, 1484 | val=diff, halfmod=lp_for_testing.halfmod, 1485 | logmod=lp_for_testing.logmod 1486 | ) 1487 | else: 1488 | exp_h_coefs[j] = -g_coefs[j] 1489 | exp_h_coefs = {i: exp_h_coefs[i] for i in exp_h_coefs if exp_h_coefs[i] != 0} 1490 | exp_h_norm = max(abs(exp_h_coefs[i]) for i in exp_h_coefs) 1491 | exp_h_wt = len(exp_h_coefs) 1492 | 1493 | assert obs_h_norm == exp_h_norm 1494 | assert obs_h_wt == exp_h_wt 1495 | assert sorted(list(obs_h_coefs.keys())) == sorted(list(exp_h_coefs.keys())) 1496 | for j in obs_h_coefs: 1497 | diff = obs_h_coefs[j] - exp_h_coefs[j] 1498 | assert 0 == cent( 1499 | q=lp_for_testing.modulus, val=diff, halfmod=lp_for_testing.halfmod, logmod=lp_for_testing.logmod 1500 | ) 1501 | 1502 | 1503 | # @pytest.mark.skip 1504 | def test_polynomial_vector_mul_with_const_time(one_with_const_time, some_random_polynomialvectors_with_const_time): 1505 | # tests the dot product 1506 | lp: LatticeParameters = lp_for_testing 1507 | all_ones: PolynomialVector = PolynomialVector(lp=lp, 1508 | entries=[deepcopy(one_with_const_time) for _ in range(lp.length)]) 1509 | for v in some_random_polynomialvectors_with_const_time: 1510 | expected_sum: Polynomial = sum(x for x in v.entries) 1511 | observed_sum: Polynomial = all_ones * v 1512 | assert observed_sum == expected_sum 1513 | observed_sum: Polynomial = v * all_ones 1514 | assert observed_sum == expected_sum 1515 | 1516 | 1517 | # @pytest.mark.skip 1518 | def test_polynomial_vector_mul_without_const_time(one_without_const_time, 1519 | some_random_polynomialvectors_without_const_time): 1520 | # tests the dot product 1521 | lp: LatticeParameters = lp_for_testing 1522 | all_ones: PolynomialVector = PolynomialVector(lp=lp, 1523 | entries=[deepcopy(one_without_const_time) for _ in range(lp.length)], 1524 | const_time_flag=False) 1525 | for v in some_random_polynomialvectors_without_const_time: 1526 | expected_sum: Polynomial = sum(x for x in v.entries) 1527 | observed_sum: Polynomial = all_ones * v 1528 | assert observed_sum == expected_sum 1529 | observed_sum: Polynomial = v * all_ones 1530 | assert observed_sum == expected_sum 1531 | 1532 | 1533 | @pytest.fixture 1534 | def some_random_linear_polynomialvectors_with_const_time() -> list: 1535 | result = [] 1536 | while len(result) < sample_size_for_random_tests: 1537 | 
next_result_entry = [] 1538 | entries = [] 1539 | while len(entries) < lp_for_testing.length: 1540 | a: int = (2 * randbits(1) - 1) * (randbelow(lp_for_testing.halfmod) + 1) 1541 | b: int = (2 * randbits(1) - 1) * (randbelow(lp_for_testing.halfmod) + 1) 1542 | if a != 0 or b != 0: 1543 | next_poly: Polynomial = Polynomial(lp=lp_for_testing, coefs={0: a, 1: b}) 1544 | next_result_entry += [(a, b, next_poly)] 1545 | entries += [next_poly] 1546 | next_polynomialvector = PolynomialVector(lp=lp_for_testing, entries=entries) 1547 | next_result_entry += [next_polynomialvector] 1548 | result += [next_result_entry] 1549 | return result 1550 | 1551 | 1552 | @pytest.fixture 1553 | def some_random_linear_polynomialvectors_without_const_time() -> list: 1554 | result = [] 1555 | while len(result) < sample_size_for_random_tests: 1556 | next_result_entry = [] 1557 | entries = [] 1558 | while len(entries) < lp_for_testing.length: 1559 | a: int = (2 * randbits(1) - 1) * (randbelow(lp_for_testing.halfmod) + 1) 1560 | b: int = (2 * randbits(1) - 1) * (randbelow(lp_for_testing.halfmod) + 1) 1561 | if a != 0 or b != 0: 1562 | next_poly: Polynomial = Polynomial(lp=lp_for_testing, coefs={0: a, 1: b}, const_time_flag=False) 1563 | next_result_entry += [(a, b, next_poly)] 1564 | entries += [next_poly] 1565 | next_polynomialvector = PolynomialVector(lp=lp_for_testing, entries=entries, const_time_flag=False) 1566 | next_result_entry += [next_polynomialvector] 1567 | result += [next_result_entry] 1568 | return result 1569 | 1570 | 1571 | @pytest.fixture 1572 | def expected_polynomialvector_rep_with_const_time(some_random_linear_polynomialvectors_with_const_time) -> List[str]: 1573 | result = [] 1574 | for next_polynomialvector_data in some_random_linear_polynomialvectors_with_const_time: 1575 | next_rep: str = '[' 1576 | the_polynomialvector = next_polynomialvector_data[-1] 1577 | for i, val in enumerate(zip(the_polynomialvector.entries, next_polynomialvector_data[:-1])): 1578 | # the_next_poly = val[0] 1579 | next_tuple = val[1] 1580 | a, b, also_the_next_poly = next_tuple 1581 | the_coefs = {} 1582 | if a != 0: 1583 | the_coefs[0] = a 1584 | if b != 0: 1585 | the_coefs[1] = b 1586 | the_norm = max(abs(a), abs(b)) 1587 | the_wt = len(the_coefs) 1588 | # assert the_next_poly.get_coef_rep() == the_coefs, the_norm, the_wt 1589 | # assert also_the_next_poly == the_next_poly 1590 | the_coefs_sorted_keys = sorted(list(the_coefs.keys())) 1591 | sorted_coefs = [(i, the_coefs[i]) for i in the_coefs_sorted_keys] 1592 | next_rep += str((sorted_coefs, the_norm, the_wt)) + ', ' 1593 | next_rep = next_rep[:-2] + ']' 1594 | result += [next_rep] 1595 | return result 1596 | 1597 | 1598 | @pytest.fixture 1599 | def expected_polynomialvector_rep_without_const_time(some_random_linear_polynomialvectors_without_const_time) -> List[ 1600 | str]: 1601 | result = [] 1602 | for next_polynomialvector_data in some_random_linear_polynomialvectors_without_const_time: 1603 | next_rep: str = '[' 1604 | the_polynomialvector = next_polynomialvector_data[-1] 1605 | for i, val in enumerate(zip(the_polynomialvector.entries, next_polynomialvector_data[:-1])): 1606 | # the_next_poly = val[0] 1607 | next_tuple = val[1] 1608 | a, b, also_the_next_poly = next_tuple 1609 | the_coefs = {} 1610 | if a != 0: 1611 | the_coefs[0] = a 1612 | if b != 0: 1613 | the_coefs[1] = b 1614 | the_norm = max(abs(a), abs(b)) 1615 | the_wt = len(the_coefs) 1616 | # assert the_next_poly.get_coef_rep() == the_coefs, the_norm, the_wt 1617 | # assert also_the_next_poly == 
the_next_poly 1618 | the_coefs_sorted_keys = sorted(list(the_coefs.keys())) 1619 | sorted_coefs = [(i, the_coefs[i]) for i in the_coefs_sorted_keys] 1620 | next_rep += str((sorted_coefs, the_norm, the_wt)) + ', ' 1621 | next_rep = next_rep[:-2] + ']' 1622 | result += [next_rep] 1623 | return result 1624 | 1625 | 1626 | # @pytest.mark.skip 1627 | def test_polynomial_vector_repr_with_const_time(some_random_linear_polynomialvectors_with_const_time, 1628 | expected_polynomialvector_rep_with_const_time): 1629 | for next_pair in zip(some_random_linear_polynomialvectors_with_const_time, 1630 | expected_polynomialvector_rep_with_const_time): 1631 | next_random_linear_polynomialvector, next_expected_rep = next_pair 1632 | assert str(next_random_linear_polynomialvector[-1]) == next_expected_rep 1633 | 1634 | 1635 | # @pytest.mark.skip 1636 | def test_polynomial_vector_repr_without_const_time(some_random_linear_polynomialvectors_without_const_time, 1637 | expected_polynomialvector_rep_without_const_time): 1638 | for next_pair in zip(some_random_linear_polynomialvectors_without_const_time, 1639 | expected_polynomialvector_rep_without_const_time): 1640 | next_random_linear_polynomialvector, next_expected_rep = next_pair 1641 | assert str(next_random_linear_polynomialvector[-1]) == next_expected_rep 1642 | 1643 | 1644 | # @pytest.mark.skip 1645 | def test_polynomial_vector_pow_with_const_time(): 1646 | for k in range(sample_size_for_random_tests): 1647 | v: Polynomial = random_polynomial( 1648 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1649 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1650 | btd=bits_to_decode_for_testing 1651 | ) 1652 | u: PolynomialVector = random_polynomialvector( 1653 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1654 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1655 | btd=bits_to_decode_for_testing 1656 | ) 1657 | expected_scaled_vector: PolynomialVector = deepcopy(u) 1658 | expected_scaled_vector.entries = [i * v for i in expected_scaled_vector.entries] 1659 | observed_scaled_vector = u ** v 1660 | assert expected_scaled_vector == observed_scaled_vector 1661 | 1662 | 1663 | # @pytest.mark.skip 1664 | def test_polynomial_vector_pow_without_const_time(): 1665 | for k in range(sample_size_for_random_tests): 1666 | v: Polynomial = random_polynomial( 1667 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1668 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1669 | btd=bits_to_decode_for_testing, const_time_flag=False 1670 | ) 1671 | u: PolynomialVector = random_polynomialvector( 1672 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1673 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1674 | btd=bits_to_decode_for_testing, const_time_flag=False 1675 | ) 1676 | expected_scaled_vector: PolynomialVector = deepcopy(u) 1677 | expected_scaled_vector.entries = [i * v for i in expected_scaled_vector.entries] 1678 | observed_scaled_vector = u ** v 1679 | assert expected_scaled_vector == observed_scaled_vector 1680 | 1681 | 1682 | # @pytest.mark.skip 1683 | def test_polynomialvector_coefficient_representation_and_norm_and_weight_with_const_time(): 1684 | f: PolynomialVector = random_polynomialvector( 1685 | secpar=secpar4testing, lp=lp_for_testing, 
distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1686 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1687 | btd=bits_to_decode_for_testing 1688 | ) 1689 | assert isinstance(f, PolynomialVector) 1690 | result = f.get_coef_rep() 1691 | for i in result: 1692 | coef_rep, n, w = i 1693 | assert n <= 7176 1694 | assert w <= 384 1695 | assert w == len(coef_rep) 1696 | assert n == max(abs(coef_rep[i]) for i in coef_rep) 1697 | 1698 | 1699 | # @pytest.mark.skip 1700 | def test_polynomialvector_coefficient_representation_and_norm_and_weight_without_const_time(): 1701 | f: PolynomialVector = random_polynomialvector( 1702 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1703 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1704 | btd=bits_to_decode_for_testing, const_time_flag=False 1705 | ) 1706 | assert isinstance(f, PolynomialVector) 1707 | result = f.get_coef_rep() 1708 | for i in result: 1709 | coef_rep, n, w = i 1710 | assert n <= 7176 1711 | assert w <= 384 1712 | assert w == len(coef_rep) 1713 | assert n == max(abs(coef_rep[i]) for i in coef_rep) 1714 | 1715 | 1716 | # @pytest.mark.skip 1717 | def test_random_polynomialvector_with_const_time(): 1718 | f: PolynomialVector = random_polynomialvector( 1719 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1720 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1721 | btd=bits_to_decode_for_testing 1722 | ) 1723 | assert isinstance(f, PolynomialVector) 1724 | results = f.get_coef_rep() 1725 | for i in results: 1726 | coef_rep, n, w = i 1727 | assert n <= 7176 1728 | assert w <= 384 1729 | assert len(coef_rep) == w 1730 | assert n == max(abs(coef_rep[i]) for i in coef_rep) 1731 | 1732 | 1733 | # @pytest.mark.skip 1734 | def test_random_polynomialvector_without_const_time(): 1735 | f: PolynomialVector = random_polynomialvector( 1736 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, dist_pars=small_dist_pars, 1737 | num_coefs=small_dist_pars['wt'], bti=bits_to_indices_for_testing, 1738 | btd=bits_to_decode_for_testing, const_time_flag=False 1739 | ) 1740 | assert isinstance(f, PolynomialVector) 1741 | results = f.get_coef_rep() 1742 | for i in results: 1743 | coef_rep, n, w = i 1744 | assert n <= 7176 1745 | assert w <= 384 1746 | assert len(coef_rep) == w 1747 | assert n == max(abs(coef_rep[i]) for i in coef_rep) 1748 | 1749 | 1750 | # @pytest.mark.skip 1751 | def test_decode_bitstring_to_coefficient(): 1752 | # TODO: Rewrite this 1753 | dist_pars_tmp = deepcopy(small_dist_pars) 1754 | for bound in range(1, lp_for_testing.modulus // 2, lp_for_testing.modulus // (2 ** 5)): 1755 | dist_pars_tmp['bd'] = bound 1756 | bits_to_decode = ceil(log2(bound)) + 1 + secpar4testing 1757 | for signum_bit in range(2): 1758 | for magnitude_minus_one in range(bound): 1759 | expected_result = (2 * signum_bit - 1) * (1 + magnitude_minus_one) 1760 | bitstring = str(signum_bit) + bin(magnitude_minus_one)[2:].zfill(bits_to_decode - 1) 1761 | observed_result = decode2coef( 1762 | secpar=secpar4testing, lp=lp_for_testing, val=bitstring, distribution=UNIFORM_INFINITY_WEIGHT, 1763 | dist_pars=dist_pars_tmp, btd=bits_to_decode 1764 | ) 1765 | assert expected_result == observed_result 1766 | 1767 | 1768 | # @pytest.mark.skip 1769 | def test_decode_bitstring_to_indices(): 1770 | lp: LatticeParameters = LatticeParameters(degree=8, modulus=small_q_for_testing, 
length=3) 1771 | dist_pars: Dict[str, int] = {'bd': 1, 'wt': 3} 1772 | bits_to_indices_for_this_test: int = ceil(log2(lp.degree)) + 2 * (ceil(log2(lp.degree)) + secpar4testing) 1773 | # say we want indices 0, 3, 6. 1774 | # first index is 0, so the first part of the bitstring is '000' (only need 3 bits for the first) 1775 | # second index is 3, requires ceil(log2(8)) + secpar = 11 bits since we already picked an index. 1776 | # but the remaining indices are [1, 2, 3, 4, 5, 6, 7] 1777 | # to get 3 out of this, we need to access index 2 1778 | # we need an 11-bit integer == 2 mod 7. for fun, let's use 2 + 2**6 * 7 = 450 -> '00111000010' 1779 | # third index is 6, requires ceil(log2(8)) + secpar = 11 bits since we already picked two indices 1780 | # but the remaining indices are [1, 2, 4, 5, 6, 7] 1781 | # to get 6 out of this, we need to access index 4 1782 | # we need an 11-bit integer == 4 mod 6. for fun, let's use 4 + 2**5 * 6 = 196 -> '00011000100' 1783 | # i.e. set our bitstring = '0000011100001000011000100' and we should get 1784 | # [0, 3, 6]. 1785 | val = bin(0)[2:].zfill(ceil(log2(lp.degree))) 1786 | val += bin(2 + 2 ** 6 * 7)[2:].zfill(ceil(log2(lp.degree)) + secpar4testing) 1787 | val += bin(4 + 2 ** 5 * 6)[2:].zfill(ceil(log2(lp.degree)) + secpar4testing) 1788 | expected_result = [0, 3, 6] 1789 | observed_result = decode2indices( 1790 | secpar=secpar4testing, lp=lp, num_coefs=dist_pars['wt'], val=val, bti=bits_to_indices_for_this_test 1791 | ) 1792 | assert expected_result == observed_result 1793 | 1794 | 1795 | # @pytest.mark.skip 1796 | def test_decode_bitstring_to_polynomial_coefficients(): 1797 | lp: LatticeParameters = LatticeParameters(degree=8, modulus=small_q_for_testing, length=3) 1798 | 1799 | # Let's construct the bitstring that should give us {0: 1, 3: 1, 6: -1}.
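    # As in test_decode_bitstring_to_coefficient above, each coefficient block is one sign bit followed by
    # (bits_to_decode_for_testing - 1) magnitude bits and is expected to decode to
    # (2 * signum_bit - 1) * (1 + magnitude_minus_one). So '1' followed by all-zero magnitude bits gives +1,
    # while '0' followed by all-zero magnitude bits gives -1, which is how the three coefficient blocks
    # constructed below produce the coefficients [1, 1, -1].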
1800 | expected_coefs = [1, 1, -1] 1801 | expected_indices = [0, 3, 6] 1802 | expected_result = {idx: coef for idx, coef in zip(expected_indices, expected_coefs)} 1803 | # expected coefficients will be [1, 1, -1] 1804 | # expected indices will be [0, 3, 6] 1805 | val_for_first_index = bin(0)[2:].zfill(ceil(log2(lp.degree))) 1806 | val_for_second_index = bin(2 + 2 ** 6 * 7)[2:].zfill(ceil(log2(lp.degree)) + secpar4testing) 1807 | val_for_third_index = bin(4 + 2 ** 5 * 6)[2:].zfill(ceil(log2(lp.degree)) + secpar4testing) 1808 | val_for_indices = val_for_first_index + val_for_second_index + val_for_third_index 1809 | val_for_first_coef = '1' + '0' * (bits_to_decode_for_testing - 1) 1810 | val_for_second_coef = '1' + '0' * (bits_to_decode_for_testing - 1) 1811 | val_for_third_coef = '0' + '0' * (bits_to_decode_for_testing - 1) 1812 | val_for_coefs = val_for_first_coef + val_for_second_coef + val_for_third_coef 1813 | 1814 | val = val_for_indices + val_for_coefs 1815 | 1816 | wt_for_this_test: int = 3 1817 | bits_to_indices: int = ceil(log2(lp.degree)) + (wt_for_this_test - 1) * (ceil(log2(lp.degree)) + secpar4testing) 1818 | 1819 | for bd in range(1, small_q_for_testing // (2 ** 5)): 1820 | dist_pars: Dict[str, int] = {'bd': bd, 'wt': wt_for_this_test} 1821 | observed_result = decode2polycoefs(secpar=secpar4testing, lp=lp, distribution=UNIFORM_INFINITY_WEIGHT, 1822 | dist_pars=dist_pars, val=val, num_coefs=dist_pars['wt'], 1823 | bti=bits_to_indices, btd=bits_to_decode_for_testing) 1824 | assert expected_result == observed_result 1825 | 1826 | 1827 | # @pytest.mark.skip 1828 | def test_decode_bitstring_to_coefficients(): 1829 | # Thorough test 1830 | for bound in range(2, modulus_for_testing // 2, modulus_for_testing // (2 ** 5)): 1831 | for weight in range(1, degree_for_testing, degree_for_testing // (2 ** 5)): 1832 | expected_result = list() 1833 | while len(expected_result) < weight: 1834 | next_int_sign = 2 * randbits(1) - 1 # pick a random sign +/- 1 1835 | next_int_mag = 1 + randbelow(bound) # ceil(log2(bound)) bits to define a magnitude 1, 2, ..., bound 1836 | expected_result += [next_int_sign * next_int_mag] # put into list 1837 | num_bits_to_sample_sign_without_bias = 1 1838 | num_bits_to_sample_mag_without_bias = ceil(log2(bound)) + secpar4testing 1839 | num_bits_per_coef = num_bits_to_sample_mag_without_bias + num_bits_to_sample_sign_without_bias 1840 | num_bits = weight * num_bits_per_coef 1841 | num_bits = ceil(num_bits/8)*8 1842 | vals = list() 1843 | for i in expected_result: 1844 | tmp = bin(int(i > 0))[2:] + bin(abs(i) - 1)[2:].zfill(num_bits_per_coef - 1) 1845 | vals += [tmp] 1846 | assert len(vals) == weight 1847 | for i in vals: 1848 | assert len(i) == num_bits_per_coef 1849 | assert sum(len(i) for i in vals) <= num_bits 1850 | merged_vals = ''.join(vals) 1851 | merged_vals += bin(0)[2:].zfill(num_bits - len(merged_vals)) 1852 | bits_to_decode = ceil(log2(bound)) + 1 + secpar4testing 1853 | # assert len(merged_vals) == ceil(weight * bits_to_decode/8)*8 1854 | observed_result = decode2coefs( 1855 | secpar=secpar4testing, lp=lp_for_testing, distribution=UNIFORM_INFINITY_WEIGHT, 1856 | dist_pars={'bd': bound, 'wt': weight}, val=merged_vals, num_coefs=weight, 1857 | btd=bits_to_decode 1858 | ) 1859 | assert expected_result == observed_result 1860 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | lint 4 | py36 5 | py37 6 | py38 7 
| py39 8 | py310 9 | pypy3 10 | minversion = 3.14.2 11 | requires = 12 | # https://github.com/tox-dev/tox/issues/765 13 | virtualenv >= 16.7.9 14 | pip >= 19.3.1 15 | 16 | [testenv] 17 | passenv = 18 | LC_ALL 19 | LANG 20 | HOME 21 | ;platform = linux 22 | commands = 23 | pip install -e . 24 | pip install -r requirements-dev.txt 25 | pytest --cov=lattice_algebra 26 | deps = -rrequirements-dev.txt 27 | skip_install = true 28 | 29 | [testenv:lint] 30 | commands = 31 | python -m pre_commit run 32 | deps = pre-commit>=1.20.0 33 | skip_install = true 34 | usedevelop = false 35 | 36 | [testenv:cov-report] 37 | passenv = 38 | LC_ALL 39 | LANG 40 | HOME 41 | commands = 42 | pip install -e . 43 | pytest --cov=lattice_algebra --cov-report=term --cov-report=html --cov-report=xml 44 | deps = -rrequirements-dev.txt 45 | skip_install = true 46 | 47 | [testenv:docs] 48 | passenv = 49 | LC_ALL 50 | LANG 51 | HOME 52 | commands = 53 | python setup.py install 54 | pip install -r requirements-dev.txt 55 | pip install -r docs/requirements.txt 56 | mkdocs 57 | whitelist_externals = make 58 | deps = -rdocs/requirements.txt 59 | 60 | skip_install = true 61 | --------------------------------------------------------------------------------