├── cascade
│   ├── __init__.py
│   ├── tests
│   │   ├── __init__.py
│   │   ├── test_mock_classical_channel.py
│   │   ├── test_reconciliation.py
│   │   ├── test_key.py
│   │   ├── test_algorithm.py
│   │   ├── test_shuffle.py
│   │   └── test_block.py
│   ├── stats.py
│   ├── classical_channel.py
│   ├── mock_classical_channel.py
│   ├── key.py
│   ├── algorithm.py
│   ├── shuffle.py
│   └── block.py
├── doc8.ini
├── .vscode
│   ├── spellright.dict
│   ├── launch.json
│   └── settings.json
├── docs
│   └── source
│       ├── requirements.txt
│       ├── figures
│       │   ├── figures.pptx
│       │   ├── iterations.png
│       │   ├── shuffle-key.png
│       │   ├── openssl-logo.png
│       │   ├── binary-recursion.png
│       │   ├── input-and-output.png
│       │   ├── split-block-plr.png
│       │   ├── top-level-blocks.png
│       │   ├── end-of-iteration-n.png
│       │   ├── error-parity-table.png
│       │   ├── qkd-alice-bob-eve.png
│       │   ├── cascade-effect-after.png
│       │   ├── cascade-effect-before.png
│       │   ├── correct-key-noisy-key.png
│       │   ├── left-even-right-odd.png
│       │   ├── left-odd-right-even.png
│       │   ├── qkd-phases-and-steps.png
│       │   ├── reply-parity-message.png
│       │   ├── shuffle-per-iteration.png
│       │   ├── compute-current-parity.png
│       │   ├── ask-parity-message-naive.png
│       │   ├── end-of-iteration-n-plus-1.png
│       │   ├── architecture-engine-mock-qkd.png
│       │   ├── compute-correct-parity-naive.png
│       │   ├── compute-correct-parity-better.png
│       │   ├── demystifying-figure-1-original.png
│       │   ├── demystifying-figure-10-original.png
│       │   ├── demystifying-figure-11-original.png
│       │   ├── demystifying-figure-12-original.png
│       │   ├── demystifying-figure-13-original.png
│       │   ├── demystifying-figure-2-original.png
│       │   ├── demystifying-figure-3-original.png
│       │   ├── demystifying-figure-4-original.png
│       │   ├── demystifying-figure-5-original.png
│       │   ├── demystifying-figure-6-original.png
│       │   ├── demystifying-figure-7-original.png
│       │   ├── demystifying-figure-8-original.png
│       │   ├── demystifying-figure-9-original.png
│       │   ├── peer-to-peer-vs-client-server.png
│       │   ├── demystifying-figure-1-reproduced.png
│       │   ├── demystifying-figure-10-reproduced.png
│       │   ├── demystifying-figure-11-reproduced.png
│       │   ├── demystifying-figure-13-reproduced.png
│       │   ├── demystifying-figure-2-reproduced.png
│       │   ├── demystifying-figure-3-reproduced.png
│       │   ├── demystifying-figure-4-reproduced.png
│       │   ├── demystifying-figure-5-reproduced.png
│       │   ├── demystifying-figure-8-reproduced.png
│       │   ├── demystifying-figure-9-reproduced.png
│       │   ├── compute-current-parity-highlighted.png
│       │   ├── andre-reis-thesis-figure-5-1-original.png
│       │   ├── andre-reis-thesis-figure-5-10-original.png
│       │   ├── andre-reis-thesis-figure-5-2-original.png
│       │   ├── andre-reis-thesis-figure-5-3-original.png
│       │   ├── andre-reis-thesis-figure-5-4-original.png
│       │   ├── andre-reis-thesis-figure-5-5-original.png
│       │   ├── andre-reis-thesis-figure-5-6-original.png
│       │   ├── andre-reis-thesis-figure-5-7-original.png
│       │   ├── andre-reis-thesis-figure-5-8-original.png
│       │   ├── andre-reis-thesis-figure-5-9-original.png
│       │   ├── demystifying-figure-11-reproduced.png.png
│       │   ├── andre-reis-thesis-figure-5-1-reproduced.png
│       │   ├── andre-reis-thesis-figure-5-2-reproduced.png
│       │   ├── andre-reis-thesis-figure-5-3-reproduced.png
│       │   ├── andre-reis-thesis-figure-5-5a-reproduced.png
│       │   ├── andre-reis-thesis-figure-5-5b-reproduced.png
│       │   └── pan-european-quantum-internet-hackathon.png
│       ├── index.rst
│       ├── conf.py
│       ├── comparison-conclusions.rst
│       ├── references.rst
│       ├── implementation.rst
│       ├── intro.rst
│       └── comparison.rst
├── profile.png
├── .travis.yml
├── study
│   ├── experiments_profile.json
│   ├── experiments_zero_handling.json
│   ├── experiments_performance.json
│   ├── graphs_zero_handling.json
│   ├── experiments_papers.json
│   ├── aggregate_stats.py
│   ├── data_point.py
│   ├── graphs_performance.json
│   ├── make_graphs.py
│   ├── graphs_andre_reis_thesis.json
│   ├── run_experiments.py
│   └── graphs_demystifying.json
├── .readthedocs.yaml
├── pylintrc
├── LICENSE
├── README.rst
├── requirements.txt
├── .gitignore
└── Makefile
/cascade/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/cascade/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/doc8.ini:
--------------------------------------------------------------------------------
1 | [doc8]
2 | max-line-length=999999
--------------------------------------------------------------------------------
/.vscode/spellright.dict:
--------------------------------------------------------------------------------
1 | fixme
2 | kwargs
3 |
--------------------------------------------------------------------------------
/docs/source/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx-rtd-theme
--------------------------------------------------------------------------------
/profile.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/profile.png
--------------------------------------------------------------------------------
/docs/source/figures/figures.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/figures.pptx
--------------------------------------------------------------------------------
/docs/source/figures/iterations.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/iterations.png
--------------------------------------------------------------------------------
/docs/source/figures/shuffle-key.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/shuffle-key.png
--------------------------------------------------------------------------------
/docs/source/figures/openssl-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/openssl-logo.png
--------------------------------------------------------------------------------
/docs/source/figures/binary-recursion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/binary-recursion.png
--------------------------------------------------------------------------------
/docs/source/figures/input-and-output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/input-and-output.png
--------------------------------------------------------------------------------
/docs/source/figures/split-block-plr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/split-block-plr.png
--------------------------------------------------------------------------------
/docs/source/figures/top-level-blocks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/top-level-blocks.png
--------------------------------------------------------------------------------
/docs/source/figures/end-of-iteration-n.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/end-of-iteration-n.png
--------------------------------------------------------------------------------
/docs/source/figures/error-parity-table.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/error-parity-table.png
--------------------------------------------------------------------------------
/docs/source/figures/qkd-alice-bob-eve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/qkd-alice-bob-eve.png
--------------------------------------------------------------------------------
/docs/source/figures/cascade-effect-after.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/cascade-effect-after.png
--------------------------------------------------------------------------------
/docs/source/figures/cascade-effect-before.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/cascade-effect-before.png
--------------------------------------------------------------------------------
/docs/source/figures/correct-key-noisy-key.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/correct-key-noisy-key.png
--------------------------------------------------------------------------------
/docs/source/figures/left-even-right-odd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/left-even-right-odd.png
--------------------------------------------------------------------------------
/docs/source/figures/left-odd-right-even.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/left-odd-right-even.png
--------------------------------------------------------------------------------
/docs/source/figures/qkd-phases-and-steps.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/qkd-phases-and-steps.png
--------------------------------------------------------------------------------
/docs/source/figures/reply-parity-message.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/reply-parity-message.png
--------------------------------------------------------------------------------
/docs/source/figures/shuffle-per-iteration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/shuffle-per-iteration.png
--------------------------------------------------------------------------------
/docs/source/figures/compute-current-parity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/compute-current-parity.png
--------------------------------------------------------------------------------
/docs/source/figures/ask-parity-message-naive.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/ask-parity-message-naive.png
--------------------------------------------------------------------------------
/docs/source/figures/end-of-iteration-n-plus-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/end-of-iteration-n-plus-1.png
--------------------------------------------------------------------------------
/docs/source/figures/architecture-engine-mock-qkd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/architecture-engine-mock-qkd.png
--------------------------------------------------------------------------------
/docs/source/figures/compute-correct-parity-naive.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/compute-correct-parity-naive.png
--------------------------------------------------------------------------------
/docs/source/figures/compute-correct-parity-better.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/compute-correct-parity-better.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-1-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-1-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-10-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-10-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-11-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-11-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-12-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-12-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-13-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-13-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-2-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-2-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-3-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-3-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-4-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-4-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-5-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-5-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-6-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-6-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-7-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-7-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-8-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-8-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-9-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-9-original.png
--------------------------------------------------------------------------------
/docs/source/figures/peer-to-peer-vs-client-server.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/peer-to-peer-vs-client-server.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-1-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-1-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-10-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-10-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-11-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-11-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-13-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-13-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-2-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-2-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-3-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-3-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-4-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-4-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-5-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-5-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-8-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-8-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-9-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-9-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/compute-current-parity-highlighted.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/compute-current-parity-highlighted.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-1-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-1-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-10-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-10-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-2-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-2-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-3-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-3-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-4-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-4-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-5-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-5-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-6-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-6-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-7-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-7-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-8-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-8-original.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-9-original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-9-original.png
--------------------------------------------------------------------------------
/docs/source/figures/demystifying-figure-11-reproduced.png.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/demystifying-figure-11-reproduced.png.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-1-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-1-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-2-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-2-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-3-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-3-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-5a-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-5a-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/andre-reis-thesis-figure-5-5b-reproduced.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/andre-reis-thesis-figure-5-5b-reproduced.png
--------------------------------------------------------------------------------
/docs/source/figures/pan-european-quantum-internet-hackathon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brunorijsman/cascade-python/HEAD/docs/source/figures/pan-european-quantum-internet-hackathon.png
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - "3.7"
4 | install:
5 | - virtualenv env --python=python3
6 | - source env/bin/activate
7 | - pip install -r requirements.txt
8 | script:
9 | - make pre-commit
10 | after_success:
11 | - codecov
12 |
--------------------------------------------------------------------------------
/study/experiments_profile.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "independent_variable": "key_size",
4 | "algorithm": ["original", "yanetal", "biconf", "option7"],
5 | "error_rate": 0.02,
6 | "key_size": [1000, 10000],
7 | "runs": 10
8 | }
9 | ]
10 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Based on https://docs.readthedocs.io/en/stable/config-file/v2.html
2 |
3 | version: 2
4 |
5 | build:
6 |   os: ubuntu-22.04
7 |   tools:
8 |     python: "3.11"
9 |
10 | sphinx:
11 |   configuration: docs/source/conf.py
12 |
13 | python:
14 |   install:
15 |     - requirements: docs/source/requirements.txt
16 |
--------------------------------------------------------------------------------
/study/experiments_zero_handling.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "independent_variable": "error_rate",
4 | "algorithm": ["original", "biconf"],
5 | "error_rate": [
6 | {"start": 0.000, "end": 0.005, "step_size": 0.00005},
7 | {"start": 0.005, "end": 0.050, "step_size": 0.00050}
8 | ],
9 | "key_size": 10000,
10 | "runs": 100
11 | }
12 | ]
13 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 |
8 |
9 | {
10 | "name": "Reconciliation test",
11 | "type": "python",
12 | "request": "launch",
13 | "program": "${workspaceFolder}/env/bin/pytest",
14 | "args": ["-v", "-s", "-x", "cascade/tests/test_reconciliation.py"]
15 | },
16 | ]
17 | }
--------------------------------------------------------------------------------
/study/experiments_performance.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "independent_variable": "error_rate",
4 | "algorithm": ["original", "yanetal", "biconf", "option7"],
5 | "error_rate": [0.00, 0.01, 0.02, 0.05, 0.10],
6 | "key_size": 10000,
7 | "runs": 10
8 | },
9 | {
10 | "independent_variable": "key_size",
11 | "algorithm": ["original", "yanetal", "biconf", "option7"],
12 | "error_rate": 0.02,
13 | "key_size": {
14 | "start": 1e3,
15 | "end": 1e5,
16 | "step_factor": 1.3
17 | },
18 | "runs": 10
19 | }
20 | ]
21 |
--------------------------------------------------------------------------------
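
The experiment definitions above describe each swept parameter either as a single value, an explicit list, or a range object: error rates use {"start", "end", "step_size"} and key sizes use {"start", "end", "step_factor"} for geometric steps. The expansion into concrete data points is done by study/run_experiments.py, which is not included in this listing; the sketch below only illustrates the intent of these range objects, and the helper names are hypothetical.

# Hypothetical sketch of how the range objects in the experiments_*.json files could be
# expanded; the real logic lives in study/run_experiments.py (not shown here).
def expand_error_rates(spec):
    """Expand an error_rate spec (number, list, or {start, end, step_size} dict) into a list."""
    if isinstance(spec, (int, float)):
        return [spec]
    if isinstance(spec, dict):
        rates = []
        rate = spec["start"]
        while rate <= spec["end"] + 1e-12:
            rates.append(round(rate, 6))
            rate += spec["step_size"]
        return rates
    # A list may mix plain numbers and range objects, as in experiments_zero_handling.json.
    rates = []
    for item in spec:
        rates.extend(expand_error_rates(item))
    return rates

def expand_key_sizes(spec):
    """Expand a key_size spec (number, list, or {start, end, step_factor} dict) into a list."""
    if isinstance(spec, (int, float)):
        return [int(spec)]
    if isinstance(spec, dict):
        sizes = []
        size = spec["start"]
        while size <= spec["end"]:
            sizes.append(int(round(size)))
            size *= spec["step_factor"]
        return sizes
    return [int(size) for size in spec]

# For example, {"start": 1e3, "end": 1e5, "step_factor": 1.3} expands to 1000, 1300, 1690, ...
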
/pylintrc:
--------------------------------------------------------------------------------
1 | [MESSAGES CONTROL]
2 |
3 | disable=
4 |     missing-docstring,
5 |     invalid-name,
6 |     too-many-arguments,
7 |     too-many-locals,
8 |     too-many-branches,
9 |     too-many-statements,
10 |     too-many-instance-attributes,
11 |     too-few-public-methods,
12 |     too-many-public-methods,
13 |     too-many-function-args,
14 |     too-many-positional-arguments,
15 |     fixme,
16 |     duplicate-code,
17 |     wildcard-import,
18 |     unused-wildcard-import,
19 |     global-statement,
20 |     use-implicit-booleaness-not-comparison,
21 |     consider-using-max-builtin,
22 |     unnecessary-dunder-call,
23 |     use-dict-literal,
24 |     global-variable-not-assigned,
25 |     consider-using-with,
26 |
--------------------------------------------------------------------------------
/cascade/stats.py:
--------------------------------------------------------------------------------
1 | class Stats:
2 |     """
3 |     Stats of a single reconciliation.
4 |     """
5 |
6 |     def __init__(self):
7 |         """
8 |         Create a new stats block with all counters initialized to zero.
9 |         """
10 |         self.elapsed_process_time = None
11 |         self.elapsed_real_time = None
12 |         self.normal_iterations = 0
13 |         self.biconf_iterations = 0
14 |         self.ask_parity_messages = 0
15 |         self.ask_parity_blocks = 0
16 |         self.ask_parity_bits = 0
17 |         self.reply_parity_bits = 0
18 |         self.reconciliation_bits = 0
19 |         self.reconciliation_bits_per_key_bit = None
20 |         self.efficiency = None
21 |         self.infer_parity_blocks = 0
22 |
--------------------------------------------------------------------------------
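
The Stats object only accumulates raw counters during a reconciliation; the derived fields reconciliation_bits_per_key_bit and efficiency start out as None, and the code that fills them in is not part of this listing. As a point of reference, a common convention in the Cascade literature is to define efficiency as the number of disclosed parity bits divided by the Shannon limit n * h(e); the sketch below assumes that convention and is illustrative only, not code from this repository.

# Illustrative sketch only: one conventional way to derive the None-initialized Stats
# fields, assuming efficiency is measured against the Shannon limit n * h(error_rate).
import math

def binary_entropy(error_rate):
    """Shannon binary entropy h(e), with h(0) = h(1) = 0."""
    if error_rate <= 0.0 or error_rate >= 1.0:
        return 0.0
    return -error_rate * math.log2(error_rate) - (1.0 - error_rate) * math.log2(1.0 - error_rate)

def fill_derived_stats(stats, key_size, error_rate):
    # Total classical-channel traffic attributed to this reconciliation.
    stats.reconciliation_bits = stats.ask_parity_bits + stats.reply_parity_bits
    stats.reconciliation_bits_per_key_bit = stats.reconciliation_bits / key_size
    # Efficiency relative to the theoretical minimum leakage n * h(e).
    shannon_bits = key_size * binary_entropy(error_rate)
    stats.efficiency = stats.reply_parity_bits / shannon_bits if shannon_bits > 0.0 else None
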
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to Cascade-Python's documentation.
2 | ==========================================
3 |
4 | An implementation of the Cascade QKD information reconciliation protocol by `Bruno Rijsman `_
5 |
6 | See also:
7 |
8 | * `Blogpost on Cascade `_
9 |
10 | * `C++ implementation of Cascade `_
11 |
12 | .. toctree::
13 |    :maxdepth: 2
14 |    :caption: Contents:
15 |
16 |    intro.rst
17 |
18 |    protocol.rst
19 |
20 |    comparison.rst
21 |
22 |    comparison-conclusions.rst
23 |
24 |    references.rst
25 |
26 | Indices and tables
27 | ==================
28 |
29 | * :ref:`genindex`
30 | * :ref:`modindex`
31 | * :ref:`search`
32 |
--------------------------------------------------------------------------------
/study/graphs_zero_handling.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "graph_name": "zero_handling",
4 | "title": "Zero handling",
5 | "x_axis": {
6 | "title": "Error rate",
7 | "variable": "requested_bit_error_rate"
8 | },
9 | "y_axis": {
10 | "title": "Efficiency",
11 | "variable": "efficiency",
12 | "range": [0.5, 2.0]
13 | },
14 | "series": [
15 | {
16 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
17 | "legend": "original",
18 | "line_color": "black",
19 | "deviation_color": "lightgray"
20 | },
21 | {
22 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
23 | "legend": "biconf",
24 | "line_color": "red",
25 | "deviation_color": "lightsalmon"
26 | }
27 | ]
28 | }
29 | ]
--------------------------------------------------------------------------------
/study/experiments_papers.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "independent_variable": "error_rate",
4 | "algorithm": "all",
5 | "error_rate": [
6 | {"start": 0.0000, "end": 0.0050, "step_size": 0.00005},
7 | {"start": 0.0050, "end": 0.1100, "step_size": 0.00050}
8 | ],
9 | "key_size": [1000, 2000, 10000],
10 | "runs": 1000
11 | },
12 | {
13 | "independent_variable": "key_size",
14 | "algorithm": "all",
15 | "error_rate": 0.05,
16 | "key_size": {
17 | "start": 1e3,
18 | "end": 1e5,
19 | "step_factor": 1.05
20 | },
21 | "runs": 1000
22 | },
23 | {
24 | "independent_variable": "key_size",
25 | "algorithm": "original",
26 | "error_rate": [0.01, 0.02],
27 | "key_size": {
28 | "start": 1e3,
29 | "end": 1e5,
30 | "step_factor": 1.05
31 | },
32 | "runs": 1000
33 | }
34 | ]
35 |
--------------------------------------------------------------------------------
/cascade/classical_channel.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 | class ClassicalChannel(ABC):
4 |     """
5 |     An abstract base class that abstracts the interactions that Bob has with Alice over the
6 |     classical channel.
7 |     """
8 |
9 |     @abstractmethod
10 |     def start_reconciliation(self):
11 |         """
12 |         Bob tells Alice that he is starting a new Cascade reconciliation.
13 |         """
14 |
15 |     @abstractmethod
16 |     def end_reconciliation(self):
17 |         """
18 |         Bob tells Alice that he is finished with a Cascade reconciliation.
19 |         """
20 |
21 |     @abstractmethod
22 |     def ask_parities(self, blocks):
23 |         """
24 |         Bob asks Alice to compute the parities for a list of blocks.
25 |
26 |         Params:
27 |             blocks (list): A list of blocks for which to ask the parities.
28 |
29 |         Returns:
30 |             parities (list): A list of parities, where each parity is an int value 0 or 1. The list
31 |                 of parities must be in the same order as the list of blocks.
32 |         """
33 |
--------------------------------------------------------------------------------
/cascade/mock_classical_channel.py:
--------------------------------------------------------------------------------
1 | from cascade.classical_channel import ClassicalChannel
2 |
3 | class MockClassicalChannel(ClassicalChannel):
4 |     """
5 |     A mock concrete implementation of the ClassicalChannel base class, which is used for the
6 |     experiments.
7 |     """
8 |
9 |     def __init__(self, correct_key):
10 |         self._correct_key = correct_key
11 |         self._id_to_shuffle = {}
12 |         self._reconciliation_started = False
13 |
14 |     def start_reconciliation(self):
15 |         self._reconciliation_started = True
16 |
17 |     def end_reconciliation(self):
18 |         self._reconciliation_started = False
19 |         self._id_to_shuffle = {}
20 |
21 |     def ask_parities(self, blocks):
22 |         parities = []
23 |         for block in blocks:
24 |             shuffle = block.get_shuffle()
25 |             start_index = block.get_start_index()
26 |             end_index = block.get_end_index()
27 |             parity = shuffle.calculate_parity(self._correct_key, start_index, end_index)
28 |             parities.append(parity)
29 |         return parities
30 |
--------------------------------------------------------------------------------
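
MockClassicalChannel plays the role of Alice for the experiments: it holds the correct key and answers parity queries over shuffled key ranges. The short sketch below shows how Bob-side code can drive it, using only calls that appear elsewhere in this listing (cascade.key, cascade.shuffle, cascade.block); the actual reconciliation loop that decides which blocks to ask about is not shown here.

# Sketch of querying Alice's parities through the mock channel, using only APIs that
# appear elsewhere in this listing. In a real reconciliation, Bob builds the blocks over
# his own noisy key; the correct key is reused here just to keep the example short.
from cascade.block import Block
from cascade.key import Key
from cascade.mock_classical_channel import MockClassicalChannel
from cascade.shuffle import Shuffle

correct_key = Key.create_random_key(64)        # Alice's key
channel = MockClassicalChannel(correct_key)    # Bob's view of the classical channel

channel.start_reconciliation()
shuffle = Shuffle(correct_key.get_size(), Shuffle.SHUFFLE_RANDOM)
blocks = Block.create_covering_blocks(correct_key, shuffle, 8)   # top-level blocks of 8 bits
correct_parities = channel.ask_parities(blocks)   # one parity (0 or 1) per block, same order
channel.end_reconciliation()
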
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Bruno Rijsman
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | .. image:: https://travis-ci.org/brunorijsman/cascade-python.svg?branch=master
2 |    :target: https://travis-ci.org/brunorijsman/cascade-python
3 |
4 | .. image:: https://codecov.io/gh/brunorijsman/cascade-python/branch/master/graph/badge.svg
5 |    :target: https://codecov.io/gh/brunorijsman/cascade-python
6 |
7 | .. image:: https://readthedocs.org/projects/cascade-python/badge/?version=latest
8 |    :target: https://cascade-python.readthedocs.io/en/latest/
9 |
10 | **************
11 | cascade-python
12 | **************
13 |
14 | This GitHub project has been superseded by GitHub project `cascadecpp`_, which is a port of
15 | the Python code in this repository to C++. The
16 | C++ code runs about 100x faster because C++ is faster than Python but mostly because the C++
17 | code has been much more heavily optimized. Also, the C++ code is more reliable because it has
18 | been more carefully debugged. I recommend that you read the documentation in the link below
19 | for an introduction to the Cascade algorithm. But when it comes to running code, I recommend
20 | the C++ code over the Python code.
21 |
22 | Cascade information reconciliation protocol for Quantum Key Distribution (QKD).
23 |
24 | See `documentation`_.
25 |
26 | .. _documentation: https://cascade-python.readthedocs.io/en/latest/
27 |
28 | .. _cascadecpp: https://github.com/brunorijsman/cascade-cpp
29 |
--------------------------------------------------------------------------------
/study/aggregate_stats.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | class AggregateStats:
4 |
5 |     def __init__(self):
6 |         self._count = 0
7 |         self._sum = 0
8 |         self._square_sum = 0
9 |
10 |     def record_value(self, value):
11 |         if value is not None:
12 |             self._count += 1
13 |             self._sum += value
14 |             self._square_sum += value * value
15 |
16 |     def average(self):
17 |         if self._count == 0:
18 |             return math.nan
19 |         return self._sum / self._count
20 |
21 |     def deviation(self):
22 |         """
23 |         Compute the corrected standard deviation.
24 |         See https://en.wikipedia.org/wiki/Bessel%27s_correction.
25 |
26 |         Returns:
27 |             The corrected standard deviation, or NaN if there are fewer than 2 samples.
28 |         """
29 |         if self._count < 2:
30 |             return math.nan
31 |         variance = self._square_sum / (self._count - 1)
32 |         variance -= self._sum ** 2 / ((self._count - 1) * self._count)
33 |         # Variance can end up being some very small negative number due to rounding errors
34 |         if variance <= 0.0:
35 |             variance = 0.0
36 |         deviation = math.sqrt(variance)
37 |         return deviation
38 |
39 |     def to_json_encodeable_object(self):
40 |         return {'average': self.average(), 'deviation': self.deviation(), 'count': self._count}
41 |
--------------------------------------------------------------------------------
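
AggregateStats keeps only a running count, sum, and sum of squares, so the average and the Bessel-corrected standard deviation can be produced at any time without storing the individual samples (None values are silently skipped). A quick illustrative check of the arithmetic:

# For the samples 1, 2, 3: average = 6/3 = 2.0 and the corrected variance is
# 14/2 - 36/(2*3) = 1.0, so the deviation is 1.0.
from aggregate_stats import AggregateStats

stats = AggregateStats()
for value in [1, 2, 3]:
    stats.record_value(value)
assert stats.average() == 2.0
assert stats.deviation() == 1.0
print(stats.to_json_encodeable_object())   # {'average': 2.0, 'deviation': 1.0, 'count': 3}
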
/requirements.txt:
--------------------------------------------------------------------------------
1 | alabaster==1.0.0
2 | astroid==3.3.8
3 | attrs==25.1.0
4 | babel==2.17.0
5 | certifi==2025.1.31
6 | chardet==5.2.0
7 | charset-normalizer==3.4.1
8 | codecov==2.1.13
9 | coverage==7.6.12
10 | dill==0.3.9
11 | doc8==1.1.2
12 | docutils==0.21.2
13 | gitdb==4.0.12
14 | gitdb2==4.0.2
15 | GitPython==3.1.44
16 | gprof2dot==2024.6.6
17 | idna==3.10
18 | imagesize==1.4.1
19 | importlib_metadata==8.6.1
20 | iniconfig==2.0.0
21 | isort==6.0.0
22 | Jinja2==3.1.5
23 | lazy-object-proxy==1.10.0
24 | MarkupSafe==3.0.2
25 | mccabe==0.7.0
26 | more-itertools==10.6.0
27 | narwhals==1.27.1
28 | packaging==24.2
29 | pbr==6.1.1
30 | platformdirs==4.3.6
31 | plotly==6.0.0
32 | pluggy==1.5.0
33 | py==1.11.0
34 | Pygments==2.19.1
35 | pylint==3.3.4
36 | pyparsing==3.2.1
37 | pytest==8.3.4
38 | pytest-cov==6.0.0
39 | pytoolconfig==1.3.1
40 | pytz==2025.1
41 | requests==2.32.3
42 | restructuredtext_lint==1.4.0
43 | retrying==1.3.4
44 | rope==1.13.0
45 | six==1.17.0
46 | smmap==5.0.2
47 | smmap2==3.0.1
48 | snowballstemmer==2.2.0
49 | Sphinx==8.1.3
50 | sphinx-rtd-theme==3.0.2
51 | sphinxcontrib-applehelp==2.0.0
52 | sphinxcontrib-devhelp==2.0.0
53 | sphinxcontrib-htmlhelp==2.1.0
54 | sphinxcontrib-jquery==4.1
55 | sphinxcontrib-jsmath==1.0.1
56 | sphinxcontrib-qthelp==2.0.0
57 | sphinxcontrib-serializinghtml==2.0.0
58 | stevedore==5.4.0
59 | tomlkit==0.13.2
60 | typed-ast==1.5.5
61 | urllib3==2.3.0
62 | wcwidth==0.2.13
63 | wrapt==1.17.2
64 | zipp==3.21.0
65 |
--------------------------------------------------------------------------------
/cascade/tests/test_mock_classical_channel.py:
--------------------------------------------------------------------------------
1 | from cascade.block import Block
2 | from cascade.mock_classical_channel import MockClassicalChannel
3 | from cascade.key import Key
4 | from cascade.shuffle import Shuffle
5 |
6 | def test_create_mock_classical_channel():
7 |     Key.set_random_seed(1)
8 |     correct_key = Key.create_random_key(32)
9 |     _channel = MockClassicalChannel(correct_key)
10 |
11 | def test_start_and_end_reconciliation():
12 |     Key.set_random_seed(2)
13 |     correct_key = Key.create_random_key(32)
14 |     channel = MockClassicalChannel(correct_key)
15 |     channel.start_reconciliation()
16 |     channel.end_reconciliation()
17 |
18 | def test_ask_parities():
19 |     Key.set_random_seed(3)
20 |     Shuffle.set_random_seed(77716)
21 |     correct_key = Key.create_random_key(32)
22 |     shuffle = Shuffle(correct_key.get_size(), Shuffle.SHUFFLE_RANDOM)
23 |     blocks = Block.create_covering_blocks(correct_key, shuffle, 8)
24 |     assert len(blocks) == 4
25 |     assert blocks[0].__str__() == "01010011"
26 |     assert blocks[1].__str__() == "01011100"
27 |     assert blocks[2].__str__() == "10110001"
28 |     assert blocks[3].__str__() == "01001110"
29 |     channel = MockClassicalChannel(correct_key)
30 |     channel.start_reconciliation()
31 |     parities = channel.ask_parities(blocks)
32 |     assert parities[0] == 0
33 |     assert parities[1] == 0
34 |     assert parities[2] == 0
35 |     assert parities[3] == 0
36 |     channel.end_reconciliation()
37 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 |
3 | import os
4 | import subprocess
5 | import sys
6 |
7 | sys.path.insert(0, os.path.abspath('../..'))
8 |
9 | project = 'Cascade-Python'
10 | # pylint:disable=redefined-builtin
11 | copyright = '2020-2025, Bruno Rijsman'
12 | author = 'Bruno Rijsman'
13 | release = '0.0.2'
14 |
15 | extensions = [
16 |     'sphinx.ext.napoleon',
17 |     'sphinx.ext.autodoc',
18 |     'sphinx.ext.todo'
19 | ]
20 |
21 | templates_path = []
22 | exclude_patterns = []
23 |
24 | html_theme = 'sphinx_rtd_theme'
25 |
26 | autodoc_default_options = {
27 |     'special-members': '__init__, __repr__, __str__',
28 | }
29 |
30 | def is_venv():
31 |     return (hasattr(sys, 'real_prefix') or
32 |             (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))
33 |
34 | def run_apidoc(_):
35 |     modules = ['cascade']
36 |     for module in modules:
37 |         cur_dir = os.path.abspath(os.path.dirname(__file__))
38 |         module = os.path.join(cur_dir, '..', '..', module)
39 |         output_dir = os.path.join(cur_dir, '_modules')
40 |         apidoc = 'sphinx-apidoc'
41 |         if is_venv():
42 |             apidoc = os.path.abspath(os.path.join(sys.prefix, 'bin', 'sphinx-apidoc'))
43 |         exclude_pattern = f"`find {module} -name tests`"
44 |         cmd = f"{apidoc} -f -e -o {output_dir} {module} {exclude_pattern}"
45 |         print(f"**** module={module} output_dir={output_dir} cmd={cmd}")
46 |         subprocess.check_call(cmd, shell=True)
47 |
48 | def setup(app):
49 |     app.connect('builder-inited', run_apidoc)
50 |
--------------------------------------------------------------------------------
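
The run_apidoc hook assembles the sphinx-apidoc command as a single shell string and relies on backtick command substitution (`find {module} -name tests`) to pass the test directories as exclude patterns, which is why subprocess.check_call is invoked with shell=True. Purely as an illustration of what that substitution expands to, the same exclude list could be computed in Python; this is a sketch, not what conf.py does.

# Sketch only: the pure-Python equivalent of the backtick substitution used in run_apidoc.
# conf.py itself keeps the shell form and passes the result to sphinx-apidoc as
# positional exclude patterns.
import glob
import os

def find_test_dirs(module):
    """Return every path named 'tests' below module, like `find {module} -name tests`."""
    return glob.glob(os.path.join(module, "**", "tests"), recursive=True)
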
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | # sphinx-apidoc generated files
107 | docs/source/_build/*
108 | docs/source/_modules/*
109 | docs/build/*
110 |
111 | # Open powerpoint file
112 | ~$*.pptx
113 |
114 | # Profile output (but we do store the .png in version control)
115 | profile.out
116 |
--------------------------------------------------------------------------------
/study/data_point.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from aggregate_stats import AggregateStats
4 |
5 | class DataPoint:
6 |
7 |     def __init__(self, algorithm_name, key_size, requested_bit_error_rate, code_version):
8 |         self.execution_time = time.strftime('%Y-%m-%d %H:%M:%S %Z')
9 |         self.algorithm_name = algorithm_name
10 |         self.key_size = key_size
11 |         self.requested_bit_error_rate = requested_bit_error_rate
12 |         self.code_version = code_version
13 |         self.reconciliations = 0
14 |         self.actual_bit_errors = AggregateStats()
15 |         self.actual_bit_error_rate = AggregateStats()
16 |         self.elapsed_process_time = AggregateStats()
17 |         self.elapsed_real_time = AggregateStats()
18 |         self.normal_iterations = AggregateStats()
19 |         self.biconf_iterations = AggregateStats()
20 |         self.ask_parity_messages = AggregateStats()
21 |         self.ask_parity_blocks = AggregateStats()
22 |         self.ask_parity_bits = AggregateStats()
23 |         self.reply_parity_bits = AggregateStats()
24 |         self.reconciliation_bits_per_key_bit = AggregateStats()
25 |         self.efficiency = AggregateStats()
26 |         self.infer_parity_blocks = AggregateStats()
27 |         self.remaining_bit_errors = AggregateStats()
28 |         self.remaining_bit_error_rate = AggregateStats()
29 |         self.remaining_frame_error_rate = AggregateStats()
30 |
31 |     def record_reconciliation_stats(self, stats):
32 |         self.reconciliations += 1
33 |         self.elapsed_process_time.record_value(stats.elapsed_process_time)
34 |         self.elapsed_real_time.record_value(stats.elapsed_real_time)
35 |         self.normal_iterations.record_value(stats.normal_iterations)
36 |         self.biconf_iterations.record_value(stats.biconf_iterations)
37 |         self.ask_parity_messages.record_value(stats.ask_parity_messages)
38 |         self.ask_parity_blocks.record_value(stats.ask_parity_blocks)
39 |         self.ask_parity_bits.record_value(stats.ask_parity_bits)
40 |         self.reply_parity_bits.record_value(stats.reply_parity_bits)
41 |         self.reconciliation_bits_per_key_bit.record_value(stats.reconciliation_bits_per_key_bit)
42 |         self.efficiency.record_value(stats.efficiency)
43 |         self.infer_parity_blocks.record_value(stats.infer_parity_blocks)
44 |
--------------------------------------------------------------------------------
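
A DataPoint is a plain container: a handful of metadata fields plus one AggregateStats per metric. Serializing it therefore only requires delegating the AggregateStats members to their to_json_encodeable_object() method. The data__* files referenced by the graph definitions are written by study/run_experiments.py, which is not included in this listing; the sketch below merely illustrates one way such a record could be encoded.

# Illustrative sketch: encoding a DataPoint as JSON by delegating AggregateStats fields
# to to_json_encodeable_object(). The real writer is study/run_experiments.py (not shown).
import json

from aggregate_stats import AggregateStats
from data_point import DataPoint

def data_point_to_json(point):
    def encode(obj):
        if isinstance(obj, AggregateStats):
            return obj.to_json_encodeable_object()
        raise TypeError(f"Cannot encode {type(obj).__name__}")
    return json.dumps(vars(point), default=encode)

point = DataPoint("original", 10000, 0.02, "example-version")
print(data_point_to_json(point))
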
/docs/source/comparison-conclusions.rst:
--------------------------------------------------------------------------------
1 | *******************************************************
2 | Conclusions from Comparison of Results with Literature.
3 | *******************************************************
4 |
5 | Here we summarize some of the differences that we observed in the reproduced figures as compared to the literature.
6 |
7 | Less detail.
8 | ------------
9 |
10 | The graphs in the "Demystifying the Information Reconciliation Protocol Cascade" paper are much more detailed because they executed 10,000 (or sometimes even 100,000 or 1,000,000) runs per data point, whereas we only ran 1,000 runs per data point. Our Python implementation is not fast enough to execute more than 1,000 runs per data point: it already took us about 5 days to run all the experiments on an m5 2xlarge instance on AWS with just 1,000 runs per data point. We are in the process of re-implementing Cascade in C++, which will hopefully allow for more runs per data point.
11 |
12 | Standard deviation.
13 | -------------------
14 |
15 | The graphs in the original papers do not give any indication of the standard deviation (i.e. no error bars). We have that information for all experiments, although we don't show it in all graphs; we omit it where it would make the graph too noisy.
16 |
17 | Differences in the detailed shape of the channel use graph.
18 | -----------------------------------------------------------
19 |
20 | In many respects, our reproduced channel use figures match the figures in the original literature quite well: the overall shape, the appearance of saw-tooth patterns, and the numerical values all agree closely.
21 |
22 | In other respects, however, there are some striking differences between the original and the reproduced channel use figures.
23 |
24 | In some of the figures in the original literature (e.g. figure 2 in the demystifying paper) the channel use graph clearly slopes down as the error rate increases. In our reproduced figures, this downward slope is missing.
25 |
26 | In other figures in the original literature (e.g. the black and green graphs in figure 9 in the demystifying paper) we see a "wave" pattern on top of the "saw tooth" pattern. This "wave" pattern is missing in our reproduced graphs.
27 |
28 | Channel use graph for Cascade opt. (2) is different.
29 | ----------------------------------------------------
30 |
31 | The original channel use graph for the "Cascade opt. (2)" algorithm is quite different from the reproduced graph: the original values are higher and show much wilder saw-tooth swings.
32 |
--------------------------------------------------------------------------------
/study/graphs_performance.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "graph_name": "performance_vs_error_rate",
4 | "title": "Reconciliation processing time vs error rate (key size = 10,000)",
5 | "x_axis": {
6 | "title": "Error rate",
7 | "variable": "requested_bit_error_rate",
8 | "range": [0.0, 0.1]
9 | },
10 | "y_axis": {
11 | "title": "Process time per data point (seconds)",
12 | "variable": "elapsed_process_time"
13 | },
14 | "series": [
15 | {
16 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
17 | "legend": "original",
18 | "line_color": "black",
19 | "deviation_color": "lightgray"
20 | },
21 | {
22 | "data_file": "data__algorithm=yanetal;key_size=10000;error_rate=vary",
23 | "legend": "yanetal",
24 | "line_color": "blue",
25 | "deviation_color": "lightblue"
26 | },
27 | {
28 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
29 | "legend": "biconf",
30 | "line_color": "red",
31 | "deviation_color": "lightsalmon"
32 | },
33 | {
34 | "data_file": "data__algorithm=option7;key_size=10000;error_rate=vary",
35 | "legend": "option7",
36 | "line_color": "green",
37 | "deviation_color": "lightgreen"
38 | }
39 | ]
40 | },
41 | {
42 | "graph_name": "performance_vs_key_size",
43 | "title": "Reconciliation processing time vs key size (error rate = 0.02)",
44 | "x_axis": {
45 | "title": "Key size",
46 | "variable": "key_size"
47 | },
48 | "y_axis": {
49 | "title": "Process time per data point (seconds)",
50 | "variable": "elapsed_process_time"
51 | },
52 | "series": [
53 | {
54 | "data_file": "data__algorithm=original;key_size=vary;error_rate=0.02",
55 | "legend": "original",
56 | "line_color": "black",
57 | "deviation_color": "lightgray"
58 | },
59 | {
60 | "data_file": "data__algorithm=yanetal;key_size=vary;error_rate=0.02",
61 | "legend": "yanetal",
62 | "line_color": "blue",
63 | "deviation_color": "lightblue"
64 | },
65 | {
66 | "data_file": "data__algorithm=biconf;key_size=vary;error_rate=0.02",
67 | "legend": "biconf",
68 | "line_color": "red",
69 | "deviation_color": "lightsalmon"
70 | },
71 | {
72 | "data_file": "data__algorithm=option7;key_size=vary;error_rate=0.02",
73 | "legend": "option7",
74 | "line_color": "green",
75 | "deviation_color": "lightgreen"
76 | }
77 | ]
78 | }
79 | ]
--------------------------------------------------------------------------------
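
Each series entry names a data file, a legend, a line color, and a deviation color; study/make_graphs.py (not included in this listing) turns these definitions into plots, and plotly is pinned in requirements.txt. The sketch below only illustrates the intent of one series entry with placeholder numbers; how the averages and deviations are actually read from the data__* files is not shown here.

# Illustrative sketch of one series rendered with plotly: a line plus a filled deviation
# band ("toself" fill). The x, y, and dev values are placeholders, not real measurements.
import plotly.graph_objects as go

series = {"legend": "original", "line_color": "black", "deviation_color": "lightgray"}
x = [0.00, 0.01, 0.02, 0.05, 0.10]    # requested_bit_error_rate
y = [1.0, 2.1, 3.4, 7.9, 15.2]        # average elapsed_process_time (placeholder values)
dev = [0.1, 0.2, 0.3, 0.8, 1.5]       # standard deviations (placeholder values)

upper = [a + d for a, d in zip(y, dev)]
lower = [a - d for a, d in zip(y, dev)]

fig = go.Figure()
fig.add_trace(go.Scatter(x=x + x[::-1], y=upper + lower[::-1], fill="toself",
                         line={"width": 0}, fillcolor=series["deviation_color"],
                         showlegend=False))
fig.add_trace(go.Scatter(x=x, y=y, name=series["legend"],
                         line={"color": series["line_color"]}))
fig.update_layout(xaxis_title="Error rate",
                  yaxis_title="Process time per data point (seconds)")
fig.show()
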
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.pythonPath": "env/bin/python",
3 | "cSpell.words": [
4 | "André",
5 | "andrebreis",
6 | "Arial",
7 | "Avenir",
8 | "Ayuso",
9 | "basis",
10 | "Baumgartner",
11 | "bgcolor",
12 | "biconf",
13 | "Brochmann",
14 | "Calver",
15 | "Christoph",
16 | "Ciurana",
17 | "codecov",
18 | "cqclib",
19 | "criterium",
20 | "dichotomic",
21 | "Diffie",
22 | "dunder",
23 | "encodeable",
24 | "Engenharia",
25 | "Erven",
26 | "ETSI",
27 | "exponentformat",
28 | "Faculdade",
29 | "genindex",
30 | "Gigov",
31 | "Gilles",
32 | "Golay",
33 | "gprof",
34 | "gridcolor",
35 | "Grimaila",
36 | "Hackathon",
37 | "hadamard",
38 | "hexsha",
39 | "HMAC",
40 | "hoverinfo",
41 | "htmlcov",
42 | "Humphries",
43 | "ICNC",
44 | "imap",
45 | "inited",
46 | "interoperate",
47 | "IRTF",
48 | "Janabi",
49 | "Janssen",
50 | "Jesús",
51 | "Jiang",
52 | "Keuter",
53 | "Kozlowski",
54 | "kwargs",
55 | "LDPC",
56 | "lightblue",
57 | "lightgray",
58 | "lightgreen",
59 | "lightred",
60 | "lightsalmon",
61 | "linecolor",
62 | "Martín",
63 | "Martínez",
64 | "Mateo",
65 | "maxdepth",
66 | "mdskrzypczyk",
67 | "modindex",
68 | "Momtchil",
69 | "Mustafa",
70 | "Nagle",
71 | "Nikolay",
72 | "openssl",
73 | "Pacher",
74 | "Parallelization",
75 | "Pedersen",
76 | "Peev",
77 | "plotly",
78 | "pstats",
79 | "pycache",
80 | "pylint",
81 | "pytest",
82 | "PYTHONPATH",
83 | "qber",
84 | "QIRG",
85 | "qron",
86 | "qubit",
87 | "qubits",
88 | "Rabiee",
89 | "recurse",
90 | "recurses",
91 | "recursing",
92 | "reis",
93 | "Rijsman",
94 | "Ruqaya",
95 | "serie",
96 | "showexponent",
97 | "showgrid",
98 | "showlegend",
99 | "showline",
100 | "showticklabels",
101 | "simula",
102 | "simulaqron",
103 | "Singl",
104 | "slateblue",
105 | "strftime",
106 | "Sufyan",
107 | "tabularcolumns",
108 | "Tian",
109 | "tickfont",
110 | "Tienan",
111 | "toctree",
112 | "toself",
113 | "Toyran",
114 | "Tpng",
115 | "Universidade",
116 | "unshuffled",
117 | "venv",
118 | "virtualenv",
119 | "Wojciech",
120 | "xaxis",
121 | "Xiang",
122 | "Xiaxiang",
123 | "xlarge",
124 | "yanetal",
125 | "yaxis",
126 | "Yung"
127 | ],
128 | "python.linting.pylintEnabled": true,
129 | "python.linting.enabled": true,
130 | "restructuredtext.confPath": "${workspaceFolder}/docs/source",
131 | "restructuredtext.builtDocumentationPath": "${workspaceFolder}/docs/build",
132 | "editor.wordWrapColumn": 100,
133 | "pylint.importStrategy": "fromEnvironment",
134 | "editor.rulers": [
135 | 100
136 | ]
137 | }
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | MAKE_FILE_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
2 | export PYTHONPATH := $(PYTHONPATH):$(MAKE_FILE_DIR)
3 |
4 | pre-commit: lint test
5 | @echo "OK"
6 |
7 | clean:
8 | rm -f .coverage*
9 | rm -f profile.out
10 | rm -rf __pycache__
11 | rm -rf htmlcov
12 | rm -rf .pytest_cache
13 | rm -rf cascade/__pycache__
14 | rm -rf cascade/.pytest_cache
15 | rm -rf cascade/tests/__pycache__
16 | rm -rf study/__pycache__
17 | rm -rf docs/source/_build
18 | rm -rf docs/source/_modules
19 | rm -rf docs/build/*
20 |
21 | data: data-papers data-performance data-zero-handling
22 |
23 | data-papers:
24 | mkdir -p study/data/papers
25 | rm -f study/data/papers/data__*
26 | python study/run_experiments.py study/experiments_papers.json \
27 | --output-dir study/data/papers
28 |
29 | data-papers-subset:
30 | mkdir -p study/data/papers_subset
31 | rm -f study/data/papers_subset/data__*
32 | python study/run_experiments.py study/experiments_papers.json \
33 | --output-dir study/data/papers_subset --max-runs 3
34 |
35 | data-performance:
36 | mkdir -p study/data/performance
37 | rm -f study/data/performance/data__*
38 | python study/run_experiments.py study/experiments_performance.json \
39 | --output-dir study/data/performance
40 |
41 | data-zero-handling:
42 | mkdir -p study/data/zero_handling
43 | rm -f study/data/zero_handling/data__*
44 | python study/run_experiments.py study/experiments_zero_handling.json \
45 | --output-dir study/data/zero_handling
46 |
47 | coverage-open:
48 | open htmlcov/index.html
49 |
50 | docs:
51 | sphinx-build -a docs/source docs/build
52 |
53 | docs-open: docs
54 | open docs/build/index.html
55 |
56 | graphs-papers:
57 | mkdir -p study/graphs/papers
58 | rm -f study/graphs/papers/*.png
59 | python study/make_graphs.py study/graphs_demystifying.json \
60 | --data-dir study/data/papers
61 | python study/make_graphs.py study/graphs_andre_reis_thesis.json \
62 | --data-dir study/data/papers
63 |
64 | graphs-performance:
65 | mkdir -p study/graphs/performance
66 | rm -f study/graphs/performance/*.png
67 | python study/make_graphs.py study/graphs_performance.json \
68 | --data-dir study/data/performance
69 |
70 | graphs-zero-handling:
71 | mkdir -p study/graphs/zero_handling
72 | rm -f study/graphs/zero_handling/*.png
73 | python study/make_graphs.py study/graphs_zero_handling.json \
74 | --data-dir study/data/zero_handling
75 |
76 | install:
77 | pip install -r requirements.txt
78 |
79 | lint:
80 | pylint cascade cascade/tests
81 | pylint study
82 |
83 | profile:
84 | mkdir -p study/data/profile
85 | rm -f study/data/profile/data__*
86 | python -m cProfile -o profile.out study/run_experiments.py --disable-multi-processing \
87 | --output-directory study/data/profile study/experiments_profile.json
88 | python -m gprof2dot -f pstats profile.out | dot -Tpng -o profile.png
89 | open profile.png
90 |
91 | profile-open:
92 | open profile.png
93 |
94 | test:
95 | rm -f .coverage*
96 | pytest -v -s --cov=cascade --cov-report=html --cov-report term cascade/tests
97 |
98 | .PHONY: \
99 | clean \
100 | coverage-open \
101 | data \
102 | data-papers \
103 | data-papers-subset \
104 | data-performance \
105 | data-zero-handling \
106 | docs \
107 | docs-open \
108 | graphs-papers \
109 | graphs-performance \
110 | graphs-zero-handling \
111 | install \
112 | lint \
113 | pre-commit \
114 | profile \
115 | profile-open \
116 | test
117 |
--------------------------------------------------------------------------------
/cascade/tests/test_reconciliation.py:
--------------------------------------------------------------------------------
1 | from cascade.key import Key
2 | from cascade.mock_classical_channel import MockClassicalChannel
3 | from cascade.reconciliation import Reconciliation
4 | from cascade.shuffle import Shuffle
5 |
6 | def create_reconciliation(seed, algorithm, key_size, error_rate):
7 | Key.set_random_seed(seed)
8 | Shuffle.set_random_seed(seed + 1)
9 | correct_key = Key.create_random_key(key_size)
10 | noisy_key = correct_key.copy(error_rate, Key.ERROR_METHOD_EXACT)
11 | mock_classical_channel = MockClassicalChannel(correct_key)
12 | reconciliation = Reconciliation(algorithm, mock_classical_channel, noisy_key, error_rate)
13 | return (reconciliation, correct_key)
14 |
15 | def test_create_reconciliation():
16 | (_reconciliation, _correct_key) = create_reconciliation(1, "original", 32, 0.1)
17 |
18 | def test_get_noisy_key():
19 | (reconciliation, _correct_key) = create_reconciliation(1, "original", 32, 0.1)
20 | assert reconciliation.get_noisy_key().__str__() == "00101111001011111001001010100010"
21 |
22 | def test_get_reconciled_key():
23 | (reconciliation, correct_key) = create_reconciliation(1, "original", 32, 0.1)
24 | assert reconciliation.get_noisy_key().__str__() == "00101111001011111001001010100010"
25 | assert reconciliation.get_reconciled_key() is None
26 | reconciliation.reconcile()
27 | assert reconciliation.get_reconciled_key().__str__() == "00101111001011011001000010100110"
28 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
29 |
30 | def test_reconcile_original():
31 | (reconciliation, correct_key) = create_reconciliation(2, "original", 10000, 0.01)
32 | reconciliation.reconcile()
33 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
34 |
35 | def test_reconcile_biconf():
36 | (reconciliation, correct_key) = create_reconciliation(3, "biconf", 10000, 0.01)
37 | reconciliation.reconcile()
38 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
39 |
40 | def test_reconcile_yanetal():
41 | (reconciliation, correct_key) = create_reconciliation(4, "yanetal", 10000, 0.01)
42 | reconciliation.reconcile()
43 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
44 |
45 | def test_reconcile_option3():
46 | (reconciliation, correct_key) = create_reconciliation(5, "option3", 10000, 0.01)
47 | reconciliation.reconcile()
48 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
49 |
50 | def test_reconcile_option4():
51 | (reconciliation, correct_key) = create_reconciliation(6, "option4", 10000, 0.01)
52 | reconciliation.reconcile()
53 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
54 |
55 | def test_reconcile_option7():
56 | (reconciliation, correct_key) = create_reconciliation(7, "option7", 10000, 0.01)
57 | reconciliation.reconcile()
58 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
59 |
60 | def test_reconcile_option8():
61 | (reconciliation, correct_key) = create_reconciliation(8, "option8", 10000, 0.01)
62 | reconciliation.reconcile()
63 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
64 |
65 | def test_reconcile_zero_errors():
66 | (reconciliation, correct_key) = create_reconciliation(9, "original", 10000, 0.00)
67 | reconciliation.reconcile()
68 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
69 |
70 | def test_reconcile_many_errors():
71 | (reconciliation, correct_key) = create_reconciliation(10, "original", 10000, 0.90)
72 | reconciliation.reconcile()
73 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
74 |
75 | def test_reconcile_tiny_key():
76 | (reconciliation, correct_key) = create_reconciliation(11, "original", 1, 0.01)
77 | reconciliation.reconcile()
78 | assert reconciliation.get_reconciled_key().__str__() == correct_key.__str__()
79 |
--------------------------------------------------------------------------------
/docs/source/references.rst:
--------------------------------------------------------------------------------
1 | ****************
2 | Further reading.
3 | ****************
4 |
5 | Papers.
6 | =======
7 |
8 | `Demystifying the Information Reconciliation Protocol Cascade. `_ *Jesus Martinez-Mateo, Christoph Pacher, Momtchil Peev, Alex Ciurana, and Vicente Martin.* arXiv:1407.3257 [quant-ph], Jul 2014.
9 |
10 | `Towards an Optimal Implementation of Cascade. `_ *Jesús Martínez Mateo, Christoph Pacher, Momtchil Peev, Alex Ciurana Aguilar, Vicente Martín Ayuso.* 2014.
11 |
12 | `Secret-Key Reconciliation by Public Discussion. `_ *Gilles Brassard and Louis Salvail.* Advances in Cryptology, EUROCRYPT '93, Lecture Notes in Computer Science 765, pages 410-423, 1994.
13 |
14 | `An analysis of error reconciliation protocols used in Quantum Key Distribution systems. `_ *James S Johnson, Michael R Grimaila, Jeffrey W. Humphries, and Gerald B Baumgartner.* The Journal of Defense Modeling & Simulation 12(3):217-227, Jul 2015.
15 |
16 | `High Performance Information Reconciliation for QKD with CASCADE. `_ *Thomas Brochmann Pedersen and Mustafa Toyran.* arXiv:1307.7829 [quant-ph], Jul 2013.
17 |
18 | `Key Reconciliation Techniques in Quantum Key Distribution. `_ *Al-Janabi, Sufyan & Rabiee, Ruqaya.* 2011.
19 |
20 | `Information Reconciliation Protocol in Quantum Key Distribution System. `_ *Hao Yan, Tienan Ren, Xiang Peng, Xiaxiang Lin, Wei Jiang, Tian Liu, and Hong Guo.* Proceedings - 4th International Conference on Natural Computation, ICNC 2008, 2008.
21 |
22 | `A Probabilistic Analysis of BINARY and CASCADE. `_ *Ruth II-Yung Ng.* 2014.
23 |
24 | `Error Reconciliation in Quantum Key Distribution Protocols. `_ *Miralem Mehic, Marcin Niemiec, Harun Šiljak, and Miroslav Vozňák.* Lecture Notes in Computer Science, 2020.
25 |
26 | `Using Cascade in Quantum Key Distribution. `_ *Devashish Tupkary, Norbert Lütkenhaus.* arXiv:2307.00576 [quant-ph], Dec 2023.
27 |
28 | `Cracking the Curious Case of the Cascade Protocol. `_ *Anand Choudhary, Ajay Wasan.* IEEE Access. PP. 1-1. 10.1109/ACCESS.2023.3303392, 2023.
29 |
30 | Theses.
31 | =======
32 |
33 | `Quantum Key Distribution Post Processing - A study on the Information Reconciliation Cascade Protocol. `_
34 | *André Reis.* Master's Thesis, Faculdade de Engenharia da Universidade do Porto, Jul 2019.
35 |
36 | `On Experimental Quantum Communication and Cryptography. `_ *Chris Erven.* PhD Thesis, University of Waterloo, 2012.
37 |
38 | `Quantum Key Distribution Data Post-Processing with Limited Resources: Towards Satellite-Based Quantum Communication. `_ *Nikolay Gigov.* Master's Thesis, University of Waterloo, 2013.
39 |
40 | `An Empirical Analysis of the Cascade Secret Key Reconciliation Protocol for Quantum Key Distribution. `_ *Timothy I. Calver, Captain, USAF.* Master's Thesis, Department of the Air Force, Air University, Air Force Institute of Technology.
41 |
42 | `Quantum Key Distribution post processing. `_ *Marco Giulio Lorenzo.* Master's Thesis, Politecnico di Torino, 2023.
43 |
44 | Implementations.
45 | ================
46 |
47 | GitHub repository `brunorijsman/cascade-python `_.
48 |
49 | GitHub repository `brunorijsman/cascade-cpp `_.
50 |
51 | GitHub repository `andrebreis/cascade-study `_.
52 |
53 | GitHub repository `mdskrzypczyk/QChat `_.
54 |
55 | GitHub repository `gftea/cascade-protocol `_.
56 |
--------------------------------------------------------------------------------
/docs/source/implementation.rst:
--------------------------------------------------------------------------------
1 | *********************************
2 | Cascade implementation in Python.
3 | *********************************
4 |
5 | TODO: Write documentation for implementation.
6 |
7 | A Shuffle object represents a specific permutation (i.e. re-ordering) of key bits.
8 |
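A minimal usage sketch, using only calls that are exercised by the unit tests in
``cascade/tests/test_shuffle.py`` (the 8-bit key size is illustrative):

.. code-block:: python

    from cascade.key import Key
    from cascade.shuffle import Shuffle

    # Create a random 8-bit key and a random permutation of its bit indexes.
    key = Key.create_random_key(8)
    shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)

    # Reading shuffled bit i returns the key bit that the permutation maps i to.
    for shuffle_index in range(shuffle.get_size()):
        key_index = shuffle.get_key_index(shuffle_index)
        assert shuffle.get_bit(key, shuffle_index) == key.get_bit(key_index)
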
9 | Shuffle identifier
10 | ##################
11 |
12 | The concept of a shuffle identifier is used when Bob asks Alice to compute the correct parity for some subset of the bits in a shuffled key. Because the keys are large, this subset over which Alice must compute the correct parity can contain a very large number of bits. And while the subset of bits is a contiguous range in the shuffled key, it is not a contiguous range of bits in the original unshuffled key. For example, if the key size is 10,000 bits, Bob might want Alice to compute the correct parity over some random subset of 5,000 bits in the original unshuffled key.
13 |
14 | Bob could send a message to Alice that explicitly lists all 5,000 key bit indexes for which Alice should compute the correct parity. But that would be costly and inefficient: each such message would be very large, and Bob sends many of them during a reconciliation.
15 |
16 | Instead, Bob uses a more efficient approach based on shuffle identifiers. Rather than explicitly sending a large number of key indexes (5,000 in our previous example), Bob sends the following (a minimal sketch of Bob's side follows the list):
17 |
18 | * The identifier of the Shuffle object that Bob used to shuffle the key.
19 |
20 | * The start_shuffle_index and the end_shuffle_index that identify the contiguous subset of bits in the shuffled key over which Alice must compute the parity.
21 |
22 | Thus, instead of sending a very large message with thousands of indexes, Bob sends a small message containing only three numbers: the shuffle_identifier, the start_shuffle_index, and the end_shuffle_index.
23 |
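A minimal sketch of Bob's side, using the Shuffle and Key calls that are exercised by the unit tests; the ask_parity_message tuple itself is illustrative and is not the repository's actual message format:

.. code-block:: python

    from cascade.key import Key
    from cascade.shuffle import Shuffle

    # Bob's noisy key and the random permutation he used for this Cascade iteration.
    noisy_key = Key.create_random_key(10000)
    shuffle = Shuffle(noisy_key.get_size(), Shuffle.SHUFFLE_RANDOM)

    # The sub-range of the *shuffled* key whose correct parity Bob wants from Alice.
    start_shuffle_index, end_shuffle_index = 0, 5000

    # Three numbers replace an explicit list of 5,000 key bit indexes.
    ask_parity_message = (shuffle.get_identifier(), start_shuffle_index, end_shuffle_index)
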
24 | When Alice receives this message from Bob, she can calculate the correct parity as follows (a sketch of Alice's side follows the list):
25 |
26 | * Alice constructs a Shuffle object from the shuffle_identifier that she received from Bob. This Shuffle object is guaranteed to shuffle the key in exactly the same way as the Shuffle object that Bob used (i.e. guaranteed to create the exact same permutation of key bits).
27 |
28 | * Constructing a Shuffle object from a shuffle_identifier is still a somewhat expensive operation (linear in the size of the key, in both space and time). However, Bob only uses a very limited number of Shuffle objects, namely one per iteration of the Cascade algorithm (i.e. typically between 4 and 14 Shuffle objects, depending on the exact variation of the Cascade algorithm). Thus, Alice only sees a small number of unique shuffle_identifier values, and she can keep a cache of Shuffle objects, indexed by shuffle_identifier.
29 |
30 | * Once Alice has created the Shuffle object (based on the shuffle_identifier), she can re-create the shuffled key that Bob used, and compute the correct parity over what is now a contiguous range of bits from start_shuffle_index to end_shuffle_index.
31 |
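A minimal sketch of Alice's side; Shuffle.create_shuffle_from_identifier() and Shuffle.calculate_parity() are exercised by the unit tests, while the cache dictionary and the function name below are illustrative only:

.. code-block:: python

    from cascade.shuffle import Shuffle

    # Alice only ever sees one shuffle_identifier per Cascade iteration, so a small
    # cache avoids re-creating the (linear-cost) Shuffle object for every request.
    shuffle_cache = {}

    def compute_correct_parity(correct_key, shuffle_identifier, start_shuffle_index,
                               end_shuffle_index):
        # Re-create (or re-use) the same Shuffle object that Bob used, then compute
        # the parity over the now-contiguous shuffled range [start, end).
        if shuffle_identifier not in shuffle_cache:
            shuffle_cache[shuffle_identifier] = \
                Shuffle.create_shuffle_from_identifier(shuffle_identifier)
        shuffle = shuffle_cache[shuffle_identifier]
        return shuffle.calculate_parity(correct_key, start_shuffle_index, end_shuffle_index)
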
32 | The concept of a shuffle_identifier is only intended to make the protocol more efficient. It has nothing to do with security. Specifically, it is assumed that Eve, the eavesdropper, can observe the shuffle_identifier and can re-create the Shuffle object (i.e. the permutation of key bits) from it.
33 |
34 | Shuffle random number generators
35 | ################################
36 |
37 | The Shuffle module uses a rather complex arrangement with multiple separate and isolated random number generators. This complex arrangement exists for two reasons:
38 |
39 | 1. To enable a shuffle object to be re-created using only the shuffle identifier, and end up
40 | with a new shuffle object that generates the exact same permutation of key bits (as
41 | described above).
42 |
43 | 2. To enable the unit tests to be deterministic: when we run the same unit test multiple times,
44 | the exact same sequence of random shuffle objects is created.
45 |
46 | You might wonder whether this arrangement still works when multiple different versions of Python are involved, for example:
47 |
48 | 1. Bob creates a shuffle object.
49 |
50 | 2. Bob sends the identifier of the shuffle object to Alice.
51 |
52 | 3. Alice re-creates the shuffle object using the shuffle identifier received from Bob.
53 |
54 | The question is: will this still work if Alice and Bob are running different versions of Python? The concern is that the random number generators in different versions of Python might generate different random number sequences for the same seed. If that were to happen, Alice's shuffle object would permute keys in a different way than Bob's shuffle object.
55 |
56 | We need not be concerned. It *does* work fine because the Python documentation for the random module says the following: "Most of the random module’s algorithms and seeding functions are subject to change across Python versions, but two aspects are guaranteed not to change: If a new seeding method is added, then a backward compatible seeder will be offered. The generator’s random() method will continue to produce the same sequence when the compatible seeder is given the same seed."
57 |
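A minimal sketch of the reproducibility property that the text relies on, using only the standard random module (the variable names are illustrative):

.. code-block:: python

    import random

    # Two isolated random.Random instances seeded with the same value produce the same
    # permutation, which is why a shuffle can be reconstructed on Alice's side from the
    # seed that is embedded in the shuffle identifier.
    seed = 12345
    bob_rng = random.Random(seed)
    alice_rng = random.Random(seed)

    bob_permutation = list(range(8))
    alice_permutation = list(range(8))
    bob_rng.shuffle(bob_permutation)
    alice_rng.shuffle(alice_permutation)
    assert bob_permutation == alice_permutation
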
--------------------------------------------------------------------------------
/cascade/key.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import random
3 |
4 | class Key:
5 | """
6 | A key that the Cascade protocol reconciles.
7 | """
8 | _random = random.Random()
9 |
10 | ERROR_METHOD_BERNOULLI = "bernoulli"
11 | ERROR_METHOD_EXACT = "exact"
12 | ERROR_METHODS = [ERROR_METHOD_BERNOULLI, ERROR_METHOD_EXACT]
13 |
14 | def __init__(self):
15 | """
16 | Create an empty key.
17 | """
18 | self._size = 0
19 | self._bits = {} # Bits are stored as a dictionary, indexed by index in [0..size), with value 0 or 1.
20 |
21 | @staticmethod
22 | def create_random_key(size):
23 | """
24 | Create a random key.
25 |
26 | Args:
27 | size (int): The size of the key in bits. Must be >= 0.
28 |
29 | Returns:
30 | A random key of the specified size.
31 | """
32 | # pylint:disable=protected-access
33 | key = Key()
34 | key._size = size
35 | for i in range(size):
36 | key._bits[i] = Key._random.randint(0, 1)
37 | return key
38 |
39 | def __repr__(self):
40 | """
41 | Get the unambiguous string representation of the key.
42 |
43 | Returns:
44 | The unambiguous string representation of the key.
45 | """
46 | return "Key: " + self.__str__()
47 |
48 | def __str__(self):
49 | """
50 | Get the human-readable string representation of the key.
51 |
52 | Returns:
53 | The human-readable string representation of the key.
54 | """
55 | string = ""
56 | for i in range(self._size):
57 | string += str(self._bits[i])
58 | return string
59 |
60 | @staticmethod
61 | def set_random_seed(seed):
62 | """
63 | Set the seed for the isolated random number generator that is used only in the key
64 | module and nowhere else. If two applications set the seed to the same value, the key
65 | module produces the exact same sequence of random keys. This is used to make experiments
66 | reproducible.
67 |
68 | Args:
69 | seed (int): The seed value for the random number generator which is isolated to the
70 | key module.
71 | """
72 | Key._random = random.Random(seed)
73 |
74 | def get_size(self):
75 | """
76 | Get the size of the key in bits.
77 |
78 | Returns:
79 | The size of the key in bits.
80 | """
81 | return self._size
82 |
83 | def get_bit(self, index):
84 | """
85 | Get the value of the key bit at a given index.
86 |
87 | Args:
88 | index (int): The index of the bit. Index must be in range [0, key.size).
89 |
90 | Returns:
91 | The value (0 or 1) of the key bit at the given index.
92 | """
93 | return self._bits[index]
94 |
95 | def set_bit(self, index, value):
96 | """
97 | Set the value of the key bit at a given index.
98 |
99 | Args:
100 | index (int): The index of the bit. Index must be in range [0, key.size).
101 | value (int): The new value of the bit. Must be 0 or 1.
102 | """
103 | self._bits[index] = value
104 |
105 | def flip_bit(self, index):
106 | """
107 | Flip the value of the key bit at a given index (0 to 1, and vice versa).
108 |
109 | Args:
110 | index (int): The index of the bit. Index must be in range [0, key.size).
111 | """
112 | self._bits[index] = 1 - self._bits[index]
113 |
114 | def copy(self, error_rate, error_method):
115 | """
116 | Copy a key and optionally apply noise.
117 |
118 | Args:
119 | error_rate (float): The requested error rate.
120 | error_method (str): The method for choosing errors. Must be one of the error methods in
121 | ERROR_METHODS.
122 |
123 | Returns:
124 | A new Key instance, which is a copy of this key, with noise applied.
125 | """
126 | # pylint:disable=protected-access
127 | key = Key()
128 | key._size = self._size
129 | key._bits = copy.deepcopy(self._bits)
130 |
131 | if error_method == self.ERROR_METHOD_EXACT:
132 | error_count = round(error_rate * self._size)
133 | bits = list(self._bits.keys())
134 | bits_to_flip = Key._random.sample(bits, error_count)
135 | for index in bits_to_flip:
136 | key._bits[index] = 1 - key._bits[index]
137 |
138 | if error_method == self.ERROR_METHOD_BERNOULLI:
139 | for index in self._bits.keys():
140 | if Key._random.random() <= error_rate:
141 | key._bits[index] = 1 - key._bits[index]
142 |
143 | return key
144 |
145 | def difference(self, other_key):
146 | """
147 | Return the number of bits that are different between this key and the other_key (also known
148 | as the Hamming distance).
149 |
150 | Args:
151 | other_key (Key): The other key that this key has to be compared with. Must be the same
152 | size as this key.
153 |
154 | Returns:
155 | The number of bits that are different between this key and the other key.
156 | """
157 | difference = 0
158 | for i in range(self._size):
159 | # pylint:disable=protected-access
160 | if self._bits[i] != other_key._bits[i]:
161 | difference += 1
162 | return difference
163 |
--------------------------------------------------------------------------------
/cascade/tests/test_key.py:
--------------------------------------------------------------------------------
1 | from cascade.key import Key
2 |
3 | def test_create_empty_key():
4 | key = Key()
5 | assert key.get_size() == 0
6 |
7 | def test_create_random_key():
8 | Key.set_random_seed(111)
9 | key = Key.create_random_key(32)
10 | assert key.get_size() == 32
11 | assert key.get_bit(0) in [0, 1]
12 | assert key.get_bit(7) in [0, 1]
13 | assert key.get_bit(31) in [0, 1]
14 | key = Key.create_random_key(16)
15 | assert key.get_size() == 16
16 | assert key.__str__() == "1000111110000110"
17 |
18 | def test_repr():
19 | Key.set_random_seed(222)
20 | key = Key()
21 | assert key.__repr__() == "Key: "
22 | key = Key.create_random_key(8)
23 | assert key.__repr__() == "Key: 00110010"
24 |
25 | def test_str():
26 | Key.set_random_seed(222)
27 | key = Key()
28 | assert key.__str__() == ""
29 | key = Key.create_random_key(8)
30 | assert key.__str__() == "00110010"
31 |
32 | def test_set_random_seed():
33 | Key.set_random_seed(333)
34 | key = Key.create_random_key(8)
35 | assert key.__str__() == "11010111"
36 | key = Key.create_random_key(8)
37 | assert key.__str__() == "10101010"
38 | key = Key.create_random_key(16)
39 | assert key.__str__() == "1111101101010000"
40 | # Resetting the seed to the same original value should produce the same sequence of random keys
41 | Key.set_random_seed(333)
42 | key = Key.create_random_key(8)
43 | assert key.__str__() == "11010111"
44 | key = Key.create_random_key(8)
45 | assert key.__str__() == "10101010"
46 | key = Key.create_random_key(16)
47 | assert key.__str__() == "1111101101010000"
48 |
49 | def test_get_size():
50 | key = Key()
51 | assert key.get_size() == 0
52 | key = Key.create_random_key(7)
53 | assert key.get_size() == 7
54 |
55 | def test_get_bit():
56 | Key.set_random_seed(1234)
57 | key = Key.create_random_key(11)
58 | assert key.__str__() == "10000001000"
59 |
60 | def test_set_bit():
61 | Key.set_random_seed(2345)
62 | key = Key.create_random_key(13)
63 | assert key.__str__() == "1001011111011"
64 | key.set_bit(0, 0)
65 | assert key.__str__() == "0001011111011"
66 | key.set_bit(0, 1)
67 | assert key.__str__() == "1001011111011"
68 | key.set_bit(4, 0)
69 | assert key.__str__() == "1001011111011"
70 | key.set_bit(4, 1)
71 | assert key.__str__() == "1001111111011"
72 | key.set_bit(12, 0)
73 | assert key.__str__() == "1001111111010"
74 | key.set_bit(12, 1)
75 | assert key.__str__() == "1001111111011"
76 |
77 | def test_flip_bit():
78 | Key.set_random_seed(3456)
79 | key = Key.create_random_key(9)
80 | assert key.__str__() == "111001100"
81 | key.flip_bit(0)
82 | assert key.__str__() == "011001100"
83 | key.flip_bit(3)
84 | assert key.__str__() == "011101100"
85 | key.flip_bit(8)
86 | assert key.__str__() == "011101101"
87 |
88 | def test_copy_without_noise():
89 |
90 | Key.set_random_seed(3456)
91 |
92 | # Copy an empty key.
93 | key = Key()
94 | assert key.__str__() == ""
95 | key_copy = key.copy(0.0, Key.ERROR_METHOD_EXACT)
96 | assert key.__str__() == ""
97 | assert key_copy.__str__() == ""
98 |
99 | # Copy a non-empty key.
100 | key = Key.create_random_key(64)
101 | assert key.__str__() == "1110011000011110100111010001100011100000010011010101110100000010"
102 | key_copy = key.copy(0.0, Key.ERROR_METHOD_EXACT)
103 | assert key.__str__() == "1110011000011110100111010001100011100000010011010101110100000010"
104 | assert key_copy.__str__() == "1110011000011110100111010001100011100000010011010101110100000010"
105 |
106 | # Make sure that each key has an independent copy of the bits; i.e. that changing a bit in the
107 | # original key does not affect the copied key, or vice versa.
108 | key_copy.flip_bit(1)
109 | assert key.__str__() == "1110011000011110100111010001100011100000010011010101110100000010"
110 | assert key_copy.__str__() == "1010011000011110100111010001100011100000010011010101110100000010"
111 |
112 | def test_copy_with_exact_noise():
113 |
114 | Key.set_random_seed(5678)
115 |
116 | # Copy a non-empty key with noise
117 | key = Key.create_random_key(6)
118 | assert key.__str__() == "001101"
119 | key_copy = key.copy(0.5, Key.ERROR_METHOD_EXACT)
120 | assert key.__str__() == "001101"
121 | assert key_copy.__str__() == "011011"
122 |
123 | # Make sure that each key has an independent copy of the bits; i.e. that changing a bit in the
124 | # original key does not affect the copied key, or vice versa.
125 | key_copy.flip_bit(1)
126 | assert key.__str__() == "001101"
127 | assert key_copy.__str__() == "001011"
128 |
129 | # Extreme case, flip all bits.
130 | key_copy = key.copy(1.0, Key.ERROR_METHOD_EXACT)
131 | assert key_copy.__str__() == "110010"
132 |
133 | def test_copy_with_bernoulli_noise():
134 |
135 | Key.set_random_seed(5678)
136 |
137 | # Copy a non-empty key with noise
138 | key = Key.create_random_key(6)
139 | assert key.__str__() == "001101"
140 | key_copy = key.copy(0.5, Key.ERROR_METHOD_BERNOULLI)
141 | assert key.__str__() == "001101"
142 | assert key_copy.__str__() == "011100"
143 |
144 | # Make sure that each key has an independent copy of the bits; i.e. that changing a bit in the
145 | # original key does not affect the copied key, or vice versa.
146 | key_copy.flip_bit(1)
147 | assert key.__str__() == "001101"
148 | assert key_copy.__str__() == "001100"
149 |
150 | # Extreme case, flip probability 1.0.
151 | key_copy = key.copy(1.0, Key.ERROR_METHOD_BERNOULLI)
152 | assert key_copy.__str__() == "110010"
153 |
154 | def test_difference():
155 | # Normal case.
156 | key = Key.create_random_key(64)
157 | key_copy = key.copy(0.078125, Key.ERROR_METHOD_EXACT)
158 | assert key.difference(key_copy) == 5
159 | # Special case: compare with self.
160 | assert key.difference(key) == 0
161 | # Special case: empty key.
162 | empty_key_1 = Key()
163 | empty_key_2 = Key()
164 | assert empty_key_1.difference(empty_key_2) == 0
165 |
--------------------------------------------------------------------------------
/cascade/tests/test_algorithm.py:
--------------------------------------------------------------------------------
1 | from cascade.algorithm import get_algorithm_by_name
2 |
3 | def test_original_algorithm():
4 | algorithm = get_algorithm_by_name('original')
5 | assert algorithm.name == "original"
6 | assert algorithm.cascade_iterations == 4
7 | assert algorithm.block_size_function(0.0, 10000, 1) == 73000
8 | assert algorithm.block_size_function(0.1, 10000, 1) == 8
9 | assert algorithm.block_size_function(0.01, 10000, 1) == 73
10 | assert algorithm.block_size_function(0.01, 10000, 2) == 146
11 | assert algorithm.block_size_function(0.01, 10000, 3) == 292
12 | assert algorithm.block_size_function(0.001, 10000, 1) == 730
13 | assert algorithm.biconf_iterations == 0
14 | assert not algorithm.biconf_error_free_streak
15 | assert not algorithm.biconf_correct_complement
16 | assert not algorithm.biconf_cascade
17 | assert not algorithm.sub_block_reuse
18 | assert not algorithm.block_parity_inference
19 |
20 | def test_biconf_algorithm():
21 | algorithm = get_algorithm_by_name('biconf')
22 | assert algorithm.name == "biconf"
23 | assert algorithm.cascade_iterations == 2
24 | assert algorithm.block_size_function(0.0, 10000, 1) == 92000
25 | assert algorithm.block_size_function(0.1, 10000, 1) == 10
26 | assert algorithm.block_size_function(0.01, 10000, 1) == 92
27 | assert algorithm.block_size_function(0.01, 10000, 2) == 276
28 | assert algorithm.block_size_function(0.01, 10000, 3) == 828
29 | assert algorithm.block_size_function(0.001, 10000, 1) == 920
30 | assert algorithm.biconf_iterations == 10
31 | assert algorithm.biconf_error_free_streak
32 | assert not algorithm.biconf_correct_complement
33 | assert not algorithm.biconf_cascade
34 | assert not algorithm.sub_block_reuse
35 | assert not algorithm.block_parity_inference
36 |
37 | def test_yanetal_algorithm():
38 | algorithm = get_algorithm_by_name('yanetal')
39 | assert algorithm.name == "yanetal"
40 | assert algorithm.cascade_iterations == 10
41 | assert algorithm.block_size_function(0.0, 10000, 1) == 80000
42 | assert algorithm.block_size_function(0.1, 10000, 1) == 8
43 | assert algorithm.block_size_function(0.01, 10000, 1) == 80
44 | assert algorithm.block_size_function(0.01, 10000, 2) == 400
45 | assert algorithm.block_size_function(0.01, 10000, 3) == 5000
46 | assert algorithm.block_size_function(0.001, 10000, 1) == 800
47 | assert algorithm.biconf_iterations == 0
48 | assert not algorithm.biconf_error_free_streak
49 | assert not algorithm.biconf_correct_complement
50 | assert not algorithm.biconf_cascade
51 | assert not algorithm.sub_block_reuse
52 | assert not algorithm.block_parity_inference
53 |
54 | def test_option3_algorithm():
55 | algorithm = get_algorithm_by_name('option3')
56 | assert algorithm.name == "option3"
57 | assert algorithm.cascade_iterations == 16
58 | assert algorithm.block_size_function(0.0, 10000, 1) == 100000
59 | assert algorithm.block_size_function(0.1, 10000, 1) == 10
60 | assert algorithm.block_size_function(0.01, 10000, 1) == 100
61 | assert algorithm.block_size_function(0.01, 10000, 2) == 200
62 | assert algorithm.block_size_function(0.01, 10000, 3) == 5000
63 | assert algorithm.block_size_function(0.001, 10000, 1) == 1000
64 | assert algorithm.biconf_iterations == 0
65 | assert not algorithm.biconf_error_free_streak
66 | assert not algorithm.biconf_correct_complement
67 | assert not algorithm.biconf_cascade
68 | assert not algorithm.sub_block_reuse
69 | assert not algorithm.block_parity_inference
70 |
71 | def test_option4_algorithm():
72 | algorithm = get_algorithm_by_name('option4')
73 | assert algorithm.name == "option4"
74 | assert algorithm.cascade_iterations == 16
75 | assert algorithm.block_size_function(0.0, 10000, 1) == 100000
76 | assert algorithm.block_size_function(0.1, 10000, 1) == 10
77 | assert algorithm.block_size_function(0.01, 10000, 1) == 100
78 | assert algorithm.block_size_function(0.01, 10000, 2) == 200
79 | assert algorithm.block_size_function(0.01, 10000, 3) == 5000
80 | assert algorithm.block_size_function(0.001, 10000, 1) == 1000
81 | assert algorithm.biconf_iterations == 0
82 | assert not algorithm.biconf_error_free_streak
83 | assert not algorithm.biconf_correct_complement
84 | assert not algorithm.biconf_cascade
85 | assert algorithm.sub_block_reuse
86 | assert not algorithm.block_parity_inference
87 |
88 | def test_option7_algorithm():
89 | algorithm = get_algorithm_by_name('option7')
90 | assert algorithm.name == "option7"
91 | assert algorithm.cascade_iterations == 14
92 | assert algorithm.block_size_function(0.0, 10000, 1) == 131072
93 | assert algorithm.block_size_function(0.1, 10000, 1) == 16
94 | assert algorithm.block_size_function(0.01, 10000, 1) == 128
95 | assert algorithm.block_size_function(0.01, 10000, 2) == 512
96 | assert algorithm.block_size_function(0.01, 10000, 3) == 5000
97 | assert algorithm.block_size_function(0.001, 10000, 1) == 1024
98 | assert algorithm.biconf_iterations == 0
99 | assert not algorithm.biconf_error_free_streak
100 | assert not algorithm.biconf_correct_complement
101 | assert not algorithm.biconf_cascade
102 | assert algorithm.sub_block_reuse
103 | assert not algorithm.block_parity_inference
104 |
105 | def test_option8_algorithm():
106 | algorithm = get_algorithm_by_name('option8')
107 | assert algorithm.name == "option8"
108 | assert algorithm.cascade_iterations == 14
109 | assert algorithm.block_size_function(0.0, 10000, 1) == 131072
110 | assert algorithm.block_size_function(0.1, 10000, 1) == 8
111 | assert algorithm.block_size_function(0.01, 10000, 1) == 128
112 | assert algorithm.block_size_function(0.01, 10000, 2) == 1024
113 | assert algorithm.block_size_function(0.01, 10000, 3) == 4096
114 | assert algorithm.block_size_function(0.01, 10000, 4) == 5000
115 | assert algorithm.block_size_function(0.001, 10000, 1) == 1024
116 | assert algorithm.biconf_iterations == 0
117 | assert not algorithm.biconf_error_free_streak
118 | assert not algorithm.biconf_correct_complement
119 | assert not algorithm.biconf_cascade
120 | assert algorithm.sub_block_reuse
121 | assert not algorithm.block_parity_inference
122 |
--------------------------------------------------------------------------------
/study/make_graphs.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os.path
3 |
4 | import argparse
5 | import json
6 | import plotly.graph_objects as go
7 |
8 | def parse_command_line_arguments():
9 | parser = argparse.ArgumentParser(description="Produce graph for Cascade experimental results")
10 | parser.add_argument('graphs_file_name', metavar="graphs-file", type=str,
11 | help="graphs definition file")
12 | parser.add_argument('-d', '--data-dir', type=str,
13 | help="directory where the data files are stored")
14 | parser.add_argument('-g', '--graph-name', type=str,
15 | help="name of graph to produce (default: produce all graphs)")
16 | args = parser.parse_args()
17 | return args
18 |
19 | def parse_graphs_file(graphs_file_name):
20 | with open(graphs_file_name, encoding="utf-8") as graphs_file:
21 | graphs = json.load(graphs_file)
22 | return graphs
23 |
24 | def select_graph(graphs, graph_name):
25 | for graph in graphs:
26 | if graph['graph_name'] == graph_name:
27 | return [graph]
28 | sys.exit(f"Graph name {graph_name} not found")
29 |
30 | def produce_graph(graph, data_dir):
31 | figure = go.Figure()
32 | x_axis = dict(title=graph['x_axis']['title'],
33 | type=graph['x_axis'].get('type', 'linear'),
34 | showline=True,
35 | linecolor='black',
36 | showgrid=True,
37 | gridcolor='lightgray',
38 | showticklabels=True,
39 | linewidth=1,
40 | ticks='outside',
41 | tickfont=dict(family='Arial', size=12, color='black'))
42 | if 'range' in graph['x_axis']:
43 | x_axis['range'] = graph['x_axis']['range']
44 | y_axis = dict(title=graph['y_axis']['title'],
45 | showline=True,
46 | linecolor='black',
47 | showgrid=True,
48 | gridcolor='lightgray',
49 | showticklabels=True,
50 | linewidth=1,
51 | ticks='outside',
52 | tickfont=dict(family='Arial', size=12, color='black'))
53 | if graph['y_axis'].get('type') == 'log':
54 | y_axis['type'] = 'log'
55 | y_axis['showexponent'] = 'all'
56 | y_axis['exponentformat'] = 'e'
57 | if 'range' in graph['y_axis']:
58 | y_axis['range'] = graph['y_axis']['range']
59 | figure.update_layout(
60 | title=graph['title'],
61 | xaxis=x_axis,
62 | yaxis=y_axis,
63 | plot_bgcolor='white')
64 | x_axis_variable = graph['x_axis']['variable']
65 | y_axis_variable = graph['y_axis']['variable']
66 | for series in graph['series']:
67 | plot_series(figure, x_axis_variable, y_axis_variable, series, data_dir)
68 | figure.show()
69 |
70 | def plot_series(figure, x_axis_variable, y_axis_variable, series, data_dir):
71 | data_file_name = series['data_file']
72 | if data_dir:
73 | data_file_name = os.path.join(data_dir, data_file_name)
74 | data_points = read_data_points(data_file_name)
75 | if 'filter' in series:
76 | data_points = filter_data_points(data_points, series['filter'])
77 | if series['deviation_color'] != 'none':
78 | plot_deviation(figure, series, x_axis_variable, y_axis_variable, data_points)
79 | plot_average(figure, series, x_axis_variable, y_axis_variable, data_points)
80 |
81 | def plot_average(figure, series, x_axis_variable, y_axis_variable, data_points):
82 | xs = []
83 | ys = []
84 | for data_point in data_points:
85 | xs.append(data_point[x_axis_variable])
86 | ys.append(data_point[y_axis_variable]['average'])
87 | mode = series.get('mode', 'lines')
88 | marker = series.get('marker', {})
89 | line = dict(color=series['line_color'], width=1)
90 | if 'dash' in series:
91 | line['dash'] = series['dash']
92 | scatter = go.Scatter(
93 | x=xs,
94 | y=ys,
95 | name=series['legend'],
96 | mode=mode,
97 | marker=marker,
98 | line=line)
99 | figure.add_trace(scatter)
100 |
101 | def plot_deviation(figure, series, x_axis_variable, y_axis_variable, data_points):
102 | xs = []
103 | ys_upper = []
104 | ys_lower = []
105 | for data_point in data_points:
106 | xs.append(data_point[x_axis_variable])
107 | average = data_point[y_axis_variable]['average']
108 | deviation = data_point[y_axis_variable]['deviation']
109 | ys_upper.append(average + deviation)
110 | ys_lower.append(average - deviation)
111 | xs = xs + xs[::-1]
112 | ys = ys_upper + ys_lower[::-1]
113 | scatter = go.Scatter(
114 | x=xs,
115 | y=ys,
116 | showlegend=False,
117 | hoverinfo='none',
118 | fill='toself',
119 | line_color=series['deviation_color'],
120 | fillcolor=series['deviation_color'],
121 | opacity=0.4)
122 | figure.add_trace(scatter)
123 |
124 | def read_data_points(data_file_name):
125 | data_points = []
126 | with open(data_file_name, encoding="utf-8") as data_file:
127 | for line in data_file:
128 | data_point = json.loads(line)
129 | data_points.append(data_point)
130 | return data_points
131 |
132 | def filter_data_points(data_points, filter_def):
133 | filter_variable = filter_def['variable']
134 | min_value = None
135 | max_value = None
136 | if 'value' in filter_def:
137 | min_value = filter_def['value'] - filter_def['margin']
138 | max_value = filter_def['value'] + filter_def['margin']
139 | if 'min_value' in filter_def:
140 | min_value = filter_def['min_value']
141 | if 'max_value' in filter_def:
142 | max_value = filter_def['max_value']
143 | filtered_data_points = []
144 | for data_point in data_points:
145 | value = data_point[filter_variable]
146 | if isinstance(value, dict):
147 | value = value['average']
148 | keep = True
149 | if min_value is not None and value < min_value:
150 | keep = False
151 | if max_value is not None and value > max_value:
152 | keep = False
153 | if keep:
154 | filtered_data_points.append(data_point)
155 | return filtered_data_points
156 |
157 | def main():
158 | args = parse_command_line_arguments()
159 | graphs = parse_graphs_file(args.graphs_file_name)
160 | if args.graph_name is not None:
161 | graphs = select_graph(graphs, args.graph_name)
162 | for graph in graphs:
163 | produce_graph(graph, args.data_dir)
164 |
165 | if __name__ == "__main__":
166 | main()
167 |
--------------------------------------------------------------------------------
/study/graphs_andre_reis_thesis.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "graph_name": "andre_reis_thesis_figure_5_1",
4 | "title": "Figure 5.1 from \"Andre Reis Thesis\"",
5 | "x_axis": {
6 | "title": "error rate",
7 | "variable": "requested_bit_error_rate",
8 | "range": [0.00, 0.10]
9 | },
10 | "y_axis": {
11 | "title": "avg eff",
12 | "variable": "efficiency",
13 | "range": [1.00, 1.50]
14 | },
15 | "series": [
16 | {
17 | "data_file": "data__algorithm=yanetal;key_size=10000;error_rate=vary",
18 | "legend": "yanetal",
19 | "line_color": "blue",
20 | "deviation_color": "none"
21 | },
22 | {
23 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
24 | "legend": "original",
25 | "line_color": "orange",
26 | "deviation_color": "none"
27 | },
28 | {
29 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
30 | "legend": "biconf",
31 | "line_color": "green",
32 | "deviation_color": "none"
33 | },
34 | {
35 | "data_file": "data__algorithm=option7;key_size=10000;error_rate=vary",
36 | "legend": "option7",
37 | "line_color": "red",
38 | "deviation_color": "none"
39 | },
40 | {
41 | "data_file": "data__algorithm=option8;key_size=10000;error_rate=vary",
42 | "legend": "option8",
43 | "line_color": "purple",
44 | "deviation_color": "none"
45 | }
46 | ]
47 | },
48 | {
49 | "graph_name": "andre_reis_thesis_figure_5_2",
50 | "title": "Figure 5.2 from \"Andre Reis Thesis\"",
51 | "x_axis": {
52 | "title": "error rate",
53 | "variable": "requested_bit_error_rate",
54 | "range": [0.0, 0.10]
55 | },
56 | "y_axis": {
57 | "title": "fer",
58 | "variable": "remaining_frame_error_rate",
59 | "range": [0.00, 0.005]
60 | },
61 | "series": [
62 | {
63 | "data_file": "data__algorithm=yanetal;key_size=10000;error_rate=vary",
64 | "legend": "yanetal",
65 | "line_color": "blue",
66 | "deviation_color": "none"
67 | },
68 | {
69 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
70 | "legend": "original",
71 | "line_color": "orange",
72 | "deviation_color": "none"
73 | },
74 | {
75 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
76 | "legend": "biconf",
77 | "line_color": "green",
78 | "deviation_color": "none"
79 | },
80 | {
81 | "data_file": "data__algorithm=option7;key_size=10000;error_rate=vary",
82 | "legend": "option7",
83 | "line_color": "red",
84 | "deviation_color": "none"
85 | },
86 | {
87 | "data_file": "data__algorithm=option8;key_size=10000;error_rate=vary",
88 | "legend": "option8",
89 | "line_color": "purple",
90 | "deviation_color": "none"
91 | }
92 | ]
93 | },
94 | {
95 | "graph_name": "andre_reis_thesis_figure_5_3",
96 | "title": "Figure 5.3 from \"Andre Reis Thesis\"",
97 | "x_axis": {
98 | "title": "error rate",
99 | "variable": "requested_bit_error_rate",
100 | "range": [0.0, 0.10]
101 | },
102 | "y_axis": {
103 | "title": "avg cu",
104 | "variable": "ask_parity_messages"
105 | },
106 | "series": [
107 | {
108 | "data_file": "data__algorithm=yanetal;key_size=10000;error_rate=vary",
109 | "legend": "yanetal",
110 | "line_color": "blue",
111 | "deviation_color": "none"
112 | },
113 | {
114 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
115 | "legend": "original",
116 | "line_color": "orange",
117 | "deviation_color": "none"
118 | },
119 | {
120 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
121 | "legend": "biconf",
122 | "line_color": "green",
123 | "deviation_color": "none"
124 | }
125 | ]
126 | },
127 | {
128 | "graph_name": "andre_reis_thesis_figure_5_5a",
129 | "title": "Figure 5.5a from \"Andre Reis Thesis\"",
130 | "x_axis": {
131 | "title": "key length",
132 | "variable": "key_size",
133 | "range": [0, 100000]
134 | },
135 | "y_axis": {
136 | "title": "avg eff",
137 | "variable": "efficiency",
138 | "range": [1.00, 1.50]
139 | },
140 | "series": [
141 | {
142 | "data_file": "data__algorithm=yanetal;key_size=vary;error_rate=0.05",
143 | "legend": "original",
144 | "mode": "lines+markers",
145 | "marker": {
146 | "symbol": "circle"
147 | },
148 | "line_color": "blue",
149 | "deviation_color": "none"
150 | },
151 | {
152 | "data_file": "data__algorithm=original;key_size=vary;error_rate=0.05",
153 | "legend": "original",
154 | "mode": "lines+markers",
155 | "marker": {
156 | "symbol": "square"
157 | },
158 | "line_color": "orange",
159 | "deviation_color": "none"
160 | },
161 | {
162 | "data_file": "data__algorithm=biconf;key_size=vary;error_rate=0.05",
163 | "legend": "biconf",
164 | "mode": "lines+markers",
165 | "marker": {
166 | "symbol": "diamond"
167 | },
168 | "line_color": "green",
169 | "deviation_color": "none"
170 | },
171 | {
172 | "data_file": "data__algorithm=option7;key_size=vary;error_rate=0.05",
173 | "legend": "option7",
174 | "mode": "lines+markers",
175 | "marker": {
176 | "symbol": "cross"
177 | },
178 | "line_color": "red",
179 | "deviation_color": "none"
180 | },
181 | {
182 | "data_file": "data__algorithm=option8;key_size=vary;error_rate=0.05",
183 | "legend": "option8",
184 | "mode": "lines+markers",
185 | "marker": {
186 | "symbol": "triangle-up"
187 | },
188 | "line_color": "purple",
189 | "deviation_color": "none"
190 | }
191 | ]
192 | }
193 | ]
--------------------------------------------------------------------------------
/cascade/tests/test_shuffle.py:
--------------------------------------------------------------------------------
1 | from cascade.key import Key
2 | from cascade.shuffle import Shuffle
3 |
4 | def test_create_shuffle_keep_same():
5 | Key.set_random_seed(1111)
6 | Shuffle.set_random_seed(1112)
7 |
8 | # Empty shuffle.
9 | key = Key()
10 | assert key.__str__() == ""
11 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_KEEP_SAME)
12 | assert shuffle.__str__() == ""
13 |
14 | # Non-empty shuffle.
15 | key = Key.create_random_key(8)
16 | assert key.__str__() == "00101100"
17 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_KEEP_SAME)
18 | assert shuffle.__str__() == "0->0 1->1 2->2 3->3 4->4 5->5 6->6 7->7"
19 | for index in range(key.get_size()):
20 | assert shuffle.get_bit(key, index) == key.get_bit(index)
21 |
22 | def test_create_shuffle_random():
23 | Key.set_random_seed(1111)
24 | Shuffle.set_random_seed(1112)
25 |
26 | # Empty shuffle.
27 | key = Key()
28 | assert key.__str__() == ""
29 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
30 | assert shuffle.__str__() == ""
31 |
32 | # Non-empty shuffle.
33 | key = Key.create_random_key(16)
34 | assert key.__str__() == "0010110001010010"
35 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
36 | assert shuffle.__str__() == ("0->2 1->9 2->8 3->7 4->3 5->12 6->1 7->5 8->13 "
37 | "9->6 10->0 11->14 12->10 13->4 14->11 15->15")
38 | key = Key()
39 | assert key.__str__() == ""
40 |
41 | def test_create_shuffle_from_identifier():
42 | Key.set_random_seed(1111)
43 | Shuffle.set_random_seed(1112)
44 |
45 | # Empty keep-same shuffle
46 | key = Key()
47 | original_shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_KEEP_SAME)
48 | recreated_shuffle = Shuffle.create_shuffle_from_identifier(original_shuffle.get_identifier())
49 | assert original_shuffle.__repr__() == recreated_shuffle.__repr__()
50 |
51 | # Empty random shuffle
52 | key = Key()
53 | original_shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
54 | recreated_shuffle = Shuffle.create_shuffle_from_identifier(original_shuffle.get_identifier())
55 | assert original_shuffle.__repr__() == recreated_shuffle.__repr__()
56 |
57 | # Non-empty keep-same shuffle
58 | key = Key.create_random_key(32)
59 | original_shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_KEEP_SAME)
60 | recreated_shuffle = Shuffle.create_shuffle_from_identifier(original_shuffle.get_identifier())
61 | assert original_shuffle.__repr__() == recreated_shuffle.__repr__()
62 |
63 | # Non-empty random shuffle
64 | key = Key.create_random_key(32)
65 | original_shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
66 | recreated_shuffle = Shuffle.create_shuffle_from_identifier(original_shuffle.get_identifier())
67 | assert original_shuffle.__repr__() == recreated_shuffle.__repr__()
68 |
69 | def test_encode_identifier():
70 | # pylint:disable=protected-access
71 | assert Shuffle._encode_identifier(0, 0, 0) == 0
72 | assert Shuffle._encode_identifier(1, 2, 3) == 302000000001
73 | assert Shuffle._encode_identifier(999999999, 99, 999999999999) == 99999999999999999999999
74 |
75 | def test_decode_identifier():
76 | # pylint:disable=protected-access
77 | assert Shuffle._decode_identifier(0) == (0, 0, 0)
78 | assert Shuffle._decode_identifier(302000000001) == (1, 2, 3)
79 | assert Shuffle._decode_identifier(99999999999999999999999) == (999999999, 99, 999999999999)
80 |
81 | def test_repr():
82 | Key.set_random_seed(2221)
83 | Shuffle.set_random_seed(2222)
84 | key = Key.create_random_key(8)
85 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_KEEP_SAME)
86 | assert shuffle.__repr__() == "Shuffle: 0->0 1->1 2->2 3->3 4->4 5->5 6->6 7->7"
87 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
88 | assert shuffle.__repr__() == "Shuffle: 0->0 1->1 2->6 3->4 4->3 5->2 6->5 7->7"
89 |
90 | def test_str():
91 | Key.set_random_seed(3331)
92 | Shuffle.set_random_seed(3332)
93 | key = Key.create_random_key(8)
94 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_KEEP_SAME)
95 | assert shuffle.__str__() == "0->0 1->1 2->2 3->3 4->4 5->5 6->6 7->7"
96 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
97 | assert shuffle.__str__() == "0->0 1->4 2->2 3->6 4->7 5->3 6->1 7->5"
98 |
99 | def test_set_random_seed():
100 | Key.set_random_seed(4441)
101 | Shuffle.set_random_seed(4442)
102 | key = Key.create_random_key(8)
103 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
104 | assert shuffle.__str__() == "0->1 1->3 2->2 3->7 4->5 5->6 6->4 7->0"
105 |
106 | def test_get_size():
107 | key = Key.create_random_key(19)
108 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_KEEP_SAME)
109 | assert shuffle.get_size() == 19
110 |
111 | def test_get_identifier():
112 | Key.set_random_seed(4441)
113 | Shuffle.set_random_seed(4442)
114 | key = Key.create_random_key(8)
115 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
116 | assert shuffle.get_identifier() == 18048933084901000000008
117 |
118 | def test_get_key_index():
119 | Shuffle.set_random_seed(9992)
120 | shuffle = Shuffle(6, Shuffle.SHUFFLE_RANDOM)
121 | assert shuffle.__repr__() == "Shuffle: 0->5 1->4 2->2 3->0 4->1 5->3"
122 | assert shuffle.get_key_index(0) == 5
123 | assert shuffle.get_key_index(1) == 4
124 | assert shuffle.get_key_index(5) == 3
125 |
126 | def test_get_bit():
127 | Key.set_random_seed(5551)
128 | Shuffle.set_random_seed(5552)
129 | key = Key.create_random_key(13)
130 | assert key.__str__() == "1011010010010"
131 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
132 | assert shuffle.__str__() == ("0->5 1->9 2->3 3->12 4->10 5->6 6->11 7->7 8->0 9->4 "
133 | "10->1 11->2 12->8")
134 | assert shuffle.get_bit(key, 0) == 1 # Shuffle bit 0 -> Key bit 5 -> Bit value 1
135 | assert shuffle.get_bit(key, 1) == 0 # Shuffle bit 1 -> Key bit 9 -> Bit value 0
136 | assert shuffle.get_bit(key, 2) == 1 # Shuffle bit 2 -> Key bit 3 -> Bit value 1
137 | assert shuffle.get_bit(key, 12) == 1 # Shuffle bit 12 -> Key bit 8 -> Bit value 1
138 |
139 | def test_set_bit():
140 | Key.set_random_seed(6661)
141 | Shuffle.set_random_seed(6662)
142 | key = Key.create_random_key(6)
143 | assert key.__repr__() == "Key: 011110"
144 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
145 | assert shuffle.__repr__() == "Shuffle: 0->1 1->5 2->0 3->4 4->2 5->3"
146 | shuffle.set_bit(key, 3, 0) # Shuffle bit 3 -> Key bit 4 -> Bit value 1->0
147 | assert key.__repr__() == "Key: 011100"
148 | shuffle.set_bit(key, 0, 1) # Shuffle bit 0 -> Key bit 1 -> Bit value 1->1
149 | assert key.__repr__() == "Key: 011100"
150 | shuffle.set_bit(key, 5, 0) # Shuffle bit 5 -> Key bit 3 -> Bit value 0->0
151 | assert key.__repr__() == "Key: 011000"
152 |
153 | def test_flip_bit():
154 | Key.set_random_seed(7771)
155 | Shuffle.set_random_seed(7772)
156 | key = Key.create_random_key(6)
157 | assert key.__repr__() == "Key: 010011"
158 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
159 | assert shuffle.__repr__() == "Shuffle: 0->5 1->2 2->4 3->3 4->1 5->0"
160 | shuffle.flip_bit(key, 0) # Shuffle bit 0 -> Key bit 5 -> Bit value 1->0
161 | assert key.__repr__() == "Key: 010010"
162 |
163 | def test_calculate_parity():
164 | Key.set_random_seed(8881)
165 | Shuffle.set_random_seed(8882)
166 | key = Key.create_random_key(10)
167 | assert key.__repr__() == "Key: 1011111100"
168 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
169 | assert shuffle.__repr__() == "Shuffle: 0->1 1->4 2->5 3->9 4->6 5->0 6->7 7->2 8->3 9->8"
170 | assert shuffle.__str__() == "0->1 1->4 2->5 3->9 4->6 5->0 6->7 7->2 8->3 9->8"
171 | assert shuffle.calculate_parity(key, 0, 10) == 1
172 | assert shuffle.calculate_parity(key, 4, 8) == 0
173 | assert shuffle.calculate_parity(key, 1, 2) == 1
174 |
--------------------------------------------------------------------------------
/study/run_experiments.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import os.path
3 |
4 | import argparse
5 | import json
6 | import git
7 |
8 | from cascade.algorithm import ALGORITHMS
9 | from cascade.key import Key
10 | from cascade.mock_classical_channel import MockClassicalChannel
11 | from cascade.reconciliation import Reconciliation
12 |
13 | from study.data_point import DataPoint
14 |
15 | TOTAL_NR_DATA_POINTS = None
16 | DATA_POINTS_PROCESSED = None
17 |
18 | def parse_command_line_arguments():
19 | parser = argparse.ArgumentParser(description="Run Cascade experiments")
20 | parser.add_argument('experiments_file_name', metavar="experiments-file", type=str,
21 | help="experiments definition file")
22 | parser.add_argument('-d', '--disable-multi-processing', action='store_true',
23 | help="disable multi-processing")
24 | parser.add_argument('-m', '--max-runs', type=int,
25 | help="maximum number of reconciliation runs per data point")
26 | parser.add_argument('-o', '--output-directory', type=str,
27 | help="output directory where to store data__* files")
28 | args = parser.parse_args()
29 | return args
30 |
31 | def parse_experiments_file(file_name):
32 | with open(file_name, encoding="utf-8") as json_file:
33 | experiments = json.load(json_file)
34 | return experiments
35 |
36 | def experiments_to_series(experiments, max_runs):
37 | all_series = []
38 | for experiment in experiments:
39 | runs = experiment['runs']
40 | if max_runs is not None and runs > max_runs:
41 | runs = max_runs
42 | if experiment['independent_variable'] == 'error_rate':
43 | experiment_series = experiment_to_error_rate_series(experiment, runs)
44 | elif experiment['independent_variable'] == 'key_size':
45 | experiment_series = experiment_to_key_size_series(experiment, runs)
46 | else:
47 | assert False
48 | all_series += experiment_series
49 | return all_series
50 |
51 | def experiment_to_error_rate_series(experiment, runs):
52 | series = []
53 | algorithms = make_list(experiment['algorithm'])
54 | if algorithms == ['all']:
55 | algorithms = list(ALGORITHMS.keys())
56 | for algorithm in algorithms:
57 | for key_size in make_list(experiment['key_size']):
58 | serie = dict(name=f"algorithm={algorithm};key_size={key_size};error_rate=vary",
59 | algorithms=[algorithm],
60 | key_sizes=[key_size],
61 | error_rates=make_list(experiment['error_rate']),
62 | runs=runs)
63 | series.append(serie)
64 | return series
65 |
66 | def experiment_to_key_size_series(experiment, runs):
67 | series = []
68 | algorithms = make_list(experiment['algorithm'])
69 | if algorithms == ['all']:
70 | algorithms = list(ALGORITHMS.keys())
71 | for algorithm in algorithms:
72 | for error_rate in make_list(experiment['error_rate']):
73 | serie = dict(name=f"algorithm={algorithm};key_size=vary;error_rate={error_rate}",
74 | algorithms=[algorithm],
75 | key_sizes=make_list(experiment['key_size'], do_round=True),
76 | error_rates=[error_rate],
77 | runs=runs)
78 | series.append(serie)
79 | return series
80 |
81 | def make_list(value, do_round=False):
82 | if isinstance(value, (int, float, str)):
83 | return [value]
84 | if isinstance(value, list):
85 | result = []
86 | for item in value:
87 | result += make_list(item)
88 | return result
89 | if isinstance(value, dict):
90 | start = value['start']
91 | end = value['end']
92 | assert ('step_size' in value) != ('step_factor' in value)
93 | step_size = value.get('step_size', 0.0)
94 | step_factor = value.get('step_factor', 1.0)
95 | lst = []
96 | current = start
97 | while current <= end:
98 | if do_round:
99 | lst.append(round(current))
100 | else:
101 | lst.append(current)
102 | current += step_size
103 | current *= step_factor
104 | return lst
105 | assert False
106 | return None
107 |
108 | def compute_total_nr_data_points(series):
109 | global TOTAL_NR_DATA_POINTS
110 | TOTAL_NR_DATA_POINTS = 0
111 | for serie in series:
112 | TOTAL_NR_DATA_POINTS += (len(serie['algorithms']) *
113 | len(serie['key_sizes']) *
114 | len(serie['error_rates']))
115 |
116 | def run_series(series, output_directory, disable_multi_processing):
117 | global DATA_POINTS_PROCESSED
118 | DATA_POINTS_PROCESSED = 0
119 | for serie in series:
120 | run_serie(serie, output_directory, disable_multi_processing)
121 |
122 | def run_serie(serie, output_directory, disable_multi_processing):
123 | reconciliation_params = serie_to_reconciliation_params(serie)
124 | data_file_name = "data__" + serie['name']
125 | if output_directory:
126 | data_file_name = os.path.join(output_directory, data_file_name)
127 | if disable_multi_processing:
128 | with open(data_file_name, mode="w", encoding="utf-8") as data_file:
129 | for param in reconciliation_params:
130 | data_point = produce_data_point(param)
131 | print(to_json(data_point), file=data_file)
132 | report_data_point_done(data_point)
133 | else:
134 | pool = multiprocessing.Pool()
135 | with open(data_file_name, mode="w", encoding="utf-8") as data_file:
136 | for data_point in pool.imap(produce_data_point, reconciliation_params):
137 | print(to_json(data_point), file=data_file)
138 | report_data_point_done(data_point)
139 | pool.close()
140 | pool.terminate()
141 |
142 | def serie_to_reconciliation_params(serie):
143 | reconciliation_params = []
144 | runs = serie['runs']
145 | for algorithm in serie['algorithms']:
146 | for key_size in serie['key_sizes']:
147 | for error_rate in serie['error_rates']:
148 | reconciliation_params.append((algorithm, key_size, error_rate, runs))
149 | return reconciliation_params
150 |
151 | def report_data_point_done(data_point):
152 | global DATA_POINTS_PROCESSED, TOTAL_NR_DATA_POINTS
153 | DATA_POINTS_PROCESSED += 1
154 | print(f"data_point={DATA_POINTS_PROCESSED}/{TOTAL_NR_DATA_POINTS} "
155 | f"algorithm={data_point.algorithm_name} "
156 | f"key_size={data_point.key_size} "
157 | f"error_rate={data_point.requested_bit_error_rate:.6f} "
158 | f"runs={data_point.reconciliations}")
159 |
160 | def produce_data_point(reconciliation_params):
161 | (algorithm, key_size, error_rate, runs) = reconciliation_params
162 | data_point = DataPoint(algorithm, key_size, error_rate, get_code_version())
163 | for _ in range(runs):
164 | run_reconciliation(data_point, algorithm, key_size, 'exact', error_rate)
165 | return data_point
166 |
167 | def run_reconciliation(data_point, algorithm, key_size, error_method, error_rate):
168 | # Key.set_random_seed(seed)
169 | # Shuffle.set_random_seed(seed+1)
170 | correct_key = Key.create_random_key(key_size)
171 | noisy_key = correct_key.copy(error_rate, error_method)
172 | actual_bit_errors = correct_key.difference(noisy_key)
173 | data_point.actual_bit_errors.record_value(actual_bit_errors)
174 | actual_bit_error_rate = actual_bit_errors / key_size
175 | data_point.actual_bit_error_rate.record_value(actual_bit_error_rate)
176 | mock_classical_channel = MockClassicalChannel(correct_key)
177 | reconciliation = Reconciliation(algorithm, mock_classical_channel, noisy_key, error_rate)
178 | reconciliated_key = reconciliation.reconcile()
179 | data_point.record_reconciliation_stats(reconciliation.stats)
180 | remaining_bit_errors = correct_key.difference(reconciliated_key)
181 | data_point.remaining_bit_errors.record_value(remaining_bit_errors)
182 | remaining_bit_error_rate = remaining_bit_errors / key_size
183 | data_point.remaining_bit_error_rate.record_value(remaining_bit_error_rate)
184 | if remaining_bit_errors > 0:
185 | data_point.remaining_frame_error_rate.record_value(1.0)
186 | else:
187 | data_point.remaining_frame_error_rate.record_value(0.0)
188 |
189 | def get_code_version():
190 | try:
191 | repo = git.Repo(search_parent_directories=True)
192 | sha = repo.head.object.hexsha
193 | return str(sha)
194 | except git.InvalidGitRepositoryError:
195 | return "unknown"
196 |
197 | def to_json_encodeable_object(obj):
198 | members = dir(obj)
199 | if 'to_json_encodeable_object' in members:
200 | return obj.to_json_encodeable_object()
201 | dictionary = {}
202 | for member in members:
203 | if member[0] != '_':
204 | value = getattr(obj, member)
205 | if not callable(value):
206 | dictionary[member] = value
207 | return dictionary
208 |
209 | def to_json(obj):
210 | return json.dumps(obj, default=to_json_encodeable_object)
211 |
212 | def main():
213 | args = parse_command_line_arguments()
214 | experiments = parse_experiments_file(args.experiments_file_name)
215 | series = experiments_to_series(experiments, args.max_runs)
216 | compute_total_nr_data_points(series)
217 | run_series(series, args.output_directory, args.disable_multi_processing)
218 |
219 | if __name__ == "__main__":
220 | main()
221 |
--------------------------------------------------------------------------------
/cascade/algorithm.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | ALGORITHMS = {}
4 |
5 | class Algorithm:
6 | """
7 | A Cascade algorithm.
8 | """
9 |
10 | def __init__(self, name, cascade_iterations, block_size_function, biconf_iterations,
11 | biconf_error_free_streak, biconf_correct_complement, biconf_cascade,
12 | sub_block_reuse, block_parity_inference):
13 | """
14 | Create a new Cascade algorithm.
15 |
16 | Params:
17 | name (str): A human-readable name that uniquely identifies the Cascade algorithm.
18 | cascade_iterations (int): The number of normal cascade iterations.
19 | block_size_function (function): A function that computes the block size given the
20 | estimated error rate, the key size, and the Cascade iteration number.
21 | biconf_iterations (int): The number of BICONF iterations to be run after the normal
22 | Cascade iterations are completed. 0 means don't run BICONF.
23 | biconf_error_free_streak (bool): If False, run a fixed number of BICONF iterations as
24 | indicated by biconf_iterations. If True, keep running BICONF until we see a streak
25 | of biconf_iterations error-free iterations.
26 | biconf_correct_complement (bool): If False, run BINARY only on the selected subset of
27 | bits. If True, run BINARY both on the selected subset of bits as well as the
28 | complementary set of bits.
29 | biconf_cascade (bool): Correct cascading errors during BICONF iterations?
30 | sub_block_reuse (bool): If False, consider only top-level blocks for cascading errors.
31 | If True, consider blocks of all sizes for cascading errors.
32 | block_parity_inference (bool): TODO
33 | """
34 | self.name = name
35 | self.cascade_iterations = cascade_iterations
36 | self.block_size_function = block_size_function
37 | self.biconf_iterations = biconf_iterations
38 | self.biconf_error_free_streak = biconf_error_free_streak
39 | self.biconf_correct_complement = biconf_correct_complement
40 | self.biconf_cascade = biconf_cascade
41 | self.sub_block_reuse = sub_block_reuse
42 | self.block_parity_inference = block_parity_inference
43 | ALGORITHMS[name] = self
44 |
45 | def get_algorithm_by_name(name):
46 | """
47 | Get an algorithm object by name.
48 |
49 | Params:
50 | name (str): The name of the algorithm to be retrieved.
51 |
52 | Returns:
53 | The algorithm object corresponding to the given name, or None if no such algorithm exists.
54 | """
55 | return ALGORITHMS.get(name)
56 |
57 | _MIN_ESTIMATED_BIT_ERROR_RATE = 0.00001
58 |
59 | # Name in Demystifying paper: Cascade orig.
60 | # Name in Andre Reis Thesis : original
61 |
62 | def _original_block_size_function(estimated_bit_error_rate, key_size, iteration):
63 | if estimated_bit_error_rate < _MIN_ESTIMATED_BIT_ERROR_RATE:
64 | estimated_bit_error_rate = _MIN_ESTIMATED_BIT_ERROR_RATE
65 | if iteration == 1:
66 | return math.ceil(0.73 / estimated_bit_error_rate)
67 | return 2 * _original_block_size_function(estimated_bit_error_rate, key_size, iteration - 1)
68 |
69 | _ORIGINAL_ALGORITHM = Algorithm(name="original",
70 | cascade_iterations=4,
71 | block_size_function=_original_block_size_function,
72 | biconf_iterations=0,
73 | biconf_error_free_streak=False,
74 | biconf_correct_complement=False,
75 | biconf_cascade=False,
76 | sub_block_reuse=False,
77 | block_parity_inference=False)
78 |
79 | # Name in Demystifying paper: Cascade mod. (1)
80 | # Name in Andre Reis Thesis : biconf
81 |
82 | def _biconf_block_size_function(estimated_bit_error_rate, key_size, iteration):
83 | if estimated_bit_error_rate < _MIN_ESTIMATED_BIT_ERROR_RATE:
84 | estimated_bit_error_rate = _MIN_ESTIMATED_BIT_ERROR_RATE
85 | if iteration == 1:
86 | return math.ceil(0.92 / estimated_bit_error_rate)
87 | return 3 * _biconf_block_size_function(estimated_bit_error_rate, key_size, iteration - 1)
88 |
89 | _BICONF_ALGORITHM = Algorithm(name="biconf",
90 | cascade_iterations=2,
91 | block_size_function=_biconf_block_size_function,
92 | biconf_iterations=10,
93 | biconf_error_free_streak=True,
94 | biconf_correct_complement=False,
95 | biconf_cascade=False,
96 | sub_block_reuse=False,
97 | block_parity_inference=False)
98 |
99 | # Name in Demystifying paper: Cascade opt. (2)
100 | # Name in Andre Reis Thesis : yanetal (Yan et al.)
101 |
102 | def _yanetal_block_size_function(estimated_bit_error_rate, key_size, iteration):
103 | if estimated_bit_error_rate < _MIN_ESTIMATED_BIT_ERROR_RATE:
104 | estimated_bit_error_rate = _MIN_ESTIMATED_BIT_ERROR_RATE
105 | if iteration == 1:
106 | return math.ceil(0.80 / estimated_bit_error_rate)
107 | if iteration == 2:
108 | return 5 * _yanetal_block_size_function(estimated_bit_error_rate, key_size, iteration - 1)
109 | return key_size // 2
110 |
111 | _YANETAL_ALGORITHM = Algorithm(name="yanetal",
112 | cascade_iterations=10,
113 | block_size_function=_yanetal_block_size_function,
114 | biconf_iterations=0,
115 | biconf_error_free_streak=False,
116 | biconf_correct_complement=False,
117 | biconf_cascade=False,
118 | sub_block_reuse=False,
119 | block_parity_inference=False)
120 |
121 | # Name in Demystifying paper: Cascade opt. (3)
122 | # Name in Andre Reis Thesis : -
123 |
124 | def _option3456_block_size_function(estimated_bit_error_rate, key_size, iteration):
125 | if estimated_bit_error_rate < _MIN_ESTIMATED_BIT_ERROR_RATE:
126 | estimated_bit_error_rate = _MIN_ESTIMATED_BIT_ERROR_RATE
127 | if iteration == 1:
128 | return math.ceil(1.00 / estimated_bit_error_rate)
129 | if iteration == 2:
130 | return 2 * _option3456_block_size_function(estimated_bit_error_rate, key_size,
131 | iteration - 1)
132 | return key_size // 2
133 |
134 | _OPTION3_ALGORITHM = Algorithm(name="option3",
135 | cascade_iterations=16,
136 | block_size_function=_option3456_block_size_function,
137 | biconf_iterations=0,
138 | biconf_error_free_streak=False,
139 | biconf_correct_complement=False,
140 | biconf_cascade=False,
141 | sub_block_reuse=False,
142 | block_parity_inference=False)
143 |
144 | # Name in Demystifying paper: Cascade opt. (4)
145 | # Name in Andre Reis Thesis : -
146 |
147 | _OPTION4_ALGORITHM = Algorithm(name="option4",
148 | cascade_iterations=16,
149 | block_size_function=_option3456_block_size_function,
150 | biconf_iterations=0,
151 | biconf_error_free_streak=False,
152 | biconf_correct_complement=False,
153 | biconf_cascade=False,
154 | sub_block_reuse=True,
155 | block_parity_inference=False)
156 |
157 | # Note: Cascade opt. (5) from the Demystifying paper is not supported yet:
158 | # TODO: need to add support for deterministic shuffling
159 |
160 | # Note: Cascade opt. (6) from the Demystifying paper is not supported yet:
161 | # TODO: need to add support for singleton block removal
162 |
163 | # Name in Demystifying paper: Cascade opt. (7)
164 | # Name in Andre Reis Thesis : option-7
165 |
166 | def _option7_block_size_function(estimated_bit_error_rate, key_size, iteration):
167 | if estimated_bit_error_rate < _MIN_ESTIMATED_BIT_ERROR_RATE:
168 | estimated_bit_error_rate = _MIN_ESTIMATED_BIT_ERROR_RATE
169 | if iteration == 1:
170 | return 2 ** math.ceil(math.log2(1.00 / estimated_bit_error_rate))
171 | if iteration == 2:
172 | return 4 * _option7_block_size_function(estimated_bit_error_rate, key_size, iteration - 1)
173 | return key_size // 2
174 |
175 | _OPTION7_ALGORITHM = Algorithm(name="option7",
176 | cascade_iterations=14,
177 | block_size_function=_option7_block_size_function,
178 | biconf_iterations=0,
179 | biconf_error_free_streak=False,
180 | biconf_correct_complement=False,
181 | biconf_cascade=False,
182 | sub_block_reuse=True,
183 | block_parity_inference=False)
184 |
185 | # Name in Demystifying paper: Cascade opt. (8)
186 | # Name in Andre Reis Thesis : option-8
187 |
188 | def _option8_block_size_function(estimated_bit_error_rate, key_size, iteration):
189 | if estimated_bit_error_rate < _MIN_ESTIMATED_BIT_ERROR_RATE:
190 | estimated_bit_error_rate = _MIN_ESTIMATED_BIT_ERROR_RATE
191 | alpha = math.log2(1.00 / estimated_bit_error_rate) - 0.5
192 | if iteration == 1:
193 | return 2 ** math.ceil(alpha)
194 | if iteration == 2:
195 | return 2 ** math.ceil((alpha + 12.0) / 2.0)
196 | if iteration == 3:
197 | return 4096
198 | return key_size // 2
199 |
200 | _OPTION8_ALGORITHM = Algorithm(name="option8",
201 | cascade_iterations=14,
202 | block_size_function=_option8_block_size_function,
203 | biconf_iterations=0,
204 | biconf_error_free_streak=False,
205 | biconf_correct_complement=False,
206 | biconf_cascade=False,
207 | sub_block_reuse=True,
208 | block_parity_inference=False)
209 |
--------------------------------------------------------------------------------
/cascade/shuffle.py:
--------------------------------------------------------------------------------
1 | import math
2 | import random
3 |
4 | class Shuffle:
5 | """
6 | A shuffling (i.e. re-ordering) of the bits in a key.
7 | """
8 |
9 | _shuffle_seed_random_generator = random.Random()
10 |
11 | SHUFFLE_KEEP_SAME = 0
12 | """Do not shuffle the bits in the key."""
13 | SHUFFLE_RANDOM = 1
14 | """Randomly shuffle the bits in the key."""
15 |
16 | _MAX_KEY_SIZE = 1_000_000_000
17 | _MAX_ALGORITHM = 100
18 | _MAX_SHUFFLE_SEED = 1_000_000_000_000
19 |
20 | def __init__(self, size, algorithm, shuffle_seed=None):
21 | """
22 | Create a shuffle. A shuffle represents a permutation of the bits in a key. The shuffle
23 | can be random or deterministic depending on the shuffle algorithm. A Shuffle object is
24 | de-coupled from the Key objects: the same Shuffle object can be applied to multiple
25 | different Key objects, to permute (shuffle) the bits in those different keys according to
26 | the same pattern.
27 |
28 | Args:
29 | size (int): The size of the shuffle, i.e. the number of bits in the keys that this
30 | shuffle will be applied to. Must be >= 0 (i.e. empty shuffles are allowed).
31 | algorithm (int): The algorithm for generating the shuffle pattern:
32 | SHUFFLE_KEEP_SAME: Do not shuffle the key (keep the key bits in the original order).
33 | SHUFFLE_RANDOM: Randomly shuffle the key.
34 | shuffle_seed (None or int): The seed value for the isolated shuffle random number
35 | generator that is used to generate the shuffling permutation. If shuffle_seed is
36 | None, then a random shuffle_seed value will be generated.
37 | """
38 | self._size = size
39 | self._shuffle_index_to_key_index = {}
40 | for shuffle_index in range(0, size):
41 | self._shuffle_index_to_key_index[shuffle_index] = shuffle_index
42 | if algorithm == self.SHUFFLE_RANDOM:
43 | if shuffle_seed is None:
44 | shuffle_seed = \
45 | Shuffle._shuffle_seed_random_generator.randint(1, Shuffle._MAX_SHUFFLE_SEED - 1)
46 | shuffle_random_generator = random.Random(shuffle_seed)
47 | Shuffle._shuffle(self._shuffle_index_to_key_index, shuffle_random_generator.random)
48 | else:
49 | shuffle_seed = 0
50 | self._identifier = Shuffle._encode_identifier(size, algorithm, shuffle_seed)
51 |
52 | @staticmethod
53 | def _shuffle(x, random_function):
54 | """
55 | The random argument of random.shuffle was deprecated in Python 3.9 and removed in
56 | Python 3.11; you are expected to use random.Random.shuffle instead. However, that
57 | produces a different shuffle order, even if the random number generator is seeded in the
58 | same way. My test suite depends on a specific deterministic shuffle order for a given
59 | hard-coded seed value. Since I don't want to rework my test suite to adjust to the new
60 | shuffle order, I reimplement the older shuffle algorithm from Python 3.9 here.
61 | """
62 | for i in reversed(range(1, len(x))):
63 | # pick an element in x[:i+1] with which to exchange x[i]
64 | j = math.floor(random_function() * (i + 1))
65 | x[i], x[j] = x[j], x[i]
66 |
67 | @staticmethod
68 | def create_shuffle_from_identifier(identifier):
69 | """
70 | Create a shuffle object from a shuffle identifier.
71 |
72 | Alice and Bob need to agree on how to shuffle the bits in each pass. Bob could send complete
73 | shuffle objects to Alice, but that would be expensive because shuffle objects are large.
74 | Instead, Bob sends a short shuffle identifier to Alice, from which Alice can reconstruct
75 | the shuffle object.
76 |
77 | Args:
78 | identifier (int): The shuffle identifier.
79 | """
80 | (size, algorithm, shuffle_seed) = Shuffle._decode_identifier(identifier)
81 | shuffle = Shuffle(size, algorithm, shuffle_seed)
82 | return shuffle
83 |
84 | @staticmethod
85 | def _encode_identifier(size, algorithm, shuffle_seed):
86 | identifier = shuffle_seed
87 | identifier *= Shuffle._MAX_ALGORITHM
88 | identifier += algorithm
89 | identifier *= Shuffle._MAX_KEY_SIZE
90 | identifier += size
91 | return identifier
92 |
93 | @staticmethod
94 | def _decode_identifier(identifier):
95 | size = identifier % Shuffle._MAX_KEY_SIZE
96 | identifier //= Shuffle._MAX_KEY_SIZE
97 | algorithm = identifier % Shuffle._MAX_ALGORITHM
98 | identifier //= Shuffle._MAX_ALGORITHM
99 | shuffle_seed = identifier
100 | return (size, algorithm, shuffle_seed)
101 |
102 | def __repr__(self):
103 | """
104 | Get the unambiguous string representation of the shuffle.
105 |
106 | Returns:
107 | The unambiguous string representation of the shuffle.
108 |
109 | Example:
110 | >>> shuffle.__repr__()
111 | 'Shuffle: 0->3 1->1 2->2 3->0 4->4 5->5'
112 | """
113 | return "Shuffle: " + self.__str__()
114 |
115 | def __str__(self):
116 | """
117 | Get the human-readable string representation of the shuffle.
118 |
119 | Returns:
120 | The human-readable string representation of the shuffle.
121 |
122 | Example:
123 | >>> shuffle.__str__()
124 | '0->3 1->1 2->2 3->0 4->4 5->5'
125 | """
126 | string = ""
127 | for shuffle_index in range(self._size):
128 | key_index = self._shuffle_index_to_key_index[shuffle_index]
129 | if string:
130 | string += " "
131 | string += f"{shuffle_index}->{key_index}"
132 | return string
133 |
134 | @staticmethod
135 | def set_random_seed(seed):
136 | """
137 | Set the seed for the isolated random number generator that is used only in the shuffle
138 | module and nowhere else. If two applications set the seed to the same value, the shuffle
139 | module produces the exact same sequence of shuffles. This is used to make experiments
140 | reproducible.
141 |
142 | Args:
143 | seed (int): The seed value for the random number generator which is isolated to the
144 | shuffle module.
145 | """
146 | Shuffle._shuffle_seed_random_generator = random.Random(seed)
147 |
148 | def get_size(self):
149 | """
150 | Get the size of the shuffle in bits.
151 |
152 | Returns:
153 | The size of the shuffle in bits.
154 | """
155 | return self._size
156 |
157 | def get_identifier(self):
158 | """
159 | Get the shuffle identifier.
160 |
161 | Returns:
162 | The shuffle identifier.
163 | """
164 | return self._identifier
165 |
166 | def get_key_index(self, shuffle_index):
167 | """
168 | Get the key index that a given shuffle index is mapped to.
169 |
170 | Args:
171 | shuffle_index (int): The shuffle index of the bit. Index must be in range
172 | [0, shuffle._size).
173 |
174 | Returns:
175 | The key index.
176 | """
177 | return self._shuffle_index_to_key_index[shuffle_index]
178 |
179 | def get_bit(self, key, shuffle_index):
180 | """
181 | Get a bit from a shuffled key.
182 |
183 | Args:
184 | key (Key): The key. We first shuffle this key according to this shuffle pattern and
185 | then retrieve the bit at shuffle_index in the shuffled key. The size of the key
186 | must be equal to the size of this shuffle.
187 | shuffle_index (int): The index of the bit in the shuffled key. The index must be in
188 | range [0, shuffle.size).
189 |
190 | Returns:
191 | The value (0 or 1) of the shuffled key bit at the given index.
192 | """
193 | key_index = self._shuffle_index_to_key_index[shuffle_index]
194 | return key.get_bit(key_index)
195 |
196 | def set_bit(self, key, shuffle_index, value):
197 | """
198 | Set a bit in a shuffled key to a given value.
199 |
200 | Args:
201 | key (Key): The key. We first shuffle this key according to this shuffle pattern and
202 | then set the bit at shuffle_index in the shuffled key to the given value. The size
203 | of the key must be equal to the size of this shuffle.
204 | shuffle_index (int): The index of the bit in the shuffled key. The index must be in
205 | range [0, shuffle.size).
206 | value (int): The new value of the bit. Must be 0 or 1.
207 | """
208 | key_index = self._shuffle_index_to_key_index[shuffle_index]
209 | key.set_bit(key_index, value)
210 |
211 | def flip_bit(self, key, shuffle_index):
212 | """
213 | Flip a bit in a shuffled key (flip 0 to 1 and vice versa).
214 |
215 | Args:
216 | key (Key): The key. We first shuffle this key according to this shuffle pattern and
217 | then flip the bit at shuffle_index in the shuffled key. The size of the key must be
218 | equal to the size of this shuffle.
219 | shuffle_index (int): The index of the bit in the shuffled key. The index must be in
220 | range [0, shuffle.size).
221 | """
222 | key_index = self._shuffle_index_to_key_index[shuffle_index]
223 | key.flip_bit(key_index)
224 |
225 | def calculate_parity(self, key, shuffle_start_index, shuffle_end_index):
226 | """
227 | Calculate the parity of a contiguous sub-range of bits in a shuffled key.
228 |
229 | Args:
230 | key (Key): The key for which to calculate the parity after shuffling it.
231 | shuffle_start_index (int): The index of the first bit (inclusive) in the range of
232 | bits in the shuffled key over which to calculate the parity.
233 | shuffle_end_index (int): The index of the last bit (exclusive) in the range of
234 | bits in the shuffled key over which to calculate the parity.
235 |
236 | Returns:
237 | The parity of the contiguous sub-range of bits in the shuffled key.
238 | """
239 | parity = 0
240 | for shuffle_index in range(shuffle_start_index, shuffle_end_index):
241 | key_index = self._shuffle_index_to_key_index[shuffle_index]
242 | if key.get_bit(key_index):
243 | parity = 1 - parity
244 | return parity
245 |
--------------------------------------------------------------------------------
/docs/source/intro.rst:
--------------------------------------------------------------------------------
1 | *************
2 | Introduction.
3 | *************
4 |
5 | What is in this GitHub repository?
6 | ==================================
7 |
8 | This GitHub repository contains a Python implementation of the Cascade information reconciliation protocol. Information reconciliation protocols in general, and the Cascade protocol in particular, are a small but important and complex classical post-processing step in quantum key distribution (QKD) protocols. They are intended to detect and correct inevitable bit errors in the distributed key.
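
Below is a minimal sketch (not part of the repository itself) of how a single reconciliation run can be driven with the classes in this repository, along the lines of what study/run_experiments.py does; the key size, error rate, and the choice of the "original" algorithm are illustrative.

.. code-block:: python

    from cascade.key import Key
    from cascade.mock_classical_channel import MockClassicalChannel
    from cascade.reconciliation import Reconciliation

    # Alice's correct key and Bob's noisy copy of it (size and error rate are illustrative).
    correct_key = Key.create_random_key(10000)
    noisy_key = correct_key.copy(0.05, 'exact')

    # Bob reconciles his noisy key against Alice's key over a mock classical channel.
    channel = MockClassicalChannel(correct_key)
    reconciliation = Reconciliation('original', channel, noisy_key, 0.05)
    reconciled_key = reconciliation.reconcile()

    # Count the remaining bit errors (ideally zero).
    print(correct_key.difference(reconciled_key))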
9 |
10 | This repository also contains Python scripts that analyze the Cascade protocol and reproduce the analysis results that were previously reported in the following academic papers:
11 |
12 | * Jesus Martinez-Mateo, Christoph Pacher, Momtchil Peev, Alex Ciurana, and Vicente Martin. `Demystifying the Information Reconciliation Protocol Cascade. `_ arXiv:1407.3257 [quant-ph], Jul 2014.
13 |
14 | * André Reis. `Quantum Key Distribution Post Processing - A Study on the Information Reconciliation Cascade Protocol. `_ Master's Thesis, Faculdade de Engenharia, Universidade do Porto. Jul 2019.
15 |
16 | Finally, this repository contains extensive documentation describing the Cascade protocol, our implementation of the Cascade protocol, the findings of reproducing the Cascade analysis results from the academic literature, and lessons learned.
17 |
18 | The broader context.
19 | ====================
20 |
21 | The code in this GitHub repository is just a small step in the larger project of adding full support for quantum key distribution (QKD) to OpenSSL. This larger project includes other GitHub repositories:
22 |
23 | * The `openssl-qkd `_ GitHub repository contains a C implementation of a dynamically loaded engine for OpenSSL. This engine replaces the classical Diffie-Hellman key exchange with a quantum key distribution (QKD) mechanism. The actual quantum key distribution protocol is not part of this repository. Instead, the engine invokes a stub implementation of the `application programming interface (API) `_ defined by the European Telecommunications Standards Institute (ETSI).
24 |
25 | * The `simulaqron-bb84-python `_ GitHub repository contains a Python implementation of the BB84 quantum key distribution (QKD) protocol. It runs on top of the `SimulaQron `_ quantum network simulator.
26 |
27 | All of these repositories are also just small steps towards the overall goal of adding full support for quantum key distribution to OpenSSL. Much work remains to be done, which is summarized at the end of this chapter.
28 |
29 | Once the OpenSSL library supports quantum key distribution, many applications that use OpenSSL (such as web servers and web clients) will be able to use quantum key distribution with little or no code changes to the application itself.
30 |
31 | The initial goal is to support simulated quantum networks using simulators such as `SimulaQron `_ or `NetSquid `_, both developed at `QuTech `_. But by building on top of a well-defined application programming interface (namely the `ETSI QKD API `_) it is conceivable that our code will be able to interoperate with real quantum key distribution devices that are being developed in academia and by commercial vendors.
32 |
33 | The pan-European quantum Internet hackathon.
34 | ============================================
35 |
36 | This project has its roots in the `Pan-European Quantum Internet Hackathon `_, which took place on 5 and 6 November 2019 and was organized by `RIPE Labs `_.
37 |
38 | .. image:: figures/pan-european-quantum-internet-hackathon.png
39 | :align: center
40 | :alt: Pan European Quantum Hackathon Logo
41 |
42 | Participants from six geographically distributed locations (Delft, Dublin, Geneva, Padua, Paris, and Sarajevo) formed teams that worked on various projects related to the `Quantum Internet `_.
43 |
44 | I participated in Delft where the hackathon was hosted by QuTech, a world-leading quantum technology research and development office within the `Delft University of Technology `_.
45 |
46 | The OpenSSL integration challenge.
47 | ==================================
48 |
49 | In Delft, I joined a team working on one of the `challenges suggested by the hackathon organizers `_, namely the `OpenSSL integration challenge `_.
50 |
51 | This challenge was developed by `Wojciech Kozlowski `_, a postdoctoral researcher at QuTech and one of the organizers of the Delft hackathon. He is also the main author of the `Architectural Principles of the Quantum Internet `_ document that is being developed in the `Quantum Internet Research Group (QIRG) `_ in the `Internet Research Task Force (IRTF) `_.
52 |
53 | .. image:: figures/openssl-logo.png
54 | :align: center
55 | :alt: OpenSSL Logo
56 |
57 | The OpenSSL integration challenge consists of two parts:
58 |
59 | 1. Enhance `OpenSSL `_ to be able to use `Quantum Key Distribution (QKD) `_ as a `key agreement protocol `_. OpenSSL is an open source cryptography library that implements the `Secure Sockets Layer (SSL) and Transport Layer Security (TLS) `_ protocols. OpenSSL is widely used in Internet applications such as web browsers and web servers.
60 |
61 | 2. Implement a specific quantum key distribution protocol, namely the `Bennett and Brassard 1984 (BB84) `_ protocol, on top of the SimulaQron quantum network simulator.
62 |
63 | The end-goal of the challenge is to use an off-the-shelf browser (e.g. Chrome) and connect it to a secure HTTPS website hosted on an off-the-shelf web server (e.g. Apache), while using the BB84 quantum key distribution algorithm as the key agreement protocol (running on a `SimulaQron `_ simulated quantum network), instead of the classical Diffie-Hellman protocol that is normally used in classical networks.
64 |
65 | Integration of OpenSSL with the stub ETSI QKD API.
66 | ==================================================
67 |
68 | The following figure shows what was actually achieved soon after the end of the hackathon.
69 |
70 | .. image:: figures/architecture-engine-mock-qkd.png
71 | :align: center
72 | :alt: Architecture using engines and mock QKD
73 |
74 | This is called the "upper half" of the solution for the OpenSSL integration challenge. The source code for this upper half implementation can be found in GitHub repository `openssl-qkd `_ and the documentation can be found on `this page `_.
75 |
76 | At the hackathon there was another team working on the "lower half" of the OpenSSL challenge. They were working on an implementation of the BB84 protocol running on SimulaQron. This BB84 implementation would provide a north-bound interface in the form of the ETSI QKD API.
77 |
78 | The hope was that by the end of the hackathon the "upper half" (the OpenSSL engine that consumes the ETSI QKD API) could be integrated with the "lower half" (the BB84 implementation that provides the ETSI QKD API). We did not quite make that goal during the hackathon itself. We picked up the work where the hackathon left off.
79 |
80 | Python implementation of BB84 on SimulaQron.
81 | ============================================
82 |
83 | The GitHub repository `simulaqron-bb84-python `_ contains our Python implementation of BB84 running on SimulaQron. We essentially re-did the work that was done by the other hackathon team.
84 |
85 | You can think of it as an exercise to get familiar with BB84 and with the CQC interface provided by SimulaQron. It is a fully functional implementation of BB84 that runs on SimulaQron. However, it is not very suitable as an implementation of the "lower half" that can be integrated with the "upper half" implementation in the `openssl-qkd `_ repository. This is because Python code cannot easily be integrated with C code into a dynamically loaded library that can be used as an OpenSSL engine. Yes, it is technically possible, but we prefer to rewrite the Cascade code in C (or maybe C++ or Rust); we consider the Python code to be a prototype (we did prototyping in Python because it is much easier to experiment in Python than in C).
86 |
87 | Python implementation of Cascade.
88 | =================================
89 |
90 | The openssl-qkd repository only contains code for the quantum phase of BB84; it does not contain any classical post-processing code: both the information reconciliation step and the privacy amplification step are missing.
91 |
92 | This GitHub repository `cascade-python `_ contains a Python implementation of the information reconciliation step.
93 |
94 | C++ implementation of Cascade.
95 | ==============================
96 |
97 | Soon after implementing Cascade in Python (this repository), I reimplemented it in C++ (GitHub repository `cascade-cpp `_).
98 |
99 | The main reason for reimplementing Cascade in C++ was that the Python code was too slow. The "make data-papers" target in the Python code does 1,000 Cascade iterations per data point and takes more than 5 days of continuous running on an AWS m5.2xlarge instance (120 hours x US$ 0.40 per hour = US$ 48 in compute cost). By contrast, the "make data-papers" target in the C++ code does 10,000 Cascade iterations per data point (10x better accuracy) and only takes ten hours to complete (US$ 4).
100 |
101 | Also, the C++ code was more carefully debugged than the Python code and has some extra debugging functionality.
102 |
103 | Next steps.
104 | ===========
105 |
106 | These are the remaining work-items for completing the work of implementing an OpenSSL engine that uses BB84 running on SimulaQron:
107 |
108 | 1. Implement a Python prototype for privacy amplification.
109 |
110 | 2. Implement one or more Python prototypes for other information reconciliation protocols, such as Golay codes.
111 |
112 | 3. Add a north-bound ETSI QKD API.
113 |
114 | 4. Rewrite the Python implementation of Cascade and the other information reconciliation protocols into C or C++ or Rust and integrate with the BB84 code.
115 |
116 | 5. Rewrite the Python implementation of privacy amplification into C or C++ or Rust and integrate with the BB84 code.
117 |
118 | 6. Demonstrate running Chrome and Apache on top of the QKD OpenSSL engine.
119 |
--------------------------------------------------------------------------------
/docs/source/comparison.rst:
--------------------------------------------------------------------------------
1 | ******************************************
2 | Raw Comparison of Results with Literature.
3 | ******************************************
4 |
5 | Comparison with "Demystifying the Information Reconciliation Protocol Cascade"
6 | ==============================================================================
7 |
8 | Here we compare the results of our Python Cascade implementation with the results reported in the following paper:
9 |
10 | `Demystifying the Information Reconciliation Protocol Cascade. `_ *Jesus Martinez-Mateo, Christoph Pacher, Momtchil Peev, Alex Ciurana, and Vicente Martin.* arXiv:1407.3257 [quant-ph], Jul 2014.
11 |
12 | Figure 1
13 | --------
14 |
15 | Original figure in paper:
16 |
17 | .. image:: figures/demystifying-figure-1-original.png
18 | :align: center
19 |
20 | Reproduced figure from this code:
21 |
22 | .. image:: figures/demystifying-figure-1-reproduced.png
23 | :align: center
24 |
25 | The original and the reproduced figure match very well.
26 |
27 | The original graph has more detail because they executed more runs per data point (this is true for all graphs, so we won't repeat this point.)
28 |
29 | The original graph does not have any indication of the standard deviation (this is true for all graphs, so we won't repeat this point).
30 |
31 | Figure 2
32 | --------
33 |
34 | Original figure in paper:
35 |
36 | .. image:: figures/demystifying-figure-2-original.png
37 | :align: center
38 |
39 | Reproduced figure from this code:
40 |
41 | .. image:: figures/demystifying-figure-2-reproduced.png
42 | :align: center
43 |
44 | At first blush the original and the reproduced figure are quite similar.
45 |
46 | However, in the original figure both the original algorithm (black line) and the modified algorithm (blue line) have a more distinct drop-off towards the right side of the figure.
47 |
48 | For example, in the figure from the original paper, as the x-axis increases from bit error rate 0.03 to 0.10, the jigsaw shape of the blue line starts to have bigger "jigsaws" and also distinctly slopes down. In the reproduced graph, we do see the bigger "jigsaws" but we don't see the downward slope: the line stays essentially flat. I currently have no explanation for this difference.
49 |
50 | Figure 3
51 | --------
52 |
53 | Original figure in paper:
54 |
55 | .. image:: figures/demystifying-figure-3-original.png
56 | :align: center
57 |
58 | Reproduced figure from this code:
59 |
60 | .. image:: figures/demystifying-figure-3-reproduced.png
61 | :align: center
62 |
63 | In the original figure, the frame length (the x-axis) ranges from 10^3 to 10^7. In the reproduced figure the frame length only ranges from 10^3 to 10^5. This is because my Python code was too slow to run many iterations for key lengths 10^6 or 10^7.
64 |
65 | In the original figure, the lines are perfectly straight. In the reproduced figure the lines are very slightly curved.
66 |
67 | Other than those minor differences, the original and reproduced figure match very well: the shapes are very similar and the lines cross over at the same points.
68 |
69 | Figure 4
70 | --------
71 |
72 | Original figure in paper:
73 |
74 | .. image:: figures/demystifying-figure-4-original.png
75 | :align: center
76 |
77 | Reproduced figure from this code:
78 |
79 | .. image:: figures/demystifying-figure-4-reproduced.png
80 | :align: center
81 |
82 | The reproduced figure is mostly useless: for small frame error rates (below 10^-3) the reproduced figure falls apart. This is because I only did 1,000 iterations per data point; the Python code was too slow to do more. To detect frame error rates below 10^-n we need to do at least 10^n iterations. Hence, with only 1,000 iterations it is to be expected that we cannot detect frame error rates below 10^-3.
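
As a quick sanity check of that limit (a sketch, not one of the study scripts): with R reconciliation runs per data point, the smallest non-zero frame error rate that can show up in the data is one failed frame out of R runs.

.. code-block:: python

    # Smallest non-zero frame error rate observable with R runs per data point: 1/R.
    for runs in (1_000, 10_000):
        print(runs, 1.0 / runs)   # 1000 -> 0.001 (10^-3), 10000 -> 0.0001 (10^-4)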
83 |
84 | Hopefully, when we have a faster C++ implementation we will be able to study lower error rates.
85 |
86 | Figure 5
87 | --------
88 |
89 | Original figure in paper:
90 |
91 | .. image:: figures/demystifying-figure-5-original.png
92 | :align: center
93 |
94 | Reproduced figure from this code:
95 |
96 | .. image:: figures/demystifying-figure-5-reproduced.png
97 | :align: center
98 |
99 | Once again, the reproduced figure is mostly useless, for similar reasons to figure 4.
100 |
101 | Due to the slow Python code, we only did 1,000 (10^3) iterations per data point. For key size 1,000 (10^3) this means we observe only 10^6 key bits per data point and hence cannot detect bit error rates below 10^-6. And for key size 10,000 (10^4) this means we cannot detect bit error rates below 10^-7.
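
The same kind of back-of-the-envelope check applies here (again just a sketch): each data point observes runs × key_size key bits in total, so the smallest non-zero measurable bit error rate is 1 / (runs × key_size).

.. code-block:: python

    # Smallest non-zero bit error rate measurable with 1,000 runs per data point.
    runs = 1_000
    for key_size in (1_000, 10_000):
        print(key_size, 1.0 / (runs * key_size))   # 1e-06 and 1e-07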
102 |
103 | As expected, the reproduced figure 5 falls apart below these 10^-6 (black line) and 10^-7 (blue line) bit error rates.
104 |
105 | Again, hopefully, when we have a faster C++ implementation we will be able to run more iterations per data point and hence study lower error rates.
106 |
107 | Figure 6
108 | --------
109 |
110 | Original figure in paper:
111 |
112 | .. image:: figures/demystifying-figure-6-original.png
113 | :align: center
114 |
115 | This figure is not (yet) reproduced by the code.
116 |
117 | Figure 7
118 | --------
119 |
120 | Original figure in paper:
121 |
122 | .. image:: figures/demystifying-figure-7-original.png
123 | :align: center
124 |
125 | This figure is not (yet) reproduced by the code.
126 |
127 | Figure 8
128 | --------
129 |
130 | Original figure in paper:
131 |
132 | .. image:: figures/demystifying-figure-8-original.png
133 | :align: center
134 |
135 | Reproduced figure from this code:
136 |
137 | .. image:: figures/demystifying-figure-8-reproduced.png
138 | :align: center
139 |
140 | The original and the reproduced figure match very well.
141 |
142 | Once again, the original graph has more detail because they executed more runs per data point.
143 |
144 | Figure 9
145 | --------
146 |
147 | Original figure in paper:
148 |
149 | .. image:: figures/demystifying-figure-9-original.png
150 | :align: center
151 |
152 | Reproduced figure from this code:
153 |
154 | .. image:: figures/demystifying-figure-9-reproduced.png
155 | :align: center
156 |
157 | There are a few noticeable differences between the original figure and the reproduced figure.
158 |
159 | There is of course the fact that the original figure has more detail than the reproduced figure, because we run fewer iterations per data point.
160 |
161 | The black, green, and blue graphs match reasonably well in the original and the reproduced figure. They have very similar values and similar shapes, including the obvious saw-teeth. There are a few differences in the details though.
162 |
163 | We already observed the first difference in figure 2. In the original graph the blue line clearly slopes down towards the end. In the reproduced graph the blue line saw-tooths around a flat trend instead of a trend that slopes down.
164 |
165 | In the green and the black graphs, we also see a difference. In the original graph we see a lower frequency secondary wave pattern on top of the higher frequency saw-teeth. For example, there are 6 "waves" in the green graph and lots of saw-teeth within each "wave". In the reproduced green and black graphs, we do not see these "waves".
166 |
167 |
168 | The last and biggest difference is in the red graph. This graph is completely different in the original vs reproduced figure. In the original figure the red graph is much higher (above the blue graph) and has much bigger saw-teeth.
169 |
170 | I currently do not have an explanation for any of these observed differences.
171 |
172 | Figure 10
173 | ---------
174 |
175 | Original figure in paper:
176 |
177 | .. image:: figures/demystifying-figure-10-original.png
178 | :align: center
179 |
180 | Reproduced figure from this code:
181 |
182 | .. image:: figures/demystifying-figure-10-reproduced.png
183 | :align: center
184 |
185 | Figure 11
186 | ---------
187 |
188 | Original figure in paper:
189 |
190 | .. image:: figures/demystifying-figure-11-original.png
191 | :align: center
192 |
193 | Reproduced figure from this code:
194 |
195 | .. image:: figures/demystifying-figure-11-reproduced.png
196 | :align: center
197 |
198 | Figure 12
199 | ---------
200 |
201 | Original figure in paper:
202 |
203 | .. image:: figures/demystifying-figure-12-original.png
204 | :align: center
205 |
206 | This figure is not (yet) reproduced by the code.
207 |
208 | Figure 13
209 | ---------
210 |
211 | Original figure in paper:
212 |
213 | .. image:: figures/demystifying-figure-13-original.png
214 | :align: center
215 |
216 | Reproduced figure from this code:
217 |
218 | .. image:: figures/demystifying-figure-13-reproduced.png
219 | :align: center
220 |
221 | Comparison with "André Reis Thesis"
222 | ===================================
223 |
224 | Figure 5.1
225 | ----------
226 |
227 | Original figure in thesis:
228 |
229 | .. image:: figures/andre-reis-thesis-figure-5-1-original.png
230 | :align: center
231 |
232 | Reproduced figure from this code:
233 |
234 | .. image:: figures/andre-reis-thesis-figure-5-1-reproduced.png
235 | :align: center
236 |
237 | Figure 5.2
238 | ----------
239 |
240 | Original figure in thesis:
241 |
242 | .. image:: figures/andre-reis-thesis-figure-5-2-original.png
243 | :align: center
244 |
245 | Reproduced figure from this code:
246 |
247 | .. image:: figures/andre-reis-thesis-figure-5-2-reproduced.png
248 | :align: center
249 |
250 | Figure 5.3
251 | ----------
252 |
253 | Original figure in thesis:
254 |
255 | .. image:: figures/andre-reis-thesis-figure-5-3-original.png
256 | :align: center
257 |
258 | Reproduced figure from this code:
259 |
260 | .. image:: figures/andre-reis-thesis-figure-5-3-reproduced.png
261 | :align: center
262 |
263 | Figure 5.4
264 | ----------
265 |
266 | Original figure in thesis:
267 |
268 | .. image:: figures/andre-reis-thesis-figure-5-4-original.png
269 | :align: center
270 |
271 | This figure is not (yet) reproduced by the code.
272 |
273 | Figure 5.5
274 | ----------
275 |
276 | Original figure in thesis:
277 |
278 | .. image:: figures/andre-reis-thesis-figure-5-5-original.png
279 | :align: center
280 |
281 | Reproduced figure from this code:
282 |
283 | .. image:: figures/andre-reis-thesis-figure-5-5a-reproduced.png
284 | :align: center
285 |
286 | Figure 5.5b is not (yet) reproduced by the code.
287 |
288 | Figure 5.6
289 | ----------
290 |
291 | Original figure in thesis:
292 |
293 | .. image:: figures/andre-reis-thesis-figure-5-6-original.png
294 | :align: center
295 |
296 | This figure is not (yet) reproduced by the code.
297 |
298 | Figure 5.7
299 | ----------
300 |
301 | Original figure in thesis:
302 |
303 | .. image:: figures/andre-reis-thesis-figure-5-7-original.png
304 | :align: center
305 |
306 | This figure is not (yet) reproduced by the code.
307 |
308 | Figure 5.8
309 | ----------
310 |
311 | Original figure in thesis:
312 |
313 | .. image:: figures/andre-reis-thesis-figure-5-8-original.png
314 | :align: center
315 |
316 | This figure is not (yet) reproduced by the code.
317 |
318 | Figure 5.9
319 | ----------
320 |
321 | Original figure in thesis:
322 |
323 | .. image:: figures/andre-reis-thesis-figure-5-9-original.png
324 | :align: center
325 |
326 | This figure is not (yet) reproduced by the code.
327 |
328 | Figure 5.10
329 | -----------
330 |
331 | Original figure in thesis:
332 |
333 | .. image:: figures/andre-reis-thesis-figure-5-10-original.png
334 | :align: center
335 |
336 | This figure is not (yet) reproduced by the code.
337 |
--------------------------------------------------------------------------------
/cascade/block.py:
--------------------------------------------------------------------------------
1 | class Block:
2 | """
3 | A block is a contiguous subset of bits in a shuffled key.
4 | """
5 |
6 | ERRORS_EVEN = 0
7 | """The block contains an even number of errors."""
8 | ERRORS_ODD = 1
9 | """The block contains an odd number of errors."""
10 | ERRORS_UNKNOWN = None
11 | """We don't know whether the block contains an even or an odd number of errors."""
12 |
13 | def __init__(self, key, shuffle, start_index, end_index, parent_block):
14 | """
15 | Create a block, which is a contiguous subset of bits in a shuffled key.
16 |
17 | Args:
18 | key (Key): The key for which to create one single block that covers a subset of the key.
19 | shuffle (Shuffle): The shuffle to apply to the key before creating the block.
20 | start_index (int): The shuffle index, inclusive, at which the block starts. Must be in
21 | range [0, shuffle._size).
22 | end_index (int): The shuffle index, exclusive, at which the block ends. Must be in range
23 | [0, shuffle._size]. The range must encompass at least 1 bit, i.e.
24 | end_index > start_index.
25 | parent_block (Block): The parent block. None if there is no parent, i.e. if this is a
26 | top-level block.
27 | """
28 |
29 | # Store block attributes.
30 | self._key = key
31 | self._shuffle = shuffle
32 | self._start_index = start_index
33 | self._end_index = end_index
34 |
35 | # Keep track of parent block. None if there is no parent, i.e. if this is a top-level block.
36 | self._parent_block = parent_block
37 |
38 | # Keep track of the left and right sub-blocks to avoid creating them more than once.
39 | self._left_sub_block = None
40 | self._right_sub_block = None
41 |
42 | # Calculate the current parity for this block.
43 | self._current_parity = shuffle.calculate_parity(key, start_index, end_index)
44 |
45 | # We don't yet know the correct parity for this block.
46 | self._correct_parity = None
47 |
48 | @staticmethod
49 | def create_covering_blocks(key, shuffle, block_size):
50 | """
51 | Create a list of blocks of a given size that cover a given shuffled key.
52 |
53 | Args:
54 | key (Key): The key for which to create a list of blocks that collectively cover the
55 | entire key.
56 | shuffle (Shuffle): The shuffle to apply to the key before creating the blocks.
57 | block_size (int): The size of each block. Each block in the list, except for the last
58 | one, will be exactly this size. The last block may be smaller.
59 |
60 | Returns:
61 | A list of blocks that cover the shuffled key.
62 | """
63 |
64 | # Generate the blocks.
65 | blocks = []
66 | remaining_bits = shuffle.get_size()
67 | start_index = 0
68 | while remaining_bits > 0:
69 | actual_block_size = min(block_size, remaining_bits)
70 | end_index = start_index + actual_block_size
71 | block = Block(key, shuffle, start_index, end_index, None)
72 | blocks.append(block)
73 | start_index += actual_block_size
74 | remaining_bits -= actual_block_size
75 | return blocks
76 |
77 | def __repr__(self):
78 | """
79 | Get the unambiguous string representation of the block.
80 |
81 | Returns:
82 | The unambiguous string representation of the block.
83 | """
84 | string = "Block:"
85 | for shuffle_index in range(self._start_index, self._end_index):
86 | key_index = self._shuffle.get_key_index(shuffle_index)
87 | key_bit = self._shuffle.get_bit(self._key, shuffle_index)
88 | string += f" {shuffle_index}->{key_index}={key_bit}"
89 | return string
90 |
91 | def __str__(self):
92 | """
93 | Get the human-readable string representation of the block.
94 |
95 | Returns:
96 | The human-readable string representation of the block.
97 | """
98 | string = ""
99 | for shuffle_index in range(self._start_index, self._end_index):
100 | string += str(self._shuffle.get_bit(self._key, shuffle_index))
101 | return string
102 |
103 | def __lt__(self, other):
104 | """
105 | Is this block "less than" the other block? This is needed to insert the blocks in a priority
106 | queue; for equal block sizes the priority queue want to order by increasing block size. We
107 | don't care about the order of blocks within a given block size, so we simply order based on
108 | the id().
109 |
110 | Returns:
111 | True if self < other, False otherwise.
112 | """
113 | return id(self) < id(other)
114 |
115 | def get_start_index(self):
116 | """
117 | Get the start index of the block, i.e. the shuffled key index for the first bit in the
118 | block.
119 |
120 | Returns:
121 | The start index.
122 | """
123 | return self._start_index
124 |
125 | def get_end_index(self):
126 | """
127 | Get the end index of the block, i.e. the shuffled key index for the first bit after the last
128 | bit in the block.
129 |
130 | Returns:
131 | The end index.
132 | """
133 | return self._end_index
134 |
135 | def get_shuffle(self):
136 | """
137 | Get the shuffle for this block.
138 |
139 | Returns:
140 | The shuffle for this block.
141 | """
142 | return self._shuffle
143 |
144 | def get_size(self):
145 | """
146 | Get the size of the block in bits.
147 |
148 | Returns:
149 | The size of the block in bits.
150 | """
151 | return self._end_index - self._start_index
152 |
153 | def get_key_indexes(self):
154 | """
155 | Get a list of key indexes for this block.
156 |
157 | Returns:
158 | The key indexes for this block (the ordering of the list is undefined; in particular
159 | don't assume that the key indexes are in increasing order.)
160 | """
161 | key_indexes = []
162 | for shuffle_index in range(self._start_index, self._end_index):
163 | key_index = self._shuffle.get_key_index(shuffle_index)
164 | key_indexes.append(key_index)
165 | return key_indexes
166 |
167 | def get_current_parity(self):
168 | """
169 | Get the current parity of the block.
170 |
171 | Returns:
172 | The current parity (0 or 1) of the block.
173 | """
174 | return self._current_parity
175 |
176 | def get_correct_parity(self):
177 | """
178 | Get the correct parity of the block, if we know it.
179 |
180 | Returns:
181 | The correct parity (0 or 1) of the block, or None if we don't know it.
182 | """
183 | return self._correct_parity
184 |
185 | def set_correct_parity(self, correct_parity):
186 | """
187 | Set the correct parity of the block.
188 |
189 | Params:
190 | correct_parity (int): The correct parity (0 or 1).
191 | """
192 | self._correct_parity = correct_parity
193 |
194 | def is_top_block(self):
195 | """
196 | Is this block a top-level block?
197 |
198 | Returns:
199 | True if the block was created by splitting a shuffled key into blocks. False if the
200 | block was created by splitting a block into sub-blocks.
201 | """
202 | return self._parent_block is None
203 |
204 | def get_parent_block(self):
205 | """
206 | Return the parent block of this block, if it has one.
207 |
208 | Returns:
209 | The parent block, or None if there is no parent block.
210 | """
211 | return self._parent_block
212 |
213 | def get_left_sub_block(self):
214 | """
215 | Return the left sub-block of this block, if it has one.
216 |
217 | Returns:
218 | The left sub-block, or None if there is no left sub-block.
219 | """
220 | return self._left_sub_block
221 |
222 | def create_left_sub_block(self):
223 | """
224 | Create the left sub-block of this block. If the block has an odd size, the left sub-block
225 | will be one bit larger than the right sub-block. The block must be at least 2 bits in size.
226 |
227 | Returns:
228 | The left sub-block.
229 | """
230 | middle_index = self._start_index + (self._end_index - self._start_index + 1) // 2
231 | self._left_sub_block = Block(self._key, self._shuffle, self._start_index, middle_index,
232 | self)
233 | return self._left_sub_block
234 |
235 | def get_right_sub_block(self):
236 | """
237 | Return the right sub-block of this block, if it has one.
238 |
239 | Returns:
240 | The right sub-block, or None if there is no right sub-block.
241 | """
242 | return self._right_sub_block
243 |
244 | def create_right_sub_block(self):
245 | """
246 | Create the right sub-block of this block. If the block has an odd size, the left sub-block
247 | will be one bit larger than the right sub-block. The block must be at least 2 bits in size.
248 |
249 | Returns:
250 | The right sub-block.
251 | """
252 | middle_index = self._start_index + (self._end_index - self._start_index + 1) // 2
253 | self._right_sub_block = Block(self._key, self._shuffle, middle_index, self._end_index, self)
254 | return self._right_sub_block
255 |
256 | def get_error_parity(self):
257 | """
258 | Does this block have an odd or an even number of errors?
259 |
260 | Returns:
261 | * ERRORS_ODD = The block contains an odd number of errors.
262 | * ERRORS_EVEN = The block contains an even number of errors.
263 | * ERRORS_UNKNOWN = We don't yet know whether the block contains an odd or even number of
264 | errors because we have not yet asked what the parity of the original key (without
265 | noise) is.
266 | """
267 | if self._correct_parity is None:
268 | return Block.ERRORS_UNKNOWN
269 | if self._current_parity == self._correct_parity:
270 | return Block.ERRORS_EVEN
271 | return Block.ERRORS_ODD
272 |
273 | def get_key_index(self, shuffle_index):
274 | """
275 | Get the key index that corresponds to a given shuffle index.
276 |
277 | Params:
278 | shuffle_index: The shuffle index.
279 |
280 | Returns:
281 | The key index.
282 | """
283 | return self._shuffle.get_key_index(shuffle_index)
284 |
285 | def flip_bit(self, flipped_shuffle_index):
286 | """
287 | Flip a bit in the block.
288 |
289 | Params:
290 | flipped_shuffle_index: The shuffle index of the bit to flip.
291 | """
292 | self._shuffle.flip_bit(self._key, flipped_shuffle_index)
293 |
294 | def flip_parity(self):
295 | """
296 | Flip the current parity of this block. This is needed when a single bit in the block is
297 | flipped as a result of a single bit error correction.
298 | """
299 | self._current_parity = 1 - self._current_parity
300 |
--------------------------------------------------------------------------------
/cascade/tests/test_block.py:
--------------------------------------------------------------------------------
1 | from cascade.block import Block
2 | from cascade.key import Key
3 | from cascade.shuffle import Shuffle
4 |
5 | def test_create_block():
6 |
7 | # Block covers entire shuffle.
8 | Key.set_random_seed(2221)
9 | Shuffle.set_random_seed(2222)
10 | key = Key.create_random_key(8)
11 | assert key.__repr__() == "Key: 10111010"
12 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
13 | assert shuffle.__repr__() == "Shuffle: 0->0 1->1 2->6 3->4 4->3 5->2 6->5 7->7"
14 | block = Block(key, shuffle, 3, 6, None)
15 | assert block.__repr__() == "Block: 3->4=1 4->3=1 5->2=1"
16 | block = Block(key, shuffle, 0, 8, None)
17 | assert block.__repr__() == "Block: 0->0=1 1->1=0 2->6=1 3->4=1 4->3=1 5->2=1 6->5=0 7->7=0"
18 |
19 | # Block covers part of the shuffle.
20 | block = Block(key, shuffle, 1, 3, None)
21 | assert block.__repr__() == "Block: 1->1=0 2->6=1"
22 |
23 | # Single bit block.
24 | block = Block(key, shuffle, 2, 3, None)
25 | assert block.__repr__() == "Block: 2->6=1"
26 |
27 | def test_create_covering_blocks():
28 |
29 | # Prepare key and shuffle.
30 | Key.set_random_seed(3331)
31 | Shuffle.set_random_seed(3332)
32 | key = Key.create_random_key(16)
33 | assert key.__repr__() == "Key: 0011011001100110"
34 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
35 | assert shuffle.__repr__() == ("Shuffle: 0->4 1->14 2->5 3->15 4->0 5->1 6->7 7->11 "
36 | "8->6 9->12 10->13 11->3 12->9 13->8 14->2 15->10")
37 |
38 | # Multiple blocks, last block is partially filled.
39 | blocks = Block.create_covering_blocks(key, shuffle, 5)
40 | assert len(blocks) == 4
41 | assert blocks[0].__repr__() == "Block: 0->4=0 1->14=1 2->5=1 3->15=0 4->0=0"
42 | assert blocks[1].__repr__() == "Block: 5->1=0 6->7=0 7->11=0 8->6=1 9->12=0"
43 | assert blocks[2].__repr__() == "Block: 10->13=1 11->3=1 12->9=1 13->8=0 14->2=1"
44 | assert blocks[3].__repr__() == "Block: 15->10=1"
45 |
46 | # Multiple blocks, last block is fully filled.
47 | blocks = Block.create_covering_blocks(key, shuffle, 4)
48 | assert len(blocks) == 4
49 | assert blocks[0].__repr__() == "Block: 0->4=0 1->14=1 2->5=1 3->15=0"
50 | assert blocks[1].__repr__() == "Block: 4->0=0 5->1=0 6->7=0 7->11=0"
51 | assert blocks[2].__repr__() == "Block: 8->6=1 9->12=0 10->13=1 11->3=1"
52 | assert blocks[3].__repr__() == "Block: 12->9=1 13->8=0 14->2=1 15->10=1"
53 |
54 | # Single block, partially filled.
55 | key = Key.create_random_key(4)
56 | assert key.__repr__() == "Key: 1111"
57 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
58 | assert shuffle.__repr__() == "Shuffle: 0->0 1->1 2->3 3->2"
59 | blocks = Block.create_covering_blocks(key, shuffle, 5)
60 | assert len(blocks) == 1
61 | assert blocks[0].__repr__() == "Block: 0->0=1 1->1=1 2->3=1 3->2=1"
62 |
63 | # Single block, fully filled.
64 | blocks = Block.create_covering_blocks(key, shuffle, 4)
65 | assert len(blocks) == 1
66 | assert blocks[0].__repr__() == "Block: 0->0=1 1->1=1 2->3=1 3->2=1"
67 |
68 | def test_repr():
69 | Key.set_random_seed(4441)
70 | Shuffle.set_random_seed(4442)
71 | key = Key.create_random_key(4)
72 | assert key.__repr__() == "Key: 1111"
73 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
74 | assert shuffle.__repr__() == "Shuffle: 0->3 1->2 2->1 3->0"
75 | blocks = Block.create_covering_blocks(key, shuffle, 5)
76 | assert len(blocks) == 1
77 | assert blocks[0].__repr__() == "Block: 0->3=1 1->2=1 2->1=1 3->0=1"
78 |
79 | def test_str():
80 | Key.set_random_seed(55511)
81 | Shuffle.set_random_seed(55522)
82 | key = Key.create_random_key(4)
83 | assert key.__str__() == "1010"
84 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
85 | assert shuffle.__str__() == "0->1 1->2 2->3 3->0"
86 | blocks = Block.create_covering_blocks(key, shuffle, 5)
87 | assert len(blocks) == 1
88 | assert blocks[0].__str__() == "0101"
89 |
90 | def test_lt():
91 | Key.set_random_seed(55533)
92 | Shuffle.set_random_seed(55544)
93 | key = Key.create_random_key(6)
94 | assert key.__str__() == "001101"
95 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
96 | assert shuffle.__str__() == "0->3 1->2 2->4 3->5 4->1 5->0"
97 | blocks = Block.create_covering_blocks(key, shuffle, 3)
98 | assert len(blocks) == 2
99 | assert (blocks[0] < blocks[1]) or (blocks[1] < blocks[0])
100 | # pylint:disable=comparison-with-itself
101 | assert not blocks[0] < blocks[0]
102 |
103 | def test_get_start_index():
104 | Key.set_random_seed(55555)
105 | Shuffle.set_random_seed(55566)
106 | key = Key.create_random_key(6)
107 | assert key.__str__() == "001100"
108 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
109 | assert shuffle.__str__() == "0->2 1->1 2->0 3->5 4->3 5->4"
110 | blocks = Block.create_covering_blocks(key, shuffle, 3)
111 | assert len(blocks) == 2
112 | assert blocks[0].get_start_index() == 0
113 | assert blocks[1].get_start_index() == 3
114 |
115 | def test_get_end_index():
116 | Key.set_random_seed(55577)
117 | Shuffle.set_random_seed(55588)
118 | key = Key.create_random_key(6)
119 | assert key.__str__() == "100000"
120 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
121 | assert shuffle.__str__() == "0->4 1->0 2->5 3->2 4->3 5->1"
122 | blocks = Block.create_covering_blocks(key, shuffle, 3)
123 | assert len(blocks) == 2
124 | assert blocks[0].get_end_index() == 3
125 | assert blocks[1].get_end_index() == 6
126 |
127 | def test_get_shuffle():
128 | Key.set_random_seed(55591)
129 | Shuffle.set_random_seed(55592)
130 | key = Key.create_random_key(6)
131 | assert key.__str__() == "001000"
132 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
133 | assert shuffle.__str__() == "0->5 1->3 2->4 3->0 4->1 5->2"
134 | blocks = Block.create_covering_blocks(key, shuffle, 3)
135 | assert len(blocks) == 2
136 | assert blocks[0].get_shuffle() == shuffle
137 | assert blocks[1].get_shuffle() == shuffle
138 |
139 | def test_get_size():
140 | Key.set_random_seed(5551)
141 | Shuffle.set_random_seed(5552)
142 | key = Key.create_random_key(65)
143 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
144 | blocks = Block.create_covering_blocks(key, shuffle, 30)
145 | assert len(blocks) == 3
146 | assert blocks[0].get_size() == 30
147 | assert blocks[1].get_size() == 30
148 | assert blocks[2].get_size() == 5
149 |
150 | def test_get_key_indexes():
151 | Key.set_random_seed(55593)
152 | Shuffle.set_random_seed(55594)
153 | key = Key.create_random_key(6)
154 | assert key.__str__() == "010011"
155 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
156 | assert shuffle.__str__() == "0->4 1->5 2->2 3->3 4->0 5->1"
157 | blocks = Block.create_covering_blocks(key, shuffle, 3)
158 | assert len(blocks) == 2
159 | assert blocks[0].get_key_indexes() == [4, 5, 2]
160 | assert blocks[1].get_key_indexes() == [3, 0, 1]
161 |
162 | def test_get_current_parity():
163 |
164 | # Even parity block.
165 | Key.set_random_seed(6661)
166 | Shuffle.set_random_seed(6662)
167 | key = Key.create_random_key(10)
168 | assert key.__str__() == "0111101111"
169 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
170 | assert shuffle.__str__() == "0->1 1->6 2->7 3->8 4->4 5->2 6->0 7->9 8->3 9->5"
171 | blocks = Block.create_covering_blocks(key, shuffle, 10)
172 | assert len(blocks) == 1
173 | block = blocks[0]
174 | assert block.__str__() == "1111110110"
175 | assert block.get_current_parity() == 0
176 |
177 | # Odd parity block.
178 | key = Key.create_random_key(12)
179 | assert key.__str__() == "010100111101"
180 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
181 | assert shuffle.__str__() == "0->7 1->9 2->11 3->2 4->8 5->1 6->6 7->5 8->0 9->10 10->3 11->4"
182 | blocks = Block.create_covering_blocks(key, shuffle, 12)
183 | assert len(blocks) == 1
184 | block = blocks[0]
185 | assert block.__str__() == "111011100010"
186 | assert block.get_current_parity() == 1
187 |
188 | # Split block into sub-blocks.
189 | left_sub_block = block.create_left_sub_block()
190 | right_sub_block = block.create_right_sub_block()
191 |
192 | # Odd parity sub-block.
193 | assert left_sub_block.__str__() == "111011"
194 | assert left_sub_block.get_current_parity() == 1
195 |
196 | # Even parity sub-block.
197 | assert right_sub_block.__str__() == "100010"
198 | assert right_sub_block.get_current_parity() == 0
199 |
200 | def test_get_and_set_correct_parity():
201 | Key.set_random_seed(6663)
202 | Shuffle.set_random_seed(6664)
203 | key = Key.create_random_key(10)
204 | assert key.__str__() == "1010111100"
205 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
206 | assert shuffle.__str__() == "0->8 1->5 2->7 3->0 4->2 5->4 6->1 7->3 8->9 9->6"
207 | blocks = Block.create_covering_blocks(key, shuffle, 10)
208 | assert len(blocks) == 1
209 | block = blocks[0]
210 | assert block.__str__() == "0111110001"
211 | assert block.get_correct_parity() is None
212 | block.set_correct_parity(0)
213 | assert block.get_correct_parity() == 0
214 | block.set_correct_parity(1)
215 | assert block.get_correct_parity() == 1
216 |
217 | def test_is_top_block():
218 | Key.set_random_seed(6665)
219 | Shuffle.set_random_seed(6666)
220 | key = Key.create_random_key(10)
221 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
222 | blocks = Block.create_covering_blocks(key, shuffle, 10)
223 | assert len(blocks) == 1
224 | top_block = blocks[0]
225 | assert top_block.is_top_block()
226 | left_sub_block = top_block.create_left_sub_block()
227 | assert not left_sub_block.is_top_block()
228 |     right_sub_block = top_block.create_right_sub_block()
229 | assert not right_sub_block.is_top_block()
230 | left_left_sub_block = left_sub_block.create_left_sub_block()
231 | assert not left_left_sub_block.is_top_block()
232 |
233 | def test_get_parent_block():
234 | Key.set_random_seed(6665)
235 | Shuffle.set_random_seed(6666)
236 | key = Key.create_random_key(10)
237 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
238 | blocks = Block.create_covering_blocks(key, shuffle, 10)
239 | assert len(blocks) == 1
240 | top_block = blocks[0]
241 | assert top_block.get_parent_block() is None
242 | left_sub_block = top_block.create_left_sub_block()
243 | assert left_sub_block.get_parent_block() == top_block
244 |     right_sub_block = top_block.create_right_sub_block()
245 | assert right_sub_block.get_parent_block() == top_block
246 | left_left_sub_block = left_sub_block.create_left_sub_block()
247 | assert left_left_sub_block.get_parent_block() == left_sub_block
248 |
249 | def test_get_and_create_left_sub_block():
250 | Key.set_random_seed(6667)
251 | Shuffle.set_random_seed(6668)
252 | key = Key.create_random_key(12)
253 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
254 | blocks = Block.create_covering_blocks(key, shuffle, 8)
255 | assert len(blocks) == 2
256 | top_block = blocks[0]
257 | assert top_block.get_left_sub_block() is None
258 | left_sub_block = top_block.create_left_sub_block()
259 | assert top_block.get_left_sub_block() is left_sub_block
260 | left_left_sub_block = left_sub_block.create_left_sub_block()
261 | assert left_sub_block.get_left_sub_block() is left_left_sub_block
262 |
263 | def test_get_and_create_right_sub_block():
264 | Key.set_random_seed(6667)
265 | Shuffle.set_random_seed(6668)
266 | key = Key.create_random_key(12)
267 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
268 | blocks = Block.create_covering_blocks(key, shuffle, 8)
269 | assert len(blocks) == 2
270 | top_block = blocks[0]
271 | assert top_block.get_right_sub_block() is None
272 | right_sub_block = top_block.create_right_sub_block()
273 | assert top_block.get_right_sub_block() is right_sub_block
274 | right_right_sub_block = right_sub_block.create_right_sub_block()
275 | assert right_sub_block.get_right_sub_block() is right_right_sub_block
276 |
277 | def test_get_error_parity():
278 |
279 | # Create the original (sent) key.
280 | Key.set_random_seed(8881)
281 | Shuffle.set_random_seed(8882)
282 | correct_key = Key.create_random_key(16)
283 | assert correct_key.__repr__() == "Key: 1011111100101110"
284 |
285 | # Create the noisy (received) key, which has 3 errors relative to the original key.
286 | noisy_key = correct_key.copy(0.1875, Key.ERROR_METHOD_EXACT)
287 | assert correct_key.__repr__() == "Key: 1011111100101110"
288 | assert noisy_key.__repr__() == "Key: 1111111110101100"
289 | # Errors: ^ ^ ^
290 | # 111111
291 | # 0123456789012345
292 |
293 | # Create a random shuffling.
294 | shuffle = Shuffle(noisy_key.get_size(), Shuffle.SHUFFLE_RANDOM)
295 | assert shuffle.__repr__() == ("Shuffle: 0->8 1->12 2->6 3->7 4->4 5->10 6->11 7->2 8->1 9->9 "
296 | "10->15 11->0 12->13 13->3 14->5 15->14")
297 |
298 | # Create a block that covers the entire shuffled noisy key.
299 |     # The block has errors at the following shuffle indexes: 0->8 8->1 15->14
300 | rx_blocks = Block.create_covering_blocks(noisy_key, shuffle, noisy_key.get_size())
301 | assert len(rx_blocks) == 1
302 | rx_block = rx_blocks[0]
303 | assert rx_block.__repr__() == ("Block: 0->8=1 1->12=1 2->6=1 3->7=1 4->4=1 5->10=1 6->11=0 "
304 | "7->2=1 8->1=1 9->9=0 10->15=0 11->0=1 12->13=1 13->3=1 14->5=1 "
305 | "15->14=0")
306 | assert rx_block.__str__() == "1111110110011110" # 12 ones -> even parity
307 | # Errors: ^ ^^ # 3 errors -> odd number of errors
308 | # 111111
309 | # 0123456789012345
310 | assert rx_block.get_current_parity() == 0
311 |
312 | # At this point, we have not yet corrected any error in the block.
313 | assert rx_block.get_error_parity() == Block.ERRORS_UNKNOWN
314 |
315 | def test_get_key_index():
316 | Key.set_random_seed(77711)
317 | Shuffle.set_random_seed(77712)
318 | key = Key.create_random_key(6)
319 | assert key.__str__() == "110101"
320 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
321 | assert shuffle.__str__() == "0->0 1->5 2->1 3->4 4->2 5->3"
322 | blocks = Block.create_covering_blocks(key, shuffle, 3)
323 | block = blocks[0]
324 | assert block.get_key_index(1) == 5
325 |
326 | def test_flip_bit():
327 | Key.set_random_seed(77713)
328 | Shuffle.set_random_seed(77714)
329 | key = Key.create_random_key(6)
330 | assert key.__str__() == "111100"
331 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
332 | assert shuffle.__str__() == "0->1 1->2 2->4 3->5 4->0 5->3"
333 | blocks = Block.create_covering_blocks(key, shuffle, 3)
334 | block = blocks[1]
335 | assert block.__str__() == "011"
336 | block.flip_bit(4)
337 | assert key.__str__() == "011100"
338 | assert block.__str__() == "001"
339 |
340 | def test_flip_parity():
341 | Key.set_random_seed(77715)
342 | Shuffle.set_random_seed(77716)
343 | key = Key.create_random_key(6)
344 | assert key.__str__() == "001010"
345 | shuffle = Shuffle(key.get_size(), Shuffle.SHUFFLE_RANDOM)
346 | assert shuffle.__str__() == "0->2 1->5 2->0 3->4 4->1 5->3"
347 | blocks = Block.create_covering_blocks(key, shuffle, 3)
348 | block = blocks[1]
349 | assert block.__str__() == "100"
350 | assert block.get_error_parity() == Block.ERRORS_UNKNOWN
351 | block.set_correct_parity(0)
352 | assert block.get_error_parity() == Block.ERRORS_ODD
353 | block.flip_bit(4)
354 | assert key.__str__() == "011010"
355 | assert block.__str__() == "110"
356 | assert block.get_error_parity() == Block.ERRORS_ODD
357 | block.flip_parity()
358 | assert block.get_error_parity() == Block.ERRORS_EVEN
359 |
--------------------------------------------------------------------------------
/study/graphs_demystifying.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "graph_name": "demystifying_figure_1",
4 | "title": "Figure 1 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
5 | "x_axis": {
6 | "title": "Quantum Bit Error Rate (QBER)",
7 | "variable": "requested_bit_error_rate"
8 | },
9 | "y_axis": {
10 | "title": "Reconciliation efficiency",
11 | "variable": "efficiency",
12 | "range": [1.0, 1.3]
13 | },
14 | "series": [
15 | {
16 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
17 | "legend": "Cascade orig.",
18 | "line_color": "black",
19 | "deviation_color": "lightgray"
20 | },
21 | {
22 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
23 | "legend": "Cascade mod. (1)",
24 | "line_color": "blue",
25 | "deviation_color": "lightblue"
26 | }
27 | ]
28 | },
29 | {
30 | "graph_name": "demystifying_figure_2",
31 | "title": "Figure 2 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
32 | "x_axis": {
33 | "title": "Quantum Bit Error Rate (QBER)",
34 | "variable": "requested_bit_error_rate"
35 | },
36 | "y_axis": {
37 | "title": "Channel uses",
38 | "variable": "ask_parity_messages"
39 | },
40 | "series": [
41 | {
42 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
43 | "legend": "Cascade orig.",
44 | "line_color": "black",
45 | "deviation_color": "lightgray"
46 | },
47 | {
48 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
49 | "legend": "Cascade mod. (1)",
50 | "line_color": "blue",
51 | "deviation_color": "lightblue"
52 | }
53 | ]
54 | },
55 | {
56 | "graph_name": "demystifying_figure_3",
57 | "title": "Figure 3 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
58 | "x_axis": {
59 | "title": "Key Size",
60 | "type": "log",
61 | "variable": "key_size"
62 | },
63 | "y_axis": {
64 | "title": "Channel uses",
65 | "type": "log",
66 | "variable": "ask_parity_messages"
67 | },
68 | "series": [
69 | {
70 | "data_file": "data__algorithm=original;key_size=vary;error_rate=0.01",
71 | "legend": "Q=1%",
72 | "line_color": "red",
73 | "deviation_color": "none"
74 | },
75 | {
76 | "data_file": "data__algorithm=original;key_size=vary;error_rate=0.02",
77 | "legend": "Q=2%",
78 | "line_color": "green",
79 | "deviation_color": "none"
80 | },
81 | {
82 | "data_file": "data__algorithm=original;key_size=vary;error_rate=0.05",
83 | "legend": "Q=5%",
84 | "line_color": "blue",
85 | "deviation_color": "none"
86 | }
87 | ]
88 | },
89 | {
90 | "graph_name": "demystifying_figure_4",
91 | "title": "Figure 4 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
92 | "x_axis": {
93 | "title": "Quantum Bit Error Rate (QBER)",
94 | "variable": "requested_bit_error_rate",
95 | "range": [0.0, 0.11]
96 | },
97 | "y_axis": {
98 | "title": "Frame error rate",
99 | "type": "log",
100 | "variable": "remaining_frame_error_rate",
101 | "range": [-6.0, 0.0]
102 | },
103 | "series": [
104 | {
105 | "data_file": "data__algorithm=original;key_size=1000;error_rate=vary",
106 | "legend": "original key_size=1,000",
107 | "mode": "lines+markers",
108 | "marker": {
109 | "symbol": "square"
110 | },
111 | "line_color": "black",
112 | "deviation_color": "none",
113 | "filter": {
114 | "variable": "remaining_frame_error_rate",
115 | "min_value": 1e-8
116 | }
117 | },
118 | {
119 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
120 | "legend": "original key_size=10,000",
121 | "mode": "lines+markers",
122 | "marker": {
123 | "symbol": "circle"
124 | },
125 | "dash": "dot",
126 | "line_color": "black",
127 | "deviation_color": "none",
128 | "filter": {
129 | "variable": "remaining_frame_error_rate",
130 | "min_value": 1e-8
131 | }
132 | },
133 | {
134 | "data_file": "data__algorithm=biconf;key_size=1000;error_rate=vary",
135 | "legend": "biconf key_size=1,000",
136 | "mode": "lines+markers",
137 | "marker": {
138 | "symbol": "triangle-up"
139 | },
140 | "line_color": "blue",
141 | "deviation_color": "none",
142 | "filter": {
143 | "variable": "remaining_frame_error_rate",
144 | "min_value": 1e-8
145 | }
146 | },
147 | {
148 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
149 | "legend": "biconf key_size=10,000",
150 | "mode": "lines+markers",
151 | "marker": {
152 | "symbol": "triangle-down"
153 | },
154 | "dash": "dot",
155 | "line_color": "blue",
156 | "deviation_color": "none",
157 | "filter": {
158 | "variable": "remaining_frame_error_rate",
159 | "min_value": 1e-8
160 | }
161 | }
162 | ]
163 | },
164 | {
165 | "graph_name": "demystifying_figure_5",
166 | "title": "Figure 5 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
167 | "x_axis": {
168 | "title": "Quantum Bit Error Rate (QBER)",
169 | "variable": "requested_bit_error_rate",
170 | "range": [0.0, 0.11]
171 | },
172 | "y_axis": {
173 | "title": "Bit error rate",
174 | "type": "log",
175 | "variable": "remaining_bit_error_rate",
176 | "range": [-10.0, -2.0]
177 | },
178 | "series": [
179 | {
180 | "data_file": "data__algorithm=original;key_size=1000;error_rate=vary",
181 | "legend": "original key_size=1,000",
182 | "mode": "lines+markers",
183 | "marker": {
184 | "symbol": "square"
185 | },
186 | "line_color": "black",
187 | "deviation_color": "none",
188 | "filter": {
189 | "variable": "remaining_bit_error_rate",
190 | "min_value": 1e-11
191 | }
192 | },
193 | {
194 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
195 | "legend": "original key_size=10,000",
196 | "mode": "lines+markers",
197 | "marker": {
198 | "symbol": "circle"
199 | },
200 | "dash": "dot",
201 | "line_color": "black",
202 | "deviation_color": "none",
203 | "filter": {
204 | "variable": "remaining_bit_error_rate",
205 | "min_value": 1e-11
206 | }
207 | },
208 | {
209 | "data_file": "data__algorithm=biconf;key_size=1000;error_rate=vary",
210 | "legend": "biconf key_size=1,000",
211 | "mode": "lines+markers",
212 | "marker": {
213 | "symbol": "triangle-up"
214 | },
215 | "line_color": "blue",
216 | "deviation_color": "none",
217 | "filter": {
218 | "variable": "remaining_bit_error_rate",
219 | "min_value": 1e-11
220 | }
221 | },
222 | {
223 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
224 | "legend": "biconf key_size=10,000",
225 | "mode": "lines+markers",
226 | "marker": {
227 | "symbol": "triangle-down"
228 | },
229 | "dash": "dot",
230 | "line_color": "blue",
231 | "deviation_color": "none",
232 | "filter": {
233 | "variable": "remaining_bit_error_rate",
234 | "min_value": 1e-11
235 | }
236 | }
237 | ]
238 | },
239 | {
240 | "graph_name": "demystifying_figure_8",
241 | "title": "Figure 8 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
242 | "x_axis": {
243 | "title": "Quantum Bit Error Rate (QBER)",
244 | "variable": "requested_bit_error_rate"
245 | },
246 | "y_axis": {
247 | "title": "Reconciliation efficiency",
248 | "variable": "efficiency",
249 | "range": [1.0, 1.3]
250 | },
251 | "series": [
252 | {
253 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
254 | "legend": "Cascade orig.",
255 | "line_color": "black",
256 | "deviation_color": "none"
257 | },
258 | {
259 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
260 | "legend": "Cascade mod. (1)",
261 | "line_color": "blue",
262 | "deviation_color": "none"
263 | },
264 | {
265 | "data_file": "data__algorithm=yanetal;key_size=10000;error_rate=vary",
266 | "legend": "Cascade opt. (2)",
267 | "line_color": "red",
268 | "deviation_color": "none"
269 | },
270 | {
271 | "data_file": "data__algorithm=option3;key_size=10000;error_rate=vary",
272 | "legend": "Cascade opt. (3)",
273 | "line_color": "green",
274 | "deviation_color": "none"
275 | }
276 | ]
277 | },
278 | {
279 | "graph_name": "demystifying_figure_9",
280 | "title": "Figure 9 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
281 | "x_axis": {
282 | "title": "Quantum Bit Error Rate (QBER)",
283 | "variable": "requested_bit_error_rate"
284 | },
285 | "y_axis": {
286 | "title": "Channel uses",
287 | "variable": "ask_parity_messages"
288 | },
289 | "series": [
290 | {
291 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
292 | "legend": "Cascade orig.",
293 | "line_color": "black",
294 | "deviation_color": "none"
295 | },
296 | {
297 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
298 | "legend": "Cascade mod. (1)",
299 | "line_color": "blue",
300 | "deviation_color": "none"
301 | },
302 | {
303 | "data_file": "data__algorithm=yanetal;key_size=10000;error_rate=vary",
304 | "legend": "Cascade opt. (2)",
305 | "line_color": "red",
306 | "deviation_color": "none"
307 | },
308 | {
309 | "data_file": "data__algorithm=option3;key_size=10000;error_rate=vary",
310 | "legend": "Cascade opt. (3)",
311 | "line_color": "green",
312 | "deviation_color": "none"
313 | }
314 | ]
315 | },
316 | {
317 | "graph_name": "demystifying_figure_10",
318 | "title": "Figure 10 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
319 | "x_axis": {
320 | "title": "Quantum Bit Error Rate (QBER)",
321 | "variable": "requested_bit_error_rate",
322 | "range": [0.0, 0.11]
323 | },
324 | "y_axis": {
325 | "title": "Frame error rate",
326 | "type": "log",
327 | "variable": "remaining_frame_error_rate",
328 | "range": [-6.0, 0.0]
329 | },
330 | "series": [
331 | {
332 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
333 | "legend": "Cascade orig.",
334 | "mode": "lines+markers",
335 | "marker": {
336 | "symbol": "square"
337 | },
338 | "line_color": "black",
339 | "deviation_color": "none",
340 | "filter": {
341 | "variable": "remaining_frame_error_rate",
342 | "min_value": 1e-8
343 | }
344 | },
345 | {
346 | "data_file": "data__algorithm=biconf;key_size=10000;error_rate=vary",
347 | "legend": "Cascade mod. (1)",
348 | "mode": "lines+markers",
349 | "marker": {
350 | "symbol": "circle"
351 | },
352 | "line_color": "blue",
353 | "deviation_color": "none",
354 | "filter": {
355 | "variable": "remaining_frame_error_rate",
356 | "min_value": 1e-8
357 | }
358 | },
359 | {
360 | "data_file": "data__algorithm=yanetal;key_size=10000;error_rate=vary",
361 | "legend": "Cascade opt. (2)",
362 | "mode": "lines+markers",
363 | "marker": {
364 | "symbol": "triangle-up"
365 | },
366 | "line_color": "red",
367 | "deviation_color": "none",
368 | "filter": {
369 | "variable": "remaining_frame_error_rate",
370 | "min_value": 1e-8
371 | }
372 | },
373 | {
374 | "data_file": "data__algorithm=option3;key_size=10000;error_rate=vary",
375 | "legend": "Cascade opt. (3)",
376 | "mode": "lines+markers",
377 | "marker": {
378 | "symbol": "triangle-down"
379 | },
380 | "line_color": "green",
381 | "deviation_color": "none",
382 | "filter": {
383 | "variable": "remaining_frame_error_rate",
384 | "min_value": 1e-8
385 | }
386 | }
387 | ]
388 | },
389 | {
390 | "graph_name": "demystifying_figure_11",
391 | "title": "Figure 11 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
392 | "x_axis": {
393 | "title": "Quantum Bit Error Rate (QBER)",
394 | "variable": "requested_bit_error_rate"
395 | },
396 | "y_axis": {
397 | "title": "Reconciliation efficiency",
398 | "variable": "efficiency",
399 | "range": [1.0, 1.3]
400 | },
401 | "series": [
402 | {
403 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
404 | "legend": "Cascade orig.",
405 | "line_color": "black",
406 | "deviation_color": "none"
407 | },
408 | {
409 | "data_file": "data__algorithm=option3;key_size=10000;error_rate=vary",
410 | "legend": "Cascade opt. (3)",
411 | "line_color": "green",
412 | "deviation_color": "none"
413 | },
414 | {
415 | "data_file": "data__algorithm=option4;key_size=10000;error_rate=vary",
416 | "legend": "Cascade opt. (4)",
417 | "line_color": "brown",
418 | "deviation_color": "none"
419 | }
420 | ]
421 | },
422 | {
423 | "graph_name": "demystifying_figure_13",
424 | "title": "Figure 13 from \"Demystifying the Information Reconciliation Protocol Cascade\"",
425 | "x_axis": {
426 | "title": "Quantum Bit Error Rate (QBER)",
427 | "variable": "requested_bit_error_rate"
428 | },
429 | "y_axis": {
430 | "title": "Reconciliation efficiency",
431 | "variable": "efficiency",
432 | "range": [1.0, 1.3]
433 | },
434 | "series": [
435 | {
436 | "data_file": "data__algorithm=original;key_size=10000;error_rate=vary",
437 | "legend": "Cascade orig.",
438 | "line_color": "black",
439 | "deviation_color": "none"
440 | },
441 | {
442 | "data_file": "data__algorithm=option3;key_size=10000;error_rate=vary",
443 | "legend": "Cascade opt. (3)",
444 | "line_color": "green",
445 | "deviation_color": "none"
446 | },
447 | {
448 | "data_file": "data__algorithm=option4;key_size=10000;error_rate=vary",
449 | "legend": "Cascade opt. (4)",
450 | "line_color": "brown",
451 | "deviation_color": "none"
452 | },
453 | {
454 | "data_file": "data__algorithm=option7;key_size=10000;error_rate=vary",
455 | "legend": "Cascade opt. (7)",
456 | "line_color": "orange",
457 | "deviation_color": "none"
458 | },
459 | {
460 | "data_file": "data__algorithm=option8;key_size=10000;error_rate=vary",
461 | "legend": "Cascade opt. (8)",
462 | "line_color": "slateblue",
463 | "deviation_color": "none"
464 | }
465 | ]
466 | }
467 | ]
--------------------------------------------------------------------------------