├── commpy ├── tests │ ├── __init__.py │ ├── test_modulation.py │ └── test_links.py ├── channelcoding │ ├── tests │ │ ├── __init__.py │ │ ├── test_algcode.py │ │ ├── test_ldpc.py │ │ ├── test_gfields.py │ │ └── test_convcode.py │ ├── doc │ │ └── assets │ │ │ ├── punct.png │ │ │ ├── BlockCont.png │ │ │ ├── FECexamp2.png │ │ │ ├── nebitrate.png │ │ │ ├── FECmainidea1.png │ │ │ ├── coderateblock.png │ │ │ ├── conv_code_177_133.png │ │ │ ├── n7o4bsf7_htlv10gsatc-yojbrq[1].png │ │ │ └── v3v5w2gbwk34nzk_2qt25baoebq[1].png │ ├── __init__.py │ ├── interleavers.py │ ├── algcode.py │ ├── designs │ │ └── ldpc │ │ │ └── gallager │ │ │ ├── 96.3.963.txt │ │ │ └── 96.33.964.txt │ ├── README.md │ ├── gfields.py │ ├── ldpc.py │ └── turbo.py ├── __init__.py ├── examples │ ├── plotConsModem.py │ └── conv_encode_decode.py ├── impairments.py ├── sequences.py ├── utilities.py ├── filters.py ├── links.py └── modulation.py ├── setup.cfg ├── doc ├── channels.rst ├── filters.rst ├── impairments.rst ├── sequences.rst ├── sphinxext │ ├── __init__.py │ ├── MANIFEST.in │ ├── setup.py │ ├── README.txt │ ├── traitsdoc.py │ ├── comment_eater.py │ ├── LICENSE.txt │ ├── phantom_import.py │ ├── numpydoc.py │ ├── docscrape_sphinx.py │ ├── docscrape.py │ └── tests │ │ └── test_docscrape.py ├── utilities.rst ├── links.rst ├── modulation.rst ├── channelcoding.rst ├── commpydoc.rst ├── index.rst ├── Makefile └── conf.py ├── MANIFEST.in ├── .coveralls.yml ├── requirements.txt ├── .coveragerc ├── .gitignore ├── runner ├── THANKS.txt ├── .travis.yml ├── LICENSE.txt ├── setup.py └── README.md /commpy/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /commpy/channelcoding/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /setup.cfg: 
-------------------------------------------------------------------------------- 1 | [build_ext] 2 | inplace=1 3 | -------------------------------------------------------------------------------- /doc/channels.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: commpy.channels 2 | -------------------------------------------------------------------------------- /doc/filters.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: commpy.filters 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.rst 2 | include runner README 3 | -------------------------------------------------------------------------------- /doc/impairments.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: commpy.impairments 2 | -------------------------------------------------------------------------------- /doc/sequences.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: commpy.sequences 2 | -------------------------------------------------------------------------------- /doc/sphinxext/__init__.py: -------------------------------------------------------------------------------- 1 | from numpydoc import setup 2 | -------------------------------------------------------------------------------- /doc/utilities.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: commpy.utilities 2 | -------------------------------------------------------------------------------- /doc/links.rst: -------------------------------------------------------------------------------- 1 | .. 
automodule:: commpy.links 2 | :members: 3 | -------------------------------------------------------------------------------- /doc/modulation.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: commpy.modulation 2 | :members: 3 | -------------------------------------------------------------------------------- /doc/sphinxext/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include tests *.py 2 | include *.txt 3 | -------------------------------------------------------------------------------- /.coveralls.yml: -------------------------------------------------------------------------------- 1 | repo_token: Xuy5WffNF2eTtvkvhVhvRa5jRfVqqVDio 2 | service_name: travis-ci 3 | -------------------------------------------------------------------------------- /doc/channelcoding.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: commpy.channelcoding 2 | :members: 3 | :noindex: 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.9.2 2 | scipy>=0.15.0 3 | matplotlib>=1.4.3 4 | nose>=1.3.4 5 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = commpy 4 | include = */commpy/* 5 | omit = 6 | */setup.py 7 | -------------------------------------------------------------------------------- /commpy/channelcoding/doc/assets/punct.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/punct.png -------------------------------------------------------------------------------- 
/commpy/channelcoding/doc/assets/BlockCont.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/BlockCont.png -------------------------------------------------------------------------------- /commpy/channelcoding/doc/assets/FECexamp2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/FECexamp2.png -------------------------------------------------------------------------------- /commpy/channelcoding/doc/assets/nebitrate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/nebitrate.png -------------------------------------------------------------------------------- /commpy/channelcoding/doc/assets/FECmainidea1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/FECmainidea1.png -------------------------------------------------------------------------------- /commpy/channelcoding/doc/assets/coderateblock.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/coderateblock.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.swp 3 | *.png 4 | *.pdf 5 | 6 | /doc/_build 7 | /doc/generated 8 | /build 9 | /dist 10 | /scikit_commpy.egg-info 11 | -------------------------------------------------------------------------------- /commpy/channelcoding/doc/assets/conv_code_177_133.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/conv_code_177_133.png -------------------------------------------------------------------------------- /commpy/channelcoding/doc/assets/n7o4bsf7_htlv10gsatc-yojbrq[1].png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/n7o4bsf7_htlv10gsatc-yojbrq[1].png -------------------------------------------------------------------------------- /commpy/channelcoding/doc/assets/v3v5w2gbwk34nzk_2qt25baoebq[1].png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kirlf/CommPy/master/commpy/channelcoding/doc/assets/v3v5w2gbwk34nzk_2qt25baoebq[1].png -------------------------------------------------------------------------------- /runner: -------------------------------------------------------------------------------- 1 | ##This will work in development on a relative folder basis 2 | ##It will then work when installed in site-packages on a target system 3 | ##where the runner script is in /usr/bin (or wherever) 4 | ## 5 | ##So, you don't need anything special - no fancy path tricks. 
"""
CommPy
================================================


Contents
--------

Subpackages
-----------
::

    channelcoding --- Channel Coding Algorithms [*]

"""
#from channelcoding import *
from commpy.filters import *
from commpy.modulation import *
from commpy.impairments import *
from commpy.sequences import *
from commpy.channels import *

# Best-effort: expose a package-level ``test`` runner when numpy's Tester is
# available (it was removed from numpy.testing in recent NumPy releases).
# Catch Exception rather than using a bare ``except:``, which would also
# swallow KeyboardInterrupt and SystemExit.
try:
    from numpy.testing import Tester
    test = Tester().test
except Exception:
    pass
toctree:: 15 | :maxdepth: 2 16 | 17 | channelcoding 18 | channels 19 | filters 20 | sequences 21 | -------------------------------------------------------------------------------- /commpy/examples/plotConsModem.py: -------------------------------------------------------------------------------- 1 | # Authors: Youness Akourim 2 | # License: BSD 3-Clause 3 | 4 | from commpy.modulation import PSKModem, QAMModem 5 | 6 | # ============================================================================= 7 | # Example constellation plot of Modem 8 | # ============================================================================= 9 | 10 | # Constellation corresponding to PSKModem for 4 bits per symbols 11 | psk = PSKModem(16) 12 | psk.plot_constellation() 13 | 14 | # Constellation corresponding to QAMModem for 2 bits per symbols 15 | qam = QAMModem(4) 16 | qam.plot_constellation() 17 | -------------------------------------------------------------------------------- /THANKS.txt: -------------------------------------------------------------------------------- 1 | Please add names as needed so that we can keep up with all the contributors. 2 | 3 | Veeresh Taranalli for initial creation and contribution of CommPy. 4 | Bastien Trotobas for adding some features, bugfixes, maintenance. 5 | Vladimir Fadeev for bugfixes, addition of convolutional code puncturing. 6 | Youness Akourim for adding features and fixing some bugs. 7 | Rey Tucker for python3 compatibility fixes. 8 | Dat Nguyen for type check fix for AWGN channel model. 9 | Mateusz Michalski for bugfix in AWGN channel model. 10 | Ravi Sharan for bugfix in PSK modem constellation mapping. 11 | @hoo89 for bugfix in AWGN channel model. 12 | @mborgerding for docstring update. 
class TestAlgebraicCoding(object):
    """Tests for cyclic-code generator polynomial construction."""

    def test_cyclic_code_gen_poly(self):
        # Known-good generator polynomials (encoded as integers) for two
        # (n, k)-cyclic codes: (15, 4) and (31, 21).
        code_lengths = array([15, 31])
        code_dims = array([4, 21])
        desired_genpolys = array([[2479, 3171, 3929],
                                  [1653, 1667, 1503, 1207, 1787, 1561, 1903,
                                   1219, 1137, 2013, 1453, 1897, 1975, 1395, 1547]])
        for case_idx, (n, k) in enumerate(zip(code_lengths, code_dims)):
            assert_array_equal(cyclic_code_genpoly(n, k),
                               desired_genpolys[case_idx])
def add_frequency_offset(waveform, Fs, delta_f):
    """
    Add frequency offset impairment to input signal.

    Parameters
    ----------
    waveform : 1D ndarray of floats
        Input signal.

    Fs : float
        Sampling frequency (in Hz).

    delta_f : float
        Frequency offset (in Hz).

    Returns
    -------
    output_waveform : 1D ndarray of floats
        Output signal with frequency offset.
    """
    # A constant frequency offset is a complex rotation whose phase grows
    # linearly with the sample index: exp(j*2*pi*(delta_f/Fs)*n).
    sample_indices = arange(len(waveform))
    rotator = exp(1j * 2 * pi * (delta_f / Fs) * sample_indices)
    return waveform * rotator
14 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then 15 | wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh; 16 | else 17 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; 18 | fi 19 | - bash miniconda.sh -b -p $HOME/miniconda 20 | - export PATH="$HOME/miniconda/bin:$PATH" 21 | - hash -r 22 | - conda config --set always_yes yes --set changeps1 no 23 | - conda update -q conda 24 | # Useful for debugging any issues with conda 25 | - conda info -a 26 | 27 | # Replace dep1 dep2 ... with your dependencies 28 | - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION numpy scipy matplotlib nose 29 | - source activate test-environment 30 | #- conda install --yes -c dan_blanchard python-coveralls nose-cov 31 | - pip install python-coveralls 32 | - pip install coverage 33 | - python setup.py install 34 | script: 35 | # Your test script goes here 36 | - nosetests -a '!slow' --with-coverage --cover-package=commpy --logging-level=INFO 37 | # Calculate coverage 38 | after_success: 39 | - coveralls 40 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2012-2019, Veeresh Taranalli & contributors 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 11 | 12 | 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 15 | -------------------------------------------------------------------------------- /doc/sphinxext/README.txt: -------------------------------------------------------------------------------- 1 | ===================================== 2 | numpydoc -- Numpy's Sphinx extensions 3 | ===================================== 4 | 5 | Numpy's documentation uses several custom extensions to Sphinx. These 6 | are shipped in this ``numpydoc`` package, in case you want to make use 7 | of them in third-party projects. 8 | 9 | The following extensions are available: 10 | 11 | - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add 12 | the code description directives ``np:function``, ``np-c:function``, etc. 13 | that support the Numpy docstring syntax. 14 | 15 | - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes. 16 | 17 | - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::`` 18 | directive. 
Note that this implementation may still undergo severe 19 | changes or eventually be deprecated. 20 | 21 | 22 | numpydoc 23 | ======== 24 | 25 | Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings 26 | following the Numpy/Scipy format to a form palatable to Sphinx. 27 | 28 | Options 29 | ------- 30 | 31 | The following options can be set in conf.py: 32 | 33 | - numpydoc_use_plots: bool 34 | 35 | Whether to produce ``plot::`` directives for Examples sections that 36 | contain ``import matplotlib``. 37 | 38 | - numpydoc_show_class_members: bool 39 | 40 | Whether to show all members of a class in the Methods and Attributes 41 | sections automatically. 42 | 43 | - numpydoc_edit_link: bool (DEPRECATED -- edit your HTML template instead) 44 | 45 | Whether to insert an edit link after docstrings. 46 | -------------------------------------------------------------------------------- /commpy/examples/conv_encode_decode.py: -------------------------------------------------------------------------------- 1 | 2 | # Authors: Veeresh Taranalli 3 | # License: BSD 3-Clause 4 | 5 | import numpy as np 6 | import commpy.channelcoding.convcode as cc 7 | from commpy.utilities import * 8 | 9 | # ============================================================================= 10 | # Example showing the encoding and decoding of convolutional codes 11 | # ============================================================================= 12 | 13 | # G(D) corresponding to the convolutional encoder 14 | generator_matrix = np.array([[0o5, 0o7]]) 15 | #generator_matrix = np.array([[0o3, 0o0, 0o2], [0o7, 0o4, 0o6]]) 16 | 17 | # Number of delay elements in the convolutional encoder 18 | M = np.array([2]) 19 | 20 | # Create trellis data structure 21 | trellis = cc.Trellis(M, generator_matrix) 22 | 23 | # Traceback depth of the decoder 24 | tb_depth = 5*(M.sum() + 1) 25 | 26 | for i in range(10): 27 | # Generate random message bits to be encoded 28 | message_bits = np.random.randint(0, 2, 
@dec.slow
def test_bit_lvl_repr():
    """Check that the bit-level representation does not change link performance."""
    modem = QAMModem(4)

    # 2x2 uncorrelated Rayleigh fading channel.
    n_rx, n_tx = 2, 2
    channel = MIMOFlatChannel(n_tx, n_rx)
    channel.fading_param = (zeros((n_rx, n_tx), complex), identity(n_tx), identity(n_rx))

    snrs = arange(10, 16, 5)

    def receiver_with_blr(y, H, cons):
        # Build the bit-weight vector w: descending powers of two for the
        # real part, then the same weights scaled by 1j for the imaginary part.
        beta = int(log2(len(cons)))
        powers = [pow(2, i) for i in range(beta // 2 - 1, -1, -1)]
        w = concatenate((powers, [1j * p for p in powers]), axis=None)
        bits = array(mimo_ml(y, bit_lvl_repr(H, w), [-1, 1]))
        bits[bits == -1] = 0
        return bits

    def receiver_without_blr(y, H, cons):
        return modem.demodulate(mimo_ml(y, H, cons), 'hard')

    model_plain = LinkModel(modem.modulate, channel, receiver_without_blr,
                            modem.num_bits_symbol, modem.constellation, modem.Es)
    model_blr = LinkModel(modem.modulate, channel, receiver_with_blr,
                          modem.num_bits_symbol, modem.constellation, modem.Es)

    ber_plain = link_performance(model_plain, snrs, 300e4, 300)
    ber_blr = link_performance(model_blr, snrs, 300e4, 300)
    assert_allclose(ber_plain, ber_blr, rtol=0.5,
                    err_msg='bit_lvl_repr changes the performance')
autosummary:: 40 | :toctree: generated/ 41 | 42 | turbo_encode -- Turbo Encoder. 43 | map_decode -- Convolutional Code decoder using MAP algorithm. 44 | turbo_decode -- Turbo Decoder. 45 | 46 | LDPC Codes 47 | ========== 48 | 49 | .. autosummary:: 50 | :toctree: generated/ 51 | 52 | get_ldpc_code_params -- Extract parameters from LDPC code design file. 53 | ldpc_bp_decode -- LDPC Code Decoder using Belief propagation. 54 | 55 | Interleavers and De-interleavers 56 | ================================ 57 | 58 | .. autosummary:: 59 | :toctree: generated/ 60 | 61 | RandInterlv -- Random Interleaver. 62 | 63 | """ 64 | 65 | from commpy.channelcoding.convcode import Trellis, conv_encode, viterbi_decode 66 | from commpy.channelcoding.interleavers import * 67 | from commpy.channelcoding.turbo import turbo_encode, map_decode, turbo_decode 68 | from commpy.channelcoding.ldpc import get_ldpc_code_params, ldpc_bp_decode 69 | from commpy.channelcoding.gfields import * 70 | from commpy.channelcoding.algcode import * 71 | 72 | try: 73 | from numpy.testing import Tester 74 | test = Tester().test 75 | except: 76 | pass 77 | -------------------------------------------------------------------------------- /commpy/tests/test_links.py: -------------------------------------------------------------------------------- 1 | # Authors: Bastien Trotobas 2 | # License: BSD 3-Clause 3 | 4 | from __future__ import division # Python 2 compatibility 5 | 6 | from numpy import arange, sqrt, log10 7 | from numpy.random import seed 8 | from numpy.testing import run_module_suite, assert_allclose, dec 9 | from scipy.special import erfc 10 | 11 | from commpy.channels import MIMOFlatChannel, SISOFlatChannel 12 | from commpy.links import link_performance, LinkModel 13 | from commpy.modulation import QAMModem, kbest 14 | 15 | 16 | @dec.slow 17 | def test_link_performance(): 18 | # Apply link_performance to SISO QPSK and AWGN channel 19 | QPSK = QAMModem(4) 20 | 21 | def receiver(y, h, constellation): 22 | 
class _Interleaver:
    """Base class: stores a permutation ``p_array`` and applies/inverts it."""

    def interlv(self, in_array):
        """ Interleave input array using the specific interleaver.

        Parameters
        ----------
        in_array : 1D ndarray of ints
            Input data to be interleaved.

        Returns
        -------
        out_array : 1D ndarray of ints
            Interleaved output data.

        """
        # Fancy-index with the stored permutation.
        return in_array[self.p_array]

    def deinterlv(self, in_array):
        """ De-interleave input array using the specific interleaver.

        Parameters
        ----------
        in_array : 1D ndarray of ints
            Input data to be de-interleaved.

        Returns
        -------
        out_array : 1D ndarray of ints
            De-interleaved output data.

        """
        out_array = zeros(len(in_array), in_array.dtype)
        # Scatter each element back to its pre-permutation position.
        for dest_idx, value in zip(self.p_array, in_array):
            out_array[dest_idx] = value
        return out_array
75 | 76 | """ 77 | def __init__(self, length, seed): 78 | rand_gen = mtrand.RandomState(seed) 79 | self.p_array = rand_gen.permutation(arange(length)) 80 | -------------------------------------------------------------------------------- /commpy/channelcoding/tests/test_ldpc.py: -------------------------------------------------------------------------------- 1 | # Authors: Veeresh Taranalli 2 | # License: BSD 3-Clause 3 | 4 | import os 5 | 6 | from nose.plugins.attrib import attr 7 | from numpy import array, sqrt, zeros 8 | from numpy.random import randn 9 | from numpy.testing import assert_allclose 10 | 11 | from commpy.channelcoding.ldpc import get_ldpc_code_params, ldpc_bp_decode 12 | from commpy.utilities import hamming_dist 13 | 14 | 15 | @attr('slow') 16 | class TestLDPCCode(object): 17 | 18 | @classmethod 19 | def setup_class(cls): 20 | dir = os.path.dirname(__file__) 21 | ldpc_design_file_1 = os.path.join(dir, '../designs/ldpc/gallager/96.33.964.txt') 22 | #ldpc_design_file_1 = "../designs/ldpc/gallager/96.33.964.txt" 23 | cls.ldpc_code_params = get_ldpc_code_params(ldpc_design_file_1) 24 | 25 | @classmethod 26 | def teardown_class(cls): 27 | pass 28 | 29 | def test_ldpc_bp_decode(self): 30 | N = 96 31 | k = 48 32 | rate = 0.5 33 | Es = 1.0 34 | snr_list = array([2.0, 2.5]) 35 | niters = 10000000 36 | tx_codeword = zeros(N, int) 37 | ldpcbp_iters = 100 38 | 39 | fer_array_ref = array([200.0/1000, 200.0/2000]) 40 | fer_array_test = zeros(len(snr_list)) 41 | 42 | for idx, ebno in enumerate(snr_list): 43 | 44 | noise_std = 1/sqrt((10**(ebno/10.0))*rate*2/Es) 45 | fer_cnt_bp = 0 46 | 47 | for iter_cnt in range(niters): 48 | 49 | awgn_array = noise_std * randn(N) 50 | rx_word = 1-(2*tx_codeword) + awgn_array 51 | rx_llrs = 2.0*rx_word/(noise_std**2) 52 | 53 | [dec_word, out_llrs] = ldpc_bp_decode(rx_llrs, self.ldpc_code_params, 'SPA', 54 | ldpcbp_iters) 55 | 56 | num_bit_errors = hamming_dist(tx_codeword, dec_word) 57 | if num_bit_errors > 0: 58 | fer_cnt_bp += 1 
def cyclic_code_genpoly(n, k):
    """
    Generate all possible generator polynomials for a (n, k)-cyclic code.

    Parameters
    ----------
    n : int
        Code blocklength of the cyclic code. Must be odd.

    k : int
        Information blocklength of the cyclic code.

    Returns
    -------
    poly_list : 1D ndarray of ints
        A list of generator polynomials (represented as integers) for the
        (n, k)-cyclic code.

    Raises
    ------
    ValueError
        If n is even, or if no extension field GF(2^m) with m <= 17
        contains a primitive n-th root of unity.
    """
    if n % 2 == 0:
        raise ValueError("n cannot be an even number")

    # Find the smallest m such that n divides 2^m - 1, i.e. GF(2^m)
    # contains an element of order n.  The original code silently fell
    # through with m = 17 when no such m existed; raise instead.
    for m in arange(1, 18):
        if (2**m - 1) % n == 0:
            break
    else:
        raise ValueError("No suitable field GF(2^m), m <= 17, found for n = %d" % n)

    x_gf = GF(arange(1, 2**m), m)
    coset_fields = x_gf.cosets()

    # One representative (leader) per cyclotomic coset, and the degree of
    # the corresponding minimal polynomial (= coset size).  Built in one
    # pass each instead of repeated O(n^2) concatenate() calls.
    coset_leaders = array([field.elements[0] for field in coset_fields])
    minpol_degrees = array([len(field.elements) for field in coset_fields])

    minpol_list = GF(coset_leaders, m).minpolys()
    poly_list = []

    # Enumerate every non-empty subset of minimal polynomials; a subset
    # whose degrees sum to n-k yields a valid generator polynomial.
    for i in range(1, 2**len(minpol_list)):
        i_array = dec2bitarray(i, len(minpol_list))
        subset_array = minpol_degrees[i_array == 1]
        if int(subset_array.sum()) == (n - k):
            poly_set = minpol_list[i_array == 1]
            # Multiply the selected minimal polynomials over GF(2) via
            # binary convolution of their coefficient arrays.
            gpoly = 1
            for poly in poly_set:
                gpoly_array = dec2bitarray(gpoly, 2**m)
                poly_array = dec2bitarray(poly, 2**m)
                gpoly = bitarray2dec(convolve(gpoly_array, poly_array) % 2)
            poly_list.append(gpoly)

    return array(poly_list).astype(int)
11 | 12 | Available Features 13 | ------------------ 14 | Channel Coding 15 | ~~~~~~~~~~~~~~ 16 | - Encoder for Convolutional Codes (Polynomial, Recursive Systematic). Supports all rates and puncture matrices. 17 | - Viterbi Decoder for Convolutional Codes (Hard Decision Output). 18 | - MAP Decoder for Convolutional Codes (Based on the BCJR algorithm). 19 | - Encoder for a rate-1/3 systematic parallel concatenated Turbo Code. 20 | - Turbo Decoder for a rate-1/3 systematic parallel concatenated turbo code (Based on the MAP decoder/BCJR algorithm). 21 | - Binary Galois Field GF(2^m) with minimal polynomials and cyclotomic cosets. 22 | - Create all possible generator polynomials for a (n,k) cyclic code. 23 | - Random Interleavers and De-interleavers. 24 | 25 | Channel Models 26 | ~~~~~~~~~~~~~~ 27 | - SISO Channel with Rayleigh or Rician fading. 28 | - MIMO Channel with Rayleigh or Rician fading. 29 | - Binary Erasure Channel (BEC) 30 | - Binary Symmetric Channel (BSC) 31 | - Binary AWGN Channel (BAWGNC) 32 | 33 | Filters 34 | ~~~~~~~ 35 | - Rectangular 36 | - Raised Cosine (RC), Root Raised Cosine (RRC) 37 | - Gaussian 38 | 39 | Impairments 40 | ~~~~~~~~~~~ 41 | - Carrier Frequency Offset (CFO) 42 | 43 | Modulation/Demodulation 44 | ~~~~~~~~~~~~~~~~~~~~~~~ 45 | - Phase Shift Keying (PSK) 46 | - Quadrature Amplitude Modulation (QAM) 47 | - OFDM Tx/Rx signal processing 48 | - MIMO Maximum Likelihood (ML) Detection. 49 | - MIMO K-best Schnorr-Euchner Detection. 50 | 51 | Sequences 52 | ~~~~~~~~~ 53 | - PN Sequence 54 | - Zadoff-Chu (ZC) Sequence 55 | 56 | Utilities 57 | ~~~~~~~~~ 58 | - Decimal to bit-array, bit-array to decimal. 59 | - Hamming distance, Euclidean distance. 60 | - Upsample 61 | - Power of a discrete-time signal 62 | 63 | Links 64 | ~~~~~ 65 | - Estimate the BER performance of a link model with Monte Carlo simulation. 66 | - Link model object. 67 | 68 | Reference 69 | --------- 70 | .. 
def pnsequence(pn_order, pn_seed, pn_mask, seq_length):
    """
    Generate a PN (Pseudo-Noise) sequence using a Linear Feedback Shift Register (LFSR).

    Parameters
    ----------
    pn_order : int
        Number of delay elements used in the LFSR.

    pn_seed : string containing 0's and 1's
        Seed for the initialization of the LFSR delay elements.
        The length of this string must be equal to 'pn_order'.

    pn_mask : string containing 0's and 1's
        Mask representing which delay elements contribute to the feedback
        in the LFSR. The length of this string must be equal to 'pn_order'.

    seq_length : int
        Length of the PN sequence to be generated. Usually (2^pn_order - 1)

    Returns
    -------
    pnseq : 1D ndarray of ints
        PN sequence generated.

    Raises
    ------
    ValueError
        If the length of 'pn_seed' or 'pn_mask' differs from 'pn_order'.
    """
    # The docstring always promised this check; it was previously a
    # comment with no code behind it.
    if len(pn_seed) != pn_order:
        raise ValueError("pn_seed length must be equal to pn_order")
    if len(pn_mask) != pn_order:
        raise ValueError("pn_mask length must be equal to pn_order")

    pnseq = zeros(seq_length, int)

    # Initialize the shift register from the seed string.  NOTE: the
    # previous array(map(...)) call produced a useless 0-d object array on
    # Python 3, because map() returns a lazy iterator there.
    sr = array([int(bit) for bit in pn_seed])
    mask = array([int(bit) for bit in pn_mask])

    for i in range(seq_length):
        # Feedback bit = XOR (mod-2 sum) of the tapped delay elements.
        new_bit = int(sr[mask == 1].sum() % 2)
        # Output is taken from the last delay element, then the register
        # shifts right and the feedback bit enters at position 0.
        pnseq[i] = sr[pn_order - 1]
        sr = roll(sr, 1)
        sr[0] = new_bit

    return pnseq

def zcsequence(u, seq_length):
    """
    Generate a Zadoff-Chu (ZC) sequence.

    Parameters
    ----------
    u : int
        Root index of the ZC sequence.

    seq_length : int
        Length of the sequence to be generated. Usually a prime number.

    Returns
    -------
    zcseq : 1D ndarray of complex floats
        ZC sequence generated.
    """
    # Odd-length ZC definition: x_u[n] = exp(-j*pi*u*n*(n+1)/N).
    n = arange(seq_length)
    zcseq = exp((-1j * pi * u * n * (n + 1)) / seq_length)

    return zcseq
6, 5, 3, 0]), m) 25 | assert_array_equal((x+y).elements, z.elements) 26 | 27 | def test_multiplication(self): 28 | m = 3 29 | x = GF(array([7, 6, 5, 4, 3, 2, 1, 0]), m) 30 | y = GF(array([6, 4, 3, 1, 2, 0, 5, 7]), m) 31 | z = GF(array([4, 5, 4, 4, 6, 0, 5, 0]), m) 32 | assert_array_equal((x*y).elements, z.elements) 33 | 34 | def test_tuple_form(self): 35 | m = 3 36 | x = GF(arange(0, 2**m-1), m) 37 | y = x.power_to_tuple() 38 | z = GF(array([1, 2, 4, 3, 6, 7, 5]), m) 39 | assert_array_equal(y.elements, z.elements) 40 | 41 | def test_power_form(self): 42 | m = 3 43 | x = GF(arange(1, 2**m), m) 44 | y = x.tuple_to_power() 45 | z = GF(array([0, 1, 3, 2, 6, 4, 5]), m) 46 | assert_array_equal(y.elements, z.elements) 47 | m = 4 48 | x = GF(arange(1, 2**m), m) 49 | y = x.tuple_to_power() 50 | z = GF(array([0, 1, 4, 2, 8, 5, 10, 3, 14, 9, 7, 6, 13, 11, 12]), m) 51 | assert_array_equal(y.elements, z.elements) 52 | 53 | def test_order(self): 54 | m = 4 55 | x = GF(arange(1, 2**m), m) 56 | y = x.order() 57 | z = array([1, 15, 15, 15, 15, 3, 3, 5, 15, 5, 15, 5, 15, 15, 5]) 58 | assert_array_equal(y, z) 59 | 60 | def test_minpols(self): 61 | m = 4 62 | x = GF(arange(2**m), m) 63 | z = array([2, 3, 19, 19, 19, 19, 7, 7, 31, 25, 31, 25, 31, 25, 25, 31]) 64 | assert_array_equal(x.minpolys(), z) 65 | m = 6 66 | x = GF(array([2, 8, 32, 6, 24, 35, 10, 40, 59, 41, 14, 37]), m) 67 | z = array([67, 87, 103, 73, 13, 109, 91, 117, 7, 115, 11, 97]) 68 | assert_array_equal(x.minpolys(), z) 69 | -------------------------------------------------------------------------------- /commpy/channelcoding/designs/ldpc/gallager/96.3.963.txt: -------------------------------------------------------------------------------- 1 | 96 48 2 | 3 6 3 | 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 | 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 
6 6 6 6 6 6 6 6 6 6 6 6 6 6 5 | 10 30 40 6 | 5 32 45 7 | 16 18 39 8 | 12 22 38 9 | 15 19 47 10 | 2 17 34 11 | 9 24 42 12 | 1 29 33 13 | 4 27 36 14 | 3 26 35 15 | 11 31 43 16 | 7 21 44 17 | 8 20 48 18 | 14 23 46 19 | 6 28 37 20 | 13 25 41 21 | 14 32 43 22 | 5 23 37 23 | 2 31 36 24 | 1 28 34 25 | 7 25 47 26 | 10 21 33 27 | 15 30 35 28 | 16 26 48 29 | 3 22 46 30 | 12 20 41 31 | 8 18 38 32 | 4 19 45 33 | 6 24 40 34 | 9 27 39 35 | 13 17 42 36 | 11 29 44 37 | 8 24 34 38 | 6 25 36 39 | 9 19 43 40 | 1 20 46 41 | 14 27 42 42 | 7 22 39 43 | 13 18 35 44 | 4 26 40 45 | 16 29 38 46 | 15 21 48 47 | 11 23 45 48 | 3 17 47 49 | 5 28 44 50 | 12 32 33 51 | 2 30 41 52 | 10 31 37 53 | 10 18 36 54 | 4 23 44 55 | 9 29 40 56 | 2 27 38 57 | 8 30 42 58 | 12 28 43 59 | 11 20 37 60 | 1 19 35 61 | 15 31 39 62 | 16 32 41 63 | 5 26 33 64 | 3 25 45 65 | 13 21 34 66 | 14 24 48 67 | 7 17 46 68 | 6 22 47 69 | 7 27 40 70 | 11 18 33 71 | 2 32 35 72 | 10 28 47 73 | 5 24 41 74 | 12 25 37 75 | 3 19 39 76 | 14 31 44 77 | 16 30 34 78 | 13 20 38 79 | 9 22 36 80 | 6 17 45 81 | 4 21 42 82 | 15 29 46 83 | 8 26 43 84 | 1 23 48 85 | 1 25 42 86 | 15 22 40 87 | 8 21 41 88 | 9 18 47 89 | 6 27 43 90 | 11 30 46 91 | 7 31 35 92 | 5 20 36 93 | 14 17 38 94 | 16 28 45 95 | 4 32 37 96 | 13 23 33 97 | 12 26 44 98 | 3 29 48 99 | 2 24 39 100 | 10 19 34 101 | 8 20 36 56 80 81 102 | 6 19 47 52 67 95 103 | 10 25 44 60 71 94 104 | 9 28 40 50 77 91 105 | 2 18 45 59 69 88 106 | 15 29 34 64 76 85 107 | 12 21 38 63 65 87 108 | 13 27 33 53 79 83 109 | 7 30 35 51 75 84 110 | 1 22 48 49 68 96 111 | 11 32 43 55 66 86 112 | 4 26 46 54 70 93 113 | 16 31 39 61 74 92 114 | 14 17 37 62 72 89 115 | 5 23 42 57 78 82 116 | 3 24 41 58 73 90 117 | 6 31 44 63 76 89 118 | 3 27 39 49 66 84 119 | 5 28 35 56 71 96 120 | 13 26 36 55 74 88 121 | 12 22 42 61 77 83 122 | 4 25 38 64 75 82 123 | 14 18 43 50 80 92 124 | 7 29 33 62 69 95 125 | 16 21 34 60 70 81 126 | 10 24 40 59 79 93 127 | 9 30 37 52 65 85 128 | 15 20 45 54 68 90 129 | 8 32 41 51 78 94 130 | 
1 23 47 53 73 86 131 | 11 19 48 57 72 87 132 | 2 17 46 58 67 91 133 | 8 22 46 59 66 92 134 | 6 20 33 61 73 96 135 | 10 23 39 56 67 87 136 | 9 19 34 49 75 88 137 | 15 18 48 55 70 91 138 | 4 27 41 52 74 89 139 | 3 30 38 57 71 95 140 | 1 29 40 51 65 82 141 | 16 26 47 58 69 83 142 | 7 31 37 53 77 81 143 | 11 17 35 54 79 85 144 | 12 32 45 50 72 93 145 | 2 28 43 60 76 90 146 | 14 25 36 63 78 86 147 | 5 21 44 64 68 84 148 | 13 24 42 62 80 94 149 | -------------------------------------------------------------------------------- /commpy/channelcoding/designs/ldpc/gallager/96.33.964.txt: -------------------------------------------------------------------------------- 1 | 96 48 2 | 3 6 3 | 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 | 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 5 | 47 4 21 6 | 33 38 31 7 | 11 1 33 8 | 3 48 37 9 | 42 9 36 10 | 17 22 7 11 | 48 15 13 12 | 40 28 47 13 | 22 42 5 14 | 28 33 30 15 | 27 18 19 16 | 2 34 10 17 | 38 41 27 18 | 18 7 32 19 | 16 32 45 20 | 26 24 1 21 | 25 16 22 22 | 35 25 34 23 | 37 2 11 24 | 21 3 39 25 | 34 21 28 26 | 12 13 6 27 | 1 39 38 28 | 9 8 12 29 | 44 12 48 30 | 29 14 9 31 | 31 29 26 32 | 5 46 14 33 | 36 6 24 34 | 46 23 3 35 | 45 30 4 36 | 24 11 8 37 | 23 10 42 38 | 7 35 43 39 | 32 19 41 40 | 19 20 25 41 | 15 47 46 42 | 39 31 2 43 | 13 43 20 44 | 43 40 15 45 | 8 5 35 46 | 4 26 44 47 | 6 37 17 48 | 10 45 18 49 | 20 27 29 50 | 30 17 16 51 | 41 36 23 52 | 14 44 40 53 | 7 31 42 54 | 25 23 21 55 | 22 34 41 56 | 42 3 19 57 | 40 35 27 58 | 21 19 17 59 | 4 8 28 60 | 35 45 31 61 | 2 28 32 62 | 37 30 9 63 | 38 40 30 64 | 34 36 13 65 | 33 46 10 66 | 32 12 40 67 | 18 41 11 68 | 17 1 2 69 | 45 39 29 70 | 9 48 4 71 | 47 11 34 72 | 19 29 24 73 | 44 17 5 74 | 15 2 3 75 | 16 21 33 76 | 11 20 44 77 | 20 9 47 78 | 23 47 38 79 | 24 16 12 80 | 41 24 37 
81 | 39 5 43 82 | 6 43 23 83 | 31 10 16 84 | 48 33 35 85 | 28 18 48 86 | 8 42 18 87 | 36 32 8 88 | 14 6 25 89 | 29 15 36 90 | 46 38 26 91 | 5 4 6 92 | 27 44 22 93 | 26 22 45 94 | 43 27 1 95 | 10 25 39 96 | 12 14 7 97 | 13 7 46 98 | 30 13 14 99 | 3 26 20 100 | 1 37 15 101 | 23 96 3 64 16 90 102 | 12 57 19 70 38 64 103 | 4 95 20 52 30 70 104 | 42 55 1 87 31 66 105 | 28 87 41 77 9 69 106 | 43 78 29 84 22 87 107 | 34 49 14 93 6 92 108 | 41 82 24 55 32 83 109 | 24 66 5 73 26 58 110 | 44 91 33 79 12 61 111 | 3 72 32 67 19 63 112 | 22 92 25 62 24 75 113 | 39 93 22 94 7 60 114 | 48 84 26 92 28 94 115 | 37 70 7 85 40 96 116 | 15 71 17 75 46 79 117 | 6 64 46 69 43 54 118 | 14 63 11 81 44 82 119 | 36 68 35 54 11 52 120 | 45 73 36 72 39 95 121 | 20 54 21 71 1 50 122 | 9 51 6 89 17 88 123 | 33 74 30 50 47 78 124 | 32 75 16 76 29 68 125 | 17 50 18 91 36 84 126 | 16 89 42 95 27 86 127 | 11 88 45 90 13 53 128 | 10 81 8 57 21 55 129 | 26 85 27 68 45 65 130 | 46 94 31 58 10 59 131 | 27 79 38 49 2 56 132 | 35 62 15 83 14 57 133 | 2 61 10 80 3 71 134 | 21 60 12 51 18 67 135 | 18 56 34 53 41 80 136 | 29 83 47 60 5 85 137 | 19 58 43 96 4 76 138 | 13 59 2 86 23 74 139 | 38 77 23 65 20 91 140 | 8 53 40 59 48 62 141 | 47 76 13 63 35 51 142 | 5 52 9 82 33 49 143 | 40 90 39 78 34 77 144 | 25 69 48 88 42 72 145 | 31 65 44 56 15 89 146 | 30 86 28 61 37 93 147 | 1 67 37 74 8 73 148 | 7 80 4 66 25 81 149 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | 2 | # Authors: Veeresh Taranalli 3 | # License: BSD 3-Clause 4 | 5 | import os, sys, shutil, numpy 6 | from setuptools import find_packages 7 | from distutils.core import setup 8 | from distutils.extension import Extension 9 | 10 | #use_cython = False 11 | 12 | #try: 13 | # from Cython.Distutils import build_ext 14 | #use_cython = True 15 | #except ImportError: 16 | #from distutils.command import build_ext 17 | # 
use_cython = False 18 | 19 | #cmdclass = { } 20 | #ext_modules = [ ] 21 | 22 | #if use_cython: 23 | # ext_modules += [ 24 | # Extension("commpy.channelcoding.acstb", [ "commpy/channelcoding/acstb.pyx" ], include_dirs=[numpy.get_include()]), 25 | # Extension("commpy.channelcoding.map_c", [ "commpy/channelcoding/map_c.pyx" ], include_dirs=[numpy.get_include()]) 26 | # ] 27 | # cmdclass.update({ 'build_ext': build_ext }) 28 | # print "Using Cython" 29 | #else: 30 | # ext_modules += [ 31 | # Extension("commpy.channelcoding.acstb", [ "commpy/channelcoding/acstb.c" ], include_dirs=[numpy.get_include()]), 32 | # Extension("commpy.channelcoding.map_c", [ "commpy/channelcoding/map_c.c" ], include_dirs=[numpy.get_include()]) 33 | # ] 34 | 35 | # Taken from scikit-learn setup.py 36 | DISTNAME = 'scikit-commpy' 37 | DESCRIPTION = 'Digital Communication Algorithms with Python' 38 | LONG_DESCRIPTION = open('README.md').read() 39 | MAINTAINER = 'Veeresh Taranalli' 40 | MAINTAINER_EMAIL = 'veeresht@gmail.com' 41 | URL = 'http://veeresht.github.com/CommPy' 42 | LICENSE = 'BSD 3-Clause' 43 | # DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/' 44 | VERSION = '0.3.0' 45 | 46 | #This is a list of files to install, and where 47 | #(relative to the 'root' dir, where setup.py is) 48 | #You could be more specific. 49 | files = ["channelcoding/*, channelcoding/tests/*"] 50 | 51 | setup( 52 | name = DISTNAME, 53 | maintainer=MAINTAINER, 54 | maintainer_email=MAINTAINER_EMAIL, 55 | description=DESCRIPTION, 56 | license=LICENSE, 57 | url=URL, 58 | version=VERSION, 59 | #Name the folder where your packages live: 60 | #(If you have other packages (dirs) or modules (py files) then 61 | #put them into the package directory - they will be found 62 | #recursively.) 
63 | packages = ['commpy', 'commpy.channelcoding', 'commpy.channelcoding.tests'], 64 | #package_dir={ 65 | # 'commpy' : 'commpy', 66 | #}, 67 | install_requires=[ 68 | 'numpy', 69 | 'scipy', 70 | 'matplotlib', 71 | ], 72 | #'package' package must contain files (see list above) 73 | #This dict maps the package name =to=> directories 74 | #It says, package *needs* these files. 75 | package_data = {'commpy' : files }, 76 | #'runner' is in the root. 77 | scripts = ["runner"], 78 | test_suite='nose.collector', 79 | tests_require=['nose'], 80 | 81 | long_description = LONG_DESCRIPTION, 82 | classifiers = [ 83 | 'Development Status :: 1 - Planning', 84 | 'Intended Audience :: Science/Research', 85 | 'Intended Audience :: Telecommunications Industry', 86 | 'Operating System :: Unix', 87 | 'Programming Language :: Python', 88 | 'Topic :: Scientific/Engineering', 89 | 'Topic :: Software Development', 90 | ] 91 | ) 92 | -------------------------------------------------------------------------------- /commpy/channelcoding/tests/test_convcode.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Authors: Veeresh Taranalli 4 | # License: BSD 3-Clause 5 | 6 | from numpy import array 7 | from numpy.random import randint 8 | from numpy.testing import assert_array_equal 9 | from commpy.channelcoding.convcode import Trellis, conv_encode, viterbi_decode 10 | 11 | class TestConvCode(object): 12 | 13 | @classmethod 14 | def setup_class(cls): 15 | # Convolutional Code 1: G(D) = [1+D^2, 1+D+D^2] 16 | memory = array([2]) 17 | g_matrix = array([[0o5, 0o7]]) 18 | cls.code_type_1 = 'default' 19 | cls.trellis_1 = Trellis(memory, g_matrix, 0, cls.code_type_1) 20 | cls.desired_next_state_table_1 = array([[0, 2], 21 | [0, 2], 22 | [1, 3], 23 | [1, 3]]) 24 | cls.desired_output_table_1 = array([[0, 3], 25 | [3, 0], 26 | [1, 2], 27 | [2, 1]]) 28 | 29 | 30 | # Convolutional Code 2: G(D) = [1 1+D+D^2/1+D] 31 | memory = array([2]) 32 | g_matrix = 
array([[0o1, 0o7]]) 33 | feedback = 0o5 34 | cls.code_type_2 = 'rsc' 35 | cls.trellis_2 = Trellis(memory, g_matrix, feedback, cls.code_type_2) 36 | cls.desired_next_state_table_2 = array([[0, 2], 37 | [2, 0], 38 | [1, 3], 39 | [3, 1]]) 40 | cls.desired_output_table_2 = array([[0, 3], 41 | [0, 3], 42 | [1, 2], 43 | [1, 2]]) 44 | 45 | 46 | @classmethod 47 | def teardown_class(cls): 48 | pass 49 | 50 | def test_next_state_table(self): 51 | assert_array_equal(self.trellis_1.next_state_table, self.desired_next_state_table_1) 52 | assert_array_equal(self.trellis_2.next_state_table, self.desired_next_state_table_2) 53 | 54 | def test_output_table(self): 55 | assert_array_equal(self.trellis_1.output_table, self.desired_output_table_1) 56 | assert_array_equal(self.trellis_2.output_table, self.desired_output_table_2) 57 | 58 | def test_conv_encode(self): 59 | pass 60 | 61 | def test_viterbi_decode(self): 62 | pass 63 | 64 | def test_conv_encode_viterbi_decode(self): 65 | niters = 10 66 | blocklength = 1000 67 | 68 | for i in range(niters): 69 | msg = randint(0, 2, blocklength) 70 | 71 | coded_bits = conv_encode(msg, self.trellis_1) 72 | decoded_bits = viterbi_decode(coded_bits.astype(float), self.trellis_1, 15) 73 | assert_array_equal(decoded_bits[:-2], msg) 74 | 75 | coded_bits = conv_encode(msg, self.trellis_1, termination = 'cont') 76 | decoded_bits = viterbi_decode(coded_bits.astype(float), self.trellis_1, 15) 77 | assert_array_equal(decoded_bits, msg) 78 | 79 | coded_bits = conv_encode(msg, self.trellis_1) 80 | coded_syms = 2.0*coded_bits - 1 81 | decoded_bits = viterbi_decode(coded_syms, self.trellis_1, 15, 'unquantized') 82 | assert_array_equal(decoded_bits[:-2], msg) 83 | 84 | coded_bits = conv_encode(msg, self.trellis_2) 85 | decoded_bits = viterbi_decode(coded_bits.astype(float), self.trellis_2, 15) 86 | assert_array_equal(decoded_bits[:-2], msg) 87 | 88 | coded_bits = conv_encode(msg, self.trellis_2) 89 | coded_syms = 2.0*coded_bits - 1 90 | decoded_bits = 
viterbi_decode(coded_syms, self.trellis_2, 15, 'unquantized') 91 | assert_array_equal(decoded_bits[:-2], msg) 92 | -------------------------------------------------------------------------------- /commpy/utilities.py: -------------------------------------------------------------------------------- 1 | # Authors: Veeresh Taranalli & Bastien Trotobas 2 | # License: BSD 3-Clause 3 | 4 | """ 5 | ============================================ 6 | Utilities (:mod:`commpy.utilities`) 7 | ============================================ 8 | 9 | .. autosummary:: 10 | :toctree: generated/ 11 | 12 | dec2bitarray -- Integer to binary (bit array). 13 | bitarray2dec -- Binary (bit array) to integer. 14 | hamming_dist -- Hamming distance. 15 | euclid_dist -- Squared Euclidean distance. 16 | upsample -- Upsample by an integral factor (zero insertion). 17 | signal_power -- Compute the power of a discrete time signal. 18 | """ 19 | from __future__ import division # Python 2 compatibility 20 | 21 | import numpy as np 22 | 23 | __all__ = ['dec2bitarray', 'bitarray2dec', 'hamming_dist', 'euclid_dist', 'upsample', 24 | 'signal_power'] 25 | 26 | def dec2bitarray(in_number, bit_width): 27 | """ 28 | Converts a positive integer to NumPy array of the specified size containing 29 | bits (0 and 1). 30 | 31 | Parameters 32 | ---------- 33 | in_number : int 34 | Positive integer to be converted to a bit array. 35 | 36 | bit_width : int 37 | Size of the output bit array. 38 | 39 | Returns 40 | ------- 41 | bitarray : 1D ndarray of ints 42 | Array containing the binary representation of the input decimal. 43 | 44 | """ 45 | 46 | binary_string = bin(in_number) 47 | length = len(binary_string) 48 | bitarray = np.zeros(bit_width, 'int') 49 | for i in range(length - 2): 50 | bitarray[bit_width - i - 1] = int(binary_string[length - i - 1]) 51 | 52 | return bitarray 53 | 54 | 55 | def bitarray2dec(in_bitarray): 56 | """ 57 | Converts an input NumPy array of bits (0 and 1) to a decimal integer. 
58 | 59 | Parameters 60 | ---------- 61 | in_bitarray : 1D ndarray of ints 62 | Input NumPy array of bits. 63 | 64 | Returns 65 | ------- 66 | number : int 67 | Integer representation of input bit array. 68 | """ 69 | 70 | number = 0 71 | 72 | for i in range(len(in_bitarray)): 73 | number = number + in_bitarray[i] * pow(2, len(in_bitarray) - 1 - i) 74 | 75 | return number 76 | 77 | 78 | def hamming_dist(in_bitarray_1, in_bitarray_2): 79 | """ 80 | Computes the Hamming distance between two NumPy arrays of bits (0 and 1). 81 | 82 | Parameters 83 | ---------- 84 | in_bit_array_1 : 1D ndarray of ints 85 | NumPy array of bits. 86 | 87 | in_bit_array_2 : 1D ndarray of ints 88 | NumPy array of bits. 89 | 90 | Returns 91 | ------- 92 | distance : int 93 | Hamming distance between input bit arrays. 94 | """ 95 | 96 | distance = np.bitwise_xor(in_bitarray_1, in_bitarray_2).sum() 97 | 98 | return distance 99 | 100 | 101 | def euclid_dist(in_array1, in_array2): 102 | """ 103 | Computes the squared euclidean distance between two NumPy arrays 104 | 105 | Parameters 106 | ---------- 107 | in_array1 : 1D ndarray of floats 108 | NumPy array of real values. 109 | 110 | in_array2 : 1D ndarray of floats 111 | NumPy array of real values. 112 | 113 | Returns 114 | ------- 115 | distance : float 116 | Squared Euclidean distance between two input arrays. 117 | """ 118 | distance = ((in_array1 - in_array2) * (in_array1 - in_array2)).sum() 119 | 120 | return distance 121 | 122 | 123 | def upsample(x, n): 124 | """ 125 | Upsample the input array by a factor of n 126 | 127 | Adds n-1 zeros between consecutive samples of x 128 | 129 | Parameters 130 | ---------- 131 | x : 1D ndarray 132 | Input array. 133 | 134 | n : int 135 | Upsampling factor 136 | 137 | Returns 138 | ------- 139 | y : 1D ndarray 140 | Output upsampled array. 
141 | """ 142 | y = np.empty(len(x) * n, dtype=complex) 143 | y[0::n] = x 144 | zero_array = np.zeros(len(x), dtype=complex) 145 | for i in range(1, n): 146 | y[i::n] = zero_array 147 | 148 | return y 149 | 150 | 151 | def signal_power(signal): 152 | """ 153 | Compute the power of a discrete time signal. 154 | 155 | Parameters 156 | ---------- 157 | signal : 1D ndarray 158 | Input signal. 159 | 160 | Returns 161 | ------- 162 | P : float 163 | Power of the input signal. 164 | """ 165 | 166 | @np.vectorize 167 | def square_abs(s): 168 | return abs(s) ** 2 169 | 170 | P = np.mean(square_abs(signal)) 171 | return P 172 | -------------------------------------------------------------------------------- /doc/sphinxext/traitsdoc.py: -------------------------------------------------------------------------------- 1 | """ 2 | ========= 3 | traitsdoc 4 | ========= 5 | 6 | Sphinx extension that handles docstrings in the Numpy standard format, [1] 7 | and support Traits [2]. 8 | 9 | This extension can be used as a replacement for ``numpydoc`` when support 10 | for Traits is required. 11 | 12 | .. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard 13 | .. [2] http://code.enthought.com/projects/traits/ 14 | 15 | """ 16 | 17 | import inspect 18 | import os 19 | import pydoc 20 | 21 | import docscrape 22 | import docscrape_sphinx 23 | from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString 24 | 25 | import numpydoc 26 | 27 | import comment_eater 28 | 29 | class SphinxTraitsDoc(SphinxClassDoc): 30 | def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): 31 | if not inspect.isclass(cls): 32 | raise ValueError("Initialise using a class. Got %r" % cls) 33 | self._cls = cls 34 | 35 | if modulename and not modulename.endswith('.'): 36 | modulename += '.' 
37 | self._mod = modulename 38 | self._name = cls.__name__ 39 | self._func_doc = func_doc 40 | 41 | docstring = pydoc.getdoc(cls) 42 | docstring = docstring.split('\n') 43 | 44 | # De-indent paragraph 45 | try: 46 | indent = min(len(s) - len(s.lstrip()) for s in docstring 47 | if s.strip()) 48 | except ValueError: 49 | indent = 0 50 | 51 | for n,line in enumerate(docstring): 52 | docstring[n] = docstring[n][indent:] 53 | 54 | self._doc = docscrape.Reader(docstring) 55 | self._parsed_data = { 56 | 'Signature': '', 57 | 'Summary': '', 58 | 'Description': [], 59 | 'Extended Summary': [], 60 | 'Parameters': [], 61 | 'Returns': [], 62 | 'Raises': [], 63 | 'Warns': [], 64 | 'Other Parameters': [], 65 | 'Traits': [], 66 | 'Methods': [], 67 | 'See Also': [], 68 | 'Notes': [], 69 | 'References': '', 70 | 'Example': '', 71 | 'Examples': '', 72 | 'index': {} 73 | } 74 | 75 | self._parse() 76 | 77 | def _str_summary(self): 78 | return self['Summary'] + [''] 79 | 80 | def _str_extended_summary(self): 81 | return self['Description'] + self['Extended Summary'] + [''] 82 | 83 | def __str__(self, indent=0, func_role="func"): 84 | out = [] 85 | out += self._str_signature() 86 | out += self._str_index() + [''] 87 | out += self._str_summary() 88 | out += self._str_extended_summary() 89 | for param_list in ('Parameters', 'Traits', 'Methods', 90 | 'Returns','Raises'): 91 | out += self._str_param_list(param_list) 92 | out += self._str_see_also("obj") 93 | out += self._str_section('Notes') 94 | out += self._str_references() 95 | out += self._str_section('Example') 96 | out += self._str_section('Examples') 97 | out = self._str_indent(out,indent) 98 | return '\n'.join(out) 99 | 100 | def looks_like_issubclass(obj, classname): 101 | """ Return True if the object has a class or superclass with the given class 102 | name. 103 | 104 | Ignores old-style classes. 
105 | """ 106 | t = obj 107 | if t.__name__ == classname: 108 | return True 109 | for klass in t.__mro__: 110 | if klass.__name__ == classname: 111 | return True 112 | return False 113 | 114 | def get_doc_object(obj, what=None, config=None): 115 | if what is None: 116 | if inspect.isclass(obj): 117 | what = 'class' 118 | elif inspect.ismodule(obj): 119 | what = 'module' 120 | elif callable(obj): 121 | what = 'function' 122 | else: 123 | what = 'object' 124 | if what == 'class': 125 | doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) 126 | if looks_like_issubclass(obj, 'HasTraits'): 127 | for name, trait, comment in comment_eater.get_class_traits(obj): 128 | # Exclude private traits. 129 | if not name.startswith('_'): 130 | doc['Traits'].append((name, trait, comment.splitlines())) 131 | return doc 132 | elif what in ('function', 'method'): 133 | return SphinxFunctionDoc(obj, '', config=config) 134 | else: 135 | return SphinxDocString(pydoc.getdoc(obj), config=config) 136 | 137 | def setup(app): 138 | # init numpydoc 139 | numpydoc.setup(app, get_doc_object) 140 | 141 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | [![Build Status](https://secure.travis-ci.org/veeresht/CommPy.svg?branch=master)](https://secure.travis-ci.org/veeresht/CommPy) 4 | [![Coverage](https://coveralls.io/repos/veeresht/CommPy/badge.svg)](https://coveralls.io/r/veeresht/CommPy) 5 | [![PyPi](https://badge.fury.io/py/scikit-commpy.svg)](https://badge.fury.io/py/scikit-commpy) 6 | [![Docs](https://readthedocs.org/projects/commpy/badge/?version=latest)](http://commpy.readthedocs.io/en/latest/?badge=latest) 7 | 8 | CommPy 9 | ====== 10 | 11 | CommPy is an open source toolkit implementing digital communications algorithms 12 | in Python using NumPy and SciPy. 
13 | 14 | Objectives 15 | ---------- 16 | - To provide readable and useable implementations of algorithms used in the research, design and implementation of digital communication systems. 17 | 18 | Available Features 19 | ------------------ 20 | [Channel Coding](https://github.com/veeresht/CommPy/tree/master/commpy/channelcoding) 21 | -------------- 22 | - Encoder for Convolutional Codes (Polynomial, Recursive Systematic). Supports all rates and puncture matrices. 23 | - Viterbi Decoder for Convolutional Codes (Hard Decision Output). 24 | - MAP Decoder for Convolutional Codes (Based on the BCJR algorithm). 25 | - Encoder for a rate-1/3 systematic parallel concatenated Turbo Code. 26 | - Turbo Decoder for a rate-1/3 systematic parallel concatenated turbo code (Based on the MAP decoder/BCJR algorithm). 27 | - Binary Galois Field GF(2^m) with minimal polynomials and cyclotomic cosets. 28 | - Create all possible generator polynomials for a (n,k) cyclic code. 29 | - Random Interleavers and De-interleavers. 30 | - Belief Propagation (BP) Decoder for LDPC Codes. 31 | 32 | [Channel Models](https://github.com/veeresht/CommPy/blob/master/commpy/channels.py) 33 | -------------- 34 | - SISO Channel with Rayleigh or Rician fading. 35 | - MIMO Channel with Rayleigh or Rician fading. 
36 | - Binary Erasure Channel (BEC) 37 | - Binary Symmetric Channel (BSC) 38 | - Binary AWGN Channel (BAWGNC) 39 | 40 | [Filters](https://github.com/veeresht/CommPy/blob/master/commpy/filters.py) 41 | ------- 42 | - Rectangular 43 | - Raised Cosine (RC), Root Raised Cosine (RRC) 44 | - Gaussian 45 | 46 | [Impairments](https://github.com/veeresht/CommPy/blob/master/commpy/impairments.py) 47 | ----------- 48 | - Carrier Frequency Offset (CFO) 49 | 50 | [Modulation/Demodulation](https://github.com/veeresht/CommPy/blob/master/commpy/modulation.py) 51 | ----------------------- 52 | - Phase Shift Keying (PSK) 53 | - Quadrature Amplitude Modulation (QAM) 54 | - OFDM Tx/Rx signal processing 55 | - MIMO Maximum Likelihood (ML) Detection. 56 | - MIMO K-best Schnorr-Euchner Detection. 57 | - Convert channel matrix to Bit-level representation. 58 | 59 | [Sequences](https://github.com/veeresht/CommPy/blob/master/commpy/sequences.py) 60 | --------- 61 | - PN Sequence 62 | - Zadoff-Chu (ZC) Sequence 63 | 64 | [Utilities](https://github.com/veeresht/CommPy/blob/master/commpy/utilities.py) 65 | --------- 66 | - Decimal to bit-array, bit-array to decimal. 67 | - Hamming distance, Euclidean distance. 68 | - Upsample 69 | - Power of a discrete-time signal 70 | 71 | [Links](https://github.com/veeresht/CommPy/blob/master/commpy/links.py) 72 | ----- 73 | - Estimate the BER performance of a link model with Monte Carlo simulation. 74 | - Link model object. 75 | 76 | FAQs 77 | ---- 78 | Why are you developing this? 79 | ---------------------------- 80 | During my coursework in communication theory and systems at UCSD, I realized that the best way to actually learn and understand the theory is to try and implement ''the Math'' in practice :). Having used Scipy before, I thought there should be a similar package for Digital Communications in Python. This is a start! 81 | 82 | What programming languages do you use? 
83 | -------------------------------------- 84 | CommPy uses Python as its base programming language and python packages like NumPy, SciPy and Matplotlib. 85 | 86 | How can I contribute? 87 | --------------------- 88 | Implement any feature you want and send me a pull request :). If you want to suggest new features or discuss anything related to CommPy, please get in touch with me (veeresht@gmail.com). 89 | 90 | How do I use CommPy? 91 | -------------------- 92 | Requirements/Dependencies 93 | ------------------------- 94 | - python 2.7 or above 95 | - numpy 1.10 or above 96 | - scipy 0.15 or above 97 | - matplotlib 1.4 or above 98 | - nose 1.3 or above 99 | 100 | Installation 101 | ------------ 102 | 103 | - To use the released version on PyPi, use pip or conda to install as follows:: 104 | ``` 105 | $ pip install scikit-commpy 106 | $ conda install -c https://conda.binstar.org/veeresht scikit-commpy 107 | ``` 108 | - To work with the development branch, clone from github and install as follows:: 109 | ``` 110 | $ git clone https://github.com/veeresht/CommPy.git 111 | $ cd CommPy 112 | $ python setup.py install 113 | ``` 114 | 115 | Citing CommPy 116 | ------------- 117 | If you use CommPy for a publication, presentation or a demo, I request you to please cite CommPy as follows: 118 | 119 | Veeresh Taranalli, "CommPy: Digital Communication with Python, version 0.3.0. Available at https://github.com/veeresht/CommPy", 2015. 120 | 121 | I would also greatly appreciate your feedback if you have found CommPy useful. 
def rcosfilter(N, alpha, Ts, Fs):
    """
    Generates a raised cosine (RC) filter (FIR) impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.

    alpha : float
        Roll off factor (Valid values are [0, 1]).

    Ts : float
        Symbol period in seconds.

    Fs : float
        Sampling Rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray (float)
        Array containing the time indices, in seconds, for the impulse response.

    h_rc : 1-D ndarray (float)
        Impulse response of the raised cosine filter.
    """
    T_delta = 1 / float(Fs)
    time_idx = (np.arange(N) - N / 2) * T_delta
    h_rc = np.zeros(N, dtype=float)

    for k in range(N):
        t = (k - N / 2) * T_delta
        if t == 0.0:
            h_rc[k] = 1.0
        elif alpha != 0 and (t == Ts / (2 * alpha) or t == -Ts / (2 * alpha)):
            # Limit value at the two singular points of the closed form.
            h_rc[k] = (np.pi / 4) * (np.sin(np.pi * t / Ts) / (np.pi * t / Ts))
        else:
            h_rc[k] = (np.sin(np.pi * t / Ts) / (np.pi * t / Ts)) * \
                      (np.cos(np.pi * alpha * t / Ts) /
                       (1 - (((2 * alpha * t) / Ts) * ((2 * alpha * t) / Ts))))

    return time_idx, h_rc
50 | """ 51 | 52 | T_delta = 1/float(Fs) 53 | time_idx = ((np.arange(N)-N/2))*T_delta 54 | sample_num = np.arange(N) 55 | h_rc = np.zeros(N, dtype=float) 56 | 57 | for x in sample_num: 58 | t = (x-N/2)*T_delta 59 | if t == 0.0: 60 | h_rc[x] = 1.0 61 | elif alpha != 0 and t == Ts/(2*alpha): 62 | h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts)) 63 | elif alpha != 0 and t == -Ts/(2*alpha): 64 | h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts)) 65 | else: 66 | h_rc[x] = (np.sin(np.pi*t/Ts)/(np.pi*t/Ts))* \ 67 | (np.cos(np.pi*alpha*t/Ts)/(1-(((2*alpha*t)/Ts)*((2*alpha*t)/Ts)))) 68 | 69 | return time_idx, h_rc 70 | 71 | def rrcosfilter(N, alpha, Ts, Fs): 72 | """ 73 | Generates a root raised cosine (RRC) filter (FIR) impulse response. 74 | 75 | Parameters 76 | ---------- 77 | N : int 78 | Length of the filter in samples. 79 | 80 | alpha : float 81 | Roll off factor (Valid values are [0, 1]). 82 | 83 | Ts : float 84 | Symbol period in seconds. 85 | 86 | Fs : float 87 | Sampling Rate in Hz. 88 | 89 | Returns 90 | --------- 91 | 92 | time_idx : 1-D ndarray of floats 93 | Array containing the time indices, in seconds, for 94 | the impulse response. 95 | 96 | h_rrc : 1-D ndarray of floats 97 | Impulse response of the root raised cosine filter. 
98 | """ 99 | 100 | T_delta = 1/float(Fs) 101 | time_idx = ((np.arange(N)-N/2))*T_delta 102 | sample_num = np.arange(N) 103 | h_rrc = np.zeros(N, dtype=float) 104 | 105 | for x in sample_num: 106 | t = (x-N/2)*T_delta 107 | if t == 0.0: 108 | h_rrc[x] = 1.0 - alpha + (4*alpha/np.pi) 109 | elif alpha != 0 and t == Ts/(4*alpha): 110 | h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \ 111 | (np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha))))) 112 | elif alpha != 0 and t == -Ts/(4*alpha): 113 | h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \ 114 | (np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha))))) 115 | else: 116 | h_rrc[x] = (np.sin(np.pi*t*(1-alpha)/Ts) + \ 117 | 4*alpha*(t/Ts)*np.cos(np.pi*t*(1+alpha)/Ts))/ \ 118 | (np.pi*t*(1-(4*alpha*t/Ts)*(4*alpha*t/Ts))/Ts) 119 | 120 | return time_idx, h_rrc 121 | 122 | def gaussianfilter(N, alpha, Ts, Fs): 123 | """ 124 | Generates a gaussian filter (FIR) impulse response. 125 | 126 | Parameters 127 | ---------- 128 | 129 | N : int 130 | Length of the filter in samples. 131 | 132 | alpha : float 133 | Roll off factor (Valid values are [0, 1]). 134 | 135 | Ts : float 136 | Symbol period in seconds. 137 | 138 | Fs : float 139 | Sampling Rate in Hz. 140 | 141 | Returns 142 | ------- 143 | 144 | time_index : 1-D ndarray of floats 145 | Array containing the time indices for the impulse response. 146 | 147 | h_gaussian : 1-D ndarray of floats 148 | Impulse response of the gaussian filter. 149 | """ 150 | 151 | T_delta = 1/float(Fs) 152 | time_idx = ((np.arange(N)-N/2))*T_delta 153 | h_gaussian = (np.sqrt(np.pi)/alpha)*np.exp(-((np.pi*time_idx/alpha)*(np.pi*time_idx/alpha))) 154 | 155 | return time_idx, h_gaussian 156 | 157 | def rectfilter(N, Ts, Fs): 158 | """ 159 | Generates a rectangular filter (FIR) impulse response. 160 | 161 | Parameters 162 | ---------- 163 | 164 | N : int 165 | Length of the filter in samples. 166 | 167 | Ts : float 168 | Symbol period in seconds. 
def rectfilter(N, Ts, Fs):
    """
    Generates a rectangular filter (FIR) impulse response.

    Parameters
    ----------
    N : int
        Length of the filter in samples.

    Ts : float
        Symbol period in seconds.  (Unused; kept for interface symmetry
        with the other filter generators.)

    Fs : float
        Sampling Rate in Hz.

    Returns
    -------
    time_idx : 1-D ndarray of floats
        Array containing the time indices for the impulse response.

    h_rect : 1-D ndarray of floats
        Impulse response of the rectangular filter (all ones).
    """
    T_delta = 1 / float(Fs)
    time_idx = (np.arange(N) - N / 2) * T_delta
    return time_idx, np.ones(N)
def link_performance(link_model, SNRs, send_max, err_min, send_chunk=None, code_rate=1):
    """
    Estimate the BER performance of a link model with Monte Carlo simulation.

    Parameters
    ----------
    link_model : LinkModel object.

    SNRs : 1D arraylike
        Signal to Noise ratio in dB defined as :math:`SNR_{dB} = (E_b/N_0)_{dB} + 10 \\log_{10}(R_cM_c)`
        where :math:`Rc` is the code rate and :math:`Mc` the modulation rate.

    send_max : int
        Maximum number of bits sent for each SNR.

    err_min : int
        Bits are sent until err_min errors are accumulated (see also send_max).

    send_chunk : int
        Number of bits sent at each iteration.
        *Default*: err_min

    code_rate : float in (0,1]
        Rate of the used code.
        *Default*: 1 i.e. no code.

    Returns
    -------
    BERs : 1d ndarray
        Estimated Bit Error Ratio corresponding to each SNR.
    """
    BERs = np.empty_like(SNRs, dtype=float)

    # Round the chunk size down to a multiple of num_bits_symbol * nb_tx so
    # that no padding is needed, but never below one full symbol vector.
    if send_chunk is None:
        send_chunk = err_min
    chunk_unit = link_model.num_bits_symbol * link_model.channel.nb_tx
    send_chunk = max(chunk_unit, send_chunk // chunk_unit * chunk_unit)

    for idx in range(len(SNRs)):
        link_model.channel.set_SNR_dB(SNRs[idx], code_rate, link_model.Es)
        total_sent = 0
        total_errs = 0
        bits_per_vector = link_model.channel.nb_tx * link_model.num_bits_symbol

        # Keep sending chunks until either limit is hit.
        while total_sent < send_max and total_errs < err_min:
            msg = np.random.choice((0, 1), send_chunk)
            channel_output = link_model.channel.propagate(link_model.modulate(msg))

            if isinstance(link_model.channel, MIMOFlatChannel):
                # MIMO: receive each symbol vector with its own channel gains.
                received_msg = np.empty_like(msg, int)
                for i in range(len(channel_output)):
                    received_msg[bits_per_vector * i:bits_per_vector * (i + 1)] = \
                        link_model.receive(channel_output[i],
                                           link_model.channel.channel_gains[i],
                                           link_model.constellation)
            else:
                received_msg = link_model.receive(channel_output,
                                                  link_model.channel.channel_gains,
                                                  link_model.constellation)

            total_errs += (msg != received_msg).sum()  # Remove MIMO padding
            total_sent += send_chunk

        BERs[idx] = total_errs / total_sent
    return BERs
class LinkModel:
    """
    Construct a link model.

    Parameters
    ----------
    modulate : function with same prototype as Modem.modulate

    channel : _FlatChannel object

    receive : function with prototype receive(y, H, constellation) that return a binary array.
        y : 1D ndarray
            Received complex symbols (shape: num_receive_antennas x 1)

        h : 2D ndarray
            Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)

        constellation : 1D ndarray

    num_bits_symbol : int

    constellation : array of float or complex

    Es : float
        Average energy per symbols.
        *Default* Es=1.

    Attributes
    ----------
    modulate : function with same prototype as Modem.modulate

    channel : _FlatChannel object

    receive : function with prototype receive(y, H, constellation) that return a binary array.

    num_bits_symbol : int

    constellation : array of float or complex

    Es : float
        Average energy per symbols.
        *Default* Es=1.
    """

    def __init__(self, modulate, channel, receive, num_bits_symbol, constellation, Es=1):
        # Plain attribute holder: store the link-building callables and
        # modulation parameters for use by link_performance().
        self.modulate = modulate
        self.channel = channel
        self.receive = receive
        self.num_bits_symbol = num_bits_symbol
        self.constellation = constellation
        self.Es = Es
class CommentBlocker(object):
    """ Pull out contiguous comment blocks. """

    def __init__(self):
        # Seed with a dummy non-comment block so process_token always has
        # a current block to extend.
        self.current_block = NonComment(0, 0)
        # All of the blocks seen so far, in file order.
        self.blocks = []
        # Maps a code line number to the comment block preceding it.
        self.index = {}

    def process_file(self, file):
        """ Process a file object. """
        # NOTE(review): file.next is the Python 2 iterator protocol,
        # matching the rest of this (legacy) module.
        for tok in tokenize.generate_tokens(file.next):
            self.process_token(*tok)
        self.make_index()

    def process_token(self, kind, string, start, end, line):
        """ Route a single token into the current or a new block. """
        is_comment_tok = kind == tokenize.COMMENT
        if self.current_block.is_comment:
            if is_comment_tok:
                self.current_block.add(string, start, end, line)
            else:
                self.new_noncomment(start[0], end[0])
        elif is_comment_tok:
            self.new_comment(string, start, end, line)
        else:
            self.current_block.add(string, start, end, line)

    def new_noncomment(self, start_lineno, end_lineno):
        """ We are transitioning from a noncomment to a comment.
        """
        block = NonComment(start_lineno, end_lineno)
        self.blocks.append(block)
        self.current_block = block

    def new_comment(self, string, start, end, line):
        """ Possibly add a new comment.

        Only adds a new comment if this comment is the only thing on the line.
        Otherwise, it extends the noncomment block.
        """
        if line[:start[1]].strip():
            # Oops! Trailing comment, not a comment block.
            self.current_block.add(string, start, end, line)
        else:
            # A comment block.
            block = Comment(start[0], end[0], string)
            self.blocks.append(block)
            self.current_block = block

    def make_index(self):
        """ Make the index mapping lines of actual code to their associated
        prefix comments.
        """
        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
            if not block.is_comment:
                self.index[block.start_lineno] = prev

    def search_for_comment(self, lineno, default=None):
        """ Find the comment block just before the given line number.

        Returns None (or the specified default) if there is no such block.
        """
        if not self.index:
            self.make_index()
        return getattr(self.index.get(lineno, None), 'text', default)
def strip_comment_marker(text):
    """ Strip # markers at the front of a block of comment text. """
    # Remove the leading '#' run from each line, then dedent whatever
    # common whitespace remains.
    stripped = [line.lstrip('#') for line in text.splitlines()]
    return textwrap.dedent('\n'.join(stripped))
def get_class_traits(klass):
    """ Yield all of the documentation for trait definitions on a class object.

    Yields (name, rhs, doc) tuples, one per class-level assignment.
    """
    # FIXME: gracefully handle errors here or in the caller?
    source = inspect.getsource(klass)
    blocker = CommentBlocker()
    blocker.process_file(StringIO(source))
    mod_ast = compiler.parse(source)
    class_ast = mod_ast.node.nodes[0]
    for stmt in class_ast.code.nodes:
        # FIXME: handle other kinds of assignments?
        if isinstance(stmt, compiler.ast.Assign):
            name = stmt.nodes[0].name
            rhs = unparse(stmt.expr).strip()
            doc = strip_comment_marker(
                blocker.search_for_comment(stmt.lineno, default=''))
            yield name, rhs, doc
| @echo " gettext to make PO message catalogs" 37 | @echo " changes to make an overview of all changed/added/deprecated items" 38 | @echo " linkcheck to check all external links for integrity" 39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 40 | 41 | clean: 42 | -rm -rf $(BUILDDIR)/* 43 | 44 | html: 45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 48 | 49 | dirhtml: 50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 51 | @echo 52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 53 | 54 | singlehtml: 55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 56 | @echo 57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 58 | 59 | pickle: 60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 61 | @echo 62 | @echo "Build finished; now you can process the pickle files." 63 | 64 | json: 65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 66 | @echo 67 | @echo "Build finished; now you can process the JSON files." 68 | 69 | htmlhelp: 70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 71 | @echo 72 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 73 | ".hhp project file in $(BUILDDIR)/htmlhelp." 74 | 75 | qthelp: 76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 77 | @echo 78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/CommPy.qhcp" 81 | @echo "To view the help file:" 82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/CommPy.qhc" 83 | 84 | devhelp: 85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 86 | @echo 87 | @echo "Build finished." 
88 | @echo "To view the help file:" 89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/CommPy" 90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/CommPy" 91 | @echo "# devhelp" 92 | 93 | epub: 94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 95 | @echo 96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 97 | 98 | latex: 99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 100 | @echo 101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 103 | "(use \`make latexpdf' here to do that automatically)." 104 | 105 | latexpdf: 106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 107 | @echo "Running LaTeX files through pdflatex..." 108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 110 | 111 | text: 112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 113 | @echo 114 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 115 | 116 | man: 117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 118 | @echo 119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 120 | 121 | texinfo: 122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 123 | @echo 124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 125 | @echo "Run \`make' in that directory to run these through makeinfo" \ 126 | "(use \`make info' here to do that automatically)." 127 | 128 | info: 129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 130 | @echo "Running Texinfo files through makeinfo..." 131 | make -C $(BUILDDIR)/texinfo info 132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 133 | 134 | gettext: 135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 136 | @echo 137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 
138 | 139 | changes: 140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 141 | @echo 142 | @echo "The overview file is in $(BUILDDIR)/changes." 143 | 144 | linkcheck: 145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 146 | @echo 147 | @echo "Link check complete; look for any errors in the above output " \ 148 | "or in $(BUILDDIR)/linkcheck/output.txt." 149 | 150 | doctest: 151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 152 | @echo "Testing of doctests in the sources finished, look at the " \ 153 | "results in $(BUILDDIR)/doctest/output.txt." 154 | -------------------------------------------------------------------------------- /doc/sphinxext/LICENSE.txt: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------------- 2 | The files 3 | - numpydoc.py 4 | - autosummary.py 5 | - autosummary_generate.py 6 | - docscrape.py 7 | - docscrape_sphinx.py 8 | - phantom_import.py 9 | have the following license: 10 | 11 | Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen 12 | 13 | Redistribution and use in source and binary forms, with or without 14 | modification, are permitted provided that the following conditions are 15 | met: 16 | 17 | 1. Redistributions of source code must retain the above copyright 18 | notice, this list of conditions and the following disclaimer. 19 | 2. Redistributions in binary form must reproduce the above copyright 20 | notice, this list of conditions and the following disclaimer in 21 | the documentation and/or other materials provided with the 22 | distribution. 23 | 24 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 25 | IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 26 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 27 | DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 28 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 29 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 30 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 32 | STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 33 | IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 | POSSIBILITY OF SUCH DAMAGE. 35 | 36 | ------------------------------------------------------------------------------- 37 | The files 38 | - compiler_unparse.py 39 | - comment_eater.py 40 | - traitsdoc.py 41 | have the following license: 42 | 43 | This software is OSI Certified Open Source Software. 44 | OSI Certified is a certification mark of the Open Source Initiative. 45 | 46 | Copyright (c) 2006, Enthought, Inc. 47 | All rights reserved. 48 | 49 | Redistribution and use in source and binary forms, with or without 50 | modification, are permitted provided that the following conditions are met: 51 | 52 | * Redistributions of source code must retain the above copyright notice, this 53 | list of conditions and the following disclaimer. 54 | * Redistributions in binary form must reproduce the above copyright notice, 55 | this list of conditions and the following disclaimer in the documentation 56 | and/or other materials provided with the distribution. 57 | * Neither the name of Enthought, Inc. nor the names of its contributors may 58 | be used to endorse or promote products derived from this software without 59 | specific prior written permission. 60 | 61 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 62 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 63 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 64 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 65 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 66 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 67 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 68 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 69 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 70 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 71 | 72 | 73 | ------------------------------------------------------------------------------- 74 | The files 75 | - only_directives.py 76 | - plot_directive.py 77 | originate from Matplotlib (http://matplotlib.sf.net/) which has 78 | the following license: 79 | 80 | Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. 81 | 82 | 1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation. 83 | 84 | 2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee. 85 | 86 | 3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3. 
87 | 88 | 4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 89 | 90 | 5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 91 | 92 | 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 93 | 94 | 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 95 | 96 | 8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. 97 | 98 | -------------------------------------------------------------------------------- /doc/sphinxext/phantom_import.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============== 3 | phantom_import 4 | ============== 5 | 6 | Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar 7 | extensions to use docstrings loaded from an XML file. 8 | 9 | This extension loads an XML file in the Pydocweb format [1] and 10 | creates a dummy module that contains the specified docstrings. This 11 | can be used to get the current docstrings from a Pydocweb instance 12 | without needing to rebuild the documented module. 13 | 14 | .. 
[1] http://code.google.com/p/pydocweb 15 | 16 | """ 17 | import imp, sys, compiler, types, os, inspect, re 18 | 19 | def setup(app): 20 | app.connect('builder-inited', initialize) 21 | app.add_config_value('phantom_import_file', None, True) 22 | 23 | def initialize(app): 24 | fn = app.config.phantom_import_file 25 | if (fn and os.path.isfile(fn)): 26 | print "[numpydoc] Phantom importing modules from", fn, "..." 27 | import_phantom_module(fn) 28 | 29 | #------------------------------------------------------------------------------ 30 | # Creating 'phantom' modules from an XML description 31 | #------------------------------------------------------------------------------ 32 | def import_phantom_module(xml_file): 33 | """ 34 | Insert a fake Python module to sys.modules, based on a XML file. 35 | 36 | The XML file is expected to conform to Pydocweb DTD. The fake 37 | module will contain dummy objects, which guarantee the following: 38 | 39 | - Docstrings are correct. 40 | - Class inheritance relationships are correct (if present in XML). 41 | - Function argspec is *NOT* correct (even if present in XML). 42 | Instead, the function signature is prepended to the function docstring. 43 | - Class attributes are *NOT* correct; instead, they are dummy objects. 
44 | 45 | Parameters 46 | ---------- 47 | xml_file : str 48 | Name of an XML file to read 49 | 50 | """ 51 | import lxml.etree as etree 52 | 53 | object_cache = {} 54 | 55 | tree = etree.parse(xml_file) 56 | root = tree.getroot() 57 | 58 | # Sort items so that 59 | # - Base classes come before classes inherited from them 60 | # - Modules come before their contents 61 | all_nodes = dict([(n.attrib['id'], n) for n in root]) 62 | 63 | def _get_bases(node, recurse=False): 64 | bases = [x.attrib['ref'] for x in node.findall('base')] 65 | if recurse: 66 | j = 0 67 | while True: 68 | try: 69 | b = bases[j] 70 | except IndexError: break 71 | if b in all_nodes: 72 | bases.extend(_get_bases(all_nodes[b])) 73 | j += 1 74 | return bases 75 | 76 | type_index = ['module', 'class', 'callable', 'object'] 77 | 78 | def base_cmp(a, b): 79 | x = cmp(type_index.index(a.tag), type_index.index(b.tag)) 80 | if x != 0: return x 81 | 82 | if a.tag == 'class' and b.tag == 'class': 83 | a_bases = _get_bases(a, recurse=True) 84 | b_bases = _get_bases(b, recurse=True) 85 | x = cmp(len(a_bases), len(b_bases)) 86 | if x != 0: return x 87 | if a.attrib['id'] in b_bases: return -1 88 | if b.attrib['id'] in a_bases: return 1 89 | 90 | return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) 91 | 92 | nodes = root.getchildren() 93 | nodes.sort(base_cmp) 94 | 95 | # Create phantom items 96 | for node in nodes: 97 | name = node.attrib['id'] 98 | doc = (node.text or '').decode('string-escape') + "\n" 99 | if doc == "\n": doc = "" 100 | 101 | # create parent, if missing 102 | parent = name 103 | while True: 104 | parent = '.'.join(parent.split('.')[:-1]) 105 | if not parent: break 106 | if parent in object_cache: break 107 | obj = imp.new_module(parent) 108 | object_cache[parent] = obj 109 | sys.modules[parent] = obj 110 | 111 | # create object 112 | if node.tag == 'module': 113 | obj = imp.new_module(name) 114 | obj.__doc__ = doc 115 | sys.modules[name] = obj 116 | elif node.tag == 'class': 117 
| bases = [object_cache[b] for b in _get_bases(node) 118 | if b in object_cache] 119 | bases.append(object) 120 | init = lambda self: None 121 | init.__doc__ = doc 122 | obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) 123 | obj.__name__ = name.split('.')[-1] 124 | elif node.tag == 'callable': 125 | funcname = node.attrib['id'].split('.')[-1] 126 | argspec = node.attrib.get('argspec') 127 | if argspec: 128 | argspec = re.sub('^[^(]*', '', argspec) 129 | doc = "%s%s\n\n%s" % (funcname, argspec, doc) 130 | obj = lambda: 0 131 | obj.__argspec_is_invalid_ = True 132 | obj.func_name = funcname 133 | obj.__name__ = name 134 | obj.__doc__ = doc 135 | if inspect.isclass(object_cache[parent]): 136 | obj.__objclass__ = object_cache[parent] 137 | else: 138 | class Dummy(object): pass 139 | obj = Dummy() 140 | obj.__name__ = name 141 | obj.__doc__ = doc 142 | if inspect.isclass(object_cache[parent]): 143 | obj.__get__ = lambda: None 144 | object_cache[name] = obj 145 | 146 | if parent: 147 | if inspect.ismodule(object_cache[parent]): 148 | obj.__module__ = parent 149 | setattr(object_cache[parent], name.split('.')[-1], obj) 150 | 151 | # Populate items 152 | for node in root: 153 | obj = object_cache.get(node.attrib['id']) 154 | if obj is None: continue 155 | for ref in node.findall('ref'): 156 | if node.tag == 'class': 157 | if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): 158 | setattr(obj, ref.attrib['name'], 159 | object_cache.get(ref.attrib['ref'])) 160 | else: 161 | setattr(obj, ref.attrib['name'], 162 | object_cache.get(ref.attrib['ref'])) 163 | -------------------------------------------------------------------------------- /doc/sphinxext/numpydoc.py: -------------------------------------------------------------------------------- 1 | """ 2 | ======== 3 | numpydoc 4 | ======== 5 | 6 | Sphinx extension that handles docstrings in the Numpy standard format. [1] 7 | 8 | It will: 9 | 10 | - Convert Parameters etc. sections to field lists. 
11 | - Convert See Also section to a See also entry. 12 | - Renumber references. 13 | - Extract the signature from the docstring, if it can't be determined otherwise. 14 | 15 | .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt 16 | 17 | """ 18 | 19 | import sphinx 20 | 21 | if sphinx.__version__ < '1.0.1': 22 | raise RuntimeError("Sphinx 1.0.1 or newer is required") 23 | 24 | import os, re, pydoc 25 | from docscrape_sphinx import get_doc_object, SphinxDocString 26 | from sphinx.util.compat import Directive 27 | import inspect 28 | 29 | def mangle_docstrings(app, what, name, obj, options, lines, 30 | reference_offset=[0]): 31 | 32 | cfg = dict(use_plots=app.config.numpydoc_use_plots, 33 | show_class_members=app.config.numpydoc_show_class_members) 34 | 35 | if what == 'module': 36 | # Strip top title 37 | title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', 38 | re.I|re.S) 39 | lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") 40 | else: 41 | doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) 42 | lines[:] = unicode(doc).split(u"\n") 43 | 44 | if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ 45 | obj.__name__: 46 | if hasattr(obj, '__module__'): 47 | v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) 48 | else: 49 | v = dict(full_name=obj.__name__) 50 | lines += [u'', u'.. htmlonly::', ''] 51 | lines += [u' %s' % x for x in 52 | (app.config.numpydoc_edit_link % v).split("\n")] 53 | 54 | # replace reference numbers so that there are no duplicates 55 | references = [] 56 | for line in lines: 57 | line = line.strip() 58 | m = re.match(ur'^.. 
\[([a-z0-9_.-])\]', line, re.I) 59 | if m: 60 | references.append(m.group(1)) 61 | 62 | # start renaming from the longest string, to avoid overwriting parts 63 | references.sort(key=lambda x: -len(x)) 64 | if references: 65 | for i, line in enumerate(lines): 66 | for r in references: 67 | if re.match(ur'^\d+$', r): 68 | new_r = u"R%d" % (reference_offset[0] + int(r)) 69 | else: 70 | new_r = u"%s%d" % (r, reference_offset[0]) 71 | lines[i] = lines[i].replace(u'[%s]_' % r, 72 | u'[%s]_' % new_r) 73 | lines[i] = lines[i].replace(u'.. [%s]' % r, 74 | u'.. [%s]' % new_r) 75 | 76 | reference_offset[0] += len(references) 77 | 78 | def mangle_signature(app, what, name, obj, options, sig, retann): 79 | # Do not try to inspect classes that don't define `__init__` 80 | if (inspect.isclass(obj) and 81 | (not hasattr(obj, '__init__') or 82 | 'initializes x; see ' in pydoc.getdoc(obj.__init__))): 83 | return '', '' 84 | 85 | if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return 86 | if not hasattr(obj, '__doc__'): return 87 | 88 | doc = SphinxDocString(pydoc.getdoc(obj)) 89 | if doc['Signature']: 90 | sig = re.sub(u"^[^(]*", u"", doc['Signature']) 91 | return sig, u'' 92 | 93 | def setup(app, get_doc_object_=get_doc_object): 94 | global get_doc_object 95 | get_doc_object = get_doc_object_ 96 | 97 | app.connect('autodoc-process-docstring', mangle_docstrings) 98 | app.connect('autodoc-process-signature', mangle_signature) 99 | app.add_config_value('numpydoc_edit_link', None, False) 100 | app.add_config_value('numpydoc_use_plots', None, False) 101 | app.add_config_value('numpydoc_show_class_members', True, True) 102 | 103 | # Extra mangling domains 104 | app.add_domain(NumpyPythonDomain) 105 | app.add_domain(NumpyCDomain) 106 | 107 | #------------------------------------------------------------------------------ 108 | # Docstring-mangling domains 109 | #------------------------------------------------------------------------------ 110 | 111 | from 
docutils.statemachine import ViewList 112 | from sphinx.domains.c import CDomain 113 | from sphinx.domains.python import PythonDomain 114 | 115 | class ManglingDomainBase(object): 116 | directive_mangling_map = {} 117 | 118 | def __init__(self, *a, **kw): 119 | super(ManglingDomainBase, self).__init__(*a, **kw) 120 | self.wrap_mangling_directives() 121 | 122 | def wrap_mangling_directives(self): 123 | for name, objtype in self.directive_mangling_map.items(): 124 | self.directives[name] = wrap_mangling_directive( 125 | self.directives[name], objtype) 126 | 127 | class NumpyPythonDomain(ManglingDomainBase, PythonDomain): 128 | name = 'np' 129 | directive_mangling_map = { 130 | 'function': 'function', 131 | 'class': 'class', 132 | 'exception': 'class', 133 | 'method': 'function', 134 | 'classmethod': 'function', 135 | 'staticmethod': 'function', 136 | 'attribute': 'attribute', 137 | } 138 | 139 | class NumpyCDomain(ManglingDomainBase, CDomain): 140 | name = 'np-c' 141 | directive_mangling_map = { 142 | 'function': 'function', 143 | 'member': 'attribute', 144 | 'macro': 'function', 145 | 'type': 'class', 146 | 'var': 'object', 147 | } 148 | 149 | def wrap_mangling_directive(base_directive, objtype): 150 | class directive(base_directive): 151 | def run(self): 152 | env = self.state.document.settings.env 153 | 154 | name = None 155 | if self.arguments: 156 | m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) 157 | name = m.group(2).strip() 158 | 159 | if not name: 160 | name = self.arguments[0] 161 | 162 | lines = list(self.content) 163 | mangle_docstrings(env.app, objtype, name, None, None, lines) 164 | self.content = ViewList(lines, self.content.parent) 165 | 166 | return base_directive.run(self) 167 | 168 | return directive 169 | 170 | -------------------------------------------------------------------------------- /commpy/channelcoding/README.md: -------------------------------------------------------------------------------- 1 | # Channel codes basics 2 | 3 | 
## Main idea 4 | 5 | The main idea of the channel codes can be formulated as following thesises: 6 | - **noise immunity** of the signal should be increased; 7 | - **redundant bits** are added for *error detection* and *error correction*; 8 | - some special algorithms (coding schemes) are used for this. 9 | 10 | 11 | 12 | The fact how "further" a certain algorithm divides the code words among themselves, and determines how strongly it protects the signal from noise [1, p.23]. 13 | 14 | 15 | 16 | In the case of binary codes, the minimum distance between all existing code words is called **Hamming distance** and is usually denoted **dmin**: 17 | 18 | examp2 19 | 20 | 21 | ## Classification 22 | 23 | Some classification is needed to talk about those or other implementations of the encoding and decoding algorithms. 24 | 25 | First, the channel codes: 26 | - can only [*detect*](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) the presence of errors 27 | - and they can also [*correct* errors](https://en.wikipedia.org/wiki/Error_correction_code). 28 | 29 | Secondly, codes can be classified as **block** and **continuous**: 30 | 31 | ![](https://raw.githubusercontent.com/kirlf/CSP/master/FEC/assets/BlockCont.png) 32 | 33 | ## Net bit rate 34 | Redundancy of the channel coding schemes influences (decreases) bit rate. Actually, it is the cost for the noiseless increasing. 35 | [**Net bit rate**](https://en.wikipedia.org/wiki/Bit_rate#Information_rate) concept is usually used: 36 | 37 | net 38 | 39 | To change the code rate (k/n) of the block code dimensions of the Generator matrix can be changed: 40 | ![blockcoderate](https://raw.githubusercontent.com/kirlf/CSP/master/FEC/assets/coderateblock.png) 41 | 42 | To change the coderate of the continuous code, e.g. 
convolutional code, **puncturing** procedure is frequently used: 43 | 44 | ![punct](https://raw.githubusercontent.com/kirlf/CSP/master/FEC/assets/punct.png) 45 | 46 | ## Example 47 | 48 | Let us consider implematation of the **convolutional codes** as an example: 49 | 50 | 51 | 52 | *Main modeling routines: random message genaration, channel encoding, baseband modulation, additive noise (e.g. AWGN), baseband demodulation, channel decoding, BER calculation.* 53 | 54 | ```python 55 | import numpy as np 56 | import commpy.channelcoding.convcode as cc 57 | import commpy.modulation as modulation 58 | 59 | def BER_calc(a, b): 60 | num_ber = np.sum(np.abs(a - b)) 61 | ber = np.mean(np.abs(a - b)) 62 | return int(num_ber), ber 63 | 64 | N = 100000 #number of symbols per the frame 65 | message_bits = np.random.randint(0, 2, N) # message 66 | 67 | M = 4 # modulation order (QPSK) 68 | k = np.log2(M) #number of bit per modulation symbol 69 | modem = modulation.PSKModem(M) # M-PSK modem initialization 70 | ``` 71 | 72 | The [following](https://en.wikipedia.org/wiki/File:Conv_code_177_133.png) convolutional code will be used: 73 | 74 | ![](https://upload.wikimedia.org/wikipedia/commons/thumb/b/b3/Conv_code_177_133.png/800px-Conv_code_177_133.png) 75 | 76 | *Shift-register for the (7, [171, 133]) convolutional code polynomial.* 77 | 78 | Convolutional encoder parameters: 79 | 80 | ```python 81 | rate = 1/2 # code rate 82 | L = 7 # constraint length 83 | m = np.array([L-1]) # number of delay elements 84 | generator_matrix = np.array([[0o171, 0o133]]) # generator branches 85 | trellis = cc.Trellis(M, generator_matrix) # Trellis structure 86 | ``` 87 | 88 | Viterbi decoder parameters: 89 | 90 | ```python 91 | tb_depth = 5*(m.sum() + 1) # traceback depth 92 | ``` 93 | 94 | Two oppitions of the Viterbi decoder will be tested: 95 | - *hard* (hard inputs) 96 | - *unquatized* (soft inputs) 97 | 98 | Additionally, uncoded case will be considered. 
99 | 100 | Simulation loop: 101 | 102 | ```python 103 | EbNo = 5 # energy per bit to noise power spectral density ratio (in dB) 104 | snrdB = EbNo + 10*np.log10(k*rate) # Signal-to-Noise ratio (in dB) 105 | noiseVar = 10**(-snrdB/10) # noise variance (power) 106 | 107 | N_c = 10 # number of trials 108 | 109 | BER_soft = np.empty((N_c,)) 110 | BER_hard = np.empty((N_c,)) 111 | BER_uncoded = np.empty((N_c,)) 112 | 113 | for cntr in range(N_c): 114 | 115 | message_bits = np.random.randint(0, 2, N) # message 116 | coded_bits = cc.conv_encode(message_bits, trellis) # encoding 117 | 118 | modulated = modem.modulate(coded_bits) # modulation 119 | modulated_uncoded = modem.modulate(message_bits) # modulation (uncoded case) 120 | 121 | Es = np.mean(np.abs(modulated)**2) # symbol energy 122 | No = Es/((10**(EbNo/10))*np.log2(M)) # noise spectrum density 123 | 124 | noisy = modulated + np.sqrt(No/2)*\ 125 | (np.random.randn(modulated.shape[0])+\ 126 | 1j*np.random.randn(modulated.shape[0])) # AWGN 127 | 128 | noisy_uncoded = modulated_uncoded + np.sqrt(No/2)*\ 129 | (np.random.randn(modulated_uncoded.shape[0])+\ 130 | 1j*np.random.randn(modulated_uncoded.shape[0])) # AWGN (uncoded case) 131 | 132 | demodulated_soft = modem.demodulate(noisy, demod_type='soft', noise_var=noiseVar) # demodulation (soft output) 133 | demodulated_hard = modem.demodulate(noisy, demod_type='hard') # demodulation (hard output) 134 | demodulated_uncoded = modem.demodulate(noisy_uncoded, demod_type='hard') # demodulation (uncoded case) 135 | 136 | decoded_soft = cc.viterbi_decode(demodulated_soft, trellis, tb_depth, decoding_type='unquantized') # decoding (soft decision) 137 | decoded_hard = cc.viterbi_decode(demodulated_hard, trellis, tb_depth, decoding_type='hard') # decoding (hard decision) 138 | 139 | 140 | NumErr, BER_soft[cntr] = BER_calc(message_bits, decoded_soft[:-(L-1)]) # bit-error ratio (soft decision) 141 | NumErr, BER_hard[cntr] = BER_calc(message_bits, decoded_hard[:-(L-1)]) # bit-error 
ratio (hard decision) 142 | NumErr, BER_uncoded[cntr] = BER_calc(message_bits, demodulated_uncoded) # bit-error ratio (uncoded case) 143 | 144 | mean_BER_soft = np.mean(BER_soft) # averaged bit-error ratio (soft decision) 145 | mean_BER_hard = np.mean(BER_hard) # averaged bit-error ratio (hard decision) 146 | mean_BER_uncoded = np.mean(BER_uncoded) # averaged bit-error ratio (uncoded case) 147 | 148 | print("Soft decision:\n"+str(mean_BER_soft)+"\n") 149 | print("Hard decision:\n"+str(mean_BER_hard)+"\n") 150 | print("Uncoded message:\n"+str(mean_BER_uncoded)+"\n") 151 | ``` 152 | 153 | Outputs: 154 | 155 | ```python 156 | >>> Soft decision: 157 | >>> 0.0 158 | >>> 159 | >>> Hard decision: 160 | >>> 3.0000000000000004e-05 161 | >>> 162 | >>> Uncoded message: 163 | >>> 0.008782 164 | ``` 165 | 166 | ### Reference 167 | 168 | [1] Moon, Todd K. "Error correction coding." Mathematical Methods and Algorithms. Jhon Wiley and Son (2005). 169 | -------------------------------------------------------------------------------- /commpy/channelcoding/gfields.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Authors: Veeresh Taranalli 4 | # License: BSD 3-Clause 5 | 6 | """ Galois Fields """ 7 | 8 | from fractions import gcd 9 | from numpy import array, zeros, arange, convolve, ndarray, concatenate 10 | from itertools import * 11 | from commpy.utilities import dec2bitarray, bitarray2dec 12 | 13 | __all__ = ['GF', 'polydivide', 'polymultiply', 'poly_to_string'] 14 | 15 | class GF: 16 | """ 17 | Defines a Binary Galois Field of order m, containing n, 18 | where n can be a single element or a list of elements within the field. 19 | 20 | Parameters 21 | ---------- 22 | n : int 23 | Represents the Galois field element(s). 24 | 25 | m : int 26 | Specifies the order of the Galois Field. 27 | 28 | Returns 29 | ------- 30 | x : int 31 | A Galois Field GF(2\ :sup:`m`) object. 
32 | 33 | Examples 34 | -------- 35 | >>> from numpy import arange 36 | >>> from gfields import GF 37 | >>> x = arange(16) 38 | >>> m = 4 39 | >>> x = GF(x, m) 40 | >>> print x.elements 41 | [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] 42 | >>> print x.prim_poly 43 | 19 44 | 45 | """ 46 | 47 | # Initialization 48 | def __init__(self, x, m): 49 | self.m = m 50 | primpoly_array = array([0, 3, 7, 11, 19, 37, 67, 137, 285, 529, 1033, 51 | 2053, 4179, 8219, 17475, 32771, 69643]) 52 | self.prim_poly = primpoly_array[self.m] 53 | if type(x) is int and x >= 0 and x < pow(2, m): 54 | self.elements = array([x]) 55 | elif type(x) is ndarray and len(x) >= 1: 56 | self.elements = x 57 | 58 | # Overloading addition operator for Galois Field 59 | def __add__(self, x): 60 | if len(self.elements) == len(x.elements): 61 | return GF(self.elements ^ x.elements, self.m) 62 | else: 63 | raise ValueError("The arguments should have the same number of elements") 64 | 65 | # Overloading multiplication operator for Galois Field 66 | def __mul__(self, x): 67 | if len(x.elements) == len(self.elements): 68 | prod_elements = arange(len(self.elements)) 69 | for i in range(len(self.elements)): 70 | prod_elements[i] = polymultiply(self.elements[i], x.elements[i], self.m, self.prim_poly) 71 | return GF(prod_elements, self.m) 72 | else: 73 | raise ValueError("Two sets of elements cannot be multiplied") 74 | 75 | def power_to_tuple(self): 76 | """ 77 | Convert Galois field elements from power form to tuple form representation. 78 | """ 79 | y = zeros(len(self.elements)) 80 | for idx, i in enumerate(self.elements): 81 | if 2**i < 2**self.m: 82 | y[idx] = 2**i 83 | else: 84 | y[idx] = polydivide(2**i, self.prim_poly) 85 | return GF(y, self.m) 86 | 87 | def tuple_to_power(self): 88 | """ 89 | Convert Galois field elements from tuple form to power form representation. 
90 | """ 91 | y = zeros(len(self.elements)) 92 | for idx, i in enumerate(self.elements): 93 | if i != 0: 94 | init_state = 1 95 | cur_state = 1 96 | power = 0 97 | while cur_state != i: 98 | cur_state = ((cur_state << 1) & (2**self.m-1)) ^ (-((cur_state & 2**(self.m-1)) >> (self.m - 1)) & 99 | (self.prim_poly & (2**self.m-1))) 100 | power+=1 101 | y[idx] = power 102 | else: 103 | y[idx] = 0 104 | return GF(y, self.m) 105 | 106 | def order(self): 107 | """ 108 | Compute the orders of the Galois field elements. 109 | """ 110 | orders = zeros(len(self.elements)) 111 | power_gf = self.tuple_to_power() 112 | for idx, i in enumerate(power_gf.elements): 113 | orders[idx] = (2**self.m - 1)/(gcd(i, 2**self.m-1)) 114 | return orders 115 | 116 | def cosets(self): 117 | """ 118 | Compute the cyclotomic cosets of the Galois field. 119 | """ 120 | coset_list = [] 121 | x = self.tuple_to_power().elements 122 | mark_list = zeros(len(x)) 123 | coset_count = 1 124 | for idx in range(len(x)): 125 | if mark_list[idx] == 0: 126 | a = x[idx] 127 | mark_list[idx] = coset_count 128 | i = 1 129 | while (a*(2**i) % (2**self.m-1)) != a: 130 | for idx2 in range(len(x)): 131 | if (mark_list[idx2] == 0) and (x[idx2] == a*(2**i)%(2**self.m-1)): 132 | mark_list[idx2] = coset_count 133 | i+=1 134 | coset_count+=1 135 | 136 | for counts in range(1, coset_count): 137 | coset_list.append(GF(self.elements[mark_list==counts], self.m)) 138 | 139 | return coset_list 140 | 141 | def minpolys(self): 142 | """ 143 | Compute the minimal polynomials for all elements of the Galois field. 
144 | """ 145 | minpol_list = array([]) 146 | full_gf = GF(arange(2**self.m), self.m) 147 | full_cosets = full_gf.cosets() 148 | for x in self.elements: 149 | for i in range(len(full_cosets)): 150 | if x in full_cosets[i].elements: 151 | t = array([1, full_cosets[i].elements[0]])[::-1] 152 | for root in full_cosets[i].elements[1:]: 153 | t2 = concatenate((zeros(len(t)-1), array([1, root]), zeros(len(t)-1))) 154 | prod_poly = array([]) 155 | for n in range(len(t2)-len(t)+1): 156 | root_sum = 0 157 | for k in range(len(t)): 158 | root_sum = root_sum ^ polymultiply(int(t[k]), int(t2[n+k]), self.m, self.prim_poly) 159 | prod_poly = concatenate((prod_poly, array([root_sum]))) 160 | t = prod_poly[::-1] 161 | minpol_list = concatenate((minpol_list, array([bitarray2dec(t[::-1])]))) 162 | 163 | return minpol_list.astype(int) 164 | 165 | # Divide two polynomials and returns the remainder 166 | def polydivide(x, y): 167 | r = y 168 | while len(bin(r)) >= len(bin(y)): 169 | shift_count = len(bin(x)) - len(bin(y)) 170 | if shift_count > 0: 171 | d = y << shift_count 172 | else: 173 | d = y 174 | x = x ^ d 175 | r = x 176 | return r 177 | 178 | def polymultiply(x, y, m, prim_poly): 179 | x_array = dec2bitarray(x, m) 180 | y_array = dec2bitarray(y, m) 181 | prod = bitarray2dec(convolve(x_array, y_array) % 2) 182 | return polydivide(prod, prim_poly) 183 | 184 | 185 | def poly_to_string(x): 186 | 187 | i = 0 188 | polystr = "" 189 | while x != 0: 190 | y = x%2 191 | x = x >> 1 192 | if y == 1: 193 | polystr = polystr + "x^" + str(i) + " + " 194 | i+=1 195 | 196 | return polystr[:-2] 197 | -------------------------------------------------------------------------------- /doc/sphinxext/docscrape_sphinx.py: -------------------------------------------------------------------------------- 1 | import re, inspect, textwrap, pydoc 2 | import sphinx 3 | from docscrape import NumpyDocString, FunctionDoc, ClassDoc 4 | 5 | class SphinxDocString(NumpyDocString): 6 | def __init__(self, docstring, 
config={}): 7 | self.use_plots = config.get('use_plots', False) 8 | NumpyDocString.__init__(self, docstring, config=config) 9 | 10 | # string conversion routines 11 | def _str_header(self, name, symbol='`'): 12 | return ['.. rubric:: ' + name, ''] 13 | 14 | def _str_field_list(self, name): 15 | return [':' + name + ':'] 16 | 17 | def _str_indent(self, doc, indent=4): 18 | out = [] 19 | for line in doc: 20 | out += [' '*indent + line] 21 | return out 22 | 23 | def _str_signature(self): 24 | return [''] 25 | if self['Signature']: 26 | return ['``%s``' % self['Signature']] + [''] 27 | else: 28 | return [''] 29 | 30 | def _str_summary(self): 31 | return self['Summary'] + [''] 32 | 33 | def _str_extended_summary(self): 34 | return self['Extended Summary'] + [''] 35 | 36 | def _str_param_list(self, name): 37 | out = [] 38 | if self[name]: 39 | out += self._str_field_list(name) 40 | out += [''] 41 | for param,param_type,desc in self[name]: 42 | out += self._str_indent(['**%s** : %s' % (param.strip(), 43 | param_type)]) 44 | out += [''] 45 | out += self._str_indent(desc,8) 46 | out += [''] 47 | return out 48 | 49 | @property 50 | def _obj(self): 51 | if hasattr(self, '_cls'): 52 | return self._cls 53 | elif hasattr(self, '_f'): 54 | return self._f 55 | return None 56 | 57 | def _str_member_list(self, name): 58 | """ 59 | Generate a member listing, autosummary:: table where possible, 60 | and a table where not. 61 | 62 | """ 63 | out = [] 64 | if self[name]: 65 | out += ['.. rubric:: %s' % name, ''] 66 | prefix = getattr(self, '_name', '') 67 | 68 | if prefix: 69 | prefix = '~%s.' % prefix 70 | 71 | autosum = [] 72 | others = [] 73 | for param, param_type, desc in self[name]: 74 | param = param.strip() 75 | if not self._obj or hasattr(self._obj, param): 76 | autosum += [" %s%s" % (prefix, param)] 77 | else: 78 | others.append((param, param_type, desc)) 79 | 80 | if autosum: 81 | out += ['.. 
autosummary::', ' :toctree:', ''] 82 | out += autosum 83 | 84 | if others: 85 | maxlen_0 = max([len(x[0]) for x in others]) 86 | maxlen_1 = max([len(x[1]) for x in others]) 87 | hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 88 | fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) 89 | n_indent = maxlen_0 + maxlen_1 + 4 90 | out += [hdr] 91 | for param, param_type, desc in others: 92 | out += [fmt % (param.strip(), param_type)] 93 | out += self._str_indent(desc, n_indent) 94 | out += [hdr] 95 | out += [''] 96 | return out 97 | 98 | def _str_section(self, name): 99 | out = [] 100 | if self[name]: 101 | out += self._str_header(name) 102 | out += [''] 103 | content = textwrap.dedent("\n".join(self[name])).split("\n") 104 | out += content 105 | out += [''] 106 | return out 107 | 108 | def _str_see_also(self, func_role): 109 | out = [] 110 | if self['See Also']: 111 | see_also = super(SphinxDocString, self)._str_see_also(func_role) 112 | out = ['.. seealso::', ''] 113 | out += self._str_indent(see_also[2:]) 114 | return out 115 | 116 | def _str_warnings(self): 117 | out = [] 118 | if self['Warnings']: 119 | out = ['.. warning::', ''] 120 | out += self._str_indent(self['Warnings']) 121 | return out 122 | 123 | def _str_index(self): 124 | idx = self['index'] 125 | out = [] 126 | if len(idx) == 0: 127 | return out 128 | 129 | out += ['.. 
index:: %s' % idx.get('default','')] 130 | for section, references in idx.iteritems(): 131 | if section == 'default': 132 | continue 133 | elif section == 'refguide': 134 | out += [' single: %s' % (', '.join(references))] 135 | else: 136 | out += [' %s: %s' % (section, ','.join(references))] 137 | return out 138 | 139 | def _str_references(self): 140 | out = [] 141 | if self['References']: 142 | out += self._str_header('References') 143 | if isinstance(self['References'], str): 144 | self['References'] = [self['References']] 145 | out.extend(self['References']) 146 | out += [''] 147 | # Latex collects all references to a separate bibliography, 148 | # so we need to insert links to it 149 | if sphinx.__version__ >= "0.6": 150 | out += ['.. only:: latex',''] 151 | else: 152 | out += ['.. latexonly::',''] 153 | items = [] 154 | for line in self['References']: 155 | m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) 156 | if m: 157 | items.append(m.group(1)) 158 | out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] 159 | return out 160 | 161 | def _str_examples(self): 162 | examples_str = "\n".join(self['Examples']) 163 | 164 | if (self.use_plots and 'import matplotlib' in examples_str 165 | and 'plot::' not in examples_str): 166 | out = [] 167 | out += self._str_header('Examples') 168 | out += ['.. 
plot::', ''] 169 | out += self._str_indent(self['Examples']) 170 | out += [''] 171 | return out 172 | else: 173 | return self._str_section('Examples') 174 | 175 | def __str__(self, indent=0, func_role="obj"): 176 | out = [] 177 | out += self._str_signature() 178 | out += self._str_index() + [''] 179 | out += self._str_summary() 180 | out += self._str_extended_summary() 181 | for param_list in ('Parameters', 'Returns', 'Other Parameters', 182 | 'Raises', 'Warns'): 183 | out += self._str_param_list(param_list) 184 | out += self._str_warnings() 185 | out += self._str_see_also(func_role) 186 | out += self._str_section('Notes') 187 | out += self._str_references() 188 | out += self._str_examples() 189 | for param_list in ('Attributes', 'Methods'): 190 | out += self._str_member_list(param_list) 191 | out = self._str_indent(out,indent) 192 | return '\n'.join(out) 193 | 194 | class SphinxFunctionDoc(SphinxDocString, FunctionDoc): 195 | def __init__(self, obj, doc=None, config={}): 196 | self.use_plots = config.get('use_plots', False) 197 | FunctionDoc.__init__(self, obj, doc=doc, config=config) 198 | 199 | class SphinxClassDoc(SphinxDocString, ClassDoc): 200 | def __init__(self, obj, doc=None, func_doc=None, config={}): 201 | self.use_plots = config.get('use_plots', False) 202 | ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) 203 | 204 | class SphinxObjDoc(SphinxDocString): 205 | def __init__(self, obj, doc=None, config={}): 206 | self._f = obj 207 | SphinxDocString.__init__(self, doc, config=config) 208 | 209 | def get_doc_object(obj, what=None, doc=None, config={}): 210 | if what is None: 211 | if inspect.isclass(obj): 212 | what = 'class' 213 | elif inspect.ismodule(obj): 214 | what = 'module' 215 | elif callable(obj): 216 | what = 'function' 217 | else: 218 | what = 'object' 219 | if what == 'class': 220 | return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, 221 | config=config) 222 | elif what in ('function', 'method'): 223 | return 
# Authors: Veeresh Taranalli
# License: BSD 3-Clause

""" LDPC Codes """
import numpy as np

# BUG FIX: the original declared __all__ as a single string
# 'get_ldpc_code_params, ldpc_bp_decode', which exported neither name.
__all__ = ['get_ldpc_code_params', 'ldpc_bp_decode']

# LLR magnitudes are clipped to this range to keep tanh/arctanh finite.
MAX_POS_LLR = 38.0
MIN_NEG_LLR = -38.0


def get_ldpc_code_params(ldpc_design_filename):
    """
    Extract parameters from LDPC code design file.

    Expected file layout (as in the bundled Gallager designs):
      line 1: "<n_vnodes> <n_cnodes>"
      line 2: "<max_vnode_deg> <max_cnode_deg>"
      line 3: variable node degree list (space separated, trailing separator)
      line 4: check node degree list (space separated, trailing separator)
      next n_vnodes lines: 1-based check-node indices per variable node (tab separated)
      next n_cnodes lines: 1-based variable-node indices per check node (tab separated)

    Parameters
    ----------
    ldpc_design_filename : string
        Filename of the LDPC code design file.

    Returns
    -------
    ldpc_code_params : dictionary
        Parameters of the LDPC code (node counts, max degrees, flattened
        adjacency lists and position maps, degree lists).
    """
    ldpc_code_params = {}

    # Context manager ensures the design file is closed even if parsing fails.
    with open(ldpc_design_filename) as ldpc_design_file:
        [n_vnodes, n_cnodes] = [int(x) for x in ldpc_design_file.readline().split(' ')]
        [max_vnode_deg, max_cnode_deg] = [int(x) for x in ldpc_design_file.readline().split(' ')]
        vnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
        cnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)

        # Adjacency lists are padded with -1 up to the maximum node degree.
        cnode_adj_list = -np.ones([n_cnodes, max_cnode_deg], int)
        vnode_adj_list = -np.ones([n_vnodes, max_vnode_deg], int)

        for vnode_idx in range(n_vnodes):
            vnode_adj_list[vnode_idx, 0:vnode_deg_list[vnode_idx]] = \
                np.array([int(x) - 1 for x in ldpc_design_file.readline().split('\t')])

        for cnode_idx in range(n_cnodes):
            cnode_adj_list[cnode_idx, 0:cnode_deg_list[cnode_idx]] = \
                np.array([int(x) - 1 for x in ldpc_design_file.readline().split('\t')])

    # cnode_vnode_map[c, i] holds the position of check node c inside the
    # adjacency row of its i-th neighbouring variable node (and vice versa
    # for vnode_cnode_map). The decoder uses these positions to address the
    # flattened message arrays directly.
    cnode_vnode_map = -np.ones([n_cnodes, max_cnode_deg], int)
    vnode_cnode_map = -np.ones([n_vnodes, max_vnode_deg], int)

    for cnode in range(n_cnodes):
        for i, vnode in enumerate(cnode_adj_list[cnode, 0:cnode_deg_list[cnode]]):
            cnode_vnode_map[cnode, i] = np.where(vnode_adj_list[vnode, :] == cnode)[0][0]

    for vnode in range(n_vnodes):
        for i, cnode in enumerate(vnode_adj_list[vnode, 0:vnode_deg_list[vnode]]):
            vnode_cnode_map[vnode, i] = np.where(cnode_adj_list[cnode, :] == vnode)[0][0]

    cnode_adj_list_1d = cnode_adj_list.flatten().astype(np.int32)
    vnode_adj_list_1d = vnode_adj_list.flatten().astype(np.int32)
    cnode_vnode_map_1d = cnode_vnode_map.flatten().astype(np.int32)
    vnode_cnode_map_1d = vnode_cnode_map.flatten().astype(np.int32)

    # NOTE: the original also built a dense parity-check matrix 'pmat' here,
    # but it was never stored or returned, and it indexed columns with the
    # -1 padding entries of cnode_adj_list; the dead (and buggy) code has
    # been removed.

    ldpc_code_params['n_vnodes'] = n_vnodes
    ldpc_code_params['n_cnodes'] = n_cnodes
    ldpc_code_params['max_cnode_deg'] = max_cnode_deg
    ldpc_code_params['max_vnode_deg'] = max_vnode_deg
    ldpc_code_params['cnode_adj_list'] = cnode_adj_list_1d
    ldpc_code_params['cnode_vnode_map'] = cnode_vnode_map_1d
    ldpc_code_params['vnode_adj_list'] = vnode_adj_list_1d
    ldpc_code_params['vnode_cnode_map'] = vnode_cnode_map_1d
    ldpc_code_params['cnode_deg_list'] = cnode_deg_list
    ldpc_code_params['vnode_deg_list'] = vnode_deg_list

    return ldpc_code_params


def _limit_llr(in_llr):
    """ Clip a single LLR value to [MIN_NEG_LLR, MAX_POS_LLR]. """
    out_llr = in_llr

    if in_llr > MAX_POS_LLR:
        out_llr = MAX_POS_LLR

    if in_llr < MIN_NEG_LLR:
        out_llr = MIN_NEG_LLR

    return out_llr


def sum_product_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
                       vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg):
    """ Sum-Product check node update for one check node (in-place on cnode_msgs). """
    start_idx = cnode_idx * max_cnode_deg
    offset = cnode_deg_list[cnode_idx]
    vnode_list = cnode_adj_list[start_idx:start_idx + offset]
    # Gather the incoming variable-node messages for this check node.
    vnode_list_msgs_tanh = np.tanh(vnode_msgs[vnode_list * max_vnode_deg +
                                   cnode_vnode_map[start_idx:start_idx + offset]] / 2.0)
    msg_prod = np.prod(vnode_list_msgs_tanh)

    # Compute messages on outgoing edges using the incoming message product
    # (dividing out each edge's own contribution).
    cnode_msgs[start_idx:start_idx + offset] = 2.0 * np.arctanh(msg_prod / vnode_list_msgs_tanh)


def min_sum_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
                   vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg):
    """ Min-Sum check node update for one check node (in-place on cnode_msgs). """
    start_idx = cnode_idx * max_cnode_deg
    offset = cnode_deg_list[cnode_idx]
    vnode_list = cnode_adj_list[start_idx:start_idx + offset]
    vnode_list_msgs = vnode_msgs[vnode_list * max_vnode_deg +
                                 cnode_vnode_map[start_idx:start_idx + offset]]
    # Mask lets each outgoing edge exclude its own incoming message.
    vnode_list_msgs = np.ma.array(vnode_list_msgs, mask=False)

    # Compute messages on outgoing edges using the incoming messages
    for i in range(start_idx, start_idx + offset):
        vnode_list_msgs.mask[i - start_idx] = True
        cnode_msgs[i] = np.prod(np.sign(vnode_list_msgs)) * np.min(np.abs(vnode_list_msgs))
        vnode_list_msgs.mask[i - start_idx] = False


def ldpc_bp_decode(llr_vec, ldpc_code_params, decoder_algorithm, n_iters):
    """
    LDPC Decoder using Belief Propagation (BP).

    Parameters
    ----------
    llr_vec : 1D array of float
        Received codeword LLR values from the channel.

    ldpc_code_params : dictionary
        Parameters of the LDPC code.

    decoder_algorithm: string
        Specify the decoder algorithm type.
        SPA for Sum-Product Algorithm
        MSA for Min-Sum Algorithm

    n_iters : int
        Max. number of iterations of decoding to be done.

    Returns
    -------
    dec_word : 1D array of 0's and 1's
        The codeword after decoding.

    out_llrs : 1D array of float
        LLR values corresponding to the decoded output.

    Raises
    ------
    NameError
        If decoder_algorithm is neither 'SPA' nor 'MSA'.
    """
    n_cnodes = ldpc_code_params['n_cnodes']
    n_vnodes = ldpc_code_params['n_vnodes']
    max_cnode_deg = ldpc_code_params['max_cnode_deg']
    max_vnode_deg = ldpc_code_params['max_vnode_deg']
    cnode_adj_list = ldpc_code_params['cnode_adj_list']
    cnode_vnode_map = ldpc_code_params['cnode_vnode_map']
    vnode_adj_list = ldpc_code_params['vnode_adj_list']
    vnode_cnode_map = ldpc_code_params['vnode_cnode_map']
    cnode_deg_list = ldpc_code_params['cnode_deg_list']
    vnode_deg_list = ldpc_code_params['vnode_deg_list']

    dec_word = np.zeros(n_vnodes, int)
    # BUG FIX: out_llrs was allocated with dtype int, truncating the soft
    # outputs toward zero (the docstring promises floats, and the truncated
    # value also corrupted the hard decision for |LLR| < 1).
    out_llrs = np.zeros(n_vnodes)

    cnode_msgs = np.zeros(n_cnodes * max_cnode_deg)
    vnode_msgs = np.zeros(n_vnodes * max_vnode_deg)

    _limit_llr_v = np.vectorize(_limit_llr)

    if decoder_algorithm == 'SPA':
        check_node_update = sum_product_update
    elif decoder_algorithm == 'MSA':
        check_node_update = min_sum_update
    else:
        raise NameError('Please input a valid decoder_algorithm string.')

    # Initialize vnode messages with the LLR values received
    for vnode_idx in range(n_vnodes):
        start_idx = vnode_idx * max_vnode_deg
        offset = vnode_deg_list[vnode_idx]
        vnode_msgs[start_idx: start_idx + offset] = llr_vec[vnode_idx]

    # Main loop of Belief Propagation (BP) decoding iterations
    for iter_cnt in range(n_iters):

        continue_flag = 0

        # Check Node Update
        for cnode_idx in range(n_cnodes):
            check_node_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
                              vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg)

        # Variable Node Update
        for vnode_idx in range(n_vnodes):

            # Compute sum of all incoming messages at the variable node
            start_idx = vnode_idx * max_vnode_deg
            offset = vnode_deg_list[vnode_idx]
            cnode_list = vnode_adj_list[start_idx:start_idx + offset]
            cnode_list_msgs = cnode_msgs[cnode_list * max_cnode_deg +
                                         vnode_cnode_map[start_idx:start_idx + offset]]
            msg_sum = np.sum(cnode_list_msgs)

            # Compute messages on outgoing edges using the incoming message sum
            vnode_msgs[start_idx:start_idx + offset] = _limit_llr_v(llr_vec[vnode_idx] + msg_sum -
                                                                    cnode_list_msgs)

            # Update output LLRs and decoded word
            out_llrs[vnode_idx] = llr_vec[vnode_idx] + msg_sum
            if out_llrs[vnode_idx] > 0:
                dec_word[vnode_idx] = 0
            else:
                dec_word[vnode_idx] = 1

        # Early termination: stop once all parity checks are satisfied
        for cnode_idx in range(n_cnodes):
            p_sum = 0
            for i in range(cnode_deg_list[cnode_idx]):
                p_sum ^= dec_word[cnode_adj_list[cnode_idx * max_cnode_deg + i]]

            if p_sum != 0:
                continue_flag = 1
                break

        # Stop iterations
        if continue_flag == 0:
            break

    return dec_word, out_llrs
13 | 14 | import os 15 | import sys 16 | 17 | import sphinx 18 | 19 | #from mock import Mock as MagicMock 20 | 21 | #class Mock(object): 22 | # def __init__(self, *args, **kwargs): 23 | # pass 24 | 25 | # def __call__(self, *args, **kwargs): 26 | # return Mock() 27 | 28 | # @classmethod 29 | # def __getattr__(self, name): 30 | # if name in ('__file__', '__path__'): 31 | # return '/dev/null' 32 | # elif name[0] == name[0].upper(): 33 | # return type(name, (), {}) 34 | # else: 35 | # return Mock() 36 | 37 | # class Mock(MagicMock): 38 | # @classmethod 39 | # def __getattr__(cls, name): 40 | # return Mock() 41 | 42 | 43 | # MOCK_MODULES = ['numpy', 'numpy.fft', 'scipy', 'matplotlib', 'matplotlib.pyplot', 44 | # 'matplotlib.collections', 'matplotlib.patches', 45 | # 'numpy.random'] 46 | # for mod_name in MOCK_MODULES: 47 | # #sys.modules[mod_name] = Mock() 48 | # #sys.modules[mod_name] = mock.Mock() 49 | # sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) 50 | 51 | # If extensions (or modules to document with autodoc) are in another directory, 52 | # add these directories to sys.path here. If the directory is relative to the 53 | # documentation root, use os.path.abspath to make it absolute, like shown here. 54 | #sys.path.insert(0, os.path.abspath('/home/veeresht//Development/commpy-dev/commpy-dev-py27/commpy/commpy')) 55 | sys.path.insert(0, os.path.abspath('sphinxext')) 56 | # -- General configuration ----------------------------------------------------- 57 | 58 | # If your documentation needs a minimal Sphinx version, state it here. 59 | #needs_sphinx = '1.0' 60 | 61 | # Add any Sphinx extension module names here, as strings. They can be extensions 62 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
63 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 64 | 'sphinx.ext.todo', 'sphinx.ext.coverage', 65 | 'sphinx.ext.mathjax', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon'] 66 | 67 | # Add any paths that contain templates here, relative to this directory. 68 | templates_path = ['templates'] 69 | 70 | # The suffix of source filenames. 71 | source_suffix = '.rst' 72 | 73 | # The encoding of source files. 74 | #source_encoding = 'utf-8-sig' 75 | 76 | # The master toctree document. 77 | master_doc = 'index' 78 | 79 | # General information about the project. 80 | project = u'CommPy' 81 | copyright = u'2012 - 2019, CommPy' 82 | 83 | # The version info for the project you're documenting, acts as replacement for 84 | # |version| and |release|, also used in various other places throughout the 85 | # built documents. 86 | # 87 | # The short X.Y version. 88 | version = '0.3' 89 | # The full version, including alpha/beta/rc tags. 90 | release = '0.3.0' 91 | 92 | # The language for content autogenerated by Sphinx. Refer to documentation 93 | # for a list of supported languages. 94 | #language = None 95 | 96 | # There are two options for replacing |today|: either, you set today to some 97 | # non-false value, then it is used: 98 | #today = '' 99 | # Else, today_fmt is used as the format for a strftime call. 100 | #today_fmt = '%B %d, %Y' 101 | 102 | # List of patterns, relative to source directory, that match files and 103 | # directories to ignore when looking for source files. 104 | exclude_patterns = ['_build'] 105 | 106 | # The reST default role (used for this markup: `text`) to use for all documents. 107 | default_role = "autolink" 108 | 109 | # If true, '()' will be appended to :func: etc. cross-reference text. 110 | add_function_parentheses = False 111 | 112 | # If true, the current module name will be prepended to all description 113 | # unit titles (such as .. function::). 
114 | add_module_names = False 115 | 116 | # If true, sectionauthor and moduleauthor directives will be shown in the 117 | # output. They are ignored by default. 118 | #show_authors = False 119 | 120 | # The name of the Pygments (syntax highlighting) style to use. 121 | pygments_style = 'sphinx' 122 | 123 | # A list of ignored prefixes for module index sorting. 124 | #modindex_common_prefix = [] 125 | 126 | 127 | # -- Options for HTML output --------------------------------------------------- 128 | 129 | # The theme to use for HTML and HTML Help pages. See the documentation for 130 | # a list of builtin themes. 131 | # html_theme = 'classic' 132 | # html_theme_options = { 133 | # "sidebarwidth": 300 134 | # } 135 | 136 | numpydoc_show_class_members = False 137 | # Theme options are theme-specific and customize the look and feel of a theme 138 | # further. For a list of options available for each theme, see the 139 | # documentation. 140 | #html_theme_options = {} 141 | 142 | # Add any paths that contain custom themes here, relative to this directory. 143 | #html_theme_path = [] 144 | 145 | # The name for this set of Sphinx documents. If None, it defaults to 146 | # " v documentation". 147 | #html_title = None 148 | 149 | # A shorter title for the navigation bar. Default is the same as html_title. 150 | #html_short_title = None 151 | 152 | # The name of an image file (relative to this directory) to place at the top 153 | # of the sidebar. 154 | #html_logo = None 155 | 156 | # The name of an image file (within the static path) to use as favicon of the 157 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 158 | # pixels large. 159 | #html_favicon = None 160 | 161 | # Add any paths that contain custom static files (such as style sheets) here, 162 | # relative to this directory. They are copied after the builtin static files, 163 | # so a file named "default.css" will overwrite the builtin "default.css". 
164 | html_static_path = ['static'] 165 | 166 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 167 | # using the given strftime format. 168 | #html_last_updated_fmt = '%b %d, %Y' 169 | 170 | # If true, SmartyPants will be used to convert quotes and dashes to 171 | # typographically correct entities. 172 | #html_use_smartypants = True 173 | 174 | # Custom sidebar templates, maps document names to template names. 175 | #html_sidebars = {} 176 | 177 | # Additional templates that should be rendered to pages, maps page names to 178 | # template names. 179 | #html_additional_pages = {} 180 | 181 | # If false, no module index is generated. 182 | #html_domain_indices = True 183 | 184 | # If false, no index is generated. 185 | #html_use_index = True 186 | 187 | # If true, the index is split into individual pages for each letter. 188 | #html_split_index = False 189 | 190 | # If true, links to the reST sources are added to the pages. 191 | html_show_sourcelink = False 192 | 193 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 194 | #html_show_sphinx = True 195 | 196 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 197 | html_show_copyright = True 198 | 199 | # If true, an OpenSearch description file will be output, and all pages will 200 | # contain a tag referring to it. The value of this option must be the 201 | # base URL from which the finished HTML is served. 202 | #html_use_opensearch = '' 203 | 204 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 205 | #html_file_suffix = None 206 | 207 | # Output file base name for HTML help builder. 208 | htmlhelp_basename = 'CommPydoc' 209 | 210 | 211 | # -- Options for LaTeX output -------------------------------------------------- 212 | 213 | latex_elements = { 214 | # The paper size ('letterpaper' or 'a4paper'). 215 | #'papersize': 'letterpaper', 216 | 217 | # The font size ('10pt', '11pt' or '12pt'). 
218 | #'pointsize': '10pt', 219 | 220 | # Additional stuff for the LaTeX preamble. 221 | #'preamble': '', 222 | } 223 | 224 | # Grouping the document tree into LaTeX files. List of tuples 225 | # (source start file, target name, title, author, documentclass [howto/manual]). 226 | latex_documents = [ 227 | ('index', 'CommPy.tex', u'CommPy Documentation', 228 | u'Veeresh Taranalli', 'manual'), 229 | ] 230 | 231 | # The name of an image file (relative to this directory) to place at the top of 232 | # the title page. 233 | #latex_logo = None 234 | 235 | # For "manual" documents, if this is true, then toplevel headings are parts, 236 | # not chapters. 237 | #latex_use_parts = False 238 | 239 | # If true, show page references after internal links. 240 | #latex_show_pagerefs = False 241 | 242 | # If true, show URL addresses after external links. 243 | #latex_show_urls = False 244 | 245 | # Documents to append as an appendix to all manuals. 246 | #latex_appendices = [] 247 | 248 | # If false, no module index is generated. 249 | #latex_domain_indices = True 250 | 251 | 252 | # -- Options for manual page output -------------------------------------------- 253 | 254 | # One entry per manual page. List of tuples 255 | # (source start file, name, description, authors, manual section). 256 | man_pages = [ 257 | ('index', 'commpy', u'CommPy Documentation', 258 | [u'Veeresh Taranalli'], 1) 259 | ] 260 | 261 | # If true, show URL addresses after external links. 262 | #man_show_urls = False 263 | 264 | 265 | # -- Options for Texinfo output ------------------------------------------------ 266 | 267 | # Grouping the document tree into Texinfo files. 
List of tuples 268 | # (source start file, target name, title, author, 269 | # dir menu entry, description, category) 270 | texinfo_documents = [ 271 | ('index', 'CommPy', u'CommPy Documentation', 272 | u'Veeresh Taranalli', 'CommPy', 'One line description of project.', 273 | 'Miscellaneous'), 274 | ] 275 | 276 | # Documents to append as an appendix to all manuals. 277 | #texinfo_appendices = [] 278 | 279 | # If false, no module index is generated. 280 | #texinfo_domain_indices = True 281 | 282 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 283 | #texinfo_show_urls = 'footnote' 284 | 285 | # ----------------------------------------------------------------------------- 286 | # Autosummary 287 | # ----------------------------------------------------------------------------- 288 | 289 | if sphinx.__version__ >= "0.7": 290 | import glob 291 | autosummary_generate = glob.glob("*.rst") 292 | -------------------------------------------------------------------------------- /commpy/channelcoding/turbo.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Authors: Veeresh Taranalli 4 | # License: BSD 3-Clause 5 | 6 | """ Turbo Codes """ 7 | 8 | from numpy import array, append, zeros, exp, pi, log, empty 9 | from commpy.channelcoding import Trellis, conv_encode 10 | from commpy.utilities import dec2bitarray, bitarray2dec 11 | #from commpy.channelcoding.map_c import backward_recursion, forward_recursion_decoding 12 | 13 | def turbo_encode(msg_bits, trellis1, trellis2, interleaver): 14 | """ Turbo Encoder. 15 | 16 | Encode Bits using a parallel concatenated rate-1/3 17 | turbo code consisting of two rate-1/2 systematic 18 | convolutional component codes. 19 | 20 | Parameters 21 | ---------- 22 | msg_bits : 1D ndarray containing {0, 1} 23 | Stream of bits to be turbo encoded. 24 | 25 | trellis1 : Trellis object 26 | Trellis representation of the 27 | first code in the parallel concatenation. 
28 | 29 | trellis2 : Trellis object 30 | Trellis representation of the 31 | second code in the parallel concatenation. 32 | 33 | interleaver : Interleaver object 34 | Interleaver used in the turbo code. 35 | 36 | Returns 37 | ------- 38 | [sys_stream, non_sys_stream1, non_sys_stream2] : list of 1D ndarrays 39 | Encoded bit streams corresponding 40 | to the systematic output 41 | 42 | and the two non-systematic 43 | outputs from the two component codes. 44 | """ 45 | 46 | stream = conv_encode(msg_bits, trellis1, 'rsc') 47 | sys_stream = stream[::2] 48 | non_sys_stream_1 = stream[1::2] 49 | 50 | interlv_msg_bits = interleaver.interlv(sys_stream) 51 | puncture_matrix = array([[0, 1]]) 52 | non_sys_stream_2 = conv_encode(interlv_msg_bits, trellis2, 'rsc', puncture_matrix) 53 | 54 | sys_stream = sys_stream[0:-trellis1.total_memory] 55 | non_sys_stream_1 = non_sys_stream_1[0:-trellis1.total_memory] 56 | non_sys_stream_2 = non_sys_stream_2[0:-trellis2.total_memory] 57 | 58 | return [sys_stream, non_sys_stream_1, non_sys_stream_2] 59 | 60 | 61 | def _compute_branch_prob(code_bit_0, code_bit_1, rx_symbol_0, rx_symbol_1, 62 | noise_variance): 63 | 64 | #cdef np.float64_t code_symbol_0, code_symbol_1, branch_prob, x, y 65 | 66 | code_symbol_0 = 2*code_bit_0 - 1 67 | code_symbol_1 = 2*code_bit_1 - 1 68 | 69 | x = rx_symbol_0 - code_symbol_0 70 | y = rx_symbol_1 - code_symbol_1 71 | 72 | # Normalized branch transition probability 73 | branch_prob = exp(-(x*x + y*y)/(2*noise_variance)) 74 | 75 | return branch_prob 76 | 77 | def _backward_recursion(trellis, msg_length, noise_variance, 78 | sys_symbols, non_sys_symbols, branch_probs, 79 | priors, b_state_metrics): 80 | 81 | n = trellis.n 82 | number_states = trellis.number_states 83 | number_inputs = trellis.number_inputs 84 | 85 | codeword_array = empty(n, 'int') 86 | next_state_table = trellis.next_state_table 87 | output_table = trellis.output_table 88 | 89 | # Backward recursion 90 | for reverse_time_index in reversed(range(1, 
def _forward_recursion_decoding(trellis, mode, msg_length, noise_variance,
                                sys_symbols, non_sys_symbols, b_state_metrics,
                                f_state_metrics, branch_probs, app, L_int,
                                priors, L_ext, decoded_bits):
    """ Forward (alpha) pass of the BCJR algorithm, combined with APP output.

    Mutates its array arguments in place:
      - f_state_metrics : updated forward state metrics (column 0 = current
        step, column 1 = scratch for the next step).
      - app             : per-input a-posteriori accumulator, reset each step.
      - L_ext           : filled with the output LLR for every time index.
      - decoded_bits    : filled with hard decisions, but only when
        mode == 'decode'.

    Branch probabilities are read from the precomputed `branch_probs` table,
    so `sys_symbols`, `non_sys_symbols` and `noise_variance` are not used
    here (they are accepted for signature symmetry with the backward pass).
    `codeword_array` and `output_table` below are likewise unused in this
    function.
    """

    n = trellis.n
    number_states = trellis.number_states
    number_inputs = trellis.number_inputs

    # NOTE(review): codeword_array and output_table appear unused in this
    # function (they are used by the backward recursion).
    codeword_array = empty(n, 'int')
    next_state_table = trellis.next_state_table
    output_table = trellis.output_table

    # Forward Recursion
    for time_index in range(1, msg_length+1):

        # Reset the APP accumulator for this trellis step.
        app[:] = 0
        for current_state in range(number_states):
            for current_input in range(number_inputs):
                next_state = next_state_table[current_state, current_input]
                branch_prob = branch_probs[current_input, current_state, time_index-1]
                # Compute the forward state metrics
                f_state_metrics[next_state, 1] += (f_state_metrics[current_state, 0] *
                                                   branch_prob *
                                                   priors[current_input, time_index-1])

                # Compute APP: alpha * gamma * beta for this transition.
                app[current_input] += (f_state_metrics[current_state, 0] *
                                       branch_prob *
                                       b_state_metrics[next_state, time_index])

        # Output LLR = intrinsic information + log-likelihood ratio of the APPs.
        lappr = L_int[time_index-1] + log(app[1]/app[0])
        L_ext[time_index-1] = lappr

        # Hard decision only on the final ('decode') pass.
        if mode == 'decode':
            if lappr > 0:
                decoded_bits[time_index-1] = 1
            else:
                decoded_bits[time_index-1] = 0

        # Normalization of the forward state metrics
        f_state_metrics[:,1] = f_state_metrics[:,1]/f_state_metrics[:,1].sum()

        # Shift: next step's metrics become current, scratch column cleared.
        f_state_metrics[:,0] = f_state_metrics[:,1]
        f_state_metrics[:,1] = 0.0
def turbo_decode(sys_symbols, non_sys_symbols_1, non_sys_symbols_2, trellis,
                 noise_variance, number_iterations, interleaver, L_int = None):
    """ Turbo Decoder.

    Iteratively decodes a rate-1/3 turbo-coded stream by exchanging
    extrinsic information between two MAP (BCJR) component decoders.

    Parameters
    ----------
    sys_symbols : 1D ndarray
        Received symbols corresponding to the systematic bits.

    non_sys_symbols_1 : 1D ndarray
        Received symbols corresponding to the first parity bits.

    non_sys_symbols_2 : 1D ndarray
        Received symbols corresponding to the second parity bits.

    trellis : Trellis object
        Trellis representation of the convolutional codes
        used in the Turbo code.

    noise_variance : float
        Variance (power) of the AWGN channel.

    number_iterations : int
        Number of turbo iterations (BCJR passes per component decoder).

    interleaver : Interleaver object.
        Interleaver used in the turbo code.

    L_int : 1D ndarray, optional
        Initial intrinsic information for all received symbols.
        Defaults to all zeros, i.e. equal prior probabilities
        of bits 0 and 1.

    Returns
    -------
    decoded_bits : 1D ndarray of ints containing {0, 1}
        Decoded bit stream.
    """
    if L_int is None:
        L_int = zeros(len(sys_symbols))

    # Prior fed to the first decoder; refined every iteration.
    priors_1 = L_int

    # The second decoder operates on the interleaved systematic stream.
    sys_symbols_i = interleaver.interlv(sys_symbols)

    last_iteration = number_iterations - 1
    for iteration in range(number_iterations):

        # MAP Decoder - 1: extrinsic information only.
        L_ext_1, decoded_bits = map_decode(sys_symbols, non_sys_symbols_1,
                                           trellis, noise_variance, priors_1,
                                           'compute')
        extrinsic_1 = L_ext_1 - priors_1
        priors_2 = interleaver.interlv(extrinsic_1)

        # MAP Decoder - 2: emit hard decisions on the final pass.
        mode = 'decode' if iteration == last_iteration else 'compute'
        L_2, decoded_bits = map_decode(sys_symbols_i, non_sys_symbols_2,
                                       trellis, noise_variance, priors_2, mode)
        priors_1 = interleaver.deinterlv(L_2 - priors_2)

    # Decisions were made in the interleaved domain; restore original order.
    return interleaver.deinterlv(decoded_bits)
class Modem:
    """ Base modem providing bit <-> symbol mapping for linear modulations.

    Subclasses are expected to define:
      constellation   -- list of complex constellation points, indexed by
                         symbol number
      symbol_mapping  -- iterable of the symbol numbers (0 .. m-1)
      num_bits_symbol -- number of bits per constellation symbol
    """

    def modulate(self, input_bits):
        """ Modulate (map) an array of bits to constellation symbols.

        Parameters
        ----------
        input_bits : 1D ndarray of ints
            Inputs bits to be modulated (mapped). The length is expected
            to be a multiple of num_bits_symbol.

        Returns
        -------
        baseband_symbols : 1D ndarray of complex floats
            Modulated complex symbols.
        """
        # Each group of num_bits_symbol bits selects one constellation point.
        mapfunc = vectorize(lambda i:
                            self.constellation[bitarray2dec(input_bits[i:i + self.num_bits_symbol])])

        baseband_symbols = mapfunc(arange(0, len(input_bits), self.num_bits_symbol))

        return baseband_symbols

    def demodulate(self, input_symbols, demod_type, noise_var=0):
        """ Demodulate (map) a set of constellation symbols to corresponding bits.

        Parameters
        ----------
        input_symbols : 1D ndarray of complex floats
            Input symbols to be demodulated.

        demod_type : string
            'hard' for hard decision output (bits)
            'soft' for soft decision output (LLRs)

        noise_var : float
            AWGN variance. Needs to be specified only if demod_type is 'soft'

        Returns
        -------
        demod_bits : 1D ndarray
            Corresponding demodulated bits ('hard') or LLRs ('soft').

        Raises
        ------
        ValueError
            If demod_type is neither 'hard' nor 'soft'.
        """
        if demod_type == 'hard':
            # BUG FIX: the original passed lazy map objects into hstack(),
            # which raises TypeError on Python 3; materialize the
            # nearest-neighbour indices and their bit patterns first.
            index_list = [argmin(abs(symbol - self.constellation))
                          for symbol in input_symbols]
            demod_bits = hstack([dec2bitarray(index, self.num_bits_symbol)
                                 for index in index_list])
        elif demod_type == 'soft':
            demod_bits = zeros(len(input_symbols) * self.num_bits_symbol)
            for i in arange(len(input_symbols)):
                current_symbol = input_symbols[i]
                for bit_index in arange(self.num_bits_symbol):
                    llr_num = 0
                    llr_den = 0
                    for const_index in self.symbol_mapping:
                        # Gaussian likelihood of this constellation point.
                        likelihood = exp((-abs(current_symbol - self.constellation[const_index]) ** 2) / noise_var)
                        if (const_index >> bit_index) & 1:
                            llr_num = llr_num + likelihood
                        else:
                            llr_den = llr_den + likelihood
                    demod_bits[i * self.num_bits_symbol + self.num_bits_symbol - 1 - bit_index] = log(llr_num / llr_den)
        else:
            # BUG FIX: the original fell through with 'pass' (the comment
            # said "throw an error") and later crashed with an
            # UnboundLocalError; fail fast with a clear message instead.
            raise ValueError("demod_type must be 'hard' or 'soft'")

        return demod_bits

    def plot_constellation(self):
        """ Plot the constellation, labelling each point with its bit word. """
        beta = self.num_bits_symbol
        numbit = '0' + str(beta) + 'b'
        Bin = []
        mot = []
        const = []

        # Weight vector: real weights for the first half of the bit
        # positions, imaginary weights for the second half.
        reel = [pow(2, i) for i in range(beta // 2 - 1, -1, -1)]
        im = [1j * pow(2, i) for i in range(beta // 2 - 1, -1, -1)]
        w = concatenate((reel, im), axis=None)

        listBin = [format(i, numbit) for i in range(2 ** beta)]
        for e in listBin:
            for i in range(beta):
                Bin.append(ord(e[i]) - 48)
                if ord(e[i]) - 48 == 0:
                    mot.append(-1)
                else:
                    mot.append(1)
            const.append(dot(w, mot))
            mot = []
        symb = self.modulate(Bin)

        # Scatter the symbols and annotate each with its bit pattern.
        x = symb.real
        y = symb.imag

        plt.plot(x, y, '+', linewidth=4)
        for i in range(len(x)):
            plt.text(x[i], y[i], listBin[i])

        plt.title('Constellation')
        plt.grid()
        plt.show()
180 | 181 | """ 182 | 183 | self.m = m 184 | self.num_bits_symbol = int(log2(self.m)) 185 | self.symbol_mapping = arange(self.m) 186 | mapping_array = arange(1, sqrt(self.m) + 1) - (sqrt(self.m) / 2) 187 | self.constellation = list(map(self._constellation_symbol, 188 | list(product(mapping_array, repeat=2)))) 189 | self.Es = 2 * (self.m - 1) / 3 190 | 191 | 192 | def ofdm_tx(x, nfft, nsc, cp_length): 193 | """ OFDM Transmit signal generation """ 194 | 195 | nfft = float(nfft) 196 | nsc = float(nsc) 197 | cp_length = float(cp_length) 198 | ofdm_tx_signal = array([]) 199 | 200 | for i in range(0, shape(x)[1]): 201 | symbols = x[:, i] 202 | ofdm_sym_freq = zeros(nfft, dtype=complex) 203 | ofdm_sym_freq[1:(nsc / 2) + 1] = symbols[nsc / 2:] 204 | ofdm_sym_freq[-(nsc / 2):] = symbols[0:nsc / 2] 205 | ofdm_sym_time = ifft(ofdm_sym_freq) 206 | cp = ofdm_sym_time[-cp_length:] 207 | ofdm_tx_signal = concatenate((ofdm_tx_signal, cp, ofdm_sym_time)) 208 | 209 | return ofdm_tx_signal 210 | 211 | 212 | def ofdm_rx(y, nfft, nsc, cp_length): 213 | """ OFDM Receive Signal Processing """ 214 | 215 | num_ofdm_symbols = int(len(y) / (nfft + cp_length)) 216 | x_hat = zeros([nsc, num_ofdm_symbols], dtype=complex) 217 | 218 | for i in range(0, num_ofdm_symbols): 219 | ofdm_symbol = y[i * nfft + (i + 1) * cp_length:(i + 1) * (nfft + cp_length)] 220 | symbols_freq = fft(ofdm_symbol) 221 | x_hat[:, i] = concatenate((symbols_freq[-nsc / 2:], symbols_freq[1:(nsc / 2) + 1])) 222 | 223 | return x_hat 224 | 225 | 226 | def mimo_ml(y, h, constellation): 227 | """ MIMO ML Detection. 
def mimo_ml(y, h, constellation):
    """ MIMO ML Detection.

    Exhaustively enumerates every possible transmit vector and returns the
    one whose image through the channel is closest to the received vector.

    parameters
    ----------
    y : 1D ndarray of complex floats
        Received complex symbols (shape: num_receive_antennas x 1)

    h : 2D ndarray of complex floats
        Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)

    constellation : 1D ndarray of complex floats
        Constellation used to modulate the symbols

    """
    _, nb_tx = h.shape
    card = len(constellation)
    # Build all card ** nb_tx hypotheses, one column per candidate vector:
    # antenna k cycles through the constellation with period card ** k.
    candidates = empty((nb_tx, pow(card, nb_tx)), complex)
    for antenna in range(nb_tx):
        candidates[antenna] = repeat(tile(constellation, pow(card, antenna)),
                                     pow(card, nb_tx - antenna - 1))
    best = argmin(norm(y[:, None] - dot(h, candidates), axis=0))
    return candidates[:, best]


def kbest(y, h, constellation, K):
    """ MIMO K-best Schnorr-Euchner Detection.

    Reference: Zhan Guo and P. Nilsson, 'Algorithm and implementation of the K-best sphere decoding for MIMO detection',
    IEEE Journal on Selected Areas in Communications, vol. 24, no. 3, pp. 491-503, Mar. 2006.

    Parameters
    ----------
    y : 1D ndarray
        Received complex symbols (length: num_receive_antennas)

    h : 2D ndarray
        Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)

    constellation : 1D ndarray of floats
        Constellation used to modulate the symbols

    K : positive integer
        Number of candidates kept at each step

    Returns
    -------
    x : 1D ndarray of constellation points
        Detected vector (length: num_receive_antennas)

    raises
    ------
    ValueError
        If h has more columns than rows.
    """
    n_rows, n_cols = h.shape
    if n_cols > n_rows:
        raise ValueError('h has more columns than rows')

    # Triangularize the channel: detect on the equivalent system
    # r @ x = q^H @ y, one layer (row of r) at a time.
    q, r = qr(h)
    yt = q.conj().T.dot(y)

    card = len(constellation)
    dtype = complex if isinstance(constellation[0], complex) else float

    nb_can = 1                                       # surviving candidates
    X = empty((n_cols, K * card), dtype=dtype)       # candidate vectors
    d = tile(yt[:, None], (1, K * card))             # per-layer residuals
    d_tot = zeros(K * card, dtype=float)             # accumulated metrics
    hyp = empty(K * card, dtype=dtype)               # hypothesis scratch

    for layer in range(n_cols - 1, -1, -1):
        nb_hyp = nb_can * card

        # Duplicate the survivors once per constellation point.
        X[:, :nb_hyp] = tile(X[:, :nb_can], (1, card))
        d[:, :nb_hyp] = tile(d[:, :nb_can], (1, card))
        d_tot[:nb_hyp] = tile(d_tot[:nb_can], (1, card))

        # Extend each survivor with every constellation point on this layer.
        hyp[:nb_hyp] = repeat(constellation, nb_can)
        X[layer, :nb_hyp] = hyp[:nb_hyp]
        d[layer, :nb_hyp] -= r[layer, layer] * hyp[:nb_hyp]
        d_tot[:nb_hyp] += abs(d[layer, :nb_hyp]) ** 2

        # Keep only the K best extensions...
        order = d_tot[:nb_hyp].argsort()
        nb_can = min(nb_hyp, K)

        X[:, :nb_can] = X[:, order[:nb_can]]
        d[:, :nb_can] = d[:, order[:nb_can]]
        # ...and propagate their interference to the layers still to detect.
        d[:layer, :nb_can] -= r[:layer, layer, None] * hyp[order[:nb_can]]
        d_tot[:nb_can] = d_tot[order[:nb_can]]
    return X[:, 0]
def bit_lvl_repr(H, w):
    """ Bit-level representation of matrix H with weights w.

    Expands each column of H into len(w) weighted copies so that the
    product with a +/-1 bit vector reproduces the original symbol-level
    product: A = H @ kron(I_n, w).

    parameters
    ----------
    H : 2D ndarray (shape : nb_rx, nb_tx)
        Channel Matrix.

    w : 1D ndarray of complex (length : beta)
        Bit level representation weights. The length must be even.

    return
    ------
    A : 2D nbarray (shape : nb_rx, nb_tx*beta)
        Channel matrix adapted to the bit-level representation.

    raises
    ------
    ValueError
        If the length of w is odd.
    """
    beta = len(w)
    # Guard clause first: fail fast on an odd-length weight vector.
    if beta % 2 != 0:
        raise ValueError('Beta must be even.')
    _, n = H.shape
    return dot(H, kron(eye(n, n), w))


# ---------------------------------------------------------------------------
# Vendored from numpydoc (doc/sphinxext/docscrape.py), ported to Python 3:
# StringIO now comes from io, dict.has_key/iteritems are gone, print is a
# function, "except E, e" is a syntax error, and inspect.getargspec /
# formatargspec were removed (use inspect.signature). The group names of
# _name_rgx, lost to markup stripping, are restored from upstream numpydoc.
# ---------------------------------------------------------------------------

"""Extract reference documentation from the NumPy source tree.

"""

import inspect
import textwrap
import re
import pydoc
from io import StringIO
from warnings import warn


class Reader(object):
    """A line-based string reader.

    """
    def __init__(self, data):
        """
        Parameters
        ----------
        data : str
           String with lines separated by '\\n'.

        """
        if isinstance(data, list):
            self._str = data
        else:
            self._str = data.split('\n')  # store string as list of lines

        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        self._l = 0  # current line nr

    def read(self):
        if not self.eof():
            out = self[self._l]
            self._l += 1
            return out
        else:
            return ''

    def seek_next_non_empty_line(self):
        for l in self[self._l:]:
            if l.strip():
                break
            else:
                self._l += 1

    def eof(self):
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        start = self._l
        for line in self[start:]:
            if condition_func(line):
                return self[start:self._l]
            self._l += 1
            if self.eof():
                return self[start:self._l + 1]
        return []

    def read_to_next_empty_line(self):
        self.seek_next_non_empty_line()

        def is_empty(line):
            return not line.strip()
        return self.read_to_condition(is_empty)

    def read_to_next_unindented_line(self):
        def is_unindented(line):
            return (line.strip() and (len(line.lstrip()) == len(line)))
        return self.read_to_condition(is_unindented)

    def peek(self, n=0):
        if self._l + n < len(self._str):
            return self[self._l + n]
        else:
            return ''

    def is_empty(self):
        return not ''.join(self._str).strip()


class NumpyDocString(object):
    """Parse a numpydoc-formatted docstring into named sections."""

    def __init__(self, docstring, config={}):
        docstring = textwrap.dedent(docstring).split('\n')

        self._doc = Reader(docstring)
        self._parsed_data = {
            'Signature': '',
            'Summary': [''],
            'Extended Summary': [],
            'Parameters': [],
            'Returns': [],
            'Raises': [],
            'Warns': [],
            'Other Parameters': [],
            'Attributes': [],
            'Methods': [],
            'See Also': [],
            'Notes': [],
            'Warnings': [],
            'References': '',
            'Examples': '',
            'index': {}
            }

        self._parse()

    def __getitem__(self, key):
        return self._parsed_data[key]

    def __setitem__(self, key, val):
        # Py3 port: dict.has_key was removed, use the in operator.
        if key not in self._parsed_data:
            warn("Unknown section %s" % key)
        else:
            self._parsed_data[key] = val

    def _is_at_section(self):
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()  # e.g. Parameters

        if l1.startswith('.. index::'):
            return True

        l2 = self._doc.peek(1).strip()  # ---------- or ==========
        return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))

    def _strip(self, doc):
        i = 0
        j = 0
        for i, line in enumerate(doc):
            if line.strip():
                break

        for j, line in enumerate(doc[::-1]):
            if line.strip():
                break

        return doc[i:len(doc) - j]

    def _read_to_next_section(self):
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():  # previous line was empty
                section += ['']

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith('..'):  # index section
                yield name, data[1:]
            elif len(data) < 2:
                yield StopIteration
            else:
                yield name, self._strip(data[2:])

    def _parse_param_list(self, content):
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if ' : ' in header:
                arg_name, arg_type = header.split(' : ')[:2]
            else:
                arg_name, arg_type = header, ''

            desc = r.read_to_next_unindented_line()
            desc = dedent_lines(desc)

            params.append((arg_name, arg_type, desc))

        return params

    # Group names restored from upstream numpydoc; they were stripped from
    # the vendored copy ((?P\w+) is not even a valid pattern).
    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)

    def _parse_see_also(self, content):
        """
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3

        """
        items = []

        def parse_item_name(text):
            """Match ':role:`name`' or 'name'"""
            m = self._name_rgx.match(text)
            if m:
                g = m.groups()
                if g[1] is None:
                    return g[3], None
                else:
                    return g[2], g[1]
            raise ValueError("%s is not a item name" % text)

        def push_item(name, rest):
            if not name:
                return
            name, role = parse_item_name(name)
            items.append((name, list(rest), role))
            del rest[:]

        current_func = None
        rest = []

        for line in content:
            if not line.strip():
                continue

            m = self._name_rgx.match(line)
            if m and line[m.end():].strip().startswith(':'):
                push_item(current_func, rest)
                current_func, line = line[:m.end()], line[m.end():]
                rest = [line.split(':', 1)[1].strip()]
                if not rest[0]:
                    rest = []
            elif not line.startswith(' '):
                push_item(current_func, rest)
                current_func = None
                if ',' in line:
                    for func in line.split(','):
                        if func.strip():
                            push_item(func, [])
                elif line.strip():
                    current_func = line
            elif current_func is not None:
                rest.append(line.strip())
        push_item(current_func, rest)
        return items

    def _parse_index(self, section, content):
        """
        .. index: default
           :refguide: something, else, and more

        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]
        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        if self._is_at_section():
            return

        summary = self._doc.read_to_next_empty_line()
        summary_str = " ".join([s.strip() for s in summary]).strip()
        if re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
            self['Signature'] = summary_str
            if not self._is_at_section():
                self['Summary'] = self._doc.read_to_next_empty_line()
        else:
            self['Summary'] = summary

        if not self._is_at_section():
            self['Extended Summary'] = self._read_to_next_section()

    def _parse(self):
        self._doc.reset()
        self._parse_summary()

        for (section, content) in self._read_sections():
            if not section.startswith('..'):
                section = ' '.join([s.capitalize() for s in section.split(' ')])
            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
                           'Other Parameters', 'Attributes', 'Methods'):
                self[section] = self._parse_param_list(content)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                self[section] = content

    # string conversion routines

    def _str_header(self, name, symbol='-'):
        return [name, len(name) * symbol]

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        if self['Signature']:
            return [self['Signature'].replace('*', r'\*')] + ['']
        else:
            return ['']

    def _str_summary(self):
        if self['Summary']:
            return self['Summary'] + ['']
        else:
            return []

    def _str_extended_summary(self):
        if self['Extended Summary']:
            return self['Extended Summary'] + ['']
        else:
            return []

    def _str_param_list(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            for param, param_type, desc in self[name]:
                out += ['%s : %s' % (param, param_type)]
                out += self._str_indent(desc)
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += ['']
        return out

    def _str_see_also(self, func_role):
        if not self['See Also']:
            return []
        out = []
        out += self._str_header("See Also")
        last_had_desc = True
        for func, desc, role in self['See Also']:
            if role:
                link = ':%s:`%s`' % (role, func)
            elif func_role:
                link = ':%s:`%s`' % (func_role, func)
            else:
                link = "`%s`_" % func
            if desc or last_had_desc:
                out += ['']
                out += [link]
            else:
                out[-1] += ", %s" % link
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        out += ['.. index:: %s' % idx.get('default', '')]
        # Py3 port: iteritems() was removed.
        for section, references in idx.items():
            if section == 'default':
                continue
            out += ['   :%s: %s' % (section, ', '.join(references))]
        return out

    def __str__(self, func_role=''):
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_section('Warnings')
        out += self._str_see_also(func_role)
        for s in ('Notes', 'References', 'Examples'):
            out += self._str_section(s)
        for param_list in ('Attributes', 'Methods'):
            out += self._str_param_list(param_list)
        out += self._str_index()
        return '\n'.join(out)


def indent(str, indent=4):
    indent_str = ' ' * indent
    if str is None:
        return indent_str
    lines = str.split('\n')
    return '\n'.join(indent_str + l for l in lines)


def dedent_lines(lines):
    """Deindent a list of lines maximally"""
    return textwrap.dedent("\n".join(lines)).split("\n")


def header(text, style='-'):
    return text + '\n' + style * len(text) + '\n'


class FunctionDoc(NumpyDocString):
    def __init__(self, func, role='func', doc=None, config={}):
        self._f = func
        self._role = role  # e.g. "func" or "meth"

        if doc is None:
            if func is None:
                raise ValueError("No function or docstring given")
            doc = inspect.getdoc(func) or ''
        NumpyDocString.__init__(self, doc)

        if not self['Signature'] and func is not None:
            func, func_name = self.get_func()
            try:
                # Py3 port: inspect.getargspec/formatargspec were removed
                # in Python 3.11; inspect.signature is the replacement.
                # signature() raises ValueError for some builtins, hence
                # the widened except clause.
                argspec = str(inspect.signature(func))
                argspec = argspec.replace('*', r'\*')
                signature = '%s%s' % (func_name, argspec)
            except (TypeError, ValueError):
                signature = '%s()' % func_name
            self['Signature'] = signature

    def get_func(self):
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        out = ''

        func, func_name = self.get_func()
        signature = self['Signature'].replace('*', r'\*')

        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            if self._role not in roles:
                # Py3 port: print statement -> print function.
                print("Warning: invalid role %s" % self._role)
            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role, ''),
                                             func_name)

        out += super(FunctionDoc, self).__str__(func_role=self._role)
        return out


class ClassDoc(NumpyDocString):

    extra_public_methods = ['__call__']

    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
                 config={}):
        if not inspect.isclass(cls) and cls is not None:
            raise ValueError("Expected a class or None, but got %r" % cls)
        self._cls = cls

        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename

        if doc is None:
            if cls is None:
                raise ValueError("No class or documentation string given")
            doc = pydoc.getdoc(cls)

        NumpyDocString.__init__(self, doc)

        if config.get('show_class_members', True):
            if not self['Methods']:
                self['Methods'] = [(name, '', '')
                                   for name in sorted(self.methods)]
            if not self['Attributes']:
                self['Attributes'] = [(name, '', '')
                                      for name in sorted(self.properties)]

    @property
    def methods(self):
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if ((not name.startswith('_')
                     or name in self.extra_public_methods)
                    and callable(func))]

    @property
    def properties(self):
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if not name.startswith('_') and func is None]
math:: 25 | 26 | (1+2+3)/3 27 | 28 | cov : (N,N) ndarray 29 | Covariance matrix of the distribution. 30 | shape : tuple of ints 31 | Given a shape of, for example, (m,n,k), m*n*k samples are 32 | generated, and packed in an m-by-n-by-k arrangement. Because 33 | each sample is N-dimensional, the output shape is (m,n,k,N). 34 | 35 | Returns 36 | ------- 37 | out : ndarray 38 | The drawn samples, arranged according to `shape`. If the 39 | shape given is (m,n,...), then the shape of `out` is is 40 | (m,n,...,N). 41 | 42 | In other words, each entry ``out[i,j,...,:]`` is an N-dimensional 43 | value drawn from the distribution. 44 | 45 | Other Parameters 46 | ---------------- 47 | spam : parrot 48 | A parrot off its mortal coil. 49 | 50 | Raises 51 | ------ 52 | RuntimeError 53 | Some error 54 | 55 | Warns 56 | ----- 57 | RuntimeWarning 58 | Some warning 59 | 60 | Warnings 61 | -------- 62 | Certain warnings apply. 63 | 64 | Notes 65 | ----- 66 | 67 | Instead of specifying the full covariance matrix, popular 68 | approximations include: 69 | 70 | - Spherical covariance (`cov` is a multiple of the identity matrix) 71 | - Diagonal covariance (`cov` has non-negative elements only on the diagonal) 72 | 73 | This geometrical property can be seen in two dimensions by plotting 74 | generated data-points: 75 | 76 | >>> mean = [0,0] 77 | >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis 78 | 79 | >>> x,y = multivariate_normal(mean,cov,5000).T 80 | >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() 81 | 82 | Note that the covariance matrix must be symmetric and non-negative 83 | definite. 84 | 85 | References 86 | ---------- 87 | .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic 88 | Processes," 3rd ed., McGraw-Hill Companies, 1991 89 | .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 90 | 2nd ed., Wiley, 2001. 
91 | 92 | See Also 93 | -------- 94 | some, other, funcs 95 | otherfunc : relationship 96 | 97 | Examples 98 | -------- 99 | >>> mean = (1,2) 100 | >>> cov = [[1,0],[1,0]] 101 | >>> x = multivariate_normal(mean,cov,(3,3)) 102 | >>> print x.shape 103 | (3, 3, 2) 104 | 105 | The following is probably true, given that 0.6 is roughly twice the 106 | standard deviation: 107 | 108 | >>> print list( (x[0,0,:] - mean) < 0.6 ) 109 | [True, True] 110 | 111 | .. index:: random 112 | :refguide: random;distributions, random;gauss 113 | 114 | ''' 115 | doc = NumpyDocString(doc_txt) 116 | 117 | 118 | def test_signature(): 119 | assert doc['Signature'].startswith('numpy.multivariate_normal(') 120 | assert doc['Signature'].endswith('spam=None)') 121 | 122 | def test_summary(): 123 | assert doc['Summary'][0].startswith('Draw values') 124 | assert doc['Summary'][-1].endswith('covariance.') 125 | 126 | def test_extended_summary(): 127 | assert doc['Extended Summary'][0].startswith('The multivariate normal') 128 | 129 | def test_parameters(): 130 | assert_equal(len(doc['Parameters']), 3) 131 | assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) 132 | 133 | arg, arg_type, desc = doc['Parameters'][1] 134 | assert_equal(arg_type, '(N,N) ndarray') 135 | assert desc[0].startswith('Covariance matrix') 136 | assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' 137 | 138 | def test_other_parameters(): 139 | assert_equal(len(doc['Other Parameters']), 1) 140 | assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam']) 141 | arg, arg_type, desc = doc['Other Parameters'][0] 142 | assert_equal(arg_type, 'parrot') 143 | assert desc[0].startswith('A parrot off its mortal coil') 144 | 145 | def test_returns(): 146 | assert_equal(len(doc['Returns']), 1) 147 | arg, arg_type, desc = doc['Returns'][0] 148 | assert_equal(arg, 'out') 149 | assert_equal(arg_type, 'ndarray') 150 | assert desc[0].startswith('The drawn samples') 151 | assert desc[-1].endswith('distribution.') 152 | 153 
| def test_notes(): 154 | assert doc['Notes'][0].startswith('Instead') 155 | assert doc['Notes'][-1].endswith('definite.') 156 | assert_equal(len(doc['Notes']), 17) 157 | 158 | def test_references(): 159 | assert doc['References'][0].startswith('..') 160 | assert doc['References'][-1].endswith('2001.') 161 | 162 | def test_examples(): 163 | assert doc['Examples'][0].startswith('>>>') 164 | assert doc['Examples'][-1].endswith('True]') 165 | 166 | def test_index(): 167 | assert_equal(doc['index']['default'], 'random') 168 | print doc['index'] 169 | assert_equal(len(doc['index']), 2) 170 | assert_equal(len(doc['index']['refguide']), 2) 171 | 172 | def non_blank_line_by_line_compare(a,b): 173 | a = [l for l in a.split('\n') if l.strip()] 174 | b = [l for l in b.split('\n') if l.strip()] 175 | for n,line in enumerate(a): 176 | if not line == b[n]: 177 | raise AssertionError("Lines %s of a and b differ: " 178 | "\n>>> %s\n<<< %s\n" % 179 | (n,line,b[n])) 180 | def test_str(): 181 | non_blank_line_by_line_compare(str(doc), 182 | """numpy.multivariate_normal(mean, cov, shape=None, spam=None) 183 | 184 | Draw values from a multivariate normal distribution with specified 185 | mean and covariance. 186 | 187 | The multivariate normal or Gaussian distribution is a generalisation 188 | of the one-dimensional normal distribution to higher dimensions. 189 | 190 | Parameters 191 | ---------- 192 | mean : (N,) ndarray 193 | Mean of the N-dimensional distribution. 194 | 195 | .. math:: 196 | 197 | (1+2+3)/3 198 | 199 | cov : (N,N) ndarray 200 | Covariance matrix of the distribution. 201 | shape : tuple of ints 202 | Given a shape of, for example, (m,n,k), m*n*k samples are 203 | generated, and packed in an m-by-n-by-k arrangement. Because 204 | each sample is N-dimensional, the output shape is (m,n,k,N). 205 | 206 | Returns 207 | ------- 208 | out : ndarray 209 | The drawn samples, arranged according to `shape`. 
If the 210 | shape given is (m,n,...), then the shape of `out` is is 211 | (m,n,...,N). 212 | 213 | In other words, each entry ``out[i,j,...,:]`` is an N-dimensional 214 | value drawn from the distribution. 215 | 216 | Other Parameters 217 | ---------------- 218 | spam : parrot 219 | A parrot off its mortal coil. 220 | 221 | Raises 222 | ------ 223 | RuntimeError : 224 | Some error 225 | 226 | Warns 227 | ----- 228 | RuntimeWarning : 229 | Some warning 230 | 231 | Warnings 232 | -------- 233 | Certain warnings apply. 234 | 235 | See Also 236 | -------- 237 | `some`_, `other`_, `funcs`_ 238 | 239 | `otherfunc`_ 240 | relationship 241 | 242 | Notes 243 | ----- 244 | Instead of specifying the full covariance matrix, popular 245 | approximations include: 246 | 247 | - Spherical covariance (`cov` is a multiple of the identity matrix) 248 | - Diagonal covariance (`cov` has non-negative elements only on the diagonal) 249 | 250 | This geometrical property can be seen in two dimensions by plotting 251 | generated data-points: 252 | 253 | >>> mean = [0,0] 254 | >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis 255 | 256 | >>> x,y = multivariate_normal(mean,cov,5000).T 257 | >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() 258 | 259 | Note that the covariance matrix must be symmetric and non-negative 260 | definite. 261 | 262 | References 263 | ---------- 264 | .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic 265 | Processes," 3rd ed., McGraw-Hill Companies, 1991 266 | .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," 267 | 2nd ed., Wiley, 2001. 

  Examples
  --------
  >>> mean = (1,2)
  >>> cov = [[1,0],[1,0]]
  >>> x = multivariate_normal(mean,cov,(3,3))
  >>> print x.shape
  (3, 3, 2)

  The following is probably true, given that 0.6 is roughly twice the
  standard deviation:

  >>> print list( (x[0,0,:] - mean) < 0.6 )
  [True, True]

  .. index:: random
     :refguide: random;distributions, random;gauss""")


# Render the shared fixture docstring through SphinxDocString and compare the
# full reST output line by line.  The expected string below is whitespace-
# sensitive: field-list indentation, rubric placement, and the ``.. index::``
# block must match exactly what docscrape_sphinx emits.
# NOTE(review): non_blank_line_by_line_compare is defined earlier in this
# file — presumably it skips blank lines when comparing; verify there.
def test_sphinx_str():
    sphinx_doc = SphinxDocString(doc_txt)
    non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
   single: random;distributions, random;gauss

Draw values from a multivariate normal distribution with specified
mean and covariance.

The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.

:Parameters:

    **mean** : (N,) ndarray

        Mean of the N-dimensional distribution.

        .. math::

           (1+2+3)/3

    **cov** : (N,N) ndarray

        Covariance matrix of the distribution.

    **shape** : tuple of ints

        Given a shape of, for example, (m,n,k), m*n*k samples are
        generated, and packed in an m-by-n-by-k arrangement.  Because
        each sample is N-dimensional, the output shape is (m,n,k,N).

:Returns:

    **out** : ndarray

        The drawn samples, arranged according to `shape`.  If the
        shape given is (m,n,...), then the shape of `out` is is
        (m,n,...,N).

        In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
        value drawn from the distribution.

:Other Parameters:

    **spam** : parrot

        A parrot off its mortal coil.

:Raises:

    **RuntimeError** :

        Some error

:Warns:

    **RuntimeWarning** :

        Some warning

.. warning::

    Certain warnings apply.

.. seealso::

    :obj:`some`, :obj:`other`, :obj:`funcs`

    :obj:`otherfunc`
        relationship

.. rubric:: Notes

Instead of specifying the full covariance matrix, popular
approximations include:

  - Spherical covariance (`cov` is a multiple of the identity matrix)
  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)

This geometrical property can be seen in two dimensions by plotting
generated data-points:

>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis

>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()

Note that the covariance matrix must be symmetric and non-negative
definite.

.. rubric:: References

.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
       Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
       2nd ed., Wiley, 2001.

.. only:: latex

   [1]_, [2]_

.. rubric:: Examples

>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)

The following is probably true, given that 0.6 is roughly twice the
standard deviation:

>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")


# Fixture: docstring whose Parameters entries have type strings but whose
# descriptions immediately follow the header line, with no extended summary.
doc2 = NumpyDocString("""
    Returns array of indices of the maximum values of along the given axis.

    Parameters
    ----------
    a : {array_like}
        Array to look in.
    axis : {None, integer}
        If None, the index is into the flattened array, otherwise along
        the specified axis""")

# Both parameters must be parsed even though the docstring ends mid-section.
def test_parameters_without_extended_description():
    assert_equal(len(doc2['Parameters']), 2)

# Fixture: signature line containing *args/**kwargs-style stars that reST
# would otherwise interpret as emphasis markers.
doc3 = NumpyDocString("""
    my_signature(*params, **kwds)

    Return this and that.
    """)

# Stars in the rendered signature must come out backslash-escaped for reST.
# NOTE(review): '\*' inside a non-raw string is an invalid escape sequence
# (SyntaxWarning on modern CPython); a raw string would be the proper fix.
def test_escape_stars():
    signature = str(doc3).split('\n')[0]
    assert_equal(signature, 'my_signature(\*params, \*\*kwds)')

# Fixture: signature plus one-line summary only — no extended summary section.
doc4 = NumpyDocString(
    """a.conj()

    Return an array with all complex-valued elements conjugated.""")

# 'Extended Summary' must parse to an empty list, not be absent or raise.
def test_empty_extended_summary():
    assert_equal(doc4['Extended Summary'], [])

# Fixture: docstring exercising the Raises and Warns sections.
doc5 = NumpyDocString(
    """
    a.something()

    Raises
    ------
    LinAlgException
        If array is singular.

    Warns
    -----
    SomeWarning
        If needed
    """)

# Raises entries parse as (name, type, description) triples; the middle
# element (the type string) is unused here, hence the throwaway _.
def test_raises():
    assert_equal(len(doc5['Raises']), 1)
    name,_,desc = doc5['Raises'][0]
    assert_equal(name,'LinAlgException')
    assert_equal(desc,['If array is singular.'])

# Same triple structure for the Warns section.
def test_warns():
    assert_equal(len(doc5['Warns']), 1)
    name,_,desc = doc5['Warns'][0]
    assert_equal(name,'SomeWarning')
    assert_equal(desc,['If needed'])

# Parse a See Also section covering every supported entry form: bare names,
# comma-separated lists, name-with-description, multi-line descriptions,
# explicit :meth:/:obj:/:class: roles, and a continuation description line.
# Entries parse as (name, description, role) triples.
def test_see_also():
    doc6 = NumpyDocString(
    """
    z(x,theta)

    See Also
    --------
    func_a, func_b, func_c
    func_d : some equivalent func
    foo.func_e : some other func over
        multiple lines
    func_f, func_g, :meth:`func_h`, func_j,
    func_k
    :obj:`baz.obj_q`
    :class:`class_j`: fubar
        foobar
    """)

    assert len(doc6['See Also']) == 12
    for func, desc, role in doc6['See Also']:
        # Bare references (no ': description') must carry an empty description.
        if func in ('func_a', 'func_b', 'func_c', 'func_f',
                    'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
            assert(not desc)
        else:
            assert(desc)

        # Explicit reST roles are preserved; un-annotated names get None.
        if func == 'func_h':
            assert role == 'meth'
        elif func == 'baz.obj_q':
            assert role == 'obj'
        elif func == 'class_j':
            assert role == 'class'
        else:
            assert role is None

        # Descriptions keep their line structure (one list item per line).
        if func == 'func_d':
            assert desc == ['some equivalent func']
        elif func == 'foo.func_e':
            assert desc == ['some other func over', 'multiple lines']
        elif func == 'class_j':
            assert desc == ['fubar', 'foobar']

# Rendering a See Also section through FunctionDoc with role='func' must wrap
# each bare name in :func:`...` and indent relationship descriptions.
def test_see_also_print():
    class Dummy(object):
        """
        See Also
        --------
        func_a, func_b
        func_c : some relationship
            goes here
        func_d
        """
        pass

    obj = Dummy()
    s = str(FunctionDoc(obj, role='func'))
    assert(':func:`func_a`, :func:`func_b`' in s)
    assert(' some relationship' in s)
    assert(':func:`func_d`' in s)

# Fixture: docstring whose first line is blank — summary starts on line two.
doc7 = NumpyDocString("""

        Doc starts on second line.

        """)

# The leading blank line must be skipped when extracting the Summary.
def test_empty_first_line():
    assert doc7['Summary'][0].startswith('Doc starts')


# A docstring with a section header but no summary must render without
# raising (str() is called purely for its side effect of full rendering).
def test_no_summary():
    str(SphinxDocString("""
    Parameters
    ----------"""))


# Non-ASCII docstrings must survive parsing intact.
# NOTE(review): the u''-literal + .encode('utf-8') comparison is Python-2-era;
# on Python 3 this compares str against bytes — confirm target interpreter.
def test_unicode():
    doc = SphinxDocString("""
    öäöäöäöäöåååå

    öäöäöäööäååå

    Parameters
    ----------
    ååå : äää
        ööö

    Returns
    -------
    ååå : ööö
        äää

    """)
    assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')

# With use_plots enabled, a matplotlib Examples section is wrapped in a
# ``plot::`` directive — but never twice if the author already wrote one.
def test_plot_examples():
    cfg = dict(use_plots=True)

    doc = SphinxDocString("""
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> plt.plot([1,2,3],[4,5,6])
    >>> plt.show()
    """, config=cfg)
    assert 'plot::' in str(doc), str(doc)

    doc = SphinxDocString("""
    Examples
    --------
    .. plot::

        import matplotlib.pyplot as plt
        plt.plot([1,2,3],[4,5,6])
        plt.show()
    """, config=cfg)
    # Exactly one directive: the pre-existing one must not be duplicated.
    assert str(doc).count('plot::') == 1, str(doc)

# The show_class_members config flag controls whether a Methods section
# (listing spam/ham) is generated; SphinxClassDoc must additionally emit an
# autosummary table when members are shown.
def test_class_members():

    class Dummy(object):
        """
        Dummy class.

        """
        def spam(self, a, b):
            """Spam\n\nSpam spam."""
            pass
        def ham(self, c, d):
            """Cheese\n\nNo cheese."""
            pass

    for cls in (ClassDoc, SphinxClassDoc):
        doc = cls(Dummy, config=dict(show_class_members=False))
        assert 'Methods' not in str(doc), (cls, str(doc))
        assert 'spam' not in str(doc), (cls, str(doc))
        assert 'ham' not in str(doc), (cls, str(doc))

        doc = cls(Dummy, config=dict(show_class_members=True))
        assert 'Methods' in str(doc), (cls, str(doc))
        assert 'spam' in str(doc), (cls, str(doc))
        assert 'ham' in str(doc), (cls, str(doc))

        if cls is SphinxClassDoc:
            assert '.. autosummary::' in str(doc), str(doc)

# NOTE(review): nose is unmaintained and does not run on modern Python;
# this entry point is kept for compatibility with the original test runner.
if __name__ == "__main__":
    import nose
    nose.run()