├── .github └── workflows │ ├── ci.yml │ └── publish-to-pypi.yml ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── pyproject.toml ├── setup.py ├── src └── pylsl │ ├── __init__.py │ ├── examples │ ├── GetTimeCorrection.py │ ├── HandleMetadata.py │ ├── PerformanceTest.py │ ├── README.md │ ├── ReceiveAndPlot.py │ ├── ReceiveData.py │ ├── ReceiveDataInChunks.py │ ├── ReceiveStringMarkers.py │ ├── SendData.py │ ├── SendDataAdvanced.py │ ├── SendStringMarkers.py │ └── __init__.py │ ├── info.py │ ├── inlet.py │ ├── lib │ └── __init__.py │ ├── outlet.py │ ├── resolve.py │ └── util.py └── test ├── test_format.py └── test_info.py /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: 8 | - main 9 | workflow_dispatch: 10 | 11 | env: 12 | LSL_RELEASE_URL: "https://github.com/sccn/liblsl/releases/download/v1.16.2" 13 | LSL_RELEASE: "1.16.2" 14 | 15 | jobs: 16 | 17 | style: 18 | name: Check style 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v4 22 | - uses: astral-sh/ruff-action@v1 23 | - uses: astral-sh/ruff-action@v1 24 | with: 25 | args: "format --check" 26 | 27 | test: 28 | needs: style 29 | strategy: 30 | matrix: 31 | python-version: ["3.9", "3.10", "3.11", "3.12"] 32 | os: ["ubuntu-latest", "windows-latest", "macOS-latest"] 33 | 34 | name: Run tests (${{ matrix.os }} Python ${{ matrix.python-version }}) 35 | runs-on: ${{ matrix.os }} 36 | steps: 37 | - uses: actions/checkout@v4 38 | - name: Download liblsl (Windows) 39 | if: matrix.os == 'windows-latest' 40 | run: | 41 | curl -L https://github.com/sccn/liblsl/releases/download/v1.16.2/liblsl-1.16.2-Win_amd64.zip -o liblsl.zip 42 | unzip -oj liblsl.zip bin/lsl* -d src/pylsl/lib/ 43 | - name: Install liblsl (MacOS) 44 | if: matrix.os == 'macos-latest' 45 | run: brew install labstreaminglayer/tap/lsl 46 | - name: Install liblsl (Ubuntu) 47 | if: startsWith(matrix.os, 'ubuntu-') 48 | run: | 49 | sudo apt install -y libpugixml-dev 50 | curl -L ${LSL_RELEASE_URL}/liblsl-${LSL_RELEASE}-$(lsb_release -sc)_amd64.deb -o liblsl.deb 51 | sudo apt install ./liblsl.deb 52 | - name: Install uv 53 | uses: astral-sh/setup-uv@v4 54 | with: 55 | python-version: ${{ matrix.python-version }} 56 | - name: Build 57 | run: uv sync --all-extras 58 | - name: Run tests 59 | run: uv run pytest 60 | -------------------------------------------------------------------------------- /.github/workflows/publish-to-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Build and publish Python 🐍 distributions 📦 to PyPI 2 | 3 | on: 4 | release: 5 | types: [published] 6 | workflow_dispatch: 7 | 8 | env: 9 | LSL_RELEASE_URL: "https://github.com/sccn/liblsl/releases/download/" 10 | LSL_RELEASE: "1.16.2" 11 | 12 | defaults: 13 | run: 14 | shell: bash 15 | 16 | jobs: 17 | deploy: 18 | name: ${{ matrix.config.name }} 19 | runs-on: ${{ matrix.config.os }} 20 | permissions: 21 | id-token: write 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | config: 26 | - name: "ubuntu-24.04" 27 | os: "ubuntu-latest" 28 | pyarch: "x64" 29 | - name: "windows-x64" 30 | os: "windows-latest" 31 | arch: "amd64" 32 | pyarch: "x64" 33 | - name: "windows-x86" 34 | os: "windows-latest" 35 | arch: "i386" 36 | pyarch: "x86" 37 | steps: 38 | - uses: actions/checkout@v4 39 | - name: Download liblsl (Windows) 40 | if: matrix.config.os == 'windows-latest' 41 | run: | 42 | curl 
-L ${LSL_RELEASE_URL}/v${LSL_RELEASE}/liblsl-${LSL_RELEASE}-Win_${{ matrix.config.arch}}.zip -o liblsl.zip 43 | unzip -oj liblsl.zip bin/lsl* -d src/pylsl/lib 44 | - name: Set up Python 3.x 45 | uses: actions/setup-python@v4 46 | with: 47 | python-version: "3.x" 48 | architecture: ${{ matrix.config.pyarch }} 49 | - name: Install uv 50 | uses: astral-sh/setup-uv@v4 51 | - name: Build Package (Linux) 52 | if: matrix.config.os != 'windows-latest' 53 | run: uv build 54 | - name: Build Package (Windows) 55 | if: matrix.config.os == 'windows-latest' 56 | run: uv build --wheel 57 | - name: Publish package distributions to PyPI 58 | run: uv publish 59 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /dist/ 2 | /.idea/ 3 | /src/pylsl.egg-info/ 4 | /build/ 5 | __pycache__ 6 | *.so 7 | *.so.* 8 | *.pyc 9 | *.dll 10 | *.dylib 11 | *.cprof 12 | *.png 13 | /wheelhouse/ 14 | /src/pylsl/include 15 | /src/pylsl/share 16 | .DS_Store 17 | 18 | uv.lock 19 | /src/pylsl/__version__.py 20 | /src/pylsl/lib/lslver.exe 21 | liblsl.zip 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012-2018 Christian A. Kothe 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE 3 | 4 | # If using Python 2.6 or less, then have to include package data, even though 5 | # it's already declared in setup.py 6 | include src/pylsl/lib/liblsl*.* 7 | include src/pylsl/lib/lsl*.* 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pylsl 2 | 3 | ![publish workflow](https://github.com/labstreaminglayer/pylsl/actions/workflows/publish-to-pypi.yml/badge.svg) 4 | [![PyPI version](https://badge.fury.io/py/pylsl.svg)](https://badge.fury.io/py/pylsl) 5 | 6 | This is the Python interface to the [Lab Streaming Layer (LSL)](https://github.com/sccn/labstreaminglayer). 7 | LSL is an overlay network for real-time exchange of time series between applications, 8 | most often used in research environments. 
LSL has clients for many other languages 9 | and platforms that are compatible with each other. 10 | 11 | Let us know if you encounter any bugs (ideally using the issue tracker on 12 | the GitHub project). 13 | 14 | # Installation 15 | 16 | ## Prerequisites 17 | 18 | On all non-Windows platforms and for some Windows-Python combinations, you must first obtain a liblsl shared library. See the [liblsl repo documentation](https://github.com/sccn/liblsl) for further details. 19 | 20 | ## Get pylsl from PyPI 21 | 22 | * `pip install pylsl` 23 | 24 | ## Get pylsl from source 25 | 26 | This should only be necessary if you need to modify or debug pylsl. 27 | 28 | * Download the pylsl source: `git clone https://github.com/labstreaminglayer/pylsl.git && cd pylsl` 29 | * From the `pylsl` working directory, run `pip install .`. 30 | * Note: You can use `pip install -e .` to install while keeping the files in-place. This is convenient for developing pylsl. 31 | 32 | # Usage 33 | 34 | See the examples in src/pylsl/examples. Note that these can be run directly from the commandline with (e.g.) `python -m pylsl.examples.{name-of-example}`. 35 | 36 | You can get a list of the examples with `python -c "import pylsl.examples; help(pylsl.examples)"` 37 | 38 | ## liblsl loading 39 | 40 | `pylsl` will search for `liblsl` first at the filepath specified by an environment variable named `PYLSL_LIB`, then in the package directory (default location for Windows), then finally in normal system library folders. 41 | 42 | If the shared object is not installed onto a standard search path (or it is but can't be found for some [other bug](https://github.com/labstreaminglayer/pylsl/issues/48)), then we recommend that you copy it to the pylsl installed module path's `lib` subfolder. i.e. `{path/to/env/}site-packages/pylsl/lib`. 43 | 44 | * The `site-packages/pylsl` path will only exist _after_ you install `pylsl` in your Python environment. 45 | * You may have to create the `lib` subfolder. 46 | * Use `python -m site` to find the "site-packages" path. 47 | * Use `cp -L` on platforms that use symlinks. 48 | 49 | Alternatively, you can use an environment variable. Set the `PYLSL_LIB` environment variable to the location of the library or set `LD_LIBRARY_PATH` to the folder containing the library. For example, 50 | 51 | 1. `PYLSL_LIB=/usr/local/lib/liblsl.so python -m pylsl.examples.{name-of-example}`, or 52 | 2. `LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib python -m pylsl.examples.{name-of-example}` 53 | 54 | # For maintainers 55 | 56 | ## Continuous Integration 57 | 58 | pylsl uses continuous integration and distribution. GitHub Actions will upload a new release to pypi whenever a Release is created in GitHub. 59 | Before creating the GitHub release, be sure to bump the version number in `pylsl/version.py` and consider updating the liblsl dependency 60 | in `.github/workflows/publish-to-pypi.yml`. 61 | 62 | ### Linux Binaries Deprecated 63 | 64 | We recently stopped building binary wheels for Linux. In practice, the `manylinux` dependencies were often incompatible with real systems. 65 | 66 | ## Manual Distribution 67 | 68 | 1. Manual way: 69 | 1. `rm -Rf build dist *.egg-info` 70 | 1. `python setup.py sdist bdist_wheel` 71 | 1. Additional steps on Linux: 72 | * `auditwheel repair dist/*.whl -w dist` 73 | * `rm dist/*-linux_x86_64.whl` 74 | 1. `twine upload dist/*` 75 | 1. For conda 76 | 1. build liblsl: `conda build ../liblsl/` 77 | 1. 
`conda build .` 78 | 79 | # Known Issues with Multithreading on Linux 80 | 81 | * At least for some versions of pylsl, it has been reported that running on Linux one cannot call ``pylsl`` functions from a thread that is not the main thread. This has been reported to cause access violations, and can occur during pulling from an inlet, and also from accessing an inlets info structure in a thread. 82 | * Recent tests with multithreading (especially when safeguarding library calls with locks) using Python 3.7.6. with pylsl 1.14 on Linux Mint 20 suggest that this issue is solved, or at least depends on your machine. See https://github.com/labstreaminglayer/pylsl/issues/29 83 | 84 | # Acknowledgments 85 | 86 | Pylsl was primarily written by Christian Kothe while at Swartz Center for Computational Neuroscience, UCSD. The LSL project was funded by the Army Research Laboratory under Cooperative Agreement Number W911NF-10-2-0022 as well as through NINDS grant 3R01NS047293-06S1. pylsl is maintained primarily by Chadwick Boulay. Thanks for contributions, bug reports, and suggestions go to Bastian Venthur, David Medine, Clemens Brunner, and Matthew Grivich. 87 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pylsl" 3 | description = "Python library for importing XDF (Extensible Data Format)" 4 | authors = [ 5 | { name = "Christian Kothe", email = "christian.kothe@intheon.io" }, 6 | { name = "Chadwick Boulay", email = "chadwick.boulay@gmail.com" } 7 | ] 8 | #license = {file = "LICENSE"} # Bug in setuptools. https://github.com/astral-sh/uv/issues/9513 9 | readme = "README.md" 10 | requires-python = ">=3.9" 11 | dynamic = ["version"] 12 | keywords = [ 13 | "networking", 14 | "LSL", 15 | "Lab Streaming Layer", 16 | "labstreaminglayer", 17 | "data", 18 | "acquisition", 19 | "stream" 20 | ] 21 | classifiers = [ 22 | "Development Status :: 5 - Production/Stable", 23 | "Intended Audience :: Developers", 24 | "Intended Audience :: Science/Research", 25 | "Topic :: System :: Networking", 26 | "Topic :: Scientific/Engineering", 27 | "License :: OSI Approved :: MIT License", 28 | "Operating System :: Microsoft :: Windows", 29 | "Operating System :: POSIX :: Linux", 30 | "Operating System :: MacOS", 31 | "Programming Language :: Python :: 3", 32 | "Programming Language :: Python :: 3.9", 33 | "Programming Language :: Python :: 3.10", 34 | "Programming Language :: Python :: 3.11", 35 | "Programming Language :: Python :: 3.12", 36 | ] 37 | dependencies = [ 38 | "numpy>=1.21,<3", 39 | ] 40 | 41 | [project.urls] 42 | Repository = "https://github.com/labstreaminglayer/pylsl" 43 | Issues = "https://github.com/labstreaminglayer/pylsl/issues" 44 | 45 | [project.optional-dependencies] 46 | examples = [ 47 | "pyqtgraph>=0.13.7", 48 | ] 49 | #Changelog = "https://github.com/labstreaminglayer/pylsl/blob/main/CHANGELOG.md" 50 | 51 | [dependency-groups] 52 | dev = [ 53 | "pytest>=8.3.4", 54 | "ruff>=0.8.2", 55 | ] 56 | 57 | [build-system] 58 | requires = ["setuptools>=64", "setuptools-scm>=8"] 59 | build-backend = "setuptools.build_meta" 60 | 61 | [tool.setuptools] 62 | license-files = [] 63 | 64 | [tool.setuptools_scm] 65 | version_file = "src/pylsl/__version__.py" 66 | 67 | [tool.setuptools.package-data] 68 | pylsl = ["lib/*.dll"] 69 | -------------------------------------------------------------------------------- /setup.py: 
-------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from setuptools import setup 4 | # from setuptools.dist import Distribution 5 | 6 | 7 | # class BinaryDistribution(Distribution): 8 | # """Distribution which always forces a binary package with platform name""" 9 | # def has_ext_modules(foo): 10 | # return sys.platform.startswith("win") 11 | 12 | try: 13 | from wheel.bdist_wheel import bdist_wheel as _bdist_wheel 14 | 15 | class bdist_wheel(_bdist_wheel): 16 | def finalize_options(self): 17 | super().finalize_options() 18 | self.root_is_pure = not sys.platform.startswith("win") 19 | 20 | def get_tag(self): 21 | python, abi, plat = _bdist_wheel.get_tag(self) 22 | # We don't contain any python source 23 | python, abi = "py2.py3", "none" 24 | return python, abi, plat 25 | except ImportError: 26 | bdist_wheel = None 27 | 28 | 29 | setup( 30 | # distclass=BinaryDistribution, 31 | cmdclass={"bdist_wheel": bdist_wheel}, 32 | ) 33 | -------------------------------------------------------------------------------- /src/pylsl/__init__.py: -------------------------------------------------------------------------------- 1 | """Python API for the lab streaming layer. 2 | 3 | The lab streaming layer provides a set of functions to make instrument data 4 | accessible in real time within a lab network. From there, streams can be 5 | picked up by recording programs, viewing programs or custom experiment 6 | applications that access data streams in real time. 7 | 8 | The API covers two areas: 9 | - The "push API" allows to create stream outlets and to push data (regular 10 | or irregular measurement time series, event data, coded audio/video frames, 11 | etc.) into them. 12 | - The "pull API" allows to create stream inlets and read time-synched 13 | experiment data from them (for recording, viewing or experiment control). 
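A minimal sketch of the two APIs (the stream name, content type, channel
count, sampling rate, and source_id below are illustrative values, not
defaults):

    from pylsl import StreamInfo, StreamOutlet, StreamInlet, resolve_byprop

    # push API: declare a stream and push one sample through its outlet
    info = StreamInfo("Example", "EEG", 8, 100.0, "float32", "example-uid")
    outlet = StreamOutlet(info)
    outlet.push_sample([0.0] * 8)

    # pull API: resolve the stream by content type and read one sample back
    inlet = StreamInlet(resolve_byprop("type", "EEG")[0])
    sample, timestamp = inlet.pull_sample(timeout=1.0)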
14 | 15 | """ 16 | 17 | from .__version__ import __version__ as __version__ 18 | from .resolve import ContinuousResolver as ContinuousResolver 19 | from .resolve import resolve_streams as resolve_streams 20 | from .resolve import resolve_bypred as resolve_bypred 21 | from .resolve import resolve_byprop as resolve_byprop 22 | from .info import StreamInfo as StreamInfo 23 | from .inlet import StreamInlet as StreamInlet 24 | from .outlet import StreamOutlet as StreamOutlet 25 | from .util import IRREGULAR_RATE as IRREGULAR_RATE 26 | from .util import FOREVER as FOREVER 27 | from .util import proc_none as proc_none 28 | from .util import proc_clocksync as proc_clocksync 29 | from .util import proc_dejitter as proc_dejitter 30 | from .util import proc_monotonize as proc_monotonize 31 | from .util import proc_threadsafe as proc_threadsafe 32 | from .util import proc_ALL as proc_ALL 33 | from .util import protocol_version as protocol_version 34 | from .util import library_version as library_version 35 | from .util import library_info as library_info 36 | from .util import local_clock as local_clock 37 | from .lib import cf_int8 as cf_int8 38 | from .lib import cf_int16 as cf_int16 39 | from .lib import cf_int32 as cf_int32 40 | from .lib import cf_int64 as cf_int64 41 | from .lib import cf_float32 as cf_float32 42 | from .lib import cf_double64 as cf_double64 43 | from .lib import cf_string as cf_string 44 | -------------------------------------------------------------------------------- /src/pylsl/examples/GetTimeCorrection.py: -------------------------------------------------------------------------------- 1 | """Example program to show how to read a multi-channel time series from LSL.""" 2 | 3 | import time 4 | 5 | from pylsl import StreamInlet, resolve_byprop 6 | 7 | 8 | def main(): 9 | # first resolve an EEG stream on the lab network 10 | print("looking for an EEG stream...") 11 | streams = resolve_byprop("type", "EEG") 12 | info = streams[0] 13 | 14 | # create a new inlet to read from the stream 15 | inlet = StreamInlet(info) 16 | 17 | print("Connected to outlet " + info.name() + "@" + info.hostname()) 18 | while True: 19 | offset = inlet.time_correction() 20 | print("Offset: " + str(offset)) 21 | time.sleep(1) 22 | 23 | 24 | if __name__ == "__main__": 25 | main() 26 | -------------------------------------------------------------------------------- /src/pylsl/examples/HandleMetadata.py: -------------------------------------------------------------------------------- 1 | """Example program that shows how to attach meta-data to a stream, and how to 2 | later on retrieve the meta-data again at the receiver side.""" 3 | 4 | import time 5 | 6 | import numpy as np 7 | from pylsl import StreamInfo, StreamInlet, StreamOutlet, resolve_byprop 8 | 9 | 10 | def main(): 11 | # create a new StreamInfo object which shall describe our stream 12 | info = StreamInfo("MetaTester", "EEG", 8, 100, "float32", "myuid56872") 13 | 14 | # now attach some meta-data (in accordance with XDF format, 15 | # see also https://github.com/sccn/xdf/wiki/Meta-Data) 16 | chns = info.desc().append_child("channels") 17 | ch_labels = ["C3", "C4", "Cz", "FPz", "POz", "CPz", "O1", "O2"] 18 | for label in ch_labels: 19 | ch = chns.append_child("channel") 20 | ch.append_child_value("label", label) 21 | ch.append_child_value("unit", "microvolts") 22 | ch.append_child_value("type", "EEG") 23 | info.desc().append_child_value("manufacturer", "SCCN") 24 | cap = info.desc().append_child("cap") 25 | cap.append_child_value("name", "EasyCap") 
26 | cap.append_child_value("size", "54") 27 | cap.append_child_value("labelscheme", "10-20") 28 | 29 | # create outlet for the stream 30 | outlet = StreamOutlet(info) 31 | 32 | # Send a sample into the outlet... 33 | dummy_sample = np.arange(len(ch_labels), dtype=np.float32) 34 | outlet.push_sample(dummy_sample) 35 | 36 | # === the following could run on another computer === 37 | 38 | # first we resolve a stream whose name is MetaTester (note that there are 39 | # other ways to query a stream, too - for instance by content-type) 40 | results = resolve_byprop("name", "MetaTester") 41 | 42 | # open an inlet so we can read the stream's data (and meta-data) 43 | inlet = StreamInlet(results[0]) 44 | 45 | # get the full stream info (including custom meta-data) and dissect it 46 | info = inlet.info() 47 | print("The stream's XML meta-data is: ") 48 | print(info.as_xml()) 49 | print("The manufacturer is: %s" % info.desc().child_value("manufacturer")) 50 | print("Cap circumference is: %s" % info.desc().child("cap").child_value("size")) 51 | print("The channel labels are as follows:") 52 | ch = info.desc().child("channels").child("channel") 53 | for k in range(info.channel_count()): 54 | print(" " + ch.child_value("label")) 55 | ch = ch.next_sibling() 56 | 57 | time.sleep(3) 58 | 59 | 60 | if __name__ == "__main__": 61 | main() 62 | -------------------------------------------------------------------------------- /src/pylsl/examples/PerformanceTest.py: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | 4 | import numpy as np 5 | 6 | from pylsl import ( 7 | StreamInfo, 8 | StreamInlet, 9 | StreamOutlet, 10 | local_clock, 11 | proc_clocksync, 12 | proc_dejitter, 13 | proc_monotonize, 14 | resolve_bypred, 15 | resolve_byprop, 16 | ) 17 | 18 | try: 19 | from pyfftw.interfaces.numpy_fft import irfft 20 | # Performs much better than numpy's fftpack 21 | except ImportError: 22 | from numpy.fft import irfft 23 | try: 24 | import sys 25 | 26 | import pyqtgraph as pg 27 | 28 | haspyqtgraph = True 29 | except ImportError: 30 | haspyqtgraph = False 31 | 32 | 33 | # The code for pink noise generation is taken from 34 | # https://github.com/python-acoustics/python-acoustics/blob/master/acoustics/generator.py 35 | # which is distributed under the BSD license. 36 | def ms(x): 37 | """Mean value of signal `x` squared. 38 | :param x: Dynamic quantity. 39 | :returns: Mean squared of `x`. 40 | """ 41 | return (np.abs(x) ** 2.0).mean() 42 | 43 | 44 | def normalize(y, x=None): 45 | """normalize power in y to a (standard normal) white noise signal. 46 | Optionally normalize to power in signal `x`. 47 | #The mean power of a Gaussian with :math:`\\mu=0` and :math:`\\sigma=1` is 1. 48 | """ 49 | # return y * np.sqrt( (np.abs(x)**2.0).mean() / (np.abs(y)**2.0).mean() ) 50 | if x is not None: 51 | x = ms(x) 52 | else: 53 | x = 1.0 54 | return y * np.sqrt(x / ms(y)) 55 | # return y * np.sqrt( 1.0 / (np.abs(y)**2.0).mean() ) 56 | 57 | 58 | def pink(N): 59 | """ 60 | Pink noise. 61 | 62 | :param N: Amount of samples. 63 | 64 | Pink noise has equal power in bands that are proportionally wide. 65 | Power density decreases with 3 dB per octave. 66 | 67 | """ 68 | # This method uses the filter with the following coefficients. 
69 | # b = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786]) 70 | # a = np.array([1, -2.494956002, 2.017265875, -0.522189400]) 71 | # return lfilter(B, A, np.random.randn(N)) 72 | # Another way would be using the FFT 73 | # x = np.random.randn(N) 74 | # X = rfft(x) / N 75 | uneven = N % 2 76 | X = np.random.randn(N // 2 + 1 + uneven) + 1j * np.random.randn(N // 2 + 1 + uneven) 77 | S = np.sqrt(np.arange(len(X)) + 1.0) # +1 to avoid divide by zero 78 | y = (irfft(X / S)).real 79 | if uneven: 80 | y = y[:-1] 81 | return normalize(y) 82 | 83 | 84 | class PinkNoiseGenerator(object): 85 | def __init__(self, nSampsPerBlock=1024): 86 | self.N = nSampsPerBlock 87 | self.uneven = self.N % 2 88 | lenX = self.N // 2 + 1 + self.uneven 89 | self.S = np.sqrt(np.arange(lenX) + 1.0) 90 | 91 | def generate(self): 92 | X = np.random.randn(self.N // 2 + 1 + self.uneven) + 1j * np.random.randn( 93 | self.N // 2 + 1 + self.uneven 94 | ) 95 | y = (irfft(X / self.S)).real 96 | if self.uneven: 97 | y = y[:-1] 98 | return normalize(y) 99 | 100 | 101 | class BetaGeneratorOutlet(object): 102 | def __init__( 103 | self, 104 | Fs=2**14, 105 | FreqBeta=20.0, 106 | AmpBeta=100.0, 107 | AmpNoise=20.0, 108 | NCyclesPerChunk=4, 109 | channels=["RAW1", "SPK1", "RAW2", "SPK2", "RAW3", "SPK3"], 110 | ): 111 | """ 112 | :param Fs: Sampling rate 113 | :param FreqBeta: Central frequency of beta band 114 | :param AmpBeta: Amplitude of beta (uV) 115 | :param AmpNoise: Amplitude of pink noise (uV) 116 | :param NCyclesPerChunk: Minimum number of cycles of beta in a chunk. 117 | :param channels: List of channel names 118 | """ 119 | # Saved arguments 120 | self.FreqBeta = FreqBeta 121 | self.AmpBeta = AmpBeta # Amplitude of Beta (uV) 122 | self.AmpNoise = AmpNoise # Amplitude of pink noise 123 | self.channels = channels 124 | # Derived variables 125 | chunk_dur = NCyclesPerChunk / self.FreqBeta # Duration, in sec, of one chunk 126 | chunk_len = int(Fs * chunk_dur) # Number of samples in a chunk 127 | self.tvec = 1.0 * (np.arange(chunk_len) + 1) / Fs # time vector for chunk (sec) 128 | # Pink noise generator 129 | self.pinkNoiseGen = PinkNoiseGenerator(nSampsPerBlock=chunk_len) 130 | 131 | # Create a stream of fake 'raw' data 132 | raw_info = StreamInfo( 133 | name="BetaGen", 134 | type="EEG", 135 | channel_count=len(self.channels), 136 | nominal_srate=Fs, 137 | channel_format="float32", 138 | source_id="betagen1234", 139 | ) 140 | raw_xml = raw_info.desc() 141 | chans = raw_xml.append_child("channels") 142 | for channame in self.channels: 143 | chn = chans.append_child("channel") 144 | chn.append_child_value("label", channame) 145 | chn.append_child_value("unit", "microvolts") 146 | chn.append_child_value("type", "generated") 147 | self.eeg_outlet = StreamOutlet(raw_info) 148 | print("Created outlet with name BetaGen and type EEG") 149 | 150 | self.last_time = local_clock() 151 | 152 | def update(self, task={"phase": "precue", "class": 1}): 153 | # Convert phase and class_id into beta_amp 154 | if task["phase"] in ["cue", "go"]: 155 | beta_amp = 0 if task["class"] == 3 else self.AmpBeta 156 | else: 157 | beta_amp = self.AmpBeta / 5.0 158 | 159 | this_tvec = self.tvec + self.last_time # Sample times 160 | # Put the signal together 161 | this_sig = self.AmpNoise * np.asarray( 162 | self.pinkNoiseGen.generate(), dtype=np.float32 163 | ) # Start with some pink noise 164 | this_sig += beta_amp * np.sin( 165 | this_tvec * 2 * np.pi * self.FreqBeta 166 | ) # Add our beta signal 167 | this_sig = np.atleast_2d(this_sig).T * 
np.ones( 168 | (1, len(self.channels)), dtype=np.float32 169 | ) # Tile across channels 170 | 171 | time_to_sleep = max(0, this_tvec[-1] - local_clock()) 172 | time.sleep(time_to_sleep) 173 | 174 | print( 175 | "Beta outlet pushing signal with shape {},{} and Beta amp {}".format( 176 | this_sig.shape[0], this_sig.shape[1], beta_amp 177 | ) 178 | ) 179 | self.eeg_outlet.push_chunk(this_sig, timestamp=this_tvec[-1]) 180 | 181 | self.last_time = local_clock() 182 | 183 | 184 | class BetaInlet(object): 185 | def __init__(self): 186 | print("looking for an EEG stream...") 187 | streams = resolve_byprop("type", "EEG") 188 | 189 | # create a new inlet to read from the stream 190 | proc_flags = proc_clocksync | proc_dejitter | proc_monotonize 191 | self.inlet = StreamInlet(streams[0], processing_flags=proc_flags) 192 | 193 | # The following is an example of how to read stream info 194 | stream_info = self.inlet.info() 195 | stream_Fs = stream_info.nominal_srate() 196 | stream_xml = stream_info.desc() 197 | chans_xml = stream_xml.child("channels") 198 | chan_xml_list = [] 199 | ch = chans_xml.child("channel") 200 | while ch.name() == "channel": 201 | chan_xml_list.append(ch) 202 | ch = ch.next_sibling("channel") 203 | self.channel_names = [ch_xml.child_value("label") for ch_xml in chan_xml_list] 204 | print( 205 | "Reading from inlet named {} with channels {} sending data at {} Hz".format( 206 | stream_info.name(), self.channel_names, stream_Fs 207 | ) 208 | ) 209 | 210 | def update(self): 211 | max_samps = 3276 * 2 212 | data = np.nan * np.ones((max_samps, len(self.channel_names)), dtype=np.float32) 213 | _, timestamps = self.inlet.pull_chunk(max_samples=max_samps, dest_obj=data) 214 | data = data[: len(timestamps), :] 215 | print("Beta inlet retrieved {} samples.".format(len(timestamps))) 216 | return data, np.asarray(timestamps) 217 | 218 | 219 | class MarkersGeneratorOutlet(object): 220 | phases = { 221 | "precue": {"next": "cue", "duration": 1.0}, 222 | "cue": {"next": "go", "duration": 0.5}, 223 | "go": {"next": "evaluate", "duration": 5.0}, 224 | "evaluate": {"next": "precue", "duration": 0.1}, 225 | } 226 | 227 | def __init__( 228 | self, 229 | class_list=[1, 3], 230 | classes_rand=True, 231 | target_list=[1, 2], 232 | targets_rand=True, 233 | ): 234 | """ 235 | 236 | :param class_list: A list of integers comprising different class ids. Default: [1, 3] 237 | :param classes_rand: If True, classes are chosen randomly from list. If False, the list is cycled. Default: True 238 | :param target_list: A list of integers comprising different target ids. Default: [1, 2] 239 | :param targets_rand: If True, targets are chosen randomly from list. If False, the list is cycled. 
Default: True 240 | """ 241 | stream_name = "GeneratedCentreOutMarkers" 242 | stream_type = "Markers" 243 | outlet_info = StreamInfo( 244 | name=stream_name, 245 | type=stream_type, 246 | channel_count=1, 247 | nominal_srate=0, 248 | channel_format="string", 249 | source_id="centreoutmarkergen1234", 250 | ) 251 | outlet_xml = outlet_info.desc() 252 | channels_xml = outlet_xml.append_child("channels") 253 | chan_xml = channels_xml.append_child("channel") 254 | chan_xml.append_child_value("label", "EventMarkers") 255 | chan_xml.append_child_value("type", "generated") 256 | self.outlet = StreamOutlet(outlet_info) 257 | print( 258 | "Created outlet with name {} and type {}".format(stream_name, stream_type) 259 | ) 260 | 261 | self.class_list = class_list 262 | self.classes_rand = classes_rand 263 | self.target_list = target_list 264 | self.targets_rand = targets_rand 265 | self.next_transition = -1 266 | self.in_phase = "evaluate" 267 | self.trial_ix = 0 268 | self.class_id = self.class_list[0] 269 | self.target_id = self.target_list[0] 270 | 271 | def update(self): 272 | now = local_clock() 273 | if now > self.next_transition: 274 | # Transition phase 275 | self.in_phase = self.phases[self.in_phase]["next"] 276 | self.next_transition = now + self.phases[self.in_phase]["duration"] 277 | 278 | # Send markers 279 | out_string = "undefined" 280 | if self.in_phase == "precue": 281 | # transition from evaluate to precue 282 | # print("Previous class_id: {}, target_id: {}".format(self.class_id, self.target_id)) 283 | self.trial_ix += 1 284 | self.target_id = ( 285 | random.choice(self.target_list) 286 | if self.targets_rand 287 | else self.target_list[ 288 | (self.target_list.index(self.target_id) + 1) 289 | % len(self.target_list) 290 | ] 291 | ) 292 | self.class_id = ( 293 | random.choice(self.class_list) 294 | if self.classes_rand 295 | else self.class_list[ 296 | (self.class_list.index(self.class_id) + 1) 297 | % len(self.class_list) 298 | ] 299 | ) 300 | # print("New class_id: {}, target_id: {}".format(self.class_id, self.target_id)) 301 | out_string = "NewTrial {}, Class {}, Target {}".format( 302 | self.trial_ix, self.class_id, self.target_id 303 | ) 304 | elif self.in_phase == "cue": 305 | # transition from precue to cue 306 | out_string = "TargetCue, Class {}, Target {}".format( 307 | self.class_id, self.target_id 308 | ) 309 | elif self.in_phase == "go": 310 | # transition from cue to go 311 | out_string = "GoCue, Class {}, Target {}".format( 312 | self.class_id, self.target_id 313 | ) 314 | elif self.in_phase == "evaluate": 315 | # transition from go to evaluate 316 | hit_string = "Hit" if random.randint(0, 1) == 1 else "Miss" 317 | out_string = "{}, Class {}, Target {}".format( 318 | hit_string, self.class_id, self.target_id 319 | ) 320 | print("Marker outlet pushing string: {}".format(out_string)) 321 | self.outlet.push_sample( 322 | [ 323 | out_string, 324 | ] 325 | ) 326 | 327 | return True 328 | return False 329 | 330 | 331 | class MarkerInlet(object): 332 | def __init__(self): 333 | self.task = {"phase": "precue", "class": 1, "target": 1} 334 | print("Looking for stream with type Markers") 335 | streams = resolve_bypred("type='Markers'", minimum=1) 336 | proc_flags = 0 # Marker events are relatively rare. No need to post-process. 
337 | self.inlet = StreamInlet(streams[0], processing_flags=proc_flags) 338 | # The following is an example of how to read stream info 339 | stream_info = self.inlet.info() 340 | # stream_Fs = stream_info.nominal_srate() 341 | stream_xml = stream_info.desc() 342 | chans_xml = stream_xml.child("channels") 343 | chan_xml_list = [] 344 | ch = chans_xml.child("channel") 345 | while ch.name() == "channel": 346 | chan_xml_list.append(ch) 347 | ch = ch.next_sibling("channel") 348 | stream_ch_names = [ch_xml.child_value("label") for ch_xml in chan_xml_list] 349 | print( 350 | "Reading from inlet named {} with channels {}".format( 351 | stream_info.name(), stream_ch_names 352 | ) 353 | ) 354 | 355 | def update(self): 356 | marker_samples, marker_timestamps = self.inlet.pull_chunk(timeout=0.0) 357 | if marker_timestamps: 358 | [phase_str, class_str, targ_str] = marker_samples[-1][0].split(", ") 359 | if phase_str in ["TargetCue"]: 360 | self.task["phase"] = "cue" 361 | elif phase_str in ["GoCue"]: 362 | self.task["phase"] = "go" 363 | elif phase_str in ["Miss", "Hit"]: 364 | self.task["phase"] = "evaluate" 365 | elif phase_str[:8] == "NewTrial": 366 | self.task["phase"] = "precue" 367 | else: 368 | print(phase_str) 369 | self.task["class"] = int(class_str.split(" ")[1]) 370 | self.task["target"] = int(targ_str.split(" ")[1]) 371 | print("Marker inlet updated with task {}".format(self.task)) 372 | 373 | 374 | betaGen = BetaGeneratorOutlet() 375 | markerGen = MarkersGeneratorOutlet() 376 | betaIn = BetaInlet() 377 | markerIn = MarkerInlet() 378 | 379 | if haspyqtgraph: 380 | qapp = pg.QtGui.QApplication(sys.argv) 381 | qwindow = pg.plot() 382 | qwindow.clear() 383 | qwindow.parent().setWindowTitle("pylsl PerformanceTest") 384 | 385 | 386 | def update(): 387 | markerGen.update() 388 | markerIn.update() 389 | betaGen.update(task=markerIn.task) # Rate-limiting step. Will time.sleep as needed. 390 | signal, tvec = betaIn.update() 391 | 392 | if haspyqtgraph: 393 | plot = qwindow.getPlotItem() 394 | graphs = plot.listDataItems() 395 | if not graphs: 396 | # create graphs 397 | for i in range(signal.shape[1]): 398 | plot.plot(tvec, signal[:, i]) 399 | else: 400 | # update graphs 401 | for i in range(signal.shape[1]): 402 | graphs[i].setData(signal[:, i], x=tvec) 403 | 404 | 405 | if __name__ == "__main__": 406 | """ 407 | python3 -m cProfile -o pylsl.cprof PerformanceTest.py 408 | gprof2dot -f pstats pylsl.cprof | dot -Tpng -o pylsl_prof.png 409 | """ 410 | try: 411 | if haspyqtgraph: 412 | timer = pg.QtCore.QTimer() 413 | timer.timeout.connect(update) 414 | timer.start(1) # Delay not needed because update has time.sleep 415 | if (sys.flags.interactive != 1) or not hasattr(pg.QtCore, "PYQT_VERSION"): 416 | sys.exit(pg.QtGui.QApplication.instance().exec_()) 417 | else: 418 | while True: 419 | update() 420 | 421 | except KeyboardInterrupt: 422 | # No cleanup necessary? 423 | pass 424 | -------------------------------------------------------------------------------- /src/pylsl/examples/README.md: -------------------------------------------------------------------------------- 1 | If pylsl was installed with `pip install pylsl` (recommended), then you can run each of these examples with `python -m pylsl.examples.name_of_example`. 2 | 3 | There are few other noteworthy uses of pylsl in the wild. These might give you some inspiration or direct examples of how to use pylsl in your project. 
4 | 5 | * [mne-realtime examples](https://github.com/mne-tools/mne-realtime/tree/master/examples) 6 | * [NeuroPype](https://www.neuropype.io/) 7 | * [Sig-Visualizer](https://github.com/labstreaminglayer/App-SigVisualizer) 8 | * [Pupil-Labs has 2 different pylsl uses](https://github.com/labstreaminglayer/App-PupilLabs) 9 | * [@agricolab's pyliesl](https://github.com/pyreiz/pyliesl) makes use of some less-common features of LSL and XDF. 10 | * [muse-lsl](https://github.com/alexandrebarachant/muse-lsl) 11 | -------------------------------------------------------------------------------- /src/pylsl/examples/ReceiveAndPlot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | ReceiveAndPlot example for LSL 4 | 5 | This example shows data from all found outlets in realtime. 6 | It illustrates the following use cases: 7 | - efficiently pulling data, re-using buffers 8 | - automatically discarding older samples 9 | - online postprocessing 10 | """ 11 | 12 | import math 13 | from typing import List 14 | 15 | import numpy as np 16 | import pyqtgraph as pg 17 | from pyqtgraph.Qt import QtCore, QtGui 18 | 19 | import pylsl 20 | 21 | # Basic parameters for the plotting window 22 | plot_duration = 5 # how many seconds of data to show 23 | update_interval = 60 # ms between screen updates 24 | pull_interval = 500 # ms between each pull operation 25 | 26 | 27 | class Inlet: 28 | """Base class to represent a plottable inlet""" 29 | 30 | def __init__(self, info: pylsl.StreamInfo): 31 | # create an inlet and connect it to the outlet we found earlier. 32 | # max_buflen is set so data older the plot_duration is discarded 33 | # automatically and we only pull data new enough to show it 34 | 35 | # Also, perform online clock synchronization so all streams are in the 36 | # same time domain as the local lsl_clock() 37 | # (see https://labstreaminglayer.readthedocs.io/projects/liblsl/ref/enums.html#_CPPv414proc_clocksync) 38 | # and dejitter timestamps 39 | self.inlet = pylsl.StreamInlet( 40 | info, 41 | max_buflen=plot_duration, 42 | processing_flags=pylsl.proc_clocksync | pylsl.proc_dejitter, 43 | ) 44 | # store the name and channel count 45 | self.name = info.name() 46 | self.channel_count = info.channel_count() 47 | 48 | def pull_and_plot(self, plot_time: float, plt: pg.PlotItem): 49 | """Pull data from the inlet and add it to the plot. 50 | :param plot_time: lowest timestamp that's still visible in the plot 51 | :param plt: the plot the data should be shown on 52 | """ 53 | # We don't know what to do with a generic inlet, so we skip it. 54 | pass 55 | 56 | 57 | class DataInlet(Inlet): 58 | """A DataInlet represents an inlet with continuous, multi-channel data that 59 | should be plotted as multiple lines.""" 60 | 61 | dtypes = [[], np.float32, np.float64, None, np.int32, np.int16, np.int8, np.int64] 62 | 63 | def __init__(self, info: pylsl.StreamInfo, plt: pg.PlotItem): 64 | super().__init__(info) 65 | # calculate the size for our buffer, i.e. 
two times the displayed data 66 | bufsize = ( 67 | 2 * math.ceil(info.nominal_srate() * plot_duration), 68 | info.channel_count(), 69 | ) 70 | self.buffer = np.empty(bufsize, dtype=self.dtypes[info.channel_format()]) 71 | empty = np.array([]) 72 | # create one curve object for each channel/line that will handle displaying the data 73 | self.curves = [ 74 | pg.PlotCurveItem(x=empty, y=empty, autoDownsample=True) 75 | for _ in range(self.channel_count) 76 | ] 77 | for curve in self.curves: 78 | plt.addItem(curve) 79 | 80 | def pull_and_plot(self, plot_time, plt): 81 | # pull the data 82 | _, ts = self.inlet.pull_chunk( 83 | timeout=0.0, max_samples=self.buffer.shape[0], dest_obj=self.buffer 84 | ) 85 | # ts will be empty if no samples were pulled, a list of timestamps otherwise 86 | if ts: 87 | ts = np.asarray(ts) 88 | y = self.buffer[0 : ts.size, :] 89 | this_x = None 90 | old_offset = 0 91 | new_offset = 0 92 | for ch_ix in range(self.channel_count): 93 | # we don't pull an entire screen's worth of data, so we have to 94 | # trim the old data and append the new data to it 95 | old_x, old_y = self.curves[ch_ix].getData() 96 | # the timestamps are identical for all channels, so we need to do 97 | # this calculation only once 98 | if ch_ix == 0: 99 | # find the index of the first sample that's still visible, 100 | # i.e. newer than the left border of the plot 101 | old_offset = old_x.searchsorted(plot_time) 102 | # same for the new data, in case we pulled more data than 103 | # can be shown at once 104 | new_offset = ts.searchsorted(plot_time) 105 | # append new timestamps to the trimmed old timestamps 106 | this_x = np.hstack((old_x[old_offset:], ts[new_offset:])) 107 | # append new data to the trimmed old data 108 | this_y = np.hstack((old_y[old_offset:], y[new_offset:, ch_ix] - ch_ix)) 109 | # replace the old data 110 | self.curves[ch_ix].setData(this_x, this_y) 111 | 112 | 113 | class MarkerInlet(Inlet): 114 | """A MarkerInlet shows events that happen sporadically as vertical lines""" 115 | 116 | def __init__(self, info: pylsl.StreamInfo): 117 | super().__init__(info) 118 | 119 | def pull_and_plot(self, plot_time, plt): 120 | # TODO: purge old markers 121 | strings, timestamps = self.inlet.pull_chunk(0) 122 | if timestamps: 123 | for string, ts in zip(strings, timestamps): 124 | plt.addItem( 125 | pg.InfiniteLine(ts, angle=90, movable=False, label=string[0]) 126 | ) 127 | 128 | 129 | def main(): 130 | # firstly resolve all streams that could be shown 131 | inlets: List[Inlet] = [] 132 | print("looking for streams") 133 | streams = pylsl.resolve_streams() 134 | 135 | # Create the pyqtgraph window 136 | pw = pg.plot(title="LSL Plot") 137 | plt = pw.getPlotItem() 138 | plt.enableAutoRange(x=False, y=True) 139 | 140 | # iterate over found streams, creating specialized inlet objects that will 141 | # handle plotting the data 142 | for info in streams: 143 | if info.type() == "Markers": 144 | if ( 145 | info.nominal_srate() != pylsl.IRREGULAR_RATE 146 | or info.channel_format() != pylsl.cf_string 147 | ): 148 | print("Invalid marker stream " + info.name()) 149 | print("Adding marker inlet: " + info.name()) 150 | inlets.append(MarkerInlet(info)) 151 | elif ( 152 | info.nominal_srate() != pylsl.IRREGULAR_RATE 153 | and info.channel_format() != pylsl.cf_string 154 | ): 155 | print("Adding data inlet: " + info.name()) 156 | inlets.append(DataInlet(info, plt)) 157 | else: 158 | print("Don't know what to do with stream " + info.name()) 159 | 160 | def scroll(): 161 | """Move the view so the data 
appears to scroll""" 162 | # We show data only up to a timepoint shortly before the current time 163 | # so new data doesn't suddenly appear in the middle of the plot 164 | fudge_factor = pull_interval * 0.002 165 | plot_time = pylsl.local_clock() 166 | pw.setXRange(plot_time - plot_duration + fudge_factor, plot_time - fudge_factor) 167 | 168 | def update(): 169 | # Read data from the inlet. Use a timeout of 0.0 so we don't block GUI interaction. 170 | mintime = pylsl.local_clock() - plot_duration 171 | # call pull_and_plot for each inlet. 172 | # Special handling of inlet types (markers, continuous data) is done in 173 | # the different inlet classes. 174 | for inlet in inlets: 175 | inlet.pull_and_plot(mintime, plt) 176 | 177 | # create a timer that will move the view every update_interval ms 178 | update_timer = QtCore.QTimer() 179 | update_timer.timeout.connect(scroll) 180 | update_timer.start(update_interval) 181 | 182 | # create a timer that will pull and add new data occasionally 183 | pull_timer = QtCore.QTimer() 184 | pull_timer.timeout.connect(update) 185 | pull_timer.start(pull_interval) 186 | 187 | import sys 188 | 189 | # Start Qt event loop unless running in interactive mode or using pyside. 190 | if (sys.flags.interactive != 1) or not hasattr(QtCore, "PYQT_VERSION"): 191 | QtGui.QGuiApplication.instance().exec_() 192 | 193 | 194 | if __name__ == "__main__": 195 | main() 196 | -------------------------------------------------------------------------------- /src/pylsl/examples/ReceiveData.py: -------------------------------------------------------------------------------- 1 | """Example program to show how to read a multi-channel time series from LSL.""" 2 | 3 | from pylsl import StreamInlet, resolve_byprop 4 | 5 | 6 | def main(): 7 | # first resolve an EEG stream on the lab network 8 | print("looking for an EEG stream...") 9 | streams = resolve_byprop("type", "EEG") 10 | 11 | # create a new inlet to read from the stream 12 | inlet = StreamInlet(streams[0]) 13 | 14 | while True: 15 | # get a new sample (you can also omit the timestamp part if you're not 16 | # interested in it) 17 | sample, timestamp = inlet.pull_sample() 18 | print(timestamp, sample) 19 | 20 | 21 | if __name__ == "__main__": 22 | main() 23 | -------------------------------------------------------------------------------- /src/pylsl/examples/ReceiveDataInChunks.py: -------------------------------------------------------------------------------- 1 | """Example program to demonstrate how to read a multi-channel time-series 2 | from LSL in a chunk-by-chunk manner (which is more efficient).""" 3 | 4 | from pylsl import StreamInlet, resolve_byprop 5 | 6 | 7 | def main(): 8 | # first resolve an EEG stream on the lab network 9 | print("looking for an EEG stream...") 10 | streams = resolve_byprop("type", "EEG") 11 | 12 | # create a new inlet to read from the stream 13 | inlet = StreamInlet(streams[0]) 14 | 15 | while True: 16 | # get a new sample (you can also omit the timestamp part if you're not 17 | # interested in it) 18 | chunk, timestamps = inlet.pull_chunk() 19 | if timestamps: 20 | print(timestamps, chunk) 21 | 22 | 23 | if __name__ == "__main__": 24 | main() 25 | -------------------------------------------------------------------------------- /src/pylsl/examples/ReceiveStringMarkers.py: -------------------------------------------------------------------------------- 1 | """Example program to demonstrate how to read string-valued markers from LSL.""" 2 | 3 | from pylsl import StreamInlet, resolve_byprop 4 | 5 
| 6 | def main(): 7 | # first resolve a marker stream on the lab network 8 | print("looking for a marker stream...") 9 | streams = resolve_byprop("type", "Markers") 10 | 11 | # create a new inlet to read from the stream 12 | inlet = StreamInlet(streams[0]) 13 | 14 | while True: 15 | # get a new sample (you can also omit the timestamp part if you're not 16 | # interested in it) 17 | sample, timestamp = inlet.pull_sample() 18 | print("got %s at time %s" % (sample[0], timestamp)) 19 | 20 | 21 | if __name__ == "__main__": 22 | main() 23 | -------------------------------------------------------------------------------- /src/pylsl/examples/SendData.py: -------------------------------------------------------------------------------- 1 | """Example program to demonstrate how to send a multi-channel time series to 2 | LSL.""" 3 | 4 | import getopt 5 | import sys 6 | import time 7 | from random import random as rand 8 | 9 | from pylsl import StreamInfo, StreamOutlet, local_clock 10 | 11 | 12 | def main(argv): 13 | srate = 100 14 | name = "BioSemi" 15 | type = "EEG" 16 | n_channels = 8 17 | help_string = "SendData.py -s <sampling_rate> -c <n_channels> -n <stream_name> -t <stream_type>" 18 | try: 19 | opts, args = getopt.getopt( 20 | argv, "hs:c:n:t:", longopts=["srate=", "channels=", "name=", "type="] 21 | ) 22 | except getopt.GetoptError: 23 | print(help_string) 24 | sys.exit(2) 25 | for opt, arg in opts: 26 | if opt == "-h": 27 | print(help_string) 28 | sys.exit() 29 | elif opt in ("-s", "--srate"): 30 | srate = float(arg) 31 | elif opt in ("-c", "--channels"): 32 | n_channels = int(arg) 33 | elif opt in ("-n", "--name"): 34 | name = arg 35 | elif opt in ("-t", "--type"): 36 | type = arg 37 | 38 | # first create a new stream info (here we set the name to BioSemi, 39 | # the content-type to EEG, 8 channels, 100 Hz, and float-valued data) The 40 | # last value would be the serial number of the device or some other more or 41 | # less locally unique identifier for the stream as far as available (you 42 | # could also omit it but interrupted connections wouldn't auto-recover) 43 | info = StreamInfo(name, type, n_channels, srate, "float32", "myuid34234") 44 | 45 | # next make an outlet 46 | outlet = StreamOutlet(info) 47 | 48 | print("now sending data...") 49 | start_time = local_clock() 50 | sent_samples = 0 51 | while True: 52 | elapsed_time = local_clock() - start_time 53 | required_samples = int(srate * elapsed_time) - sent_samples 54 | for sample_ix in range(required_samples): 55 | # make a new random n_channels sample; this is converted into a 56 | # pylsl.vectorf (the data type that is expected by push_sample) 57 | mysample = [rand() for _ in range(n_channels)] 58 | # now send it 59 | outlet.push_sample(mysample) 60 | sent_samples += required_samples 61 | # now send it and wait for a bit before trying again.
62 | time.sleep(0.01) 63 | 64 | 65 | if __name__ == "__main__": 66 | main(sys.argv[1:]) 67 | -------------------------------------------------------------------------------- /src/pylsl/examples/SendDataAdvanced.py: -------------------------------------------------------------------------------- 1 | """Example program to demonstrate how to send a multi-channel time-series 2 | with proper meta-data to LSL.""" 3 | 4 | import argparse 5 | import time 6 | from random import random as rand 7 | 8 | import pylsl 9 | 10 | 11 | def main(name="LSLExampleAmp", stream_type="EEG", srate=100): 12 | channel_names = ["Fp1", "Fp2", "C3", "C4", "Cz", "P3", "P4", "Pz", "O1", "O2"] 13 | channel_locations = [ 14 | [-0.0307, 0.0949, -0.0047], 15 | [0.0307, 0.0949, -0.0047], 16 | [-0.0742, 4.54343962e-18, 0.0668], 17 | [0.0743, 4.54956286e-18, 0.0669], 18 | [0, 6.123234e-18, 0.1], 19 | [-0.0567, -0.0677, 0.0469], 20 | [0.0566, -0.0677, 0.0469], 21 | [8.74397815e-18, -0.0714, 0.0699], 22 | [-0.0307, -0.0949, -0.0047], 23 | [0.0307, -0.0949, -0.0047], 24 | ] 25 | n_channels = len(channel_names) 26 | 27 | # First create a new stream info. 28 | # The first 4 arguments are stream name, stream type, number of channels, and 29 | # sampling rate -- all parameterized by the keyword arguments or the channel list above. 30 | # The 5th parameter is the data format. This should match the origin format (unless the 31 | # data will be transformed prior to pushing, then it should match the transformed-to format). 32 | # Possible values are "float32", "double64", "string", "int32", "int16", "int8", or "int64". 33 | # Alternatively, one could use the constants in the pylsl namespace beginning with `cf_`. 34 | # i.e., cf_float32, cf_double64, etc. 35 | # For this example, we will always use float32 data so we provide that as the 5th parameter. 36 | # The last value would be the serial number of the device or some other more or 37 | # less locally unique identifier for the stream as far as available (you 38 | # could also omit it but interrupted connections wouldn't auto-recover). 39 | info = pylsl.StreamInfo( 40 | name, stream_type, n_channels, srate, "float32", "myuid2424" 41 | ) 42 | 43 | # append some meta-data 44 | # https://github.com/sccn/xdf/wiki/EEG-Meta-Data 45 | info.desc().append_child_value("manufacturer", "LSLExampleAmp") 46 | chns = info.desc().append_child("channels") 47 | for chan_ix, label in enumerate(channel_names): 48 | ch = chns.append_child("channel") 49 | ch.append_child_value("label", label) 50 | ch.append_child_value("unit", "microvolts") 51 | ch.append_child_value("type", "EEG") 52 | ch.append_child_value("scaling_factor", "1") 53 | loc = ch.append_child("location") 54 | for ax_str, pos in zip(["X", "Y", "Z"], channel_locations[chan_ix]): 55 | loc.append_child_value(ax_str, str(pos)) 56 | cap = info.desc().append_child("cap") 57 | cap.append_child_value("name", "ComfyCap") 58 | cap.append_child_value("size", "54") 59 | cap.append_child_value("labelscheme", "10-20") 60 | 61 | # next make an outlet; we set the transmission chunk size to 32 samples 62 | # and the outgoing buffer size to 360 seconds (max.) 63 | outlet = pylsl.StreamOutlet(info, 32, 360) 64 | 65 | if False: 66 | # It's unnecessary to check the info when the stream was created in the same scope; just use info. 67 | # Use this code only as a sanity check if you think something when wrong during stream creation. 
68 | check_info = outlet.get_info() 69 | assert check_info.name() == name 70 | assert check_info.type() == stream_type 71 | assert check_info.channel_count() == len(channel_names) 72 | assert check_info.channel_format() == pylsl.cf_float32 73 | assert check_info.nominal_srate() == srate 74 | 75 | print("now sending data...") 76 | start_time = pylsl.local_clock() 77 | sent_samples = 0 78 | while True: 79 | elapsed_time = pylsl.local_clock() - start_time 80 | required_samples = int(srate * elapsed_time) - sent_samples 81 | if required_samples > 0: 82 | # make a chunk==array of length required_samples, where each element in the array 83 | # is a new random n_channels sample vector 84 | mychunk = [ 85 | [rand() for chan_ix in range(n_channels)] 86 | for samp_ix in range(required_samples) 87 | ] 88 | # Get a time stamp in seconds. We pretend that our samples are actually 89 | # 125ms old, e.g., as if coming from some external hardware with known latency. 90 | stamp = pylsl.local_clock() - 0.125 91 | # now send it and wait for a bit 92 | # Note that even though `rand()` returns a 64-bit value, the `push_chunk` method 93 | # will convert it to c_float before passing the data to liblsl. 94 | outlet.push_chunk(mychunk, stamp) 95 | sent_samples += required_samples 96 | time.sleep(0.02) 97 | 98 | 99 | if __name__ == "__main__": 100 | parser = argparse.ArgumentParser() 101 | parser.add_argument( 102 | "--name", default="LSLExampleAmp", help="Name of the created stream." 103 | ) 104 | parser.add_argument("--type", default="EEG", help="Type of the created stream.") 105 | parser.add_argument( 106 | "--srate", 107 | default=100.0, 108 | help="Sampling rate of the created stream.", 109 | type=float, 110 | ) 111 | arg = parser.parse_args() 112 | 113 | main(name=arg.name, stream_type=arg.type, srate=arg.srate) 114 | -------------------------------------------------------------------------------- /src/pylsl/examples/SendStringMarkers.py: -------------------------------------------------------------------------------- 1 | """Example program to demonstrate how to send string-valued markers into LSL.""" 2 | 3 | import random 4 | import time 5 | 6 | from pylsl import StreamInfo, StreamOutlet 7 | 8 | 9 | def main(): 10 | # first create a new stream info (here we set the name to MyMarkerStream, 11 | # the content-type to Markers, 1 channel, irregular sampling rate, 12 | # and string-valued data) The last value would be the locally unique 13 | # identifier for the stream as far as available, e.g. 14 | # program-scriptname-subjectnumber (you could also omit it but interrupted 15 | # connections wouldn't auto-recover). 
The important part is that the 16 | # content-type is set to 'Markers', because then other programs will know how 17 | # to interpret the content 18 | info = StreamInfo("MyMarkerStream", "Markers", 1, 0, "string", "myuidw43536") 19 | 20 | # next make an outlet 21 | outlet = StreamOutlet(info) 22 | 23 | print("now sending markers...") 24 | markernames = ["Test", "Blah", "Marker", "XXX", "Testtest", "Test-1-2-3"] 25 | while True: 26 | # pick a sample to send an wait for a bit 27 | outlet.push_sample([random.choice(markernames)]) 28 | time.sleep(random.random() * 3) 29 | 30 | 31 | if __name__ == "__main__": 32 | main() 33 | -------------------------------------------------------------------------------- /src/pylsl/examples/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/pylsl/info.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import typing 3 | 4 | from .lib import lib, string2fmt, cf_float32 5 | from .util import IRREGULAR_RATE 6 | 7 | 8 | class StreamInfo: 9 | """The StreamInfo object stores the declaration of a data stream. 10 | 11 | Represents the following information: 12 | a) stream data format (#channels, channel format) 13 | b) core information (stream name, content type, sampling rate) 14 | c) optional meta-data about the stream content (channel labels, 15 | measurement units, etc.) 16 | 17 | Whenever a program wants to provide a new stream on the lab network it will 18 | typically first create a StreamInfo to describe its properties and then 19 | construct a StreamOutlet with it to create the stream on the network. 20 | Recipients who discover the outlet can query the StreamInfo; it is also 21 | written to disk when recording the stream (playing a similar role as a file 22 | header). 23 | 24 | """ 25 | 26 | def __init__( 27 | self, 28 | name: str = "untitled", 29 | type: str = "", 30 | channel_count: int = 1, 31 | nominal_srate: float = IRREGULAR_RATE, 32 | channel_format: int = cf_float32, 33 | source_id: typing.Optional[str] = None, 34 | handle=None, 35 | ): 36 | """Construct a new StreamInfo object. 37 | 38 | Core stream information is specified here. Any remaining meta-data can 39 | be added later. 40 | 41 | Keyword arguments: 42 | name -- Name of the stream. Describes the device (or product series) 43 | that this stream makes available (for use by programs, 44 | experimenters or data analysts). Cannot be empty. 45 | type -- Content type of the stream. By convention LSL uses the content 46 | types defined in the XDF file format specification where 47 | applicable (https://github.com/sccn/xdf). The content type is the 48 | preferred way to find streams (as opposed to searching by name). 49 | channel_count -- Number of channels per sample. This stays constant for 50 | the lifetime of the stream. (default 1) 51 | nominal_srate -- The sampling rate (in Hz) as advertised by the data 52 | source, regular (otherwise set to IRREGULAR_RATE). 53 | (default IRREGULAR_RATE) 54 | channel_format -- Format/type of each channel. If your channels have 55 | different formats, consider supplying multiple 56 | streams or use the largest type that can hold 57 | them all (such as cf_double64). 
It is also allowed 58 | to pass this as a string, without the cf_ prefix, 59 | e.g., 'float32' (default cf_float32) 60 | source_id -- Unique identifier of the device or source of the data, if 61 | available (such as the serial number). This is critical 62 | for system robustness since it allows recipients to 63 | recover from failure even after the serving app, device or 64 | computer crashes (just by finding a stream with the same 65 | source id on the network again). If the provided value is None 66 | then a source id will be generated automatically from a hash of 67 | the other arguments. If recovery is not desired, for example 68 | when a disconnection should raise an error, set the source_id 69 | to "" (empty string) . (default None) 70 | """ 71 | if handle is not None: 72 | self.obj = ctypes.c_void_p(handle) 73 | else: 74 | if isinstance(channel_format, str): 75 | channel_format = string2fmt[channel_format] 76 | if source_id is None: 77 | source_id = str( 78 | hash((name, type, channel_count, nominal_srate, channel_format)) 79 | ) 80 | print( 81 | f"Generated source_id: '{source_id}' for StreamInfo with name '{name}', type '{type}', " 82 | f"channel_count {channel_count}, nominal_srate {nominal_srate}, " 83 | f"and channel_format {channel_format}." 84 | ) 85 | self.obj = lib.lsl_create_streaminfo( 86 | ctypes.c_char_p(str.encode(name)), 87 | ctypes.c_char_p(str.encode(type)), 88 | channel_count, 89 | ctypes.c_double(nominal_srate), 90 | channel_format, 91 | ctypes.c_char_p(str.encode(source_id)), 92 | ) 93 | self.obj = ctypes.c_void_p(self.obj) 94 | if not self.obj: 95 | raise RuntimeError("could not create stream description " "object.") 96 | 97 | def __del__(self): 98 | """Destroy a previously created StreamInfo object.""" 99 | # noinspection PyBroadException 100 | try: 101 | lib.lsl_destroy_streaminfo(self.obj) 102 | except Exception as e: 103 | print(f"StreamInfo deletion triggered error: {e}") 104 | 105 | # === Core Information (assigned at construction) === 106 | 107 | def name(self) -> str: 108 | """Name of the stream. 109 | 110 | This is a human-readable name. For streams offered by device modules, 111 | it refers to the type of device or product series that is generating 112 | the data of the stream. If the source is an application, the name may 113 | be a more generic or specific identifier. Multiple streams with the 114 | same name can coexist, though potentially at the cost of ambiguity (for 115 | the recording app or experimenter). 116 | 117 | """ 118 | return lib.lsl_get_name(self.obj).decode("utf-8") 119 | 120 | def type(self) -> str: 121 | """Content type of the stream. 122 | 123 | The content type is a short string such as "EEG", "Gaze" which 124 | describes the content carried by the channel (if known). If a stream 125 | contains mixed content this value need not be assigned but may instead 126 | be stored in the description of channel types. To be useful to 127 | applications and automated processing systems using the recommended 128 | content types is preferred. 129 | 130 | """ 131 | return lib.lsl_get_type(self.obj).decode("utf-8") 132 | 133 | def channel_count(self) -> int: 134 | """Number of channels of the stream. 135 | 136 | A stream has at least one channel; the channel count stays constant for 137 | all samples. 138 | 139 | """ 140 | return lib.lsl_get_channel_count(self.obj) 141 | 142 | def nominal_srate(self) -> float: 143 | """Sampling rate of the stream, according to the source (in Hz). 
144 | 145 | If a stream is irregularly sampled, this should be set to 146 | IRREGULAR_RATE. 147 | 148 | Note that no data will be lost even if this sampling rate is incorrect 149 | or if a device has temporary hiccups, since all samples will be 150 | transmitted anyway (except for those dropped by the device itself). 151 | However, when the recording is imported into an application, a good 152 | data importer may correct such errors more accurately if the advertised 153 | sampling rate was close to the specs of the device. 154 | 155 | """ 156 | return lib.lsl_get_nominal_srate(self.obj) 157 | 158 | def channel_format(self) -> int: 159 | """Channel format of the stream. 160 | 161 | All channels in a stream have the same format. However, a device might 162 | offer multiple time-synched streams each with its own format. 163 | 164 | """ 165 | return lib.lsl_get_channel_format(self.obj) 166 | 167 | def source_id(self) -> str: 168 | """Unique identifier of the stream's source, if available. 169 | 170 | The unique source (or device) identifier is an optional piece of 171 | information that, if available, allows that endpoints (such as the 172 | recording program) can re-acquire a stream automatically once it is 173 | back online. 174 | 175 | """ 176 | return lib.lsl_get_source_id(self.obj).decode("utf-8") 177 | 178 | # === Hosting Information (assigned when bound to an outlet/inlet) === 179 | 180 | def version(self): 181 | """Protocol version used to deliver the stream.""" 182 | return lib.lsl_get_version(self.obj) 183 | 184 | def created_at(self): 185 | """Creation time stamp of the stream. 186 | 187 | This is the time stamp when the stream was first created 188 | (as determined via local_clock() on the providing machine). 189 | 190 | """ 191 | return lib.lsl_get_created_at(self.obj) 192 | 193 | def uid(self) -> str: 194 | """Unique ID of the stream outlet instance (once assigned). 195 | 196 | This is a unique identifier of the stream outlet, and is guaranteed to 197 | be different across multiple instantiations of the same outlet (e.g., 198 | after a re-start). 199 | 200 | """ 201 | return lib.lsl_get_uid(self.obj).decode("utf-8") 202 | 203 | def session_id(self) -> str: 204 | """Session ID for the given stream. 205 | 206 | The session id is an optional human-assigned identifier of the 207 | recording session. While it is rarely used, it can be used to prevent 208 | concurrent recording activities on the same sub-network (e.g., in 209 | multiple experiment areas) from seeing each other's streams 210 | (can be assigned in a configuration file read by liblsl, see also 211 | Network Connectivity in the LSL wiki). 212 | 213 | """ 214 | return lib.lsl_get_session_id(self.obj).decode("utf-8") 215 | 216 | def hostname(self) -> str: 217 | """Hostname of the providing machine.""" 218 | return lib.lsl_get_hostname(self.obj).decode("utf-8") 219 | 220 | # === Data Description (can be modified) === 221 | def desc(self) -> "XMLElement": 222 | """Extended description of the stream. 223 | 224 | It is highly recommended that at least the channel labels are described 225 | here. See code examples on the LSL wiki. Other information, such 226 | as amplifier settings, measurement units if deviating from defaults, 227 | setup information, subject information, etc., can be specified here, as 228 | well. Meta-data recommendations follow the XDF file format project 229 | (github.com/sccn/xdf/wiki/Meta-Data or web search for: XDF meta-data). 
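# A sketch of the desc() usage recommended above: declare at least the channel
# labels (plus units and types) before the outlet is created. The stream name,
# source_id and channel labels are placeholders.
from pylsl import StreamInfo, StreamOutlet

info = StreamInfo("ExampleEEG", "EEG", 3, 100.0, "float32", "example-eeg-001")
channels = info.desc().append_child("channels")
for label in ("C3", "C4", "Cz"):
    ch = channels.append_child("channel")
    ch.append_child_value("label", label)
    ch.append_child_value("unit", "microvolts")
    ch.append_child_value("type", "EEG")
info.desc().append_child_value("manufacturer", "ExampleDeviceCo")
outlet = StreamOutlet(info)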
230 | 231 | Important: if you use a stream content type for which meta-data 232 | recommendations exist, please try to lay out your meta-data in 233 | agreement with these recommendations for compatibility with other 234 | applications. 235 | 236 | """ 237 | return XMLElement(lib.lsl_get_desc(self.obj)) 238 | 239 | def as_xml(self) -> str: 240 | """Retrieve the entire stream_info in XML format. 241 | 242 | This yields an XML document (in string form) whose top-level element is 243 | . The description element contains one element for each 244 | field of the stream_info class, including: 245 | a) the core elements , , , , 246 | , 247 | b) the misc elements , , , , 248 | , , , , 249 | , 250 | c) the extended description element with user-defined 251 | sub-elements. 252 | 253 | """ 254 | return lib.lsl_get_xml(self.obj).decode("utf-8") 255 | 256 | def get_channel_labels(self) -> typing.Optional[list[typing.Optional[str]]]: 257 | """Get the channel names in the description. 258 | 259 | Returns 260 | ------- 261 | labels : list of str or ``None`` | None 262 | List of channel names, matching the number of total channels. 263 | If ``None``, the channel names are not set. 264 | 265 | .. warning:: 266 | 267 | If a list of str and ``None`` are returned, some of the channel names 268 | are missing. This is not expected and could occur if the XML tree in 269 | the ``desc`` property is tempered with outside of the defined getter and 270 | setter. 271 | """ 272 | return self._get_channel_info("label") 273 | 274 | def get_channel_types(self) -> typing.Optional[list[typing.Optional[str]]]: 275 | """Get the channel types in the description. 276 | 277 | Returns 278 | ------- 279 | types : list of str or ``None`` | None 280 | List of channel types, matching the number of total channels. 281 | If ``None``, the channel types are not set. 282 | 283 | .. warning:: 284 | 285 | If a list of str and ``None`` are returned, some of the channel types 286 | are missing. This is not expected and could occur if the XML tree in 287 | the ``desc`` property is tempered with outside of the defined getter and 288 | setter. 289 | """ 290 | return self._get_channel_info("type") 291 | 292 | def get_channel_units(self) -> typing.Optional[list[typing.Optional[str]]]: 293 | """Get the channel units in the description. 294 | 295 | Returns 296 | ------- 297 | units : list of str or ``None`` | None 298 | List of channel units, matching the number of total channels. 299 | If ``None``, the channel units are not set. 300 | 301 | .. warning:: 302 | 303 | If a list of str and ``None`` are returned, some of the channel units 304 | are missing. This is not expected and could occur if the XML tree in 305 | the ``desc`` property is tempered with outside of the defined getter and 306 | setter. 
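# Reading the declared channel meta-data back on the consumer side; a sketch
# that assumes an EEG stream with a populated description is available. The
# getters return None when the producer declared nothing.
from pylsl import StreamInlet, resolve_byprop

found = resolve_byprop("type", "EEG", timeout=5.0)
if found:
    inlet = StreamInlet(found[0])
    full_info = inlet.info(timeout=5.0)   # includes the extended description
    labels = full_info.get_channel_labels()
    units = full_info.get_channel_units()
    print(full_info.as_xml())             # the complete declaration as XML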
307 | """ 308 | return self._get_channel_info("unit") 309 | 310 | def _get_channel_info(self, name) -> typing.Optional[list[typing.Optional[str]]]: 311 | """Get the 'channel/name' element in the XML tree.""" 312 | if self.desc().child("channels").empty(): 313 | return None 314 | ch_infos = list() 315 | channels = self.desc().child("channels") 316 | ch = channels.child("channel") 317 | while not ch.empty(): 318 | ch_info = ch.child(name).first_child().value() 319 | if len(ch_info) != 0: 320 | ch_infos.append(ch_info) 321 | else: 322 | ch_infos.append(None) 323 | ch = ch.next_sibling() 324 | if all(ch_info is None for ch_info in ch_infos): 325 | return None 326 | if len(ch_infos) != self.channel_count(): 327 | print( 328 | f"The stream description contains {len(ch_infos)} elements for " 329 | f"{self.channel_count()} channels.", 330 | ) 331 | return ch_infos 332 | 333 | def set_channel_labels(self, labels: list[str]): 334 | """Set the channel names in the description. Existing labels are overwritten. 335 | 336 | Parameters 337 | ---------- 338 | labels : list of str 339 | List of channel names, matching the number of total channels. 340 | """ 341 | self._set_channel_info(labels, "label") 342 | 343 | def set_channel_types(self, types: typing.Union[str, list[str]]): 344 | """Set the channel types in the description. Existing types are overwritten. 345 | 346 | The types are given as human-readable strings, e.g. ``'eeg'``. 347 | 348 | Parameters 349 | ---------- 350 | types : list of str | str 351 | List of channel types, matching the number of total channels. 352 | If a single `str` is provided, the type is applied to all channels. 353 | """ 354 | types = [types] * self.channel_count() if isinstance(types, str) else types 355 | self._set_channel_info(types, "type") 356 | 357 | def set_channel_units( 358 | self, units: typing.Union[str, int, list[typing.Union[str, int]]] 359 | ) -> None: 360 | """Set the channel units in the description. Existing units are overwritten. 361 | 362 | The units are given as human-readable strings, e.g. ``'microvolts'``, or as 363 | multiplication factor, e.g. ``-6`` for ``1e-6`` thus converting e.g. Volts to 364 | microvolts. 365 | 366 | Parameters 367 | ---------- 368 | units : list of str | list of int | array of int | str | int 369 | List of channel units, matching the number of total channels. 370 | If a single `str` or `int` is provided, the unit is applied to all channels. 371 | 372 | Notes 373 | ----- 374 | Some channel types do not have a unit. The `str` ``none`` or the `int` 0 should 375 | be used to denote this channel unit, corresponding to ``FIFF_UNITM_NONE`` in 376 | MNE-Python. 377 | """ 378 | if isinstance(units, (int, str)): 379 | units = [units] * self.channel_count() 380 | else: # iterable 381 | units = [ 382 | str(int(unit)) if isinstance(unit, int) else unit for unit in units 383 | ] 384 | self._set_channel_info(units, "unit") 385 | 386 | def _set_channel_info(self, ch_infos, name: str) -> None: 387 | """Set the 'channel/name' element in the XML tree.""" 388 | if len(ch_infos) != self.channel_count(): 389 | raise ValueError( 390 | f"The number of provided channel {name} {len(ch_infos)} " 391 | f"must match the number of channels {self.channel_count()}." 
392 | ) 393 | 394 | channels = StreamInfo._add_first_node(self.desc, "channels") 395 | # fill the 'channel/name' element of the tree and overwrite existing values 396 | ch = channels.child("channel") 397 | for ch_info in ch_infos: 398 | ch = channels.append_child("channel") if ch.empty() else ch 399 | StreamInfo._set_description_node(ch, {name: ch_info}) 400 | ch = ch.next_sibling() 401 | StreamInfo._prune_description_node(ch, channels) 402 | 403 | # -- Helper methods to interact with the XMLElement tree --------------------------- 404 | @staticmethod 405 | def _add_first_node(desc, name: str) -> "XMLElement": 406 | """Add the first node in the description and return it.""" 407 | if desc().child(name).empty(): 408 | node = desc().append_child(name) 409 | else: 410 | node = desc().child(name) 411 | return node 412 | 413 | @staticmethod 414 | def _prune_description_node(node, parent): 415 | """Prune a node and remove outdated entries.""" 416 | # this is useful in case the sinfo is tempered with and had more entries of type 417 | # 'node' than it should. 418 | while not node.empty(): 419 | node_next = node.next_sibling() 420 | parent.remove_child(node) 421 | node = node_next 422 | 423 | @staticmethod 424 | def _set_description_node(node, mapping): 425 | """Set the key: value child(s) of a node.""" 426 | for key, value in mapping.items(): 427 | value = str(int(value)) if isinstance(value, int) else str(value) 428 | if node.child(key).empty(): 429 | node.append_child_value(key, value) 430 | else: 431 | node.child(key).first_child().set_value(value) 432 | 433 | 434 | class XMLElement: 435 | """A lightweight XML element tree modeling the .desc() field of StreamInfo. 436 | 437 | Has a name and can have multiple named children or have text content as 438 | value; attributes are omitted. Insider note: The interface is modeled after 439 | a subset of pugixml's node type and is compatible with it. See also 440 | http://pugixml.googlecode.com/svn/tags/latest/docs/manual/access.html for 441 | additional documentation. 442 | 443 | """ 444 | 445 | def __init__(self, handle): 446 | """Construct new XML element from existing handle.""" 447 | self.e = ctypes.c_void_p(handle) 448 | 449 | # === Tree Navigation === 450 | 451 | def first_child(self) -> "XMLElement": 452 | """Get the first child of the element.""" 453 | return XMLElement(lib.lsl_first_child(self.e)) 454 | 455 | def last_child(self) -> "XMLElement": 456 | """Get the last child of the element.""" 457 | return XMLElement(lib.lsl_last_child(self.e)) 458 | 459 | def child(self, name: str) -> "XMLElement": 460 | """Get a child with a specified name.""" 461 | return XMLElement(lib.lsl_child(self.e, str.encode(name))) 462 | 463 | def next_sibling(self, name: typing.Optional[str] = None) -> "XMLElement": 464 | """Get the next sibling in the children list of the parent node. 465 | 466 | If a name is provided, the next sibling with the given name is returned. 467 | 468 | """ 469 | if name is None: 470 | return XMLElement(lib.lsl_next_sibling(self.e)) 471 | else: 472 | return XMLElement(lib.lsl_next_sibling_n(self.e, str.encode(name))) 473 | 474 | def previous_sibling(self, name: typing.Optional[str] = None) -> "XMLElement": 475 | """Get the previous sibling in the children list of the parent node. 476 | 477 | If a name is provided, the previous sibling with the given name is 478 | returned. 
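# Walking the description tree by hand with the navigation methods above,
# similar to what _get_channel_info() does internally. Assumes `info` is a
# StreamInfo whose desc() already contains a populated channels element.
ch = info.desc().child("channels").child("channel")
while not ch.empty():
    print(ch.child_value("label"), ch.child_value("unit"))
    ch = ch.next_sibling("channel")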
479 | 480 | """ 481 | if name is None: 482 | return XMLElement(lib.lsl_previous_sibling(self.e)) 483 | else: 484 | return XMLElement(lib.lsl_previous_sibling_n(self.e, str.encode(name))) 485 | 486 | def parent(self) -> "XMLElement": 487 | """Get the parent node.""" 488 | return XMLElement(lib.lsl_parent(self.e)) 489 | 490 | # === Content Queries === 491 | 492 | def empty(self) -> bool: 493 | """Whether this node is empty.""" 494 | return bool(lib.lsl_empty(self.e)) 495 | 496 | def is_text(self) -> bool: 497 | """Whether this is a text body (instead of an XML element). 498 | 499 | True both for plain char data and CData. 500 | 501 | """ 502 | return bool(lib.lsl_is_text(self.e)) 503 | 504 | def name(self) -> str: 505 | """Name of the element.""" 506 | return lib.lsl_name(self.e).decode("utf-8") 507 | 508 | def value(self) -> str: 509 | """Value of the element.""" 510 | return lib.lsl_value(self.e).decode("utf-8") 511 | 512 | def child_value(self, name: typing.Optional[str] = None) -> str: 513 | """Get child value (value of the first child that is text). 514 | 515 | If a name is provided, then the value of the first child with the 516 | given name is returned. 517 | 518 | """ 519 | if name is None: 520 | res = lib.lsl_child_value(self.e) 521 | else: 522 | res = lib.lsl_child_value_n(self.e, str.encode(name)) 523 | return res.decode("utf-8") 524 | 525 | # === Modification === 526 | 527 | def append_child_value(self, name: str, value: str) -> "XMLElement": 528 | """Append a child node with a given name, which has a (nameless) 529 | plain-text child with the given text value.""" 530 | return XMLElement( 531 | lib.lsl_append_child_value(self.e, str.encode(name), str.encode(value)) 532 | ) 533 | 534 | def prepend_child_value(self, name: str, value: str) -> "XMLElement": 535 | """Prepend a child node with a given name, which has a (nameless) 536 | plain-text child with the given text value.""" 537 | return XMLElement( 538 | lib.lsl_prepend_child_value(self.e, str.encode(name), str.encode(value)) 539 | ) 540 | 541 | def set_child_value(self, name: str, value: str) -> "XMLElement": 542 | """Set the text value of the (nameless) plain-text child of a named 543 | child node.""" 544 | return XMLElement( 545 | lib.lsl_set_child_value(self.e, str.encode(name), str.encode(value)) 546 | ) 547 | 548 | def set_name(self, name: str) -> bool: 549 | """Set the element's name. Returns False if the node is empty.""" 550 | return bool(lib.lsl_set_name(self.e, str.encode(name))) 551 | 552 | def set_value(self, value: str) -> bool: 553 | """Set the element's value. 
Returns False if the node is empty.""" 554 | return bool(lib.lsl_set_value(self.e, str.encode(value))) 555 | 556 | def append_child(self, name: str) -> "XMLElement": 557 | """Append a child element with the specified name.""" 558 | return XMLElement(lib.lsl_append_child(self.e, str.encode(name))) 559 | 560 | def prepend_child(self, name: str) -> "XMLElement": 561 | """Prepend a child element with the specified name.""" 562 | return XMLElement(lib.lsl_prepend_child(self.e, str.encode(name))) 563 | 564 | def append_copy(self, elem: "XMLElement") -> "XMLElement": 565 | """Append a copy of the specified element as a child.""" 566 | return XMLElement(lib.lsl_append_copy(self.e, elem.e)) 567 | 568 | def prepend_copy(self, elem: "XMLElement") -> "XMLElement": 569 | """Prepend a copy of the specified element as a child.""" 570 | return XMLElement(lib.lsl_prepend_copy(self.e, elem.e)) 571 | 572 | def remove_child(self, rhs: "XMLElement") -> None: 573 | """Remove a given child element, specified by name or as element.""" 574 | if type(rhs) is XMLElement: 575 | lib.lsl_remove_child(self.e, rhs.e) 576 | else: 577 | lib.lsl_remove_child_n(self.e, rhs) 578 | -------------------------------------------------------------------------------- /src/pylsl/inlet.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | from .lib import lib, fmt2type, fmt2pull_sample, fmt2pull_chunk, cf_string 4 | from .util import handle_error, FOREVER 5 | from .info import StreamInfo 6 | 7 | 8 | def free_char_p_array_memory(char_p_array, num_elements): 9 | pointers = ctypes.cast(char_p_array, ctypes.POINTER(ctypes.c_void_p)) 10 | for p in range(num_elements): 11 | if pointers[p] is not None: # only free initialized pointers 12 | lib.lsl_destroy_string(pointers[p]) 13 | 14 | 15 | class StreamInlet: 16 | """A stream inlet. 17 | 18 | Inlets are used to receive streaming data (and meta-data) from the lab 19 | network. 20 | 21 | """ 22 | 23 | def __init__( 24 | self, info, max_buflen=360, max_chunklen=0, recover=True, processing_flags=0 25 | ): 26 | """Construct a new stream inlet from a resolved stream description. 27 | 28 | Keyword arguments: 29 | description -- A resolved stream description object (as coming from one 30 | of the resolver functions). Note: the stream_inlet may also be 31 | constructed with a fully-specified stream_info, if the desired 32 | channel format and count is already known up-front, but this is 33 | strongly discouraged and should only ever be done if there is 34 | no time to resolve the stream up-front (e.g., due to 35 | limitations in the client program). 36 | max_buflen -- Optionally the maximum amount of data to buffer (in 37 | seconds if there is a nominal sampling rate, otherwise 38 | x100 in samples). Recording applications want to use a 39 | fairly large buffer size here, while real-time 40 | applications would only buffer as much as they need to 41 | perform their next calculation. (default 360) 42 | max_chunklen -- Optionally the maximum size, in samples, at which 43 | chunks are transmitted (the default corresponds to the 44 | chunk sizes used by the sender). Recording programs 45 | can use a generous size here (leaving it to the network 46 | how to pack things), while real-time applications may 47 | want a finer (perhaps 1-sample) granularity. If left 48 | unspecified (=0), the sender determines the chunk 49 | granularity. 
(default 0) 50 | recover -- Try to silently recover lost streams that are recoverable 51 | (=those that that have a source_id set). In all other cases 52 | (recover is False or the stream is not recoverable) 53 | functions may throw a lost_error if the stream's source is 54 | lost (e.g., due to an app or computer crash). (default True) 55 | processing_flags -- Post-processing options. Use one of the post-processing 56 | flags `proc_none`, `proc_clocksync`, `proc_dejitter`, `proc_monotonize`, 57 | or `proc_threadsafe`. Can also be a logical OR combination of multiple 58 | flags. Use `proc_ALL` for all flags. (default proc_none). 59 | """ 60 | if type(info) is list: 61 | raise TypeError( 62 | "description needs to be of type StreamInfo, " "got a list." 63 | ) 64 | self.obj = lib.lsl_create_inlet(info.obj, max_buflen, max_chunklen, recover) 65 | self.obj = ctypes.c_void_p(self.obj) 66 | if not self.obj: 67 | raise RuntimeError("could not create stream inlet.") 68 | if processing_flags > 0: 69 | handle_error(lib.lsl_set_postprocessing(self.obj, processing_flags)) 70 | self.channel_format = info.channel_format() 71 | self.channel_count = info.channel_count() 72 | self.do_pull_sample = fmt2pull_sample[self.channel_format] 73 | self.do_pull_chunk = fmt2pull_chunk[self.channel_format] 74 | self.value_type = fmt2type[self.channel_format] 75 | self.sample_type = self.value_type * self.channel_count 76 | self.sample = self.sample_type() 77 | self.buffers = {} 78 | 79 | def __del__(self): 80 | """Destructor. The inlet will automatically disconnect if destroyed.""" 81 | # noinspection PyBroadException 82 | try: 83 | lib.lsl_destroy_inlet(self.obj) 84 | except Exception: 85 | pass 86 | 87 | def info(self, timeout=FOREVER): 88 | """Retrieve the complete information of the given stream. 89 | 90 | This includes the extended description. Can be invoked at any time of 91 | the stream's lifetime. 92 | 93 | Keyword arguments: 94 | timeout -- Timeout of the operation. (default FOREVER) 95 | 96 | Throws a TimeoutError (if the timeout expires), or LostError (if the 97 | stream source has been lost). 98 | 99 | """ 100 | errcode = ctypes.c_int() 101 | result = lib.lsl_get_fullinfo( 102 | self.obj, ctypes.c_double(timeout), ctypes.byref(errcode) 103 | ) 104 | handle_error(errcode) 105 | return StreamInfo(handle=result) 106 | 107 | def open_stream(self, timeout=FOREVER): 108 | """Subscribe to the data stream. 109 | 110 | All samples pushed in at the other end from this moment onwards will be 111 | queued and eventually be delivered in response to pull_sample() or 112 | pull_chunk() calls. Pulling a sample without some preceding open_stream 113 | is permitted (the stream will then be opened implicitly). 114 | 115 | Keyword arguments: 116 | timeout -- Optional timeout of the operation (default FOREVER). 117 | 118 | Throws a TimeoutError (if the timeout expires), or LostError (if the 119 | stream source has been lost). 120 | 121 | """ 122 | errcode = ctypes.c_int() 123 | lib.lsl_open_stream(self.obj, ctypes.c_double(timeout), ctypes.byref(errcode)) 124 | handle_error(errcode) 125 | 126 | def close_stream(self): 127 | """Drop the current data stream. 128 | 129 | All samples that are still buffered or in flight will be dropped and 130 | transmission and buffering of data for this inlet will be stopped. 
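# A real-time-oriented inlet configuration, as discussed above: a small
# buffer, single-sample chunk granularity, and built-in clock synchronization
# plus dejittering. Assumes `found` holds at least one resolved StreamInfo.
import pylsl

inlet = pylsl.StreamInlet(
    found[0],
    max_buflen=1,       # keep roughly 1 s (or 100 samples) of backlog at most
    max_chunklen=1,     # request single-sample granularity from the sender
    processing_flags=pylsl.proc_clocksync | pylsl.proc_dejitter,
)
inlet.open_stream(timeout=5.0)   # optional; pulling would open it implicitly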
If 131 | an application stops being interested in data from a source 132 | (temporarily or not) but keeps the outlet alive, it should call 133 | lsl_close_stream() to not waste unnecessary system and network 134 | resources. 135 | 136 | """ 137 | lib.lsl_close_stream(self.obj) 138 | 139 | def time_correction(self, timeout=FOREVER): 140 | """Retrieve an estimated time correction offset for the given stream. 141 | 142 | The first call to this function takes several milliseconds until a 143 | reliable first estimate is obtained. Subsequent calls are instantaneous 144 | (and rely on periodic background updates). The precision of these 145 | estimates should be below 1 ms (empirically within +/-0.2 ms). 146 | 147 | Keyword arguments: 148 | timeout -- Timeout to acquire the first time-correction estimate 149 | (default FOREVER). 150 | 151 | Returns the current time correction estimate. This is the number that 152 | needs to be added to a time stamp that was remotely generated via 153 | local_clock() to map it into the local clock domain of this 154 | machine. 155 | 156 | Throws a TimeoutError (if the timeout expires), or LostError (if the 157 | stream source has been lost). 158 | 159 | """ 160 | errcode = ctypes.c_int() 161 | result = lib.lsl_time_correction( 162 | self.obj, ctypes.c_double(timeout), ctypes.byref(errcode) 163 | ) 164 | handle_error(errcode) 165 | return result 166 | 167 | def pull_sample(self, timeout=FOREVER, sample=None): 168 | """Pull a sample from the inlet and return it. 169 | 170 | Keyword arguments: 171 | timeout -- The timeout for this operation, if any. (default FOREVER) 172 | If this is passed as 0.0, then the function returns only a 173 | sample if one is buffered for immediate pickup. 174 | 175 | Returns a tuple (sample,timestamp) where sample is a list of channel 176 | values and timestamp is the capture time of the sample on the remote 177 | machine, or (None,None) if no new sample was available. To remap this 178 | time stamp to the local clock, add the value returned by 179 | .time_correction() to it. 180 | 181 | Throws a LostError if the stream source has been lost. Note that, if 182 | the timeout expires, no TimeoutError is thrown (because this case is 183 | not considered an error). 184 | 185 | """ 186 | 187 | # support for the legacy API 188 | if type(timeout) is list: 189 | assign_to = timeout 190 | timeout = sample if type(sample) is float else 0.0 191 | else: 192 | assign_to = None 193 | 194 | errcode = ctypes.c_int() 195 | timestamp = self.do_pull_sample( 196 | self.obj, 197 | ctypes.byref(self.sample), 198 | self.channel_count, 199 | ctypes.c_double(timeout), 200 | ctypes.byref(errcode), 201 | ) 202 | handle_error(errcode) 203 | if timestamp: 204 | sample = [v for v in self.sample] 205 | if self.channel_format == cf_string: 206 | sample = [v.decode("utf-8") for v in sample] 207 | if assign_to is not None: 208 | assign_to[:] = sample 209 | return sample, timestamp 210 | else: 211 | return None, None 212 | 213 | def pull_chunk(self, timeout=0.0, max_samples=1024, dest_obj=None): 214 | """Pull a chunk of samples from the inlet. 215 | 216 | Keyword arguments: 217 | timeout -- The timeout of the operation; if passed as 0.0, then only 218 | samples available for immediate pickup will be returned. 219 | (default 0.0) 220 | max_samples -- Maximum number of samples to return. (default 221 | 1024) 222 | dest_obj -- A Python object that supports the buffer interface. 
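# Non-blocking polling sketch: with timeout=0.0, pull_sample() returns
# (None, None) when nothing is buffered, and adding time_correction() maps the
# remote stamp into this machine's local_clock() domain. Assumes `inlet` is an
# existing StreamInlet.
sample, timestamp = inlet.pull_sample(timeout=0.0)
if sample is not None:
    local_time = timestamp + inlet.time_correction()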
223 | If this is provided then the dest_obj will be updated in place 224 | and the samples list returned by this method will be empty. 225 | It is up to the caller to trim the buffer to the appropriate 226 | number of samples. 227 | A numpy buffer must be order='C' 228 | (default None) 229 | 230 | Returns a tuple (samples,timestamps) where samples is a list of samples 231 | (each itself a list of values), and timestamps is a list of time-stamps. 232 | 233 | Throws a LostError if the stream source has been lost. 234 | 235 | """ 236 | # look up a pre-allocated buffer of appropriate length 237 | num_channels = self.channel_count 238 | max_values = max_samples * num_channels 239 | 240 | if max_samples not in self.buffers: 241 | # noinspection PyCallingNonCallable 242 | self.buffers[max_samples] = ( 243 | (self.value_type * max_values)(), 244 | (ctypes.c_double * max_samples)(), 245 | ) 246 | if dest_obj is not None: 247 | data_buff = (self.value_type * max_values).from_buffer(dest_obj) 248 | else: 249 | data_buff = self.buffers[max_samples][0] 250 | ts_buff = self.buffers[max_samples][1] 251 | 252 | # read data into it 253 | errcode = ctypes.c_int() 254 | # noinspection PyCallingNonCallable 255 | num_elements = self.do_pull_chunk( 256 | self.obj, 257 | ctypes.byref(data_buff), 258 | ctypes.byref(ts_buff), 259 | ctypes.c_size_t(max_values), 260 | ctypes.c_size_t(max_samples), 261 | ctypes.c_double(timeout), 262 | ctypes.byref(errcode), 263 | ) 264 | handle_error(errcode) 265 | # return results (note: could offer a more efficient format in the 266 | # future, e.g., a numpy array) 267 | num_samples = num_elements / num_channels 268 | if dest_obj is None: 269 | samples = [ 270 | [data_buff[s * num_channels + c] for c in range(num_channels)] 271 | for s in range(int(num_samples)) 272 | ] 273 | if self.channel_format == cf_string: 274 | samples = [[v.decode("utf-8") for v in s] for s in samples] 275 | free_char_p_array_memory(data_buff, max_values) 276 | else: 277 | samples = None 278 | timestamps = [ts_buff[s] for s in range(int(num_samples))] 279 | return samples, timestamps 280 | 281 | def samples_available(self): 282 | """Query whether samples are currently available for immediate pickup. 283 | 284 | Note that it is not a good idea to use samples_available() to determine 285 | whether a pull_*() call would block: to be sure, set the pull timeout 286 | to 0.0 or an acceptably low value. If the underlying implementation 287 | supports it, the value will be the number of samples available 288 | (otherwise it will be 1 or 0). 289 | 290 | """ 291 | return lib.lsl_samples_available(self.obj) 292 | 293 | def flush(self): 294 | """ 295 | Drop all queued not-yet pulled samples. 296 | :return: The number of dropped samples. 297 | """ 298 | return lib.lsl_inlet_flush(self.obj) 299 | 300 | def was_clock_reset(self): 301 | """Query whether the clock was potentially reset since the last call. 302 | 303 | This is rarely-used function is only needed for applications that 304 | combine multiple time_correction values to estimate precise clock 305 | drift if they should tolerate cases where the source machine was 306 | hot-swapped or restarted. 
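# Pulling into a preallocated buffer via dest_obj, as described above: the
# buffer is filled in place and the caller trims it to the number of
# timestamps actually returned. Assumes `inlet` wraps a cf_float32 stream;
# numpy is an optional extra used only for this sketch.
import numpy as np

max_samples = 1024
buf = np.empty((max_samples, inlet.channel_count), dtype=np.float32, order="C")
_, stamps = inlet.pull_chunk(timeout=0.0, max_samples=max_samples, dest_obj=buf)
data = buf[: len(stamps), :]   # only the first len(stamps) rows contain new data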
307 | 308 | """ 309 | return bool(lib.lsl_was_clock_reset(self.obj)) 310 | -------------------------------------------------------------------------------- /src/pylsl/lib/__init__.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import ctypes.util 3 | import os 4 | import platform 5 | import struct 6 | 7 | 8 | # For up to 24-bit precision measurements in the appropriate physical unit ( 9 | # e.g., microvolts). Integers from -16777216 to 16777216 are represented 10 | # accurately. 11 | cf_float32 = 1 12 | # For universal numeric data as long as permitted by network and disk budget. 13 | # The largest representable integer is 53-bit. 14 | cf_double64 = 2 15 | # For variable-length ASCII strings or data blobs, such as video frames, 16 | # complex event descriptions, etc. 17 | cf_string = 3 18 | # For high-rate digitized formats that require 32-bit precision. Depends 19 | # critically on meta-data to represent meaningful units. Useful for 20 | # application event codes or other coded data. 21 | cf_int32 = 4 22 | # For very high bandwidth signals or CD quality audio (for professional audio 23 | # float is recommended). 24 | cf_int16 = 5 25 | # For binary signals or other coded data. 26 | cf_int8 = 6 27 | # For now only for future compatibility. Support for this type is not 28 | # available on all languages and platforms. 29 | cf_int64 = 7 30 | # Can not be transmitted. 31 | cf_undefined = 0 32 | 33 | 34 | def find_liblsl_libraries(verbose=False): 35 | """finds the binary lsl library. 36 | 37 | Search order is to first try to use the path stored in the environment 38 | variable PYLSL_LIB (if available), then search through the package 39 | directory, and finally search the whole system. 40 | 41 | returns 42 | ------- 43 | 44 | path: Generator[str] 45 | a generator yielding possible paths to the library 46 | 47 | """ 48 | # find and load library 49 | if "PYLSL_LIB" in os.environ: 50 | path = os.environ["PYLSL_LIB"] 51 | if os.path.isfile(path): 52 | yield path 53 | elif verbose: 54 | print( 55 | "Skipping PYLSL_LIB:", 56 | path, 57 | " because it was either not " + "found or is not a valid file", 58 | ) 59 | 60 | os_name = platform.system() 61 | if os_name in ["Windows", "Microsoft"]: 62 | libsuffix = ".dll" 63 | elif os_name == "Darwin": 64 | libsuffix = ".dylib" 65 | elif os_name == "Linux": 66 | libsuffix = ".so" 67 | else: 68 | raise RuntimeError("unrecognized operating system:", os_name) 69 | 70 | libbasepath = os.path.dirname(__file__) 71 | 72 | # because there were quite a few errors with picking up old binaries 73 | # still lurking in the system or environment, we first search through all 74 | # prefix/suffix/bitness variants in the package itself, i.e. 
in libbasepath 75 | # before searching through the system with util.find_library 76 | for scope in ["package", "system"]: 77 | for libprefix in ["", "lib"]: 78 | for debugsuffix in ["", "-debug"]: 79 | for bitness in ["", str(8 * struct.calcsize("P"))]: 80 | if scope == "package": 81 | path = os.path.join( 82 | libbasepath, 83 | libprefix + "lsl" + bitness + debugsuffix + libsuffix, 84 | ) 85 | if os.path.isfile(path): 86 | yield path 87 | elif (scope == "system") and os_name not in [ 88 | "Windows", 89 | "Microsoft", 90 | ]: 91 | # according to docs: 92 | # On Linux, find_library tries to run external 93 | # programs (/sbin/ldconfig, gcc, and objdump) to find 94 | # the library file 95 | # On OS X, find_library tries several predefined 96 | # naming schemes and paths to locate the library, 97 | # On Windows, find_library searches along the system 98 | # search path. However, we disallow finding system-level 99 | # lsl.dll on Windows because it causes too many problems 100 | # and should never be necessary. 101 | quallibname = libprefix + "lsl" + bitness + debugsuffix 102 | path = ctypes.util.find_library(quallibname) 103 | if path is None and os_name == "Darwin": 104 | # MacOS >= 10.15 requires only searches 1 or 2 paths, thus requires the full lib path 105 | # https://bugs.python.org/issue43964#msg394782 106 | # Here we try the default homebrew folder, but you may have installed it elsewhere, 107 | # in which case you'd use the DYLD_LIBRARY_PATH (see error message below)". 108 | path = ctypes.util.find_library( 109 | "/opt/homebrew/lib/" + quallibname 110 | ) 111 | if path is not None: 112 | yield path 113 | 114 | 115 | __dload_msg = ( 116 | "You can install the LSL library with conda: `conda install -c conda-forge liblsl`" 117 | ) 118 | if platform.system() == "Darwin": 119 | __dload_msg += "\nor with homebrew: `brew install labstreaminglayer/tap/lsl`" 120 | __dload_msg += ( 121 | "\nor otherwise download it from the liblsl releases page assets: " 122 | "https://github.com/sccn/liblsl/releases" 123 | ) 124 | if platform.system() == "Darwin": 125 | # https://bugs.python.org/issue43964#msg394782 126 | __dload_msg += ( 127 | "\nOn modern MacOS (>= 10.15) it is further necessary to set the DYLD_LIBRARY_PATH " 128 | "environment variable. e.g. `>DYLD_LIBRARY_PATH=/opt/homebrew/lib python path/to/my_lsl_script.py`" 129 | ) 130 | 131 | 132 | try: 133 | libpath = next(find_liblsl_libraries()) 134 | lib = ctypes.CDLL(libpath) 135 | except StopIteration: 136 | err_msg = ( 137 | "LSL binary library file was not found. Please make sure that the " 138 | + "binary file can be found in the package lib folder\n (" 139 | + os.path.dirname(__file__) 140 | + ")\n or " 141 | ) 142 | if platform.system() not in ["Windows", "Microsoft"]: 143 | err_msg += "the system search path. Alternatively, " 144 | err_msg += "specify the PYLSL_LIB environment variable.\n " 145 | raise RuntimeError(err_msg + __dload_msg) 146 | except OSError: 147 | err_msg = "liblsl library '" + libpath + "' found but could not be loaded " 148 | err_msg += "- possible platform/architecture mismatch.\n " 149 | if platform.system() in ["Windows", "Microsoft"]: 150 | err_msg += "You may need to download and install the latest Microsoft Visual C++ Redistributable." 
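# If the automatic search fails, the PYLSL_LIB environment variable (checked
# first, as described above) can point at a specific liblsl binary. The path
# below is a placeholder and must be set before pylsl is imported.
import os

os.environ["PYLSL_LIB"] = "/opt/homebrew/lib/liblsl.dylib"   # placeholder path
import pylsl  # noqa: E402 - imported after setting the environment variable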
151 | raise RuntimeError(err_msg + "\n " + __dload_msg) 152 | 153 | 154 | # set function return types where necessary 155 | lib.lsl_local_clock.restype = ctypes.c_double 156 | lib.lsl_create_streaminfo.restype = ctypes.c_void_p 157 | lib.lsl_library_info.restype = ctypes.c_char_p 158 | lib.lsl_get_name.restype = ctypes.c_char_p 159 | lib.lsl_get_type.restype = ctypes.c_char_p 160 | lib.lsl_get_nominal_srate.restype = ctypes.c_double 161 | lib.lsl_get_source_id.restype = ctypes.c_char_p 162 | lib.lsl_get_created_at.restype = ctypes.c_double 163 | lib.lsl_get_uid.restype = ctypes.c_char_p 164 | lib.lsl_get_session_id.restype = ctypes.c_char_p 165 | lib.lsl_get_hostname.restype = ctypes.c_char_p 166 | lib.lsl_get_desc.restype = ctypes.c_void_p 167 | lib.lsl_get_xml.restype = ctypes.c_char_p 168 | lib.lsl_create_outlet.restype = ctypes.c_void_p 169 | lib.lsl_create_inlet.restype = ctypes.c_void_p 170 | lib.lsl_get_fullinfo.restype = ctypes.c_void_p 171 | lib.lsl_get_info.restype = ctypes.c_void_p 172 | lib.lsl_open_stream.restype = ctypes.c_void_p 173 | lib.lsl_time_correction.restype = ctypes.c_double 174 | lib.lsl_pull_sample_f.restype = ctypes.c_double 175 | lib.lsl_pull_sample_d.restype = ctypes.c_double 176 | lib.lsl_pull_sample_l.restype = ctypes.c_double 177 | lib.lsl_pull_sample_i.restype = ctypes.c_double 178 | lib.lsl_pull_sample_s.restype = ctypes.c_double 179 | lib.lsl_pull_sample_c.restype = ctypes.c_double 180 | lib.lsl_pull_sample_str.restype = ctypes.c_double 181 | lib.lsl_pull_sample_buf.restype = ctypes.c_double 182 | lib.lsl_first_child.restype = ctypes.c_void_p 183 | lib.lsl_first_child.argtypes = [ 184 | ctypes.c_void_p, 185 | ] 186 | lib.lsl_last_child.restype = ctypes.c_void_p 187 | lib.lsl_last_child.argtypes = [ 188 | ctypes.c_void_p, 189 | ] 190 | lib.lsl_next_sibling.restype = ctypes.c_void_p 191 | lib.lsl_next_sibling.argtypes = [ 192 | ctypes.c_void_p, 193 | ] 194 | lib.lsl_previous_sibling.restype = ctypes.c_void_p 195 | lib.lsl_previous_sibling.argtypes = [ 196 | ctypes.c_void_p, 197 | ] 198 | lib.lsl_parent.restype = ctypes.c_void_p 199 | lib.lsl_parent.argtypes = [ 200 | ctypes.c_void_p, 201 | ] 202 | lib.lsl_child.restype = ctypes.c_void_p 203 | lib.lsl_child.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 204 | lib.lsl_next_sibling_n.restype = ctypes.c_void_p 205 | lib.lsl_next_sibling_n.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 206 | lib.lsl_previous_sibling_n.restype = ctypes.c_void_p 207 | lib.lsl_previous_sibling_n.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 208 | lib.lsl_name.restype = ctypes.c_char_p 209 | lib.lsl_name.argtypes = [ 210 | ctypes.c_void_p, 211 | ] 212 | lib.lsl_value.restype = ctypes.c_char_p 213 | lib.lsl_value.argtypes = [ 214 | ctypes.c_void_p, 215 | ] 216 | lib.lsl_child_value.restype = ctypes.c_char_p 217 | lib.lsl_child_value.argtypes = [ 218 | ctypes.c_void_p, 219 | ] 220 | lib.lsl_child_value_n.restype = ctypes.c_char_p 221 | lib.lsl_child_value_n.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 222 | lib.lsl_append_child_value.restype = ctypes.c_void_p 223 | lib.lsl_append_child_value.argtypes = [ 224 | ctypes.c_void_p, 225 | ctypes.c_char_p, 226 | ctypes.c_char_p, 227 | ] 228 | lib.lsl_prepend_child_value.restype = ctypes.c_void_p 229 | lib.lsl_prepend_child_value.argtypes = [ 230 | ctypes.c_void_p, 231 | ctypes.c_char_p, 232 | ctypes.c_char_p, 233 | ] 234 | # Return type for lsl_set_child_value, lsl_set_name, lsl_set_value is int 235 | lib.lsl_set_child_value.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p] 236 | 
lib.lsl_set_name.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 237 | lib.lsl_set_value.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 238 | lib.lsl_append_child.restype = ctypes.c_void_p 239 | lib.lsl_append_child.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 240 | lib.lsl_prepend_child.restype = ctypes.c_void_p 241 | lib.lsl_prepend_child.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 242 | lib.lsl_append_copy.restype = ctypes.c_void_p 243 | lib.lsl_append_copy.argtypes = [ctypes.c_void_p, ctypes.c_void_p] 244 | lib.lsl_prepend_copy.restype = ctypes.c_void_p 245 | lib.lsl_prepend_copy.argtypes = [ctypes.c_void_p, ctypes.c_void_p] 246 | lib.lsl_remove_child_n.argtypes = [ctypes.c_void_p, ctypes.c_char_p] 247 | lib.lsl_remove_child.argtypes = [ctypes.c_void_p, ctypes.c_void_p] 248 | lib.lsl_destroy_string.argtypes = [ctypes.c_void_p] 249 | # noinspection PyBroadException 250 | try: 251 | lib.lsl_pull_chunk_f.restype = ctypes.c_long 252 | lib.lsl_pull_chunk_d.restype = ctypes.c_long 253 | lib.lsl_pull_chunk_l.restype = ctypes.c_long 254 | lib.lsl_pull_chunk_i.restype = ctypes.c_long 255 | lib.lsl_pull_chunk_s.restype = ctypes.c_long 256 | lib.lsl_pull_chunk_c.restype = ctypes.c_long 257 | lib.lsl_pull_chunk_str.restype = ctypes.c_long 258 | lib.lsl_pull_chunk_buf.restype = ctypes.c_long 259 | except Exception: 260 | print("pylsl: chunk transfer functions not available in your liblsl " "version.") 261 | # noinspection PyBroadException 262 | try: 263 | lib.lsl_create_continuous_resolver.restype = ctypes.c_void_p 264 | lib.lsl_create_continuous_resolver_bypred.restype = ctypes.c_void_p 265 | lib.lsl_create_continuous_resolver_byprop.restype = ctypes.c_void_p 266 | except Exception: 267 | print("pylsl: ContinuousResolver not (fully) available in your liblsl " "version.") 268 | 269 | 270 | # int64 support on windows and 32bit OSes isn't there yet 271 | if struct.calcsize("P") != 4 and platform.system() != "Windows": 272 | push_sample_int64 = lib.lsl_push_sample_ltp 273 | pull_sample_int64 = lib.lsl_pull_sample_l 274 | push_chunk_int64 = lib.lsl_push_chunk_ltp 275 | push_chunk_int64_n = lib.lsl_push_chunk_ltnp 276 | pull_chunk_int64 = lib.lsl_pull_chunk_l 277 | else: 278 | 279 | def push_sample_int64(*_): 280 | raise NotImplementedError("int64 support isn't enabled on your platform") 281 | 282 | pull_sample_int64 = push_sample_int64 283 | push_chunk_int64 = push_sample_int64 284 | push_chunk_int64_n = push_sample_int64 285 | pull_chunk_int64 = push_sample_int64 286 | 287 | # set up some type maps 288 | string2fmt = { 289 | "float32": cf_float32, 290 | "double64": cf_double64, 291 | "string": cf_string, 292 | "int32": cf_int32, 293 | "int16": cf_int16, 294 | "int8": cf_int8, 295 | "int64": cf_int64, 296 | } 297 | fmt2string = [ 298 | "undefined", 299 | "float32", 300 | "double64", 301 | "string", 302 | "int32", 303 | "int16", 304 | "int8", 305 | "int64", 306 | ] 307 | fmt2type = [ 308 | [], 309 | ctypes.c_float, 310 | ctypes.c_double, 311 | ctypes.c_char_p, 312 | ctypes.c_int, 313 | ctypes.c_short, 314 | ctypes.c_byte, 315 | ctypes.c_longlong, 316 | ] 317 | fmt2push_sample = [ 318 | [], 319 | lib.lsl_push_sample_ftp, 320 | lib.lsl_push_sample_dtp, 321 | lib.lsl_push_sample_strtp, 322 | lib.lsl_push_sample_itp, 323 | lib.lsl_push_sample_stp, 324 | lib.lsl_push_sample_ctp, 325 | push_sample_int64, 326 | ] 327 | fmt2pull_sample = [ 328 | [], 329 | lib.lsl_pull_sample_f, 330 | lib.lsl_pull_sample_d, 331 | lib.lsl_pull_sample_str, 332 | lib.lsl_pull_sample_i, 333 | lib.lsl_pull_sample_s, 334 | 
lib.lsl_pull_sample_c, 335 | pull_sample_int64, 336 | ] 337 | # noinspection PyBroadException 338 | try: 339 | fmt2push_chunk = [ 340 | [], 341 | lib.lsl_push_chunk_ftp, 342 | lib.lsl_push_chunk_dtp, 343 | lib.lsl_push_chunk_strtp, 344 | lib.lsl_push_chunk_itp, 345 | lib.lsl_push_chunk_stp, 346 | lib.lsl_push_chunk_ctp, 347 | push_chunk_int64, 348 | ] 349 | fmt2push_chunk_n = [ 350 | [], 351 | lib.lsl_push_chunk_ftnp, 352 | lib.lsl_push_chunk_dtnp, 353 | lib.lsl_push_chunk_strtnp, 354 | lib.lsl_push_chunk_itnp, 355 | lib.lsl_push_chunk_stnp, 356 | lib.lsl_push_chunk_ctnp, 357 | push_chunk_int64_n, 358 | ] 359 | fmt2pull_chunk = [ 360 | [], 361 | lib.lsl_pull_chunk_f, 362 | lib.lsl_pull_chunk_d, 363 | lib.lsl_pull_chunk_str, 364 | lib.lsl_pull_chunk_i, 365 | lib.lsl_pull_chunk_s, 366 | lib.lsl_pull_chunk_c, 367 | pull_chunk_int64, 368 | ] 369 | except Exception: 370 | # if not available 371 | fmt2push_chunk = [None] * len(fmt2string) 372 | fmt2push_chunk_n = [None] * len(fmt2string) 373 | fmt2pull_chunk = [None] * len(fmt2string) 374 | -------------------------------------------------------------------------------- /src/pylsl/outlet.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | from .lib import ( 4 | lib, 5 | fmt2push_sample, 6 | fmt2push_chunk, 7 | fmt2push_chunk_n, 8 | fmt2type, 9 | cf_string, 10 | ) 11 | from .util import handle_error 12 | from .info import StreamInfo 13 | 14 | 15 | class StreamOutlet: 16 | """A stream outlet. 17 | 18 | Outlets are used to make streaming data (and the meta-data) available on 19 | the lab network. 20 | 21 | """ 22 | 23 | def __init__(self, info: StreamInfo, chunk_size: int = 0, max_buffered: int = 360): 24 | """Establish a new stream outlet. This makes the stream discoverable. 25 | 26 | Keyword arguments: 27 | description -- The StreamInfo object to describe this stream. Stays 28 | constant over the lifetime of the outlet. 29 | chunk_size --- Optionally the desired chunk granularity (in samples) 30 | for transmission. If unspecified, each push operation 31 | yields one chunk. Inlets can override this setting. 32 | (default 0) 33 | max_buffered -- Optionally the maximum amount of data to buffer (in 34 | seconds if there is a nominal sampling rate, otherwise 35 | x100 in samples). The default is 6 minutes of data. 36 | Note that, for high-bandwidth data, you will want to 37 | use a lower value here to avoid running out of RAM. 38 | (default 360) 39 | 40 | """ 41 | 42 | """ 43 | # If the source_id matches the default then we can assume it was created automatically. 44 | # It may be desirable to include the host name in the source_id hash to avoid collisions. 45 | # However, there are likely implications to re-creating the info so this is commented out 46 | # until a need arises. 
47 | expected_src_id = str(hash(( 48 | info.name(), info.type(), info.channel_count(), info.nominal_srate(), info.channel_format() 49 | ))) 50 | if info.source_id() == expected_src_id: 51 | old_desc = info.desc() # save the old metadata 52 | import socket 53 | new_source_id = str(hash(( 54 | info.name(), 55 | info.type(), 56 | info.channel_count(), 57 | info.nominal_srate(), 58 | info.channel_format(), 59 | socket.gethostname() 60 | ))) 61 | info = StreamInfo( 62 | name=info.name(), 63 | type=info.type(), 64 | channel_count=info.channel_count(), 65 | nominal_srate=info.nominal_srate(), 66 | channel_format=info.channel_format(), 67 | source_id=new_source_id, 68 | ) 69 | # Add the old metadata to the new info object 70 | new_desc_parent = info.desc().parent() 71 | new_desc_parent.remove_child(info.desc()) 72 | new_desc_parent.append_copy(old_desc) 73 | """ 74 | self.obj = lib.lsl_create_outlet(info.obj, chunk_size, max_buffered) 75 | self.obj = ctypes.c_void_p(self.obj) 76 | if not self.obj: 77 | raise RuntimeError("could not create stream outlet.") 78 | self.channel_format = info.channel_format() 79 | self.channel_count = info.channel_count() 80 | self.do_push_sample = fmt2push_sample[self.channel_format] 81 | self.do_push_chunk = fmt2push_chunk[self.channel_format] 82 | self.do_push_chunk_n = fmt2push_chunk_n[self.channel_format] 83 | self.value_type = fmt2type[self.channel_format] 84 | self.sample_type = self.value_type * self.channel_count 85 | 86 | def __del__(self): 87 | """Destroy an outlet. 88 | 89 | The outlet will no longer be discoverable after destruction and all 90 | connected inlets will stop delivering data. 91 | 92 | """ 93 | # noinspection PyBroadException 94 | try: 95 | lib.lsl_destroy_outlet(self.obj) 96 | except Exception as e: 97 | print(f"StreamOutlet deletion triggered error: {e}") 98 | 99 | def push_sample(self, x, timestamp: float = 0.0, pushthrough: bool = True): 100 | """Push a sample into the outlet. 101 | 102 | Each entry in the list corresponds to one channel. 103 | 104 | Keyword arguments: 105 | x -- A list of values to push (one per channel). 106 | timestamp -- Optionally the capture time of the sample, in agreement 107 | with local_clock(); if 0.0, the current 108 | time is used. (default 0.0) 109 | pushthrough -- Whether to push the sample through to the receivers 110 | instead of buffering it with subsequent samples. 111 | Note that the chunk_size, if specified at outlet 112 | construction, takes precedence over the pushthrough flag. 113 | (default True) 114 | 115 | """ 116 | if len(x) == self.channel_count: 117 | if self.channel_format == cf_string: 118 | x = [v.encode("utf-8") for v in x] 119 | handle_error( 120 | self.do_push_sample( 121 | self.obj, 122 | self.sample_type(*x), 123 | ctypes.c_double(timestamp), 124 | ctypes.c_int(pushthrough), 125 | ) 126 | ) 127 | else: 128 | raise ValueError( 129 | "length of the sample (" + str(len(x)) + ") must " 130 | "correspond to the stream's channel count (" 131 | + str(self.channel_count) 132 | + ")." 133 | ) 134 | 135 | def push_chunk(self, x, timestamp: float = 0.0, pushthrough: bool = True): 136 | """Push a list of samples into the outlet. 137 | 138 | samples -- A list of samples, preferably as a 2-D numpy array. 139 | `samples` can also be a list of lists, or a list of 140 | multiplexed values. 141 | timestamp -- Optional, float or 1-D list of floats. 142 | If float and != 0.0: the capture time of the most recent sample, in 143 | agreement with local_clock(); if default (0.0), the current 144 | time is used. 
The time stamps of other samples are 145 | automatically derived according to the sampling rate of 146 | the stream. 147 | If list of floats: the time stamps for each sample. 148 | Must be the same length as `samples`. 149 | pushthrough Whether to push the chunk through to the receivers instead 150 | of buffering it with subsequent samples. Note that the 151 | chunk_size, if specified at outlet construction, takes 152 | precedence over the pushthrough flag. (default True) 153 | 154 | Note: performance is optimized for the following argument types: 155 | - `samples`: 2-D numpy array 156 | - `timestamp`: float 157 | """ 158 | # Convert timestamp to corresponding ctype 159 | try: 160 | timestamp_c = ctypes.c_double(timestamp) 161 | # Select the corresponding push_chunk method 162 | liblsl_push_chunk_func = self.do_push_chunk 163 | except TypeError: 164 | try: 165 | timestamp_c = (ctypes.c_double * len(timestamp))(*timestamp) 166 | liblsl_push_chunk_func = self.do_push_chunk_n 167 | except TypeError: 168 | raise TypeError("timestamp must be a float or an iterable of floats") 169 | 170 | try: 171 | n_values = self.channel_count * len(x) 172 | data_buff = (self.value_type * n_values).from_buffer(x) 173 | handle_error( 174 | liblsl_push_chunk_func( 175 | self.obj, 176 | data_buff, 177 | ctypes.c_long(n_values), 178 | timestamp_c, 179 | ctypes.c_int(pushthrough), 180 | ) 181 | ) 182 | except TypeError: 183 | # don't send empty chunks 184 | if len(x): 185 | if type(x[0]) is list: 186 | x = [v for sample in x for v in sample] 187 | if self.channel_format == cf_string: 188 | x = [v.encode("utf-8") for v in x] 189 | if len(x) % self.channel_count == 0: 190 | # x is a flattened list of multiplexed values 191 | constructor = self.value_type * len(x) 192 | # noinspection PyCallingNonCallable 193 | handle_error( 194 | liblsl_push_chunk_func( 195 | self.obj, 196 | constructor(*x), 197 | ctypes.c_long(len(x)), 198 | timestamp_c, 199 | ctypes.c_int(pushthrough), 200 | ) 201 | ) 202 | else: 203 | raise ValueError( 204 | "Each sample must have the same number of channels (" 205 | + str(self.channel_count) 206 | + ")." 207 | ) 208 | 209 | def have_consumers(self) -> bool: 210 | """Check whether consumers are currently registered. 211 | 212 | While it does not hurt, there is technically no reason to push samples 213 | if there is no consumer. 214 | 215 | """ 216 | return bool(lib.lsl_have_consumers(self.obj)) 217 | 218 | def wait_for_consumers(self, timeout: float) -> bool: 219 | """Wait until some consumer shows up (without wasting resources). 220 | 221 | Returns True if the wait was successful, False if the timeout expired. 222 | 223 | """ 224 | return bool(lib.lsl_wait_for_consumers(self.obj, ctypes.c_double(timeout))) 225 | 226 | def get_info(self) -> StreamInfo: 227 | outlet_info = lib.lsl_get_info(self.obj) 228 | return StreamInfo(handle=outlet_info) 229 | -------------------------------------------------------------------------------- /src/pylsl/resolve.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | from .lib import lib 4 | from .info import StreamInfo 5 | from .util import FOREVER 6 | 7 | 8 | def resolve_streams(wait_time=1.0): 9 | """Resolve all streams on the network. 10 | 11 | This function returns all currently available streams from any outlet on 12 | the network. 
The network is usually the subnet specified at the local 13 | router, but may also include a group of machines visible to each other via 14 | multicast packets (given that the network supports it), or list of 15 | hostnames. These details may optionally be customized by the experimenter 16 | in a configuration file (see Network Connectivity in the LSL wiki). 17 | 18 | Keyword arguments: 19 | wait_time -- The waiting time for the operation, in seconds, to search for 20 | streams. Warning: If this is too short (<0.5s) only a subset 21 | (or none) of the outlets that are present on the network may 22 | be returned. (default 1.0) 23 | 24 | Returns a list of StreamInfo objects (with empty desc field), any of which 25 | can subsequently be used to open an inlet. The full description can be 26 | retrieved from the inlet. 27 | 28 | """ 29 | # noinspection PyCallingNonCallable 30 | buffer = (ctypes.c_void_p * 1024)() 31 | num_found = lib.lsl_resolve_all( 32 | ctypes.byref(buffer), 1024, ctypes.c_double(wait_time) 33 | ) 34 | return [StreamInfo(handle=buffer[k]) for k in range(num_found)] 35 | 36 | 37 | def resolve_byprop(prop, value, minimum=1, timeout=FOREVER): 38 | """Resolve all streams with a specific value for a given property. 39 | 40 | If the goal is to resolve a specific stream, this method is preferred over 41 | resolving all streams and then selecting the desired one. 42 | 43 | Keyword arguments: 44 | prop -- The StreamInfo property that should have a specific value (e.g., 45 | "name", "type", "source_id", or "desc/manufacturer"). 46 | value -- The string value that the property should have (e.g., "EEG" as 47 | the type property). 48 | minimum -- Return at least this many streams. (default 1) 49 | timeout -- Optionally a timeout of the operation, in seconds. If the 50 | timeout expires, less than the desired number of streams 51 | (possibly none) will be returned. (default FOREVER) 52 | 53 | Returns a list of matching StreamInfo objects (with empty desc field), any 54 | of which can subsequently be used to open an inlet. 55 | 56 | Example: results = resolve_Stream_byprop("type","EEG") 57 | 58 | """ 59 | # noinspection PyCallingNonCallable 60 | buffer = (ctypes.c_void_p * 1024)() 61 | num_found = lib.lsl_resolve_byprop( 62 | ctypes.byref(buffer), 63 | 1024, 64 | ctypes.c_char_p(str.encode(prop)), 65 | ctypes.c_char_p(str.encode(value)), 66 | minimum, 67 | ctypes.c_double(timeout), 68 | ) 69 | return [StreamInfo(handle=buffer[k]) for k in range(num_found)] 70 | 71 | 72 | def resolve_bypred(predicate, minimum=1, timeout=FOREVER): 73 | """Resolve all streams that match a given predicate. 74 | 75 | Advanced query that allows to impose more conditions on the retrieved 76 | streams; the given string is an XPath 1.0 predicate for the 77 | node (omitting the surrounding []'s), see also 78 | http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951. 79 | 80 | Keyword arguments: 81 | predicate -- The predicate string, e.g. "name='BioSemi'" or 82 | "type='EEG' and starts-with(name,'BioSemi') and 83 | count(description/desc/channels/channel)=32" 84 | minimum -- Return at least this many streams. (default 1) 85 | timeout -- Optionally a timeout of the operation, in seconds. If the 86 | timeout expires, less than the desired number of streams 87 | (possibly none) will be returned. (default FOREVER) 88 | 89 | Returns a list of matching StreamInfo objects (with empty desc field), any 90 | of which can subsequently be used to open an inlet. 
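# Targeted resolution sketches for the two functions documented above; the
# explicit timeouts keep the calls from blocking FOREVER when no matching
# outlet exists. The predicate string is taken from the docstring example.
from pylsl import resolve_byprop, resolve_bypred

eeg_streams = resolve_byprop("type", "EEG", minimum=1, timeout=5.0)
biosemi = resolve_bypred("type='EEG' and starts-with(name,'BioSemi')", timeout=5.0)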
91 | 92 | """ 93 | # noinspection PyCallingNonCallable 94 | buffer = (ctypes.c_void_p * 1024)() 95 | num_found = lib.lsl_resolve_bypred( 96 | ctypes.byref(buffer), 97 | 1024, 98 | ctypes.c_char_p(str.encode(predicate)), 99 | minimum, 100 | ctypes.c_double(timeout), 101 | ) 102 | return [StreamInfo(handle=buffer[k]) for k in range(num_found)] 103 | 104 | 105 | class ContinuousResolver: 106 | """A convenience class resolving streams continuously in the background. 107 | 108 | This object can be queried at any time for the set of streams that are 109 | currently visible on the network. 110 | 111 | """ 112 | 113 | def __init__(self, prop=None, value=None, pred=None, forget_after=5.0): 114 | """Construct a new continuous_resolver. 115 | 116 | Keyword arguments: 117 | forget_after -- When a stream is no longer visible on the network 118 | (e.g., because it was shut down), this is the time in 119 | seconds after which it is no longer reported by the 120 | resolver. 121 | 122 | """ 123 | if pred is not None: 124 | if prop is not None or value is not None: 125 | raise ValueError( 126 | "you can only either pass the prop/value " 127 | "argument or the pred argument, but not " 128 | "both." 129 | ) 130 | self.obj = lib.lsl_create_continuous_resolver_bypred( 131 | str.encode(pred), ctypes.c_double(forget_after) 132 | ) 133 | elif prop is not None and value is not None: 134 | self.obj = lib.lsl_create_continuous_resolver_byprop( 135 | str.encode(prop), str.encode(value), ctypes.c_double(forget_after) 136 | ) 137 | elif prop is not None or value is not None: 138 | raise ValueError( 139 | "if prop is specified, then value must be " 140 | "specified, too, and vice versa." 141 | ) 142 | else: 143 | self.obj = lib.lsl_create_continuous_resolver(ctypes.c_double(forget_after)) 144 | self.obj = ctypes.c_void_p(self.obj) 145 | if not self.obj: 146 | raise RuntimeError("could not create continuous resolver.") 147 | 148 | def __del__(self): 149 | """Destructor for the continuous resolver.""" 150 | # noinspection PyBroadException 151 | try: 152 | lib.lsl_destroy_continuous_resolver(self.obj) 153 | except Exception: 154 | pass 155 | 156 | def results(self): 157 | """Obtain the set of currently present streams on the network. 158 | 159 | Returns a list of matching StreamInfo objects (with empty desc 160 | field), any of which can subsequently be used to open an inlet. 161 | 162 | """ 163 | # noinspection PyCallingNonCallable 164 | buffer = (ctypes.c_void_p * 1024)() 165 | num_found = lib.lsl_resolver_results(self.obj, ctypes.byref(buffer), 1024) 166 | return [StreamInfo(handle=buffer[k]) for k in range(num_found)] 167 | 168 | 169 | def resolve_stream(*args): 170 | if len(args) == 0: 171 | return resolve_streams() 172 | elif type(args[0]) in [int, float]: 173 | return resolve_streams(args[0]) 174 | elif type(args[0]) is str: 175 | if len(args) == 1: 176 | return resolve_bypred(args[0]) 177 | elif type(args[1]) in [int, float]: 178 | return resolve_bypred(args[0], args[1]) 179 | else: 180 | if len(args) == 2: 181 | return resolve_byprop(args[0], args[1]) 182 | else: 183 | return resolve_byprop(args[0], args[1], args[2]) 184 | -------------------------------------------------------------------------------- /src/pylsl/util.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | from .lib import lib 4 | 5 | # Constant to indicate that a stream has variable sampling rate. 
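# (e.g., a marker stream whose events occur at unpredictable times would pass this value as the nominal_srate of its StreamInfo.)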
6 | IRREGULAR_RATE = 0.0
7 | 
8 | # Constant to indicate that a sample has the next successive time stamp
9 | # according to the stream's defined sampling rate. Optional optimization to
10 | # transmit less data per sample.
11 | DEDUCED_TIMESTAMP = -1.0
12 | 
13 | # A very large time value (ca. 1 year); can be used in timeouts.
14 | FOREVER = 32000000.0
15 | 
16 | # Value formats supported by LSL. LSL data streams are sequences of samples,
17 | # each of which is a same-size vector of values with one of the below types.
18 | 
19 | # Post-processing flags
20 | proc_none = 0  # No automatic post-processing; return the ground-truth time stamps for manual post-processing.
21 | proc_clocksync = 1  # Perform automatic clock synchronization; equivalent to manually adding the time_correction().
22 | proc_dejitter = 2  # Remove jitter by applying a smoothing algorithm to the received time stamps.
23 | proc_monotonize = 4  # Force the time stamps to be monotonically ascending. Only makes sense if they are dejittered.
24 | proc_threadsafe = 8  # Post-processing is thread-safe (the same inlet can be read from by multiple threads).
25 | proc_ALL = (
26 |     proc_none | proc_clocksync | proc_dejitter | proc_monotonize | proc_threadsafe
27 | )
28 | 
29 | 
30 | def protocol_version():
31 |     """Protocol version.
32 | 
33 |     The major version is protocol_version() // 100;
34 |     the minor version is protocol_version() % 100.
35 | 
36 |     Clients with different minor versions are protocol-compatible with each
37 |     other, while clients with different major versions will refuse to work
38 |     together.
39 | 
40 |     """
41 |     return lib.lsl_protocol_version()
42 | 
43 | 
44 | def library_version():
45 |     """Version of the underlying liblsl library.
46 | 
47 |     The major version is library_version() // 100;
48 |     the minor version is library_version() % 100.
49 | 
50 |     """
51 |     return lib.lsl_library_version()
52 | 
53 | 
54 | def library_info():
55 |     """Get a string containing library information. The format of the string should not be
56 |     relied upon for anything important; it only gives a person debugging an issue a good idea
57 |     of which exact library version is in use."""
58 |     return lib.lsl_library_info().decode("utf-8")
59 | 
60 | 
61 | def local_clock():
62 |     """Obtain a local system time stamp in seconds.
63 | 
64 |     The resolution is better than a millisecond. This reading can be used to
65 |     assign time stamps to samples as they are being acquired.
66 | 
67 |     If the "age" of a sample is known at a particular time (e.g., from USB
68 |     transmission delays), it can be used as an offset to lsl_local_clock() to
69 |     obtain a better estimate of when a sample was actually captured. See
70 |     StreamOutlet.push_sample() for a use case.
71 | 
72 |     """
73 |     return lib.lsl_local_clock()
74 | 
75 | 
76 | class TimeoutError(RuntimeError):
77 |     # note: although this overrides the name of a built-in exception,
78 |     # this API is retained here for compatibility with the Python 2.x
79 |     # version of pylsl
80 |     pass
81 | 
82 | 
83 | class LostError(RuntimeError):
84 |     pass
85 | 
86 | 
87 | class InvalidArgumentError(RuntimeError):
88 |     pass
89 | 
90 | 
91 | class InternalError(RuntimeError):
92 |     pass
93 | 
94 | 
95 | def handle_error(errcode):
96 |     """Error handler function.
Translates an error code into an exception.""" 97 | if type(errcode) is ctypes.c_int: 98 | errcode = errcode.value 99 | if errcode == 0: 100 | pass # no error 101 | elif errcode == -1: 102 | raise TimeoutError("the operation failed due to a timeout.") 103 | elif errcode == -2: 104 | raise LostError("the stream has been lost.") 105 | elif errcode == -3: 106 | raise InvalidArgumentError("an argument was incorrectly specified.") 107 | elif errcode == -4: 108 | raise InternalError("an internal error has occurred.") 109 | elif errcode < 0: 110 | raise RuntimeError("an unknown error has occurred.") 111 | -------------------------------------------------------------------------------- /test/test_format.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | 3 | import pytest 4 | 5 | import pylsl 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "channel_format", [pylsl.cf_int32, pylsl.cf_float32, pylsl.cf_double64] 10 | ) 11 | def test_format(channel_format: int): 12 | expected_type = { 13 | pylsl.cf_int32: ctypes.c_int, 14 | pylsl.cf_float32: ctypes.c_float, 15 | pylsl.cf_double64: ctypes.c_double, 16 | }[channel_format] 17 | 18 | feature_info = pylsl.StreamInfo( 19 | name="test", 20 | type="EEG", 21 | channel_count=1, 22 | nominal_srate=1, 23 | channel_format=channel_format, 24 | source_id="testid", 25 | ) 26 | outlet = pylsl.StreamOutlet(feature_info) 27 | assert outlet.value_type == expected_type 28 | 29 | streams = pylsl.resolve_byprop("name", "test", timeout=1) 30 | inlet = pylsl.StreamInlet(streams[0]) 31 | 32 | assert inlet.value_type == expected_type 33 | -------------------------------------------------------------------------------- /test/test_info.py: -------------------------------------------------------------------------------- 1 | import pylsl 2 | 3 | 4 | def test_info_src_id(): 5 | name = "TestName" 6 | strm_type = "TestType" 7 | chans = 32 8 | srate = 1000.0 9 | fmt = pylsl.cf_float32 10 | 11 | info = pylsl.StreamInfo( 12 | name=name, 13 | type=strm_type, 14 | channel_count=chans, 15 | nominal_srate=srate, 16 | channel_format=fmt, 17 | source_id=None, 18 | ) 19 | expected_src_id = str(hash((name, strm_type, chans, srate, fmt))) 20 | assert info.source_id() == expected_src_id 21 | 22 | # Augment info with desc 23 | info.desc().append_child_value("manufacturer", "pytest") 24 | chns = info.desc().append_child("channels") 25 | for chan_ix in range(1, chans + 1): 26 | ch = chns.append_child("channel") 27 | ch.append_child_value("label", f"Ch{chan_ix}") 28 | 29 | outlet = pylsl.StreamOutlet(info) 30 | outlet_info = outlet.get_info() 31 | 32 | """ 33 | # See comment block in StreamOutlet.__init__ to see why this is commented out. 34 | import socket 35 | outlet_expected_source_id = str(hash((name, strm_type, chans, srate, fmt, socket.gethostname()))) 36 | """ 37 | outlet_expected_source_id = expected_src_id 38 | 39 | assert outlet_info.source_id() == outlet_expected_source_id 40 | out_desc = outlet_info.desc() 41 | assert out_desc.child_value("manufacturer") == "pytest" 42 | assert outlet_info.get_channel_labels() == [ 43 | f"Ch{chan_ix}" for chan_ix in range(1, chans + 1) 44 | ] 45 | --------------------------------------------------------------------------------
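Taken together, outlet.py, resolve.py, and util.py above cover a full pylsl round trip. The sketch below mirrors what test/test_format.py exercises: create an outlet, resolve it by property, open an inlet, and exchange a sample and a small chunk. The stream name "DemoStream", the channel count, and the source_id are illustrative values, not anything defined in this repository; only functions shown in the sources above are used.

    import pylsl

    # Describe the stream; all values here are illustrative.
    info = pylsl.StreamInfo(
        name="DemoStream",
        type="EEG",
        channel_count=8,
        nominal_srate=100.0,
        channel_format=pylsl.cf_float32,
        source_id="demo-stream-001",
    )
    outlet = pylsl.StreamOutlet(info)

    # Resolve the stream by one of its properties and open an inlet.
    streams = pylsl.resolve_byprop("name", "DemoStream", timeout=5.0)
    inlet = pylsl.StreamInlet(streams[0])

    # Push one sample stamped with the local clock, then a small chunk given as
    # a list of samples (push_chunk flattens it before handing it to liblsl).
    outlet.push_sample([0.0] * 8, pylsl.local_clock())
    outlet.push_chunk([[float(i)] * 8 for i in range(4)])

    # Pull one sample back; pull_sample returns (sample, timestamp).
    sample, timestamp = inlet.pull_sample(timeout=5.0)
    print(timestamp, sample)

In a real application the outlet and the inlet would normally live in different processes; pairing them in one process here simply keeps the sketch self-contained, just as the unit tests above do.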