├── .dockerignore ├── .github └── workflows │ ├── repo.yml │ └── tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── Dockerfile ├── README.md ├── SConscript ├── SConstruct ├── codecov.yml ├── msgq ├── .gitignore ├── __init__.py ├── event.cc ├── event.h ├── impl_fake.cc ├── impl_fake.h ├── impl_msgq.cc ├── impl_msgq.h ├── impl_zmq.cc ├── impl_zmq.h ├── ipc.cc ├── ipc.h ├── ipc.pxd ├── ipc_pyx.pyx ├── logger │ └── logger.h ├── msgq.cc ├── msgq.h ├── msgq_tests.cc ├── test_runner.cc ├── tests │ ├── __init__.py │ ├── test_fake.py │ ├── test_messaging.py │ └── test_poller.py └── visionipc │ ├── .gitignore │ ├── __init__.py │ ├── test_runner.cc │ ├── tests │ ├── __init__.py │ └── test_visionipc.py │ ├── visionbuf.cc │ ├── visionbuf.h │ ├── visionbuf_cl.cc │ ├── visionbuf_ion.cc │ ├── visionipc.cc │ ├── visionipc.h │ ├── visionipc.pxd │ ├── visionipc_client.cc │ ├── visionipc_client.h │ ├── visionipc_pyx.pxd │ ├── visionipc_pyx.pyx │ ├── visionipc_server.cc │ ├── visionipc_server.h │ └── visionipc_tests.cc ├── pyproject.toml └── site_scons └── site_tools └── cython.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .sconsign.dblite 2 | -------------------------------------------------------------------------------- /.github/workflows/repo.yml: -------------------------------------------------------------------------------- 1 | name: repo 2 | 3 | on: 4 | schedule: 5 | - cron: "0 15 1 * *" 6 | workflow_dispatch: 7 | 8 | jobs: 9 | pre-commit-autoupdate: 10 | name: pre-commit autoupdate 11 | runs-on: ubuntu-latest 12 | container: 13 | steps: 14 | - uses: actions/checkout@v3 15 | - name: pre-commit autoupdate 16 | run: | 17 | git config --global --add safe.directory '*' 18 | pre-commit autoupdate 19 | - name: Create Pull Request 20 | uses: peter-evans/create-pull-request@5b4a9f6a9e2af26e5f02351490b90d01eb8ec1e5 21 | with: 22 | token: ${{ secrets.ACTIONS_CREATE_PR_PAT }} 23 | commit-message: Update pre-commit hook 
versions 24 | title: 'pre-commit: autoupdate hooks' 25 | branch: pre-commit-updates 26 | base: master 27 | delete-branch: true 28 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: tests 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | DOCKER_REGISTRY: ghcr.io/commaai 7 | RUN: docker run -e PYTHONWARNINGS=error --shm-size 1G --name msgq msgq /bin/sh -c 8 | RUN_NAMED: docker run -e PYTHONWARNINGS=error --shm-size 1G --rm msgq /bin/sh -c 9 | CI_RUN: docker run -e GITHUB_ACTION -e GITHUB_REF -e GITHUB_HEAD_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_RUN_ID --rm msgqci /bin/bash -c 10 | BUILD: docker buildx build --pull --load --cache-to type=inline --cache-from $DOCKER_REGISTRY/msgq:latest -t msgq -f Dockerfile . 11 | PYTHONWARNINGS: error 12 | 13 | jobs: 14 | build: 15 | name: build 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: Build docker image 20 | run: eval "$BUILD" 21 | - name: Push to dockerhub 22 | if: github.ref == 'refs/heads/master' && github.event_name != 'pull_request' && github.repository == 'commaai/msgq' 23 | run: | 24 | docker login ghcr.io -u ${{ github.actor }} -p ${{ secrets.GITHUB_TOKEN }} 25 | docker tag msgq $DOCKER_REGISTRY/msgq:latest 26 | docker push $DOCKER_REGISTRY/msgq:latest 27 | 28 | unit_tests: 29 | name: unit tests 30 | runs-on: ubuntu-latest 31 | strategy: 32 | matrix: 33 | flags: ['', '--asan', '--ubsan'] 34 | backend: ['MSGQ', 'ZMQ'] 35 | steps: 36 | - uses: actions/checkout@v3 37 | - name: Build docker image 38 | run: eval "$BUILD" 39 | - name: C++ tests 40 | run: | 41 | $RUN "export ${{ matrix.backend }}=1 && \ 42 | scons ${{ matrix.flags }} -j$(nproc) && \ 43 | msgq/test_runner && \ 44 | msgq/visionipc/test_runner" 45 | - name: python tests 46 | run: $RUN_NAMED "${{ matrix.backend }}=1 coverage run -m pytest" 47 | - name: Upload 
coverage 48 | run: | 49 | docker commit msgq msgqci 50 | $CI_RUN "cd /project/msgq && bash <(curl -s https://codecov.io/bash) -v -F unit_tests_${{ matrix.backend }}" 51 | 52 | static_analysis: 53 | name: static analysis 54 | runs-on: ubuntu-latest 55 | steps: 56 | - uses: actions/checkout@v3 57 | - name: Build docker image 58 | run: eval "$BUILD" 59 | - name: Static analysis 60 | # TODO: a package pre-commit installs has a warning, remove the unset once that's fixed 61 | run: $RUN "git init && git add -A && unset PYTHONWARNINGS && pre-commit run --all" 62 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /gen/ 2 | *.tmp 3 | *.pyc 4 | __pycache__ 5 | .*.swp 6 | .*.swo 7 | *.os 8 | *.so 9 | *.o 10 | *.a 11 | 12 | test_runner 13 | 14 | libmessaging.* 15 | libmessaging_shared.* 16 | services.h 17 | .sconsign.dblite 18 | .mypy_cache/ 19 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | files: ^msgq/ 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v4.6.0 5 | hooks: 6 | - id: check-ast 7 | - id: check-yaml 8 | - id: check-executables-have-shebangs 9 | - id: check-shebang-scripts-are-executable 10 | - repo: https://github.com/pre-commit/mirrors-mypy 11 | rev: v1.9.0 12 | hooks: 13 | - id: mypy 14 | - repo: https://github.com/astral-sh/ruff-pre-commit 15 | rev: v0.3.5 16 | hooks: 17 | - id: ruff 18 | - repo: local 19 | hooks: 20 | - id: cppcheck 21 | name: cppcheck 22 | entry: cppcheck 23 | language: system 24 | types: [c++] 25 | exclude: '^(msgq/msgq_tests.cc|msgq/test_runner.cc)' 26 | args: 27 | - --error-exitcode=1 28 | - --inline-suppr 29 | - --language=c++ 30 | - --force 31 | - --quiet 32 | - -j4 33 | - repo: https://github.com/cpplint/cpplint 34 | rev: 1.6.1 35 | 
hooks: 36 | - id: cpplint 37 | args: 38 | - --quiet 39 | - --counting=detailed 40 | - --linelength=240 41 | - --filter=-build,-legal,-readability,-runtime,-whitespace,+build/include_subdir,+build/forward_decl,+build/include_what_you_use,+build/deprecated,+whitespace/comma,+whitespace/line_length,+whitespace/empty_if_body,+whitespace/empty_loop_body,+whitespace/empty_conditional_body,+whitespace/forcolon,+whitespace/parens,+whitespace/semicolon,+whitespace/tab,+readability/braces 42 | - repo: https://github.com/codespell-project/codespell 43 | rev: v2.2.6 44 | hooks: 45 | - id: codespell 46 | args: 47 | - -L ned 48 | - --builtins clear,rare,informal,usage,code,names,en-GB_to_en-US 49 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:24.04 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | RUN apt-get update && apt-get install -y --no-install-recommends \ 5 | autoconf \ 6 | build-essential \ 7 | ca-certificates \ 8 | capnproto \ 9 | clang \ 10 | cppcheck \ 11 | curl \ 12 | git \ 13 | libbz2-dev \ 14 | libcapnp-dev \ 15 | libclang-rt-dev \ 16 | libffi-dev \ 17 | liblzma-dev \ 18 | libncurses5-dev \ 19 | libncursesw5-dev \ 20 | libreadline-dev \ 21 | libsqlite3-dev \ 22 | libssl-dev \ 23 | libtool \ 24 | libzmq3-dev \ 25 | llvm \ 26 | make \ 27 | cmake \ 28 | ocl-icd-opencl-dev \ 29 | opencl-headers \ 30 | python3-dev \ 31 | python3-pip \ 32 | tk-dev \ 33 | wget \ 34 | xz-utils \ 35 | zlib1g-dev \ 36 | && rm -rf /var/lib/apt/lists/* 37 | 38 | RUN pip3 install --break-system-packages --no-cache-dir pyyaml Cython scons pycapnp pre-commit ruff parameterized coverage numpy pytest 39 | 40 | WORKDIR /project/msgq 41 | RUN cd /tmp/ && \ 42 | git clone -b v2.x --depth 1 https://github.com/catchorg/Catch2.git && \ 43 | cd Catch2 && \ 44 | mv single_include/* /project/msgq/ && \ 45 | cd .. 
\ 46 | rm -rf Catch2 47 | 48 | ENV PYTHONPATH=/project/msgq 49 | 50 | COPY . . 51 | RUN ls && rm -rf .git && \ 52 | scons -c && scons -j$(nproc) 53 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MSGQ: A lock free single producer multi consumer message queue 2 | 3 | ## What is this library? 4 | MSGQ is a generic high performance IPC pub sub system with a single publisher and multiple subscribers. MSGQ is designed to be a high performance replacement for ZMQ-like SUB/PUB patterns. It uses a ring buffer in shared memory to efficiently read and write data. Each read requires a copy. Writing can be done without a copy, as long as the size of the data is known in advance. While MSGQ is the core of this library, this library also allows replacing the MSGQ backend with ZMQ or a spoofed implementation that can be used for deterministic testing. This library also contains visionipc, an IPC system specifically for large contiguous buffers (like images/video). 5 | 6 | ## Storage 7 | The storage for the queue consists of an area of metadata, and the actual buffer. The metadata contains: 8 | 9 | 1. A counter to the number of readers that are active 10 | 2. A pointer to the head of the queue for writing. From now on referred to as *write pointer* 11 | 3. A cycle counter for the writer. This counter is incremented when the writer wraps around 12 | 4. N pointers, pointing to the current read position for all the readers. From now on referred to as *read pointer* 13 | 5. N counters, counting the number of cycles for all the readers 14 | 6. N booleans, indicating validity for all the readers. From now on referred to as *validity flag* 15 | 16 | The counter and the pointer are both 32 bit values, packed into 64 bit so they can be read and written atomically. 17 | 18 | The data buffer is a ring buffer. 
All messages are prefixed by an 8 byte size field, followed by the data. A size of -1 indicates a wrap-around, and means the next message is stored at the beginning of the buffer. 19 | 20 | 21 | ## Writing 22 | Writing involves the following steps: 23 | 24 | 1. Check if the area that is to be written overlaps with any of the read pointers, mark those readers as invalid by clearing the validity flag. 25 | 2. Write the message 26 | 3. Increase the write pointer by the size of the message 27 | 28 | In case there is not enough space at the end of the buffer, a special empty message with a prefix of -1 is written. The cycle counter is incremented by one. In this case step 1 will check there are no read pointers pointing to the remainder of the buffer. Then another write cycle will start with the actual message. 29 | 30 | There always needs to be 8 bytes of empty space at the end of the buffer. By doing this there is always space to write the -1. 31 | 32 | ## Reset reader 33 | When the reader is lagging too much behind the read pointer becomes invalid and no longer points to the beginning of a valid message. To reset a reader to the current write pointer, the following steps are performed: 34 | 35 | 1. Set valid flag 36 | 2. Set read cycle counter to that of the writer 37 | 3. Set read pointer to write pointer 38 | 39 | ## Reading 40 | Reading involves the following steps: 41 | 42 | 1. Read the size field at the current read pointer 43 | 2. Read the validity flag 44 | 3. Copy the data out of the buffer 45 | 4. Increase the read pointer by the size of the message 46 | 5. Check the validity flag again 47 | 48 | Before starting the copy, the valid flag is checked. This is to prevent a race condition where the size prefix was invalid, and the read could read outside of the buffer. Make sure that step 1 and 2 are not reordered by your compiler or CPU. 49 | 50 | If a writer overwrites the data while it's being copied out, the data will be invalid. 
Therefore the validity flag is also checked after reading it. The order of step 4 and 5 does not matter. 51 | 52 | If at steps 2 or 5 the validity flag is not set, the reader is reset. Any data that was already read is discarded. After the reader is reset, the reading starts from the beginning. 53 | 54 | If a message with size -1 is encountered, step 3 and 4 are replaced by increasing the cycle counter and setting the read pointer to the beginning of the buffer. After that another read is performed. 55 | -------------------------------------------------------------------------------- /SConscript: -------------------------------------------------------------------------------- 1 | Import('env', 'envCython', 'arch', 'common') 2 | 3 | 4 | visionipc_dir = Dir('msgq/visionipc') 5 | gen_dir = Dir('gen') 6 | 7 | 8 | # Build msgq 9 | msgq_objects = env.SharedObject([ 10 | 'msgq/ipc.cc', 11 | 'msgq/event.cc', 12 | 'msgq/impl_zmq.cc', 13 | 'msgq/impl_msgq.cc', 14 | 'msgq/impl_fake.cc', 15 | 'msgq/msgq.cc', 16 | ]) 17 | msgq = env.Library('msgq', msgq_objects) 18 | msgq_python = envCython.Program('msgq/ipc_pyx.so', 'msgq/ipc_pyx.pyx', LIBS=envCython["LIBS"]+[msgq, "zmq", common]) 19 | 20 | # Build Vision IPC 21 | vipc_files = ['visionipc.cc', 'visionipc_server.cc', 'visionipc_client.cc', 'visionbuf.cc'] 22 | vipc_sources = [f'{visionipc_dir.abspath}/{f}' for f in vipc_files] 23 | 24 | if arch == "larch64": 25 | vipc_sources += [f'{visionipc_dir.abspath}/visionbuf_ion.cc'] 26 | else: 27 | vipc_sources += [f'{visionipc_dir.abspath}/visionbuf_cl.cc'] 28 | 29 | vipc_objects = env.SharedObject(vipc_sources) 30 | visionipc = env.Library('visionipc', vipc_objects) 31 | 32 | 33 | vipc_frameworks = [] 34 | vipc_libs = envCython["LIBS"] + [visionipc, msgq, common, "zmq"] 35 | if arch == "Darwin": 36 | vipc_frameworks.append('OpenCL') 37 | else: 38 | vipc_libs.append('OpenCL') 39 | envCython.Program(f'{visionipc_dir.abspath}/visionipc_pyx.so', 
f'{visionipc_dir.abspath}/visionipc_pyx.pyx', 40 | LIBS=vipc_libs, FRAMEWORKS=vipc_frameworks) 41 | 42 | if GetOption('extras'): 43 | env.Program('msgq/test_runner', ['msgq/test_runner.cc', 'msgq/msgq_tests.cc'], LIBS=[msgq, common]) 44 | env.Program(f'{visionipc_dir.abspath}/test_runner', 45 | [f'{visionipc_dir.abspath}/test_runner.cc', f'{visionipc_dir.abspath}/visionipc_tests.cc'], 46 | LIBS=['pthread'] + vipc_libs, FRAMEWORKS=vipc_frameworks) 47 | 48 | Export('visionipc', 'msgq', 'msgq_python') 49 | -------------------------------------------------------------------------------- /SConstruct: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | import subprocess 4 | import sysconfig 5 | import numpy as np 6 | 7 | arch = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip() 8 | if platform.system() == "Darwin": 9 | arch = "Darwin" 10 | 11 | common = '' 12 | 13 | cpppath = [ 14 | f"#/", 15 | '#msgq/', 16 | '/usr/lib/include', 17 | '/opt/homebrew/include', 18 | sysconfig.get_paths()['include'], 19 | ] 20 | 21 | libpath = [ 22 | '/opt/homebrew/lib', 23 | ] 24 | 25 | AddOption('--minimal', 26 | action='store_false', 27 | dest='extras', 28 | default=True, 29 | help='the minimum build. 
no tests, tools, etc.') 30 | 31 | AddOption('--asan', 32 | action='store_true', 33 | help='turn on ASAN') 34 | 35 | AddOption('--ubsan', 36 | action='store_true', 37 | help='turn on UBSan') 38 | 39 | ccflags = [] 40 | ldflags = [] 41 | if GetOption('ubsan'): 42 | flags = [ 43 | "-fsanitize=undefined", 44 | "-fno-sanitize-recover=undefined", 45 | ] 46 | ccflags += flags 47 | ldflags += flags 48 | elif GetOption('asan'): 49 | ccflags += ["-fsanitize=address", "-fno-omit-frame-pointer"] 50 | ldflags += ["-fsanitize=address"] 51 | 52 | env = Environment( 53 | ENV=os.environ, 54 | CC='clang', 55 | CXX='clang++', 56 | CCFLAGS=[ 57 | "-g", 58 | "-fPIC", 59 | "-O2", 60 | "-Wunused", 61 | "-Werror", 62 | "-Wshadow", 63 | "-Wno-vla-cxx-extension", 64 | "-Wno-unknown-warning-option", 65 | ] + ccflags, 66 | LDFLAGS=ldflags, 67 | LINKFLAGS=ldflags, 68 | 69 | CFLAGS="-std=gnu11", 70 | CXXFLAGS="-std=c++1z", 71 | CPPPATH=cpppath, 72 | LIBPATH=libpath, 73 | CYTHONCFILESUFFIX=".cpp", 74 | tools=["default", "cython"] 75 | ) 76 | 77 | Export('env', 'arch', 'common') 78 | 79 | envCython = env.Clone(LIBS=[]) 80 | envCython["CPPPATH"] += [np.get_include()] 81 | envCython["CCFLAGS"] += ["-Wno-#warnings", "-Wno-shadow", "-Wno-deprecated-declarations"] 82 | envCython["CCFLAGS"].remove('-Werror') 83 | if arch == "Darwin": 84 | envCython["LINKFLAGS"] = ["-bundle", "-undefined", "dynamic_lookup"] 85 | else: 86 | envCython["LINKFLAGS"] = ["-pthread", "-shared"] 87 | 88 | Export('envCython') 89 | 90 | 91 | SConscript(['SConscript']) 92 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | coverage: 3 | status: 4 | project: 5 | default: 6 | informational: true 7 | patch: off 8 | 9 | -------------------------------------------------------------------------------- /msgq/.gitignore: 
-------------------------------------------------------------------------------- 1 | ipc_pyx.cpp 2 | -------------------------------------------------------------------------------- /msgq/__init__.py: -------------------------------------------------------------------------------- 1 | # must be built with scons 2 | from msgq.ipc_pyx import Context, Poller, SubSocket, PubSocket, SocketEventHandle, toggle_fake_events, \ 3 | set_fake_prefix, get_fake_prefix, delete_fake_prefix, wait_for_one_event 4 | from msgq.ipc_pyx import MultiplePublishersError, IpcError 5 | 6 | from typing import Optional, List 7 | 8 | assert MultiplePublishersError 9 | assert IpcError 10 | assert toggle_fake_events 11 | assert set_fake_prefix 12 | assert get_fake_prefix 13 | assert delete_fake_prefix 14 | assert wait_for_one_event 15 | 16 | NO_TRAVERSAL_LIMIT = 2**64-1 17 | 18 | context = Context() 19 | 20 | 21 | def fake_event_handle(endpoint: str, identifier: Optional[str] = None, override: bool = True, enable: bool = False) -> SocketEventHandle: 22 | identifier = identifier or get_fake_prefix() 23 | handle = SocketEventHandle(endpoint, identifier, override) 24 | if override: 25 | handle.enabled = enable 26 | 27 | return handle 28 | 29 | def pub_sock(endpoint: str) -> PubSocket: 30 | sock = PubSocket() 31 | sock.connect(context, endpoint) 32 | return sock 33 | 34 | 35 | def sub_sock(endpoint: str, poller: Optional[Poller] = None, addr: str = "127.0.0.1", 36 | conflate: bool = False, timeout: Optional[int] = None) -> SubSocket: 37 | sock = SubSocket() 38 | sock.connect(context, endpoint, addr.encode('utf8'), conflate) 39 | 40 | if timeout is not None: 41 | sock.setTimeout(timeout) 42 | 43 | if poller is not None: 44 | poller.registerSocket(sock) 45 | return sock 46 | 47 | def drain_sock_raw(sock: SubSocket, wait_for_one: bool = False) -> List[bytes]: 48 | """Receive all message currently available on the queue""" 49 | ret: List[bytes] = [] 50 | while 1: 51 | if wait_for_one and len(ret) == 0: 
52 | dat = sock.receive() 53 | else: 54 | dat = sock.receive(non_blocking=True) 55 | 56 | if dat is None: 57 | break 58 | 59 | ret.append(dat) 60 | 61 | return ret 62 | -------------------------------------------------------------------------------- /msgq/event.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | #include "msgq/event.h" 17 | 18 | #ifndef __APPLE__ 19 | #include 20 | 21 | void event_state_shm_mmap(std::string endpoint, std::string identifier, char **shm_mem, std::string *shm_path) { 22 | const char* op_prefix = std::getenv("OPENPILOT_PREFIX"); 23 | 24 | std::string full_path = "/dev/shm/"; 25 | if (op_prefix) { 26 | full_path += std::string(op_prefix) + "/"; 27 | } 28 | full_path += CEREAL_EVENTS_PREFIX + "/"; 29 | if (identifier.size() > 0) { 30 | full_path += identifier + "/"; 31 | } 32 | std::filesystem::create_directories(full_path); 33 | full_path += endpoint; 34 | 35 | int shm_fd = open(full_path.c_str(), O_RDWR | O_CREAT, 0664); 36 | if (shm_fd < 0) { 37 | throw std::runtime_error("Could not open shared memory file."); 38 | } 39 | 40 | int rc = ftruncate(shm_fd, sizeof(EventState)); 41 | if (rc < 0){ 42 | close(shm_fd); 43 | throw std::runtime_error("Could not truncate shared memory file."); 44 | } 45 | 46 | char * mem = (char*)mmap(NULL, sizeof(EventState), PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); 47 | close(shm_fd); 48 | if (mem == nullptr) { 49 | throw std::runtime_error("Could not map shared memory file."); 50 | } 51 | 52 | if (shm_mem != nullptr) 53 | *shm_mem = mem; 54 | if (shm_path != nullptr) 55 | *shm_path = full_path; 56 | } 57 | 58 | SocketEventHandle::SocketEventHandle(std::string endpoint, std::string identifier, bool override) { 59 | char *mem; 60 | event_state_shm_mmap(endpoint, identifier, &mem, 
&this->shm_path); 61 | 62 | this->state = (EventState*)mem; 63 | if (override) { 64 | this->state->fds[0] = eventfd(0, EFD_NONBLOCK); 65 | this->state->fds[1] = eventfd(0, EFD_NONBLOCK); 66 | } 67 | } 68 | 69 | SocketEventHandle::~SocketEventHandle() { 70 | close(this->state->fds[0]); 71 | close(this->state->fds[1]); 72 | munmap(this->state, sizeof(EventState)); 73 | unlink(this->shm_path.c_str()); 74 | } 75 | 76 | bool SocketEventHandle::is_enabled() { 77 | return this->state->enabled; 78 | } 79 | 80 | void SocketEventHandle::set_enabled(bool enabled) { 81 | this->state->enabled = enabled; 82 | } 83 | 84 | Event SocketEventHandle::recv_called() { 85 | return Event(this->state->fds[0]); 86 | } 87 | 88 | Event SocketEventHandle::recv_ready() { 89 | return Event(this->state->fds[1]); 90 | } 91 | 92 | void SocketEventHandle::toggle_fake_events(bool enabled) { 93 | if (enabled) 94 | setenv("CEREAL_FAKE", "1", true); 95 | else 96 | unsetenv("CEREAL_FAKE"); 97 | } 98 | 99 | void SocketEventHandle::set_fake_prefix(std::string prefix) { 100 | if (prefix.size() == 0) { 101 | unsetenv("CEREAL_FAKE_PREFIX"); 102 | } else { 103 | setenv("CEREAL_FAKE_PREFIX", prefix.c_str(), true); 104 | } 105 | } 106 | 107 | std::string SocketEventHandle::fake_prefix() { 108 | const char* prefix = std::getenv("CEREAL_FAKE_PREFIX"); 109 | if (prefix == nullptr) { 110 | return ""; 111 | } else { 112 | return std::string(prefix); 113 | } 114 | } 115 | 116 | Event::Event(int fd): event_fd(fd) {} 117 | 118 | void Event::set() const { 119 | throw_if_invalid(); 120 | 121 | uint64_t val = 1; 122 | size_t count = write(this->event_fd, &val, sizeof(uint64_t)); 123 | assert(count == sizeof(uint64_t)); 124 | } 125 | 126 | int Event::clear() const { 127 | throw_if_invalid(); 128 | 129 | uint64_t val = 0; 130 | // read the eventfd to clear it 131 | read(this->event_fd, &val, sizeof(uint64_t)); 132 | 133 | return val; 134 | } 135 | 136 | void Event::wait(int timeout_sec) const { 137 | throw_if_invalid(); 138 
| 139 | int event_count; 140 | struct pollfd fds = { this->event_fd, POLLIN, 0 }; 141 | struct timespec timeout = { timeout_sec, 0 };; 142 | 143 | sigset_t signals; 144 | sigfillset(&signals); 145 | sigdelset(&signals, SIGALRM); 146 | sigdelset(&signals, SIGINT); 147 | sigdelset(&signals, SIGTERM); 148 | sigdelset(&signals, SIGQUIT); 149 | 150 | event_count = ppoll(&fds, 1, timeout_sec < 0 ? nullptr : &timeout, &signals); 151 | 152 | if (event_count == 0) { 153 | throw std::runtime_error("Event timed out pid: " + std::to_string(getpid())); 154 | } else if (event_count < 0) { 155 | throw std::runtime_error("Event poll failed, errno: " + std::to_string(errno) + " pid: " + std::to_string(getpid())); 156 | } 157 | } 158 | 159 | bool Event::peek() const { 160 | throw_if_invalid(); 161 | 162 | int event_count; 163 | 164 | struct pollfd fds = { this->event_fd, POLLIN, 0 }; 165 | 166 | // poll with timeout zero to return status immediately 167 | event_count = poll(&fds, 1, 0); 168 | 169 | return event_count != 0; 170 | } 171 | 172 | bool Event::is_valid() const { 173 | return event_fd != -1; 174 | } 175 | 176 | int Event::fd() const { 177 | return event_fd; 178 | } 179 | 180 | int Event::wait_for_one(const std::vector& events, int timeout_sec) { 181 | struct pollfd fds[events.size()]; 182 | for (size_t i = 0; i < events.size(); i++) { 183 | fds[i] = { events[i].fd(), POLLIN, 0 }; 184 | } 185 | 186 | struct timespec timeout = { timeout_sec, 0 }; 187 | 188 | sigset_t signals; 189 | sigfillset(&signals); 190 | sigdelset(&signals, SIGALRM); 191 | sigdelset(&signals, SIGINT); 192 | sigdelset(&signals, SIGTERM); 193 | sigdelset(&signals, SIGQUIT); 194 | 195 | int event_count = ppoll(fds, events.size(), timeout_sec < 0 ? 
nullptr : &timeout, &signals); 196 | 197 | if (event_count == 0) { 198 | throw std::runtime_error("Event timed out pid: " + std::to_string(getpid())); 199 | } else if (event_count < 0) { 200 | throw std::runtime_error("Event poll failed, errno: " + std::to_string(errno) + " pid: " + std::to_string(getpid())); 201 | } 202 | 203 | for (size_t i = 0; i < events.size(); i++) { 204 | if (fds[i].revents & POLLIN) { 205 | return i; 206 | } 207 | } 208 | 209 | throw std::runtime_error("Event poll failed, no events ready"); 210 | } 211 | #else 212 | // Stub implementation for Darwin, which does not support eventfd 213 | void event_state_shm_mmap(std::string endpoint, std::string identifier, char **shm_mem, std::string *shm_path) {} 214 | 215 | SocketEventHandle::SocketEventHandle(std::string endpoint, std::string identifier, bool override) { 216 | std::cerr << "SocketEventHandle not supported on macOS" << std::endl; 217 | assert(false); 218 | } 219 | SocketEventHandle::~SocketEventHandle() {} 220 | bool SocketEventHandle::is_enabled() { return this->state->enabled; } 221 | void SocketEventHandle::set_enabled(bool enabled) {} 222 | Event SocketEventHandle::recv_called() { return Event(); } 223 | Event SocketEventHandle::recv_ready() { return Event(); } 224 | void SocketEventHandle::toggle_fake_events(bool enabled) {} 225 | void SocketEventHandle::set_fake_prefix(std::string prefix) {} 226 | std::string SocketEventHandle::fake_prefix() { return ""; } 227 | 228 | Event::Event(int fd): event_fd(fd) {} 229 | void Event::set() const {} 230 | int Event::clear() const { return 0; } 231 | void Event::wait(int timeout_sec) const {} 232 | bool Event::peek() const { return false; } 233 | bool Event::is_valid() const { return false; } 234 | int Event::fd() const { return this->event_fd; } 235 | int Event::wait_for_one(const std::vector& events, int timeout_sec) { return -1; } 236 | #endif 237 | -------------------------------------------------------------------------------- 
/msgq/event.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #define CEREAL_EVENTS_PREFIX std::string("cereal_events") 7 | 8 | void event_state_shm_mmap(std::string endpoint, std::string identifier, char **shm_mem, std::string *shm_path); 9 | 10 | enum EventPurpose { 11 | RECV_CALLED, 12 | RECV_READY 13 | }; 14 | 15 | struct EventState { 16 | int fds[2]; 17 | bool enabled; 18 | }; 19 | 20 | class Event { 21 | private: 22 | int event_fd = -1; 23 | 24 | inline void throw_if_invalid() const { 25 | if (!this->is_valid()) { 26 | throw std::runtime_error("Event does not have valid file descriptor."); 27 | } 28 | } 29 | public: 30 | Event(int fd = -1); 31 | 32 | void set() const; 33 | int clear() const; 34 | void wait(int timeout_sec = -1) const; 35 | bool peek() const; 36 | bool is_valid() const; 37 | int fd() const; 38 | 39 | static int wait_for_one(const std::vector& events, int timeout_sec = -1); 40 | }; 41 | 42 | class SocketEventHandle { 43 | private: 44 | std::string shm_path; 45 | EventState* state; 46 | public: 47 | SocketEventHandle(std::string endpoint, std::string identifier = "", bool override = true); 48 | ~SocketEventHandle(); 49 | 50 | bool is_enabled(); 51 | void set_enabled(bool enabled); 52 | Event recv_called(); 53 | Event recv_ready(); 54 | 55 | static void toggle_fake_events(bool enabled); 56 | static void set_fake_prefix(std::string prefix); 57 | static std::string fake_prefix(); 58 | }; 59 | -------------------------------------------------------------------------------- /msgq/impl_fake.cc: -------------------------------------------------------------------------------- 1 | #include "msgq/impl_fake.h" 2 | 3 | void FakePoller::registerSocket(SubSocket *socket) { 4 | this->sockets.push_back(socket); 5 | } 6 | 7 | std::vector FakePoller::poll(int timeout) { 8 | return this->sockets; 9 | } 10 | 
-------------------------------------------------------------------------------- /msgq/impl_fake.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "msgq/ipc.h" 15 | #include "msgq/event.h" 16 | 17 | template 18 | class FakeSubSocket: public TSubSocket { 19 | private: 20 | Event *recv_called = nullptr; 21 | Event *recv_ready = nullptr; 22 | EventState *state = nullptr; 23 | 24 | public: 25 | FakeSubSocket(): TSubSocket() {} 26 | ~FakeSubSocket() { 27 | delete recv_called; 28 | delete recv_ready; 29 | if (state != nullptr) { 30 | munmap(state, sizeof(EventState)); 31 | } 32 | } 33 | 34 | int connect(Context *context, std::string endpoint, std::string address, bool conflate=false, bool check_endpoint=true) override { 35 | const char* cereal_prefix = std::getenv("CEREAL_FAKE_PREFIX"); 36 | 37 | char* mem; 38 | std::string identifier = cereal_prefix != nullptr ? 
std::string(cereal_prefix) : ""; 39 | event_state_shm_mmap(endpoint, identifier, &mem, nullptr); 40 | 41 | this->state = (EventState*)mem; 42 | this->recv_called = new Event(state->fds[EventPurpose::RECV_CALLED]); 43 | this->recv_ready = new Event(state->fds[EventPurpose::RECV_READY]); 44 | 45 | return TSubSocket::connect(context, endpoint, address, conflate, check_endpoint); 46 | } 47 | 48 | Message *receive(bool non_blocking=false) override { 49 | if (this->state->enabled) { 50 | this->recv_called->set(); 51 | this->recv_ready->wait(); 52 | this->recv_ready->clear(); 53 | } 54 | 55 | return TSubSocket::receive(non_blocking); 56 | } 57 | }; 58 | 59 | class FakePoller: public Poller { 60 | private: 61 | std::vector sockets; 62 | 63 | public: 64 | void registerSocket(SubSocket *socket) override; 65 | std::vector poll(int timeout) override; 66 | ~FakePoller() {} 67 | }; 68 | -------------------------------------------------------------------------------- /msgq/impl_msgq.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "msgq/impl_msgq.h" 8 | 9 | MSGQContext::MSGQContext() { 10 | } 11 | 12 | MSGQContext::~MSGQContext() { 13 | } 14 | 15 | void MSGQMessage::init(size_t sz) { 16 | size = sz; 17 | data = new char[size]; 18 | } 19 | 20 | void MSGQMessage::init(char * d, size_t sz) { 21 | size = sz; 22 | data = new char[size]; 23 | memcpy(data, d, size); 24 | } 25 | 26 | void MSGQMessage::takeOwnership(char * d, size_t sz) { 27 | size = sz; 28 | data = d; 29 | } 30 | 31 | void MSGQMessage::close() { 32 | if (size > 0){ 33 | delete[] data; 34 | } 35 | size = 0; 36 | } 37 | 38 | MSGQMessage::~MSGQMessage() { 39 | this->close(); 40 | } 41 | 42 | int MSGQSubSocket::connect(Context *context, std::string endpoint, std::string address, bool conflate, bool check_endpoint){ 43 | assert(context); 44 | assert(address == "127.0.0.1"); 45 | 46 | q = new msgq_queue_t; 47 | 
int r = msgq_new_queue(q, endpoint.c_str(), DEFAULT_SEGMENT_SIZE); 48 | if (r != 0){ 49 | return r; 50 | } 51 | 52 | msgq_init_subscriber(q); 53 | 54 | if (conflate){ 55 | q->read_conflate = true; 56 | } 57 | 58 | timeout = -1; 59 | 60 | return 0; 61 | } 62 | 63 | 64 | Message * MSGQSubSocket::receive(bool non_blocking){ 65 | msgq_msg_t msg; 66 | 67 | MSGQMessage *r = NULL; 68 | 69 | int rc = msgq_msg_recv(&msg, q); 70 | 71 | // Hack to implement blocking read with a poller. Don't use this 72 | while (!non_blocking && rc == 0){ 73 | msgq_pollitem_t items[1]; 74 | items[0].q = q; 75 | 76 | int t = (timeout != -1) ? timeout : 100; 77 | 78 | int n = msgq_poll(items, 1, t); 79 | rc = msgq_msg_recv(&msg, q); 80 | 81 | // The poll indicated a message was ready, but the receive failed. Try again 82 | if (n == 1 && rc == 0){ 83 | continue; 84 | } 85 | 86 | if (timeout != -1){ 87 | break; 88 | } 89 | } 90 | 91 | if (rc > 0){ 92 | r = new MSGQMessage; 93 | r->takeOwnership(msg.data, msg.size); 94 | } 95 | 96 | return (Message*)r; 97 | } 98 | 99 | void MSGQSubSocket::setTimeout(int t){ 100 | timeout = t; 101 | } 102 | 103 | MSGQSubSocket::~MSGQSubSocket(){ 104 | if (q != NULL){ 105 | msgq_close_queue(q); 106 | delete q; 107 | } 108 | } 109 | 110 | int MSGQPubSocket::connect(Context *context, std::string endpoint, bool check_endpoint){ 111 | assert(context); 112 | 113 | // TODO 114 | //if (check_endpoint && !service_exists(std::string(endpoint))){ 115 | // std::cout << "Warning, " << std::string(endpoint) << " is not in service list." 
<< std::endl; 116 | //} 117 | 118 | q = new msgq_queue_t; 119 | int r = msgq_new_queue(q, endpoint.c_str(), DEFAULT_SEGMENT_SIZE); 120 | if (r != 0){ 121 | return r; 122 | } 123 | 124 | msgq_init_publisher(q); 125 | 126 | return 0; 127 | } 128 | 129 | int MSGQPubSocket::sendMessage(Message *message){ 130 | msgq_msg_t msg; 131 | msg.data = message->getData(); 132 | msg.size = message->getSize(); 133 | 134 | return msgq_msg_send(&msg, q); 135 | } 136 | 137 | int MSGQPubSocket::send(char *data, size_t size){ 138 | msgq_msg_t msg; 139 | msg.data = data; 140 | msg.size = size; 141 | 142 | return msgq_msg_send(&msg, q); 143 | } 144 | 145 | bool MSGQPubSocket::all_readers_updated() { 146 | return msgq_all_readers_updated(q); 147 | } 148 | 149 | MSGQPubSocket::~MSGQPubSocket(){ 150 | if (q != NULL){ 151 | msgq_close_queue(q); 152 | delete q; 153 | } 154 | } 155 | 156 | 157 | void MSGQPoller::registerSocket(SubSocket * socket){ 158 | assert(num_polls + 1 < MAX_POLLERS); 159 | polls[num_polls].q = (msgq_queue_t*)socket->getRawSocket(); 160 | 161 | sockets.push_back(socket); 162 | num_polls++; 163 | } 164 | 165 | std::vector MSGQPoller::poll(int timeout){ 166 | std::vector r; 167 | 168 | msgq_poll(polls, num_polls, timeout); 169 | for (size_t i = 0; i < num_polls; i++){ 170 | if (polls[i].revents){ 171 | r.push_back(sockets[i]); 172 | } 173 | } 174 | 175 | return r; 176 | } 177 | -------------------------------------------------------------------------------- /msgq/impl_msgq.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "msgq/ipc.h" 7 | #include "msgq/msgq.h" 8 | 9 | #define MAX_POLLERS 128 10 | 11 | class MSGQContext : public Context { 12 | private: 13 | void * context = NULL; 14 | public: 15 | MSGQContext(); 16 | void * getRawContext() {return context;} 17 | ~MSGQContext(); 18 | }; 19 | 20 | class MSGQMessage : public Message { 21 | private: 22 | char * data; 23 | size_t 
size; 24 | public: 25 | void init(size_t size); 26 | void init(char *data, size_t size); 27 | void takeOwnership(char *data, size_t size); 28 | size_t getSize(){return size;} 29 | char * getData(){return data;} 30 | void close(); 31 | ~MSGQMessage(); 32 | }; 33 | 34 | class MSGQSubSocket : public SubSocket { 35 | private: 36 | msgq_queue_t * q = NULL; 37 | int timeout; 38 | public: 39 | int connect(Context *context, std::string endpoint, std::string address, bool conflate=false, bool check_endpoint=true); 40 | void setTimeout(int timeout); 41 | void * getRawSocket() {return (void*)q;} 42 | Message *receive(bool non_blocking=false); 43 | ~MSGQSubSocket(); 44 | }; 45 | 46 | class MSGQPubSocket : public PubSocket { 47 | private: 48 | msgq_queue_t * q = NULL; 49 | public: 50 | int connect(Context *context, std::string endpoint, bool check_endpoint=true); 51 | int sendMessage(Message *message); 52 | int send(char *data, size_t size); 53 | bool all_readers_updated(); 54 | ~MSGQPubSocket(); 55 | }; 56 | 57 | class MSGQPoller : public Poller { 58 | private: 59 | std::vector sockets; 60 | msgq_pollitem_t polls[MAX_POLLERS]; 61 | size_t num_polls = 0; 62 | 63 | public: 64 | void registerSocket(SubSocket *socket); 65 | std::vector poll(int timeout); 66 | ~MSGQPoller(){} 67 | }; 68 | -------------------------------------------------------------------------------- /msgq/impl_zmq.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "msgq/impl_zmq.h" 9 | 10 | static size_t fnv1a_hash(const std::string &str) { 11 | const size_t fnv_prime = 0x100000001b3; 12 | size_t hash_value = 0xcbf29ce484222325; 13 | for (char c : str) { 14 | hash_value ^= (unsigned char)c; 15 | hash_value *= fnv_prime; 16 | } 17 | return hash_value; 18 | } 19 | 20 | //FIXME: This is a hack to get the port number from the socket name, might have collisions 21 | static int 
get_port(std::string endpoint) { 22 | size_t hash_value = fnv1a_hash(endpoint); 23 | int start_port = 8023; 24 | int max_port = 65535; 25 | int port = start_port + (hash_value % (max_port - start_port)); 26 | return port; 27 | } 28 | 29 | ZMQContext::ZMQContext() { 30 | context = zmq_ctx_new(); 31 | } 32 | 33 | ZMQContext::~ZMQContext() { 34 | zmq_ctx_term(context); 35 | } 36 | 37 | void ZMQMessage::init(size_t sz) { 38 | size = sz; 39 | data = new char[size]; 40 | } 41 | 42 | void ZMQMessage::init(char * d, size_t sz) { 43 | size = sz; 44 | data = new char[size]; 45 | memcpy(data, d, size); 46 | } 47 | 48 | void ZMQMessage::close() { 49 | if (size > 0){ 50 | delete[] data; 51 | } 52 | size = 0; 53 | } 54 | 55 | ZMQMessage::~ZMQMessage() { 56 | this->close(); 57 | } 58 | 59 | 60 | int ZMQSubSocket::connect(Context *context, std::string endpoint, std::string address, bool conflate, bool check_endpoint){ 61 | sock = zmq_socket(context->getRawContext(), ZMQ_SUB); 62 | if (sock == NULL){ 63 | return -1; 64 | } 65 | 66 | zmq_setsockopt(sock, ZMQ_SUBSCRIBE, "", 0); 67 | 68 | if (conflate){ 69 | int arg = 1; 70 | zmq_setsockopt(sock, ZMQ_CONFLATE, &arg, sizeof(int)); 71 | } 72 | 73 | int reconnect_ivl = 500; 74 | zmq_setsockopt(sock, ZMQ_RECONNECT_IVL_MAX, &reconnect_ivl, sizeof(reconnect_ivl)); 75 | 76 | 77 | full_endpoint = "tcp://" + address + ":"; 78 | if (check_endpoint){ 79 | full_endpoint += std::to_string(get_port(endpoint)); 80 | } else { 81 | full_endpoint += endpoint; 82 | } 83 | 84 | return zmq_connect(sock, full_endpoint.c_str()); 85 | } 86 | 87 | 88 | Message * ZMQSubSocket::receive(bool non_blocking){ 89 | zmq_msg_t msg; 90 | assert(zmq_msg_init(&msg) == 0); 91 | 92 | int flags = non_blocking ? 
ZMQ_DONTWAIT : 0; 93 | int rc = zmq_msg_recv(&msg, sock, flags); 94 | Message *r = NULL; 95 | 96 | if (rc >= 0){ 97 | // Make a copy to ensure the data is aligned 98 | r = new ZMQMessage; 99 | r->init((char*)zmq_msg_data(&msg), zmq_msg_size(&msg)); 100 | } 101 | 102 | zmq_msg_close(&msg); 103 | return r; 104 | } 105 | 106 | void ZMQSubSocket::setTimeout(int timeout){ 107 | zmq_setsockopt(sock, ZMQ_RCVTIMEO, &timeout, sizeof(int)); 108 | } 109 | 110 | ZMQSubSocket::~ZMQSubSocket(){ 111 | zmq_close(sock); 112 | } 113 | 114 | int ZMQPubSocket::connect(Context *context, std::string endpoint, bool check_endpoint){ 115 | sock = zmq_socket(context->getRawContext(), ZMQ_PUB); 116 | if (sock == NULL){ 117 | return -1; 118 | } 119 | 120 | full_endpoint = "tcp://*:"; 121 | if (check_endpoint){ 122 | full_endpoint += std::to_string(get_port(endpoint)); 123 | } else { 124 | full_endpoint += endpoint; 125 | } 126 | 127 | // ZMQ pub sockets cannot be shared between processes, so we need to ensure pid stays the same 128 | pid = getpid(); 129 | 130 | return zmq_bind(sock, full_endpoint.c_str()); 131 | } 132 | 133 | int ZMQPubSocket::sendMessage(Message *message) { 134 | assert(pid == getpid()); 135 | return zmq_send(sock, message->getData(), message->getSize(), ZMQ_DONTWAIT); 136 | } 137 | 138 | int ZMQPubSocket::send(char *data, size_t size) { 139 | assert(pid == getpid()); 140 | return zmq_send(sock, data, size, ZMQ_DONTWAIT); 141 | } 142 | 143 | bool ZMQPubSocket::all_readers_updated() { 144 | assert(false); // TODO not implemented 145 | return false; 146 | } 147 | 148 | ZMQPubSocket::~ZMQPubSocket(){ 149 | zmq_close(sock); 150 | } 151 | 152 | 153 | void ZMQPoller::registerSocket(SubSocket * socket){ 154 | assert(num_polls + 1 < MAX_POLLERS); 155 | polls[num_polls].socket = socket->getRawSocket(); 156 | polls[num_polls].events = ZMQ_POLLIN; 157 | 158 | sockets.push_back(socket); 159 | num_polls++; 160 | } 161 | 162 | std::vector ZMQPoller::poll(int timeout){ 163 | std::vector r; 
164 | 165 | int rc = zmq_poll(polls, num_polls, timeout); 166 | if (rc < 0){ 167 | return r; 168 | } 169 | 170 | for (size_t i = 0; i < num_polls; i++){ 171 | if (polls[i].revents){ 172 | r.push_back(sockets[i]); 173 | } 174 | } 175 | 176 | return r; 177 | } 178 | -------------------------------------------------------------------------------- /msgq/impl_zmq.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include "msgq/ipc.h" 8 | 9 | #define MAX_POLLERS 128 10 | 11 | class ZMQContext : public Context { 12 | private: 13 | void * context = NULL; 14 | public: 15 | ZMQContext(); 16 | void * getRawContext() {return context;} 17 | ~ZMQContext(); 18 | }; 19 | 20 | class ZMQMessage : public Message { 21 | private: 22 | char * data; 23 | size_t size; 24 | public: 25 | void init(size_t size); 26 | void init(char *data, size_t size); 27 | size_t getSize(){return size;} 28 | char * getData(){return data;} 29 | void close(); 30 | ~ZMQMessage(); 31 | }; 32 | 33 | class ZMQSubSocket : public SubSocket { 34 | private: 35 | void * sock; 36 | std::string full_endpoint; 37 | public: 38 | int connect(Context *context, std::string endpoint, std::string address, bool conflate=false, bool check_endpoint=true); 39 | void setTimeout(int timeout); 40 | void * getRawSocket() {return sock;} 41 | Message *receive(bool non_blocking=false); 42 | ~ZMQSubSocket(); 43 | }; 44 | 45 | class ZMQPubSocket : public PubSocket { 46 | private: 47 | void * sock; 48 | std::string full_endpoint; 49 | int pid = -1; 50 | public: 51 | int connect(Context *context, std::string endpoint, bool check_endpoint=true); 52 | int sendMessage(Message *message); 53 | int send(char *data, size_t size); 54 | bool all_readers_updated(); 55 | ~ZMQPubSocket(); 56 | }; 57 | 58 | class ZMQPoller : public Poller { 59 | private: 60 | std::vector sockets; 61 | zmq_pollitem_t polls[MAX_POLLERS]; 62 | size_t num_polls = 0; 63 | 64 
| public: 65 | void registerSocket(SubSocket *socket); 66 | std::vector poll(int timeout); 67 | ~ZMQPoller(){} 68 | }; 69 | -------------------------------------------------------------------------------- /msgq/ipc.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "msgq/ipc.h" 5 | #include "msgq/impl_zmq.h" 6 | #include "msgq/impl_msgq.h" 7 | #include "msgq/impl_fake.h" 8 | 9 | #ifdef __APPLE__ 10 | const bool MUST_USE_ZMQ = true; 11 | #else 12 | const bool MUST_USE_ZMQ = false; 13 | #endif 14 | 15 | bool messaging_use_zmq(){ 16 | if (std::getenv("ZMQ") || MUST_USE_ZMQ) { 17 | if (std::getenv("OPENPILOT_PREFIX")) { 18 | std::cerr << "OPENPILOT_PREFIX not supported with ZMQ backend\n"; 19 | assert(false); 20 | } 21 | return true; 22 | } 23 | return false; 24 | } 25 | 26 | bool messaging_use_fake(){ 27 | char* fake_enabled = std::getenv("CEREAL_FAKE"); 28 | return fake_enabled != NULL; 29 | } 30 | 31 | Context * Context::create(){ 32 | Context * c; 33 | if (messaging_use_zmq()){ 34 | c = new ZMQContext(); 35 | } else { 36 | c = new MSGQContext(); 37 | } 38 | return c; 39 | } 40 | 41 | SubSocket * SubSocket::create(){ 42 | SubSocket * s; 43 | if (messaging_use_fake()) { 44 | if (messaging_use_zmq()) { 45 | s = new FakeSubSocket(); 46 | } else { 47 | s = new FakeSubSocket(); 48 | } 49 | } else { 50 | if (messaging_use_zmq()){ 51 | s = new ZMQSubSocket(); 52 | } else { 53 | s = new MSGQSubSocket(); 54 | } 55 | } 56 | 57 | return s; 58 | } 59 | 60 | SubSocket * SubSocket::create(Context * context, std::string endpoint, std::string address, bool conflate, bool check_endpoint){ 61 | SubSocket *s = SubSocket::create(); 62 | int r = s->connect(context, endpoint, address, conflate, check_endpoint); 63 | 64 | if (r == 0) { 65 | return s; 66 | } else { 67 | std::cerr << "Error, failed to connect SubSocket to " << endpoint << ": " << strerror(errno) << std::endl; 68 | 69 | delete s; 70 | return 
nullptr; 71 | } 72 | } 73 | 74 | PubSocket * PubSocket::create(){ 75 | PubSocket * s; 76 | if (messaging_use_zmq()){ 77 | s = new ZMQPubSocket(); 78 | } else { 79 | s = new MSGQPubSocket(); 80 | } 81 | 82 | return s; 83 | } 84 | 85 | PubSocket * PubSocket::create(Context * context, std::string endpoint, bool check_endpoint){ 86 | PubSocket *s = PubSocket::create(); 87 | int r = s->connect(context, endpoint, check_endpoint); 88 | 89 | if (r == 0) { 90 | return s; 91 | } else { 92 | std::cerr << "Error, failed to bind PubSocket to " << endpoint << ": " << strerror(errno) << std::endl; 93 | 94 | delete s; 95 | return nullptr; 96 | } 97 | } 98 | 99 | Poller * Poller::create(){ 100 | Poller * p; 101 | if (messaging_use_fake()) { 102 | p = new FakePoller(); 103 | } else { 104 | if (messaging_use_zmq()){ 105 | p = new ZMQPoller(); 106 | } else { 107 | p = new MSGQPoller(); 108 | } 109 | } 110 | return p; 111 | } 112 | 113 | Poller * Poller::create(std::vector sockets){ 114 | Poller * p = Poller::create(); 115 | 116 | for (auto s : sockets){ 117 | p->registerSocket(s); 118 | } 119 | return p; 120 | } 121 | -------------------------------------------------------------------------------- /msgq/ipc.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | 11 | 12 | #ifdef __APPLE__ 13 | #define CLOCK_BOOTTIME CLOCK_MONOTONIC 14 | #endif 15 | 16 | #define MSG_MULTIPLE_PUBLISHERS 100 17 | 18 | bool messaging_use_zmq(); 19 | 20 | class Context { 21 | public: 22 | virtual void * getRawContext() = 0; 23 | static Context * create(); 24 | virtual ~Context(){} 25 | }; 26 | 27 | class Message { 28 | public: 29 | virtual void init(size_t size) = 0; 30 | virtual void init(char * data, size_t size) = 0; 31 | virtual void close() = 0; 32 | virtual size_t getSize() = 0; 33 | virtual char * getData() = 0; 34 | virtual ~Message(){} 35 | }; 36 | 37 | 38 | class 
SubSocket { 39 | public: 40 | virtual int connect(Context *context, std::string endpoint, std::string address, bool conflate=false, bool check_endpoint=true) = 0; 41 | virtual void setTimeout(int timeout) = 0; 42 | virtual Message *receive(bool non_blocking=false) = 0; 43 | virtual void * getRawSocket() = 0; 44 | static SubSocket * create(); 45 | static SubSocket * create(Context * context, std::string endpoint, std::string address="127.0.0.1", bool conflate=false, bool check_endpoint=true); 46 | virtual ~SubSocket(){} 47 | }; 48 | 49 | class PubSocket { 50 | public: 51 | virtual int connect(Context *context, std::string endpoint, bool check_endpoint=true) = 0; 52 | virtual int sendMessage(Message *message) = 0; 53 | virtual int send(char *data, size_t size) = 0; 54 | virtual bool all_readers_updated() = 0; 55 | static PubSocket * create(); 56 | static PubSocket * create(Context * context, std::string endpoint, bool check_endpoint=true); 57 | static PubSocket * create(Context * context, std::string endpoint, int port, bool check_endpoint=true); 58 | virtual ~PubSocket(){} 59 | }; 60 | 61 | class Poller { 62 | public: 63 | virtual void registerSocket(SubSocket *socket) = 0; 64 | virtual std::vector poll(int timeout) = 0; 65 | static Poller * create(); 66 | static Poller * create(std::vector sockets); 67 | virtual ~Poller(){} 68 | }; -------------------------------------------------------------------------------- /msgq/ipc.pxd: -------------------------------------------------------------------------------- 1 | # distutils: language = c++ 2 | #cython: language_level=3 3 | 4 | from libcpp.string cimport string 5 | from libcpp.vector cimport vector 6 | from libcpp cimport bool 7 | 8 | 9 | cdef extern from "msgq/impl_fake.h": 10 | cdef cppclass Event: 11 | @staticmethod 12 | int wait_for_one(vector[Event], int) except + 13 | 14 | Event() 15 | Event(int) 16 | void set() 17 | int clear() 18 | void wait(int) except + 19 | bool peek() 20 | int fd() 21 | 22 | cdef cppclass 
SocketEventHandle: 23 | @staticmethod 24 | void toggle_fake_events(bool) 25 | @staticmethod 26 | void set_fake_prefix(string) 27 | @staticmethod 28 | string fake_prefix() 29 | 30 | SocketEventHandle(string, string, bool) 31 | bool is_enabled() 32 | void set_enabled(bool) 33 | Event recv_called() 34 | Event recv_ready() 35 | 36 | 37 | cdef extern from "msgq/ipc.h": 38 | cdef cppclass Context: 39 | @staticmethod 40 | Context * create() 41 | 42 | cdef cppclass Message: 43 | void init(size_t) 44 | void init(char *, size_t) 45 | void close() 46 | size_t getSize() 47 | char *getData() 48 | 49 | cdef cppclass SubSocket: 50 | @staticmethod 51 | SubSocket * create() nogil 52 | int connect(Context *, string, string, bool) nogil 53 | Message * receive(bool) nogil 54 | void setTimeout(int) nogil 55 | 56 | cdef cppclass PubSocket: 57 | @staticmethod 58 | PubSocket * create() 59 | int connect(Context *, string) 60 | int sendMessage(Message *) 61 | int send(char *, size_t) 62 | bool all_readers_updated() 63 | 64 | cdef cppclass Poller: 65 | @staticmethod 66 | Poller * create() 67 | void registerSocket(SubSocket *) 68 | vector[SubSocket*] poll(int) nogil 69 | -------------------------------------------------------------------------------- /msgq/ipc_pyx.pyx: -------------------------------------------------------------------------------- 1 | # distutils: language = c++ 2 | # cython: c_string_encoding=ascii, language_level=3 3 | 4 | import sys 5 | from libcpp.string cimport string 6 | from libcpp.vector cimport vector 7 | from libcpp cimport bool 8 | from libc cimport errno 9 | from libc.string cimport strerror 10 | from cython.operator import dereference 11 | 12 | 13 | from .ipc cimport Context as cppContext 14 | from .ipc cimport SubSocket as cppSubSocket 15 | from .ipc cimport PubSocket as cppPubSocket 16 | from .ipc cimport Poller as cppPoller 17 | from .ipc cimport Message as cppMessage 18 | from .ipc cimport Event as cppEvent, SocketEventHandle as cppSocketEventHandle 19 | 20 
class IpcError(Exception):
  """Raised when a messaging operation fails; message includes errno text."""
  def __init__(self, endpoint=None):
    suffix = f"with {endpoint.decode('utf-8')}" if endpoint else ""
    message = f"Messaging failure {suffix}: {strerror(errno.errno).decode('utf-8')}"
    super().__init__(message)


class MultiplePublishersError(IpcError):
  pass


def toggle_fake_events(bool enabled):
  cppSocketEventHandle.toggle_fake_events(enabled)


def set_fake_prefix(string prefix):
  cppSocketEventHandle.set_fake_prefix(prefix)


def get_fake_prefix():
  return cppSocketEventHandle.fake_prefix()


def delete_fake_prefix():
  cppSocketEventHandle.set_fake_prefix(b"")


def wait_for_one_event(list events, int timeout=-1):
  cdef vector[cppEvent] items
  for event in events:
    # NOTE(review): the cast here was stripped by the scrape; restored
    # best-effort to match Event.ptr below — verify against upstream.
    items.push_back(dereference(<cppEvent*><uint64_t>event.ptr))
  return cppEvent.wait_for_one(items, timeout)


cdef class Event:
  cdef cppEvent event;

  def __cinit__(self):
    pass

  cdef setEvent(self, cppEvent event):
    self.event = event

  def set(self):
    self.event.set()

  def clear(self):
    return self.event.clear()

  def wait(self, int timeout=-1):
    self.event.wait(timeout)

  def peek(self):
    return self.event.peek()

  @property
  def fd(self):
    return self.event.fd()

  @property
  def ptr(self):
    # NOTE(review): cast stripped by the scrape; restored best-effort
    # (address of the wrapped C++ Event as an int) — verify upstream.
    return <uint64_t>&self.event


cdef class SocketEventHandle:
  cdef cppSocketEventHandle * handle;

  def __cinit__(self, string endpoint, string identifier, bool override):
    self.handle = new cppSocketEventHandle(endpoint, identifier, override)

  def __dealloc__(self):
    del self.handle

  @property
  def enabled(self):
    return self.handle.is_enabled()

  @enabled.setter
  def enabled(self, bool value):
    self.handle.set_enabled(value)

  @property
  def recv_called_event(self):
    e = Event()
    e.setEvent(self.handle.recv_called())

    return e

  @property
  def recv_ready_event(self):
    e = Event()
    e.setEvent(self.handle.recv_ready())

    return e


cdef class Context:
  cdef cppContext * context

  def __cinit__(self):
    self.context = cppContext.create()

  def term(self):
    del self.context
    self.context = NULL

  def __dealloc__(self):
    pass
    # Deleting the context will hang if sockets are still active
    # TODO: Figure out a way to make sure the context is closed last
    # del self.context


cdef class Poller:
  cdef cppPoller * poller
  cdef list sub_sockets

  def __cinit__(self):
    self.sub_sockets = []
    self.poller = cppPoller.create()

  def __dealloc__(self):
    del self.poller

  def registerSocket(self, SubSocket socket):
    # Keep a Python reference so the socket isn't GC'd while registered.
    self.sub_sockets.append(socket)
    self.poller.registerSocket(socket.socket)

  def poll(self, timeout):
    sockets = []
    cdef int t = timeout

    with nogil:
      result = self.poller.poll(t)

    for s in result:
      socket = SubSocket()
      socket.setPtr(s)
      sockets.append(socket)

    return sockets


cdef class SubSocket:
  cdef cppSubSocket * socket
  cdef bool is_owner

  def __cinit__(self):
    with nogil:
      self.socket = cppSubSocket.create()

    self.is_owner = True
    if self.socket == NULL:
      raise IpcError

  def __dealloc__(self):
    if self.is_owner:
      with nogil:
        del self.socket

  cdef setPtr(self, cppSubSocket * ptr):
    # Adopt a non-owning view of a socket owned elsewhere (e.g. by a Poller).
    if self.is_owner:
      with nogil:
        del self.socket

    self.is_owner = False
    self.socket = ptr

  def connect(self, Context context, string endpoint, string address=b"127.0.0.1", bool conflate=False):
    cdef int r
    with nogil:
      r = self.socket.connect(context.context, endpoint, address, conflate)

    if r != 0:
      if errno.errno == errno.EADDRINUSE:
        raise MultiplePublishersError(endpoint)
      else:
        raise IpcError(endpoint)

  def setTimeout(self, int timeout):
    with nogil:
      self.socket.setTimeout(timeout)

  def receive(self, bool non_blocking=False):
    cdef cppMessage *msg
    with nogil:
      msg = self.socket.receive(non_blocking)

    if msg == NULL:
      return None
    else:
      sz = msg.getSize()
      m = msg.getData()[:sz]
      with nogil:
        del msg

      return m


cdef class PubSocket:
  cdef cppPubSocket * socket

  def __cinit__(self):
    self.socket = cppPubSocket.create()
    if self.socket == NULL:
      raise IpcError

  def __dealloc__(self):
    del self.socket

  def connect(self, Context context, string endpoint):
    r = self.socket.connect(context.context, endpoint)

    if r != 0:
      if errno.errno == errno.EADDRINUSE:
        raise MultiplePublishersError(endpoint)
      else:
        raise IpcError(endpoint)

  def send(self, bytes data):
    length = len(data)
    r = self.socket.send(data, length)

    if r != length:
      if errno.errno == errno.EADDRINUSE:
        raise MultiplePublishersError
      else:
        raise IpcError

  def all_readers_updated(self):
    return self.socket.all_readers_updated()

// ---- msgq/logger/logger.h ----
#pragma once

#ifdef SWAGLOG
// cppcheck-suppress preprocessorErrorDirective
#include SWAGLOG
#else

#define CLOUDLOG_DEBUG 10
#define CLOUDLOG_INFO 20
#define CLOUDLOG_WARNING 30
#define CLOUDLOG_ERROR 40
#define
CLOUDLOG_CRITICAL 50 13 | 14 | #define cloudlog(lvl, fmt, ...) printf(fmt "\n", ## __VA_ARGS__) 15 | 16 | #define LOGD(fmt, ...) cloudlog(CLOUDLOG_DEBUG, fmt, ## __VA_ARGS__) 17 | #define LOG(fmt, ...) cloudlog(CLOUDLOG_INFO, fmt, ## __VA_ARGS__) 18 | #define LOGW(fmt, ...) cloudlog(CLOUDLOG_WARNING, fmt, ## __VA_ARGS__) 19 | #define LOGE(fmt, ...) cloudlog(CLOUDLOG_ERROR, fmt, ## __VA_ARGS__) 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /msgq/msgq.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | 24 | #include 25 | 26 | #include "msgq/msgq.h" 27 | 28 | void sigusr2_handler(int signal) { 29 | assert(signal == SIGUSR2); 30 | } 31 | 32 | uint64_t msgq_get_uid(void){ 33 | std::random_device rd("/dev/urandom"); 34 | std::uniform_int_distribution distribution(0, std::numeric_limits::max()); 35 | 36 | #ifdef __APPLE__ 37 | // TODO: this doesn't work 38 | uint64_t uid = distribution(rd) << 32 | getpid(); 39 | #else 40 | uint64_t uid = distribution(rd) << 32 | syscall(SYS_gettid); 41 | #endif 42 | 43 | return uid; 44 | } 45 | 46 | int msgq_msg_init_size(msgq_msg_t * msg, size_t size){ 47 | msg->size = size; 48 | msg->data = new(std::nothrow) char[size]; 49 | 50 | return (msg->data == NULL) ? 
-1 : 0; 51 | } 52 | 53 | 54 | int msgq_msg_init_data(msgq_msg_t * msg, char * data, size_t size) { 55 | int r = msgq_msg_init_size(msg, size); 56 | 57 | if (r == 0) 58 | memcpy(msg->data, data, size); 59 | 60 | return r; 61 | } 62 | 63 | int msgq_msg_close(msgq_msg_t * msg){ 64 | if (msg->size > 0) 65 | delete[] msg->data; 66 | 67 | msg->size = 0; 68 | 69 | return 0; 70 | } 71 | 72 | void msgq_reset_reader(msgq_queue_t * q){ 73 | int id = q->reader_id; 74 | q->read_valids[id]->store(true); 75 | q->read_pointers[id]->store(*q->write_pointer); 76 | } 77 | 78 | void msgq_wait_for_subscriber(msgq_queue_t *q){ 79 | while (*q->num_readers == 0){ 80 | // wait for subscriber 81 | } 82 | 83 | return; 84 | } 85 | 86 | int msgq_new_queue(msgq_queue_t * q, const char * path, size_t size){ 87 | assert(size < 0xFFFFFFFF); // Buffer must be smaller than 2^32 bytes 88 | std::signal(SIGUSR2, sigusr2_handler); 89 | 90 | std::string full_path = "/dev/shm/"; 91 | const char* prefix = std::getenv("OPENPILOT_PREFIX"); 92 | if (prefix) { 93 | full_path += std::string(prefix) + "/"; 94 | } 95 | full_path += path; 96 | 97 | auto fd = open(full_path.c_str(), O_RDWR | O_CREAT, 0664); 98 | if (fd < 0) { 99 | std::cout << "Warning, could not open: " << full_path << std::endl; 100 | return -1; 101 | } 102 | 103 | int rc = ftruncate(fd, size + sizeof(msgq_header_t)); 104 | if (rc < 0){ 105 | close(fd); 106 | return -1; 107 | } 108 | char * mem = (char*)mmap(NULL, size + sizeof(msgq_header_t), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 109 | close(fd); 110 | 111 | if (mem == MAP_FAILED){ 112 | return -1; 113 | } 114 | q->mmap_p = mem; 115 | 116 | msgq_header_t *header = (msgq_header_t *)mem; 117 | 118 | // Setup pointers to header segment 119 | q->num_readers = reinterpret_cast*>(&header->num_readers); 120 | q->write_pointer = reinterpret_cast*>(&header->write_pointer); 121 | q->write_uid = reinterpret_cast*>(&header->write_uid); 122 | 123 | for (size_t i = 0; i < NUM_READERS; i++){ 124 | 
q->read_pointers[i] = reinterpret_cast*>(&header->read_pointers[i]); 125 | q->read_valids[i] = reinterpret_cast*>(&header->read_valids[i]); 126 | q->read_uids[i] = reinterpret_cast*>(&header->read_uids[i]); 127 | } 128 | 129 | q->data = mem + sizeof(msgq_header_t); 130 | q->size = size; 131 | q->reader_id = -1; 132 | 133 | q->endpoint = path; 134 | q->read_conflate = false; 135 | 136 | return 0; 137 | } 138 | 139 | void msgq_close_queue(msgq_queue_t *q){ 140 | if (q->mmap_p != NULL){ 141 | munmap(q->mmap_p, q->size + sizeof(msgq_header_t)); 142 | } 143 | } 144 | 145 | 146 | void msgq_init_publisher(msgq_queue_t * q) { 147 | //std::cout << "Starting publisher" << std::endl; 148 | uint64_t uid = msgq_get_uid(); 149 | 150 | *q->write_uid = uid; 151 | *q->num_readers = 0; 152 | 153 | for (size_t i = 0; i < NUM_READERS; i++){ 154 | *q->read_valids[i] = false; 155 | *q->read_uids[i] = 0; 156 | } 157 | 158 | q->write_uid_local = uid; 159 | } 160 | 161 | static void thread_signal(uint32_t tid) { 162 | #ifndef SYS_tkill 163 | // TODO: this won't work for multithreaded programs 164 | kill(tid, SIGUSR2); 165 | #else 166 | syscall(SYS_tkill, tid, SIGUSR2); 167 | #endif 168 | } 169 | 170 | void msgq_init_subscriber(msgq_queue_t * q) { 171 | assert(q != NULL); 172 | assert(q->num_readers != NULL); 173 | 174 | uint64_t uid = msgq_get_uid(); 175 | 176 | // Get reader id 177 | while (true){ 178 | uint64_t cur_num_readers = *q->num_readers; 179 | uint64_t new_num_readers = cur_num_readers + 1; 180 | 181 | // No more slots available. Reset all subscribers to kick out inactive ones 182 | if (new_num_readers > NUM_READERS){ 183 | //std::cout << "Warning, evicting all subscribers!" 
<< std::endl; 184 | *q->num_readers = 0; 185 | 186 | for (size_t i = 0; i < NUM_READERS; i++){ 187 | *q->read_valids[i] = false; 188 | 189 | uint64_t old_uid = *q->read_uids[i]; 190 | *q->read_uids[i] = 0; 191 | 192 | // Wake up reader in case they are in a poll 193 | thread_signal(old_uid & 0xFFFFFFFF); 194 | } 195 | 196 | continue; 197 | } 198 | 199 | // Use atomic compare and swap to handle race condition 200 | // where two subscribers start at the same time 201 | if (std::atomic_compare_exchange_strong(q->num_readers, 202 | &cur_num_readers, 203 | new_num_readers)){ 204 | q->reader_id = cur_num_readers; 205 | q->read_uid_local = uid; 206 | 207 | // We start with read_valid = false, 208 | // on the first read the read pointer will be synchronized with the write pointer 209 | *q->read_valids[cur_num_readers] = false; 210 | *q->read_pointers[cur_num_readers] = 0; 211 | *q->read_uids[cur_num_readers] = uid; 212 | break; 213 | } 214 | } 215 | 216 | //std::cout << "New subscriber id: " << q->reader_id << " uid: " << q->read_uid_local << " " << q->endpoint << std::endl; 217 | msgq_reset_reader(q); 218 | } 219 | 220 | int msgq_msg_send(msgq_msg_t * msg, msgq_queue_t *q){ 221 | // Die if we are no longer the active publisher 222 | if (q->write_uid_local != *q->write_uid){ 223 | std::cout << "Killing old publisher: " << q->endpoint << std::endl; 224 | errno = EADDRINUSE; 225 | return -1; 226 | } 227 | 228 | uint64_t total_msg_size = ALIGN(msg->size + sizeof(int64_t)); 229 | 230 | // We need to fit at least three messages in the queue, 231 | // then we can always safely access the last message 232 | assert(3 * total_msg_size <= q->size); 233 | 234 | uint64_t num_readers = *q->num_readers; 235 | 236 | uint32_t write_cycles, write_pointer; 237 | UNPACK64(write_cycles, write_pointer, *q->write_pointer); 238 | 239 | char *p = q->data + write_pointer; // add base offset 240 | 241 | // Check remaining space 242 | // Always leave space for a wraparound tag for the next message, 
including alignment 243 | int64_t remaining_space = q->size - write_pointer - total_msg_size - sizeof(int64_t); 244 | if (remaining_space <= 0){ 245 | // Write -1 size tag indicating wraparound 246 | *(int64_t*)p = -1; 247 | 248 | // Invalidate all readers that are beyond the write pointer 249 | // TODO: should we handle the case where a new reader shows up while this is running? 250 | for (uint64_t i = 0; i < num_readers; i++){ 251 | uint64_t read_pointer = *q->read_pointers[i]; 252 | uint64_t read_cycles = read_pointer >> 32; 253 | read_pointer &= 0xFFFFFFFF; 254 | 255 | if ((read_pointer > write_pointer) && (read_cycles != write_cycles)) { 256 | *q->read_valids[i] = false; 257 | } 258 | } 259 | 260 | // Update global and local copies of write pointer and write_cycles 261 | write_pointer = 0; 262 | write_cycles = write_cycles + 1; 263 | PACK64(*q->write_pointer, write_cycles, write_pointer); 264 | 265 | // Set actual pointer to the beginning of the data segment 266 | p = q->data; 267 | } 268 | 269 | // Invalidate readers that are in the area that will be written 270 | uint64_t start = write_pointer; 271 | uint64_t end = ALIGN(start + sizeof(int64_t) + msg->size); 272 | 273 | for (uint64_t i = 0; i < num_readers; i++){ 274 | uint32_t read_cycles, read_pointer; 275 | UNPACK64(read_cycles, read_pointer, *q->read_pointers[i]); 276 | 277 | if ((read_pointer >= start) && (read_pointer < end) && (read_cycles != write_cycles)) { 278 | *q->read_valids[i] = false; 279 | } 280 | } 281 | 282 | 283 | // Write size tag 284 | std::atomic *size_p = reinterpret_cast*>(p); 285 | *size_p = msg->size; 286 | 287 | // Copy data 288 | memcpy(p + sizeof(int64_t), msg->data, msg->size); 289 | __sync_synchronize(); 290 | 291 | // Update write pointer 292 | uint32_t new_ptr = ALIGN(write_pointer + msg->size + sizeof(int64_t)); 293 | PACK64(*q->write_pointer, write_cycles, new_ptr); 294 | 295 | // Notify readers 296 | for (uint64_t i = 0; i < num_readers; i++){ 297 | uint64_t reader_uid = 
*q->read_uids[i]; 298 | thread_signal(reader_uid & 0xFFFFFFFF); 299 | } 300 | 301 | return msg->size; 302 | } 303 | 304 | 305 | int msgq_msg_ready(msgq_queue_t * q){ 306 | start: 307 | int id = q->reader_id; 308 | assert(id >= 0); // Make sure subscriber is initialized 309 | 310 | if (q->read_uid_local != *q->read_uids[id]){ 311 | //std::cout << q->endpoint << ": Reader was evicted, reconnecting" << std::endl; 312 | msgq_init_subscriber(q); 313 | goto start; 314 | } 315 | 316 | // Check valid 317 | if (!*q->read_valids[id]){ 318 | msgq_reset_reader(q); 319 | goto start; 320 | } 321 | 322 | uint32_t read_cycles, read_pointer; 323 | UNPACK64(read_cycles, read_pointer, *q->read_pointers[id]); 324 | UNUSED(read_cycles); 325 | 326 | uint32_t write_cycles, write_pointer; 327 | UNPACK64(write_cycles, write_pointer, *q->write_pointer); 328 | UNUSED(write_cycles); 329 | 330 | // Check if new message is available 331 | return (read_pointer != write_pointer); 332 | } 333 | 334 | int msgq_msg_recv(msgq_msg_t * msg, msgq_queue_t * q){ 335 | start: 336 | int id = q->reader_id; 337 | assert(id >= 0); // Make sure subscriber is initialized 338 | 339 | if (q->read_uid_local != *q->read_uids[id]){ 340 | //std::cout << q->endpoint << ": Reader was evicted, reconnecting" << std::endl; 341 | msgq_init_subscriber(q); 342 | goto start; 343 | } 344 | 345 | // Check valid 346 | if (!*q->read_valids[id]){ 347 | msgq_reset_reader(q); 348 | goto start; 349 | } 350 | 351 | uint32_t read_cycles, read_pointer; 352 | UNPACK64(read_cycles, read_pointer, *q->read_pointers[id]); 353 | 354 | uint32_t write_cycles, write_pointer; 355 | UNPACK64(write_cycles, write_pointer, *q->write_pointer); 356 | UNUSED(write_cycles); 357 | 358 | char * p = q->data + read_pointer; 359 | 360 | // Check if new message is available 361 | if (read_pointer == write_pointer) { 362 | msg->size = 0; 363 | return 0; 364 | } 365 | 366 | // Read potential message size 367 | std::atomic *size_p = reinterpret_cast*>(p); 368 | 
// Poll `nitems` queues for readable messages.
//
// Returns the number of queues with a message ready; items[i].revents is set
// to 1 for each ready queue. `timeout` is in milliseconds; -1 blocks until at
// least one queue is ready (sleeping in 100 ms slices).
int msgq_poll(msgq_pollitem_t * items, size_t nitems, int timeout){
  int num = 0;

  // Check if messages ready
  for (size_t i = 0; i < nitems; i++) {
    items[i].revents = msgq_msg_ready(items[i].q);
    if (items[i].revents) num++;
  }

  // With timeout == -1 we still sleep in bounded chunks so the loop below
  // can re-check the queues periodically.
  int ms = (timeout == -1) ? 100 : timeout;
  struct timespec ts;
  ts.tv_sec = ms / 1000;
  ts.tv_nsec = (ms % 1000) * 1000 * 1000;

  while (num == 0) {
    int ret;

    // Passing &ts as both arguments: when the sleep is interrupted (e.g. by
    // the publisher's wakeup signal) nanosleep stores the remaining time back
    // into ts, so the next iteration only sleeps what is left of the timeout.
    ret = nanosleep(&ts, &ts);

    // Check if messages ready
    for (size_t i = 0; i < nitems; i++) {
      if (items[i].revents == 0 && msgq_msg_ready(items[i].q)){
        num += 1;
        items[i].revents = 1;
      }
    }

    // exit if we had a timeout and the sleep finished
    if (timeout != -1 && ret == 0){
      break;
    }
  }

  return num;
}
| std::atomic *read_uids[NUM_READERS]; 32 | char * mmap_p; 33 | char * data; 34 | size_t size; 35 | int reader_id; 36 | uint64_t read_uid_local; 37 | uint64_t write_uid_local; 38 | 39 | bool read_conflate; 40 | std::string endpoint; 41 | }; 42 | 43 | struct msgq_msg_t { 44 | size_t size; 45 | char * data; 46 | }; 47 | 48 | struct msgq_pollitem_t { 49 | msgq_queue_t *q; 50 | int revents; 51 | }; 52 | 53 | void msgq_wait_for_subscriber(msgq_queue_t *q); 54 | void msgq_reset_reader(msgq_queue_t *q); 55 | 56 | int msgq_msg_init_size(msgq_msg_t *msg, size_t size); 57 | int msgq_msg_init_data(msgq_msg_t *msg, char * data, size_t size); 58 | int msgq_msg_close(msgq_msg_t *msg); 59 | 60 | int msgq_new_queue(msgq_queue_t * q, const char * path, size_t size); 61 | void msgq_close_queue(msgq_queue_t *q); 62 | void msgq_init_publisher(msgq_queue_t * q); 63 | void msgq_init_subscriber(msgq_queue_t * q); 64 | 65 | int msgq_msg_send(msgq_msg_t *msg, msgq_queue_t *q); 66 | int msgq_msg_recv(msgq_msg_t *msg, msgq_queue_t *q); 67 | int msgq_msg_ready(msgq_queue_t * q); 68 | int msgq_poll(msgq_pollitem_t * items, size_t nitems, int timeout); 69 | 70 | bool msgq_all_readers_updated(msgq_queue_t *q); 71 | -------------------------------------------------------------------------------- /msgq/msgq_tests.cc: -------------------------------------------------------------------------------- 1 | #include "catch2/catch.hpp" 2 | #include "msgq/msgq.h" 3 | 4 | TEST_CASE("ALIGN") 5 | { 6 | REQUIRE(ALIGN(0) == 0); 7 | REQUIRE(ALIGN(1) == 8); 8 | REQUIRE(ALIGN(7) == 8); 9 | REQUIRE(ALIGN(8) == 8); 10 | REQUIRE(ALIGN(99999) == 100000); 11 | } 12 | 13 | TEST_CASE("msgq_msg_init_size") 14 | { 15 | const size_t msg_size = 30; 16 | msgq_msg_t msg; 17 | 18 | msgq_msg_init_size(&msg, msg_size); 19 | REQUIRE(msg.size == msg_size); 20 | 21 | msgq_msg_close(&msg); 22 | } 23 | 24 | TEST_CASE("msgq_msg_init_data") 25 | { 26 | const size_t msg_size = 30; 27 | char *data = new char[msg_size]; 28 | 29 | for 
(size_t i = 0; i < msg_size; i++) 30 | { 31 | data[i] = i; 32 | } 33 | 34 | msgq_msg_t msg; 35 | msgq_msg_init_data(&msg, data, msg_size); 36 | 37 | REQUIRE(msg.size == msg_size); 38 | REQUIRE(memcmp(msg.data, data, msg_size) == 0); 39 | 40 | delete[] data; 41 | msgq_msg_close(&msg); 42 | } 43 | 44 | TEST_CASE("msgq_init_subscriber") 45 | { 46 | remove("/dev/shm/test_queue"); 47 | msgq_queue_t q; 48 | msgq_new_queue(&q, "test_queue", 1024); 49 | REQUIRE(*q.num_readers == 0); 50 | 51 | q.reader_id = 1; 52 | *q.read_valids[0] = false; 53 | *q.read_pointers[0] = ((uint64_t)1 << 32); 54 | 55 | *q.write_pointer = 255; 56 | 57 | msgq_init_subscriber(&q); 58 | REQUIRE(q.read_conflate == false); 59 | REQUIRE(*q.read_valids[0] == true); 60 | REQUIRE((*q.read_pointers[0] >> 32) == 0); 61 | REQUIRE((*q.read_pointers[0] & 0xFFFFFFFF) == 255); 62 | } 63 | 64 | TEST_CASE("msgq_msg_send first message") 65 | { 66 | remove("/dev/shm/test_queue"); 67 | msgq_queue_t q; 68 | msgq_new_queue(&q, "test_queue", 1024); 69 | msgq_init_publisher(&q); 70 | 71 | REQUIRE(*q.write_pointer == 0); 72 | 73 | size_t msg_size = 128; 74 | 75 | SECTION("Aligned message size") 76 | { 77 | } 78 | SECTION("Unaligned message size") 79 | { 80 | msg_size--; 81 | } 82 | char *data = new char[msg_size]; 83 | 84 | for (size_t i = 0; i < msg_size; i++) 85 | { 86 | data[i] = i; 87 | } 88 | 89 | msgq_msg_t msg; 90 | msgq_msg_init_data(&msg, data, msg_size); 91 | 92 | msgq_msg_send(&msg, &q); 93 | REQUIRE(*(int64_t *)q.data == msg_size); // Check size tag 94 | REQUIRE(*q.write_pointer == 128 + sizeof(int64_t)); 95 | REQUIRE(memcmp(q.data + sizeof(int64_t), data, msg_size) == 0); 96 | 97 | delete[] data; 98 | msgq_msg_close(&msg); 99 | } 100 | 101 | TEST_CASE("msgq_msg_send test wraparound") 102 | { 103 | remove("/dev/shm/test_queue"); 104 | msgq_queue_t q; 105 | msgq_new_queue(&q, "test_queue", 1024); 106 | msgq_init_publisher(&q); 107 | 108 | REQUIRE((*q.write_pointer & 0xFFFFFFFF) == 0); 109 | 
REQUIRE((*q.write_pointer >> 32) == 0); 110 | 111 | const size_t msg_size = 120; 112 | msgq_msg_t msg; 113 | msgq_msg_init_size(&msg, msg_size); 114 | 115 | for (int i = 0; i < 8; i++) 116 | { 117 | msgq_msg_send(&msg, &q); 118 | } 119 | // Check 8th message was written at the beginning 120 | REQUIRE((*q.write_pointer & 0xFFFFFFFF) == msg_size + sizeof(int64_t)); 121 | 122 | // Check cycle count 123 | REQUIRE((*q.write_pointer >> 32) == 1); 124 | 125 | // Check wraparound tag 126 | char *tag_location = q.data; 127 | tag_location += 7 * (msg_size + sizeof(int64_t)); 128 | REQUIRE(*(int64_t *)tag_location == -1); 129 | 130 | msgq_msg_close(&msg); 131 | } 132 | 133 | TEST_CASE("msgq_msg_recv test wraparound") 134 | { 135 | remove("/dev/shm/test_queue"); 136 | msgq_queue_t q_pub, q_sub; 137 | msgq_new_queue(&q_pub, "test_queue", 1024); 138 | msgq_new_queue(&q_sub, "test_queue", 1024); 139 | 140 | msgq_init_publisher(&q_pub); 141 | msgq_init_subscriber(&q_sub); 142 | 143 | REQUIRE((*q_pub.write_pointer >> 32) == 0); 144 | REQUIRE((*q_sub.read_pointers[0] >> 32) == 0); 145 | 146 | const size_t msg_size = 120; 147 | msgq_msg_t msg1; 148 | msgq_msg_init_size(&msg1, msg_size); 149 | 150 | SECTION("Check cycle counter after reset") 151 | { 152 | for (int i = 0; i < 8; i++) 153 | { 154 | msgq_msg_send(&msg1, &q_pub); 155 | } 156 | 157 | msgq_msg_t msg2; 158 | msgq_msg_recv(&msg2, &q_sub); 159 | REQUIRE(msg2.size == 0); // Reader had to reset 160 | msgq_msg_close(&msg2); 161 | } 162 | SECTION("Check cycle counter while keeping up with writer") 163 | { 164 | for (int i = 0; i < 8; i++) 165 | { 166 | msgq_msg_send(&msg1, &q_pub); 167 | 168 | msgq_msg_t msg2; 169 | msgq_msg_recv(&msg2, &q_sub); 170 | REQUIRE(msg2.size > 0); 171 | msgq_msg_close(&msg2); 172 | } 173 | } 174 | 175 | REQUIRE((*q_sub.read_pointers[0] >> 32) == 1); 176 | msgq_msg_close(&msg1); 177 | } 178 | 179 | TEST_CASE("msgq_msg_send test invalidation") 180 | { 181 | remove("/dev/shm/test_queue"); 182 | 
msgq_queue_t q_pub, q_sub; 183 | msgq_new_queue(&q_pub, "test_queue", 1024); 184 | msgq_new_queue(&q_sub, "test_queue", 1024); 185 | 186 | msgq_init_publisher(&q_pub); 187 | msgq_init_subscriber(&q_sub); 188 | *q_sub.write_pointer = (uint64_t)1 << 32; 189 | 190 | REQUIRE(*q_sub.read_valids[0] == true); 191 | 192 | SECTION("read pointer in tag") 193 | { 194 | *q_sub.read_pointers[0] = 0; 195 | } 196 | SECTION("read pointer in data section") 197 | { 198 | *q_sub.read_pointers[0] = 64; 199 | } 200 | SECTION("read pointer in wraparound section") 201 | { 202 | *q_pub.write_pointer = ((uint64_t)1 << 32) | 1000; // Writer is one cycle ahead 203 | *q_sub.read_pointers[0] = 1020; 204 | } 205 | 206 | msgq_msg_t msg; 207 | msgq_msg_init_size(&msg, 128); 208 | msgq_msg_send(&msg, &q_pub); 209 | 210 | REQUIRE(*q_sub.read_valids[0] == false); 211 | 212 | msgq_msg_close(&msg); 213 | } 214 | 215 | TEST_CASE("msgq_init_subscriber init 2 subscribers") 216 | { 217 | remove("/dev/shm/test_queue"); 218 | msgq_queue_t q1, q2; 219 | msgq_new_queue(&q1, "test_queue", 1024); 220 | msgq_new_queue(&q2, "test_queue", 1024); 221 | 222 | *q1.num_readers = 0; 223 | 224 | REQUIRE(*q1.num_readers == 0); 225 | REQUIRE(*q2.num_readers == 0); 226 | 227 | msgq_init_subscriber(&q1); 228 | REQUIRE(*q1.num_readers == 1); 229 | REQUIRE(*q2.num_readers == 1); 230 | REQUIRE(q1.reader_id == 0); 231 | 232 | msgq_init_subscriber(&q2); 233 | REQUIRE(*q1.num_readers == 2); 234 | REQUIRE(*q2.num_readers == 2); 235 | REQUIRE(q2.reader_id == 1); 236 | } 237 | 238 | TEST_CASE("Write 1 msg, read 1 msg", "[integration]") 239 | { 240 | remove("/dev/shm/test_queue"); 241 | const size_t msg_size = 128; 242 | msgq_queue_t writer, reader; 243 | 244 | msgq_new_queue(&writer, "test_queue", 1024); 245 | msgq_new_queue(&reader, "test_queue", 1024); 246 | 247 | msgq_init_publisher(&writer); 248 | msgq_init_subscriber(&reader); 249 | 250 | // Build 128 byte message 251 | msgq_msg_t outgoing_msg; 252 | 
msgq_msg_init_size(&outgoing_msg, msg_size); 253 | 254 | for (size_t i = 0; i < msg_size; i++) 255 | { 256 | outgoing_msg.data[i] = i; 257 | } 258 | 259 | REQUIRE(msgq_msg_send(&outgoing_msg, &writer) == msg_size); 260 | 261 | msgq_msg_t incoming_msg1; 262 | REQUIRE(msgq_msg_recv(&incoming_msg1, &reader) == msg_size); 263 | REQUIRE(memcmp(incoming_msg1.data, outgoing_msg.data, msg_size) == 0); 264 | 265 | // Verify that there are no more messages 266 | msgq_msg_t incoming_msg2; 267 | REQUIRE(msgq_msg_recv(&incoming_msg2, &reader) == 0); 268 | 269 | msgq_msg_close(&outgoing_msg); 270 | msgq_msg_close(&incoming_msg1); 271 | msgq_msg_close(&incoming_msg2); 272 | } 273 | 274 | TEST_CASE("Write 2 msg, read 2 msg - conflate = false", "[integration]") 275 | { 276 | remove("/dev/shm/test_queue"); 277 | const size_t msg_size = 128; 278 | msgq_queue_t writer, reader; 279 | 280 | msgq_new_queue(&writer, "test_queue", 1024); 281 | msgq_new_queue(&reader, "test_queue", 1024); 282 | 283 | msgq_init_publisher(&writer); 284 | msgq_init_subscriber(&reader); 285 | 286 | // Build 128 byte message 287 | msgq_msg_t outgoing_msg; 288 | msgq_msg_init_size(&outgoing_msg, msg_size); 289 | 290 | for (size_t i = 0; i < msg_size; i++) 291 | { 292 | outgoing_msg.data[i] = i; 293 | } 294 | 295 | REQUIRE(msgq_msg_send(&outgoing_msg, &writer) == msg_size); 296 | REQUIRE(msgq_msg_send(&outgoing_msg, &writer) == msg_size); 297 | 298 | msgq_msg_t incoming_msg1; 299 | REQUIRE(msgq_msg_recv(&incoming_msg1, &reader) == msg_size); 300 | REQUIRE(memcmp(incoming_msg1.data, outgoing_msg.data, msg_size) == 0); 301 | 302 | msgq_msg_t incoming_msg2; 303 | REQUIRE(msgq_msg_recv(&incoming_msg2, &reader) == msg_size); 304 | REQUIRE(memcmp(incoming_msg2.data, outgoing_msg.data, msg_size) == 0); 305 | 306 | msgq_msg_close(&outgoing_msg); 307 | msgq_msg_close(&incoming_msg1); 308 | msgq_msg_close(&incoming_msg2); 309 | } 310 | 311 | TEST_CASE("Write 2 msg, read 2 msg - conflate = true", "[integration]") 312 | { 313 
// A subscriber that reads only every 10th send gets lapped repeatedly; this
// pins the resulting received/skipped split for a deterministic send pattern.
TEST_CASE("1 publisher, 1 slow subscriber", "[integration]")
{
  remove("/dev/shm/test_queue");
  msgq_queue_t writer, reader;

  msgq_new_queue(&writer, "test_queue", 1024);
  msgq_new_queue(&reader, "test_queue", 1024);

  msgq_init_publisher(&writer);
  msgq_init_subscriber(&reader);

  int n_received = 0;
  int n_skipped = 0;

  for (uint64_t i = 0; i < 1e5; i++)
  {
    msgq_msg_t outgoing_msg;
    msgq_msg_init_data(&outgoing_msg, (char *)&i, sizeof(uint64_t));
    msgq_msg_send(&outgoing_msg, &writer);
    msgq_msg_close(&outgoing_msg);

    // Only attempt a read every 10th iteration, so the reader regularly
    // falls behind and has to reset (size == 0).
    if (i % 10 == 0)
    {
      msgq_msg_t msg1;
      msgq_msg_recv(&msg1, &reader);

      if (msg1.size == 0)
      {
        n_skipped++;
      }
      else
      {
        n_received++;
      }

      msgq_msg_close(&msg1);
    }
  }

  // TODO: verify these numbers by hand
  REQUIRE(n_received == 8572);
  REQUIRE(n_skipped == 1428);
}
3 | import multiprocessing 4 | import platform 5 | import msgq 6 | from parameterized import parameterized_class 7 | from typing import Optional 8 | 9 | WAIT_TIMEOUT = 5 10 | 11 | 12 | @pytest.mark.skipif(condition=platform.system() == "Darwin", reason="Events not supported on macOS") 13 | class TestEvents: 14 | 15 | def test_mutation(self): 16 | handle = msgq.fake_event_handle("carState") 17 | event = handle.recv_called_event 18 | 19 | assert not event.peek() 20 | event.set() 21 | assert event.peek() 22 | event.clear() 23 | assert not event.peek() 24 | 25 | del event 26 | 27 | def test_wait(self): 28 | handle = msgq.fake_event_handle("carState") 29 | event = handle.recv_called_event 30 | 31 | event.set() 32 | try: 33 | event.wait(WAIT_TIMEOUT) 34 | assert event.peek() 35 | except RuntimeError: 36 | pytest.fail("event.wait() timed out") 37 | 38 | def test_wait_multiprocess(self): 39 | handle = msgq.fake_event_handle("carState") 40 | event = handle.recv_called_event 41 | 42 | def set_event_run(): 43 | event.set() 44 | 45 | try: 46 | p = multiprocessing.Process(target=set_event_run) 47 | p.start() 48 | event.wait(WAIT_TIMEOUT) 49 | assert event.peek() 50 | except RuntimeError: 51 | pytest.fail("event.wait() timed out") 52 | 53 | p.kill() 54 | 55 | def test_wait_zero_timeout(self): 56 | handle = msgq.fake_event_handle("carState") 57 | event = handle.recv_called_event 58 | 59 | try: 60 | event.wait(0) 61 | pytest.fail("event.wait() did not time out") 62 | except RuntimeError: 63 | assert not event.peek() 64 | 65 | 66 | @pytest.mark.skipif(condition=platform.system() == "Darwin", reason="FakeSockets not supported on macOS") 67 | @pytest.mark.skipif(condition="ZMQ" in os.environ, reason="FakeSockets not supported on ZMQ") 68 | @parameterized_class([{"prefix": None}, {"prefix": "test"}]) 69 | class TestFakeSockets: 70 | prefix: Optional[str] = None 71 | 72 | def setup_method(self): 73 | msgq.toggle_fake_events(True) 74 | if self.prefix is not None: 75 | 
msgq.set_fake_prefix(self.prefix) 76 | else: 77 | msgq.delete_fake_prefix() 78 | 79 | def teardown_method(self): 80 | msgq.toggle_fake_events(False) 81 | msgq.delete_fake_prefix() 82 | 83 | def test_event_handle_init(self): 84 | handle = msgq.fake_event_handle("controlsState", override=True) 85 | 86 | assert not handle.enabled 87 | assert handle.recv_called_event.fd >= 0 88 | assert handle.recv_ready_event.fd >= 0 89 | 90 | def test_non_managed_socket_state(self): 91 | # non managed socket should have zero state 92 | _ = msgq.pub_sock("ubloxGnss") 93 | 94 | handle = msgq.fake_event_handle("ubloxGnss", override=False) 95 | 96 | assert not handle.enabled 97 | assert handle.recv_called_event.fd == 0 98 | assert handle.recv_ready_event.fd == 0 99 | 100 | def test_managed_socket_state(self): 101 | # managed socket should not change anything about the state 102 | handle = msgq.fake_event_handle("ubloxGnss") 103 | handle.enabled = True 104 | 105 | expected_enabled = handle.enabled 106 | expected_recv_called_fd = handle.recv_called_event.fd 107 | expected_recv_ready_fd = handle.recv_ready_event.fd 108 | 109 | _ = msgq.pub_sock("ubloxGnss") 110 | 111 | assert handle.enabled == expected_enabled 112 | assert handle.recv_called_event.fd == expected_recv_called_fd 113 | assert handle.recv_ready_event.fd == expected_recv_ready_fd 114 | 115 | def test_sockets_enable_disable(self): 116 | carState_handle = msgq.fake_event_handle("ubloxGnss", enable=True) 117 | recv_called = carState_handle.recv_called_event 118 | recv_ready = carState_handle.recv_ready_event 119 | 120 | pub_sock = msgq.pub_sock("ubloxGnss") 121 | sub_sock = msgq.sub_sock("ubloxGnss") 122 | 123 | try: 124 | carState_handle.enabled = True 125 | recv_ready.set() 126 | pub_sock.send(b"test") 127 | _ = sub_sock.receive() 128 | assert recv_called.peek() 129 | recv_called.clear() 130 | 131 | carState_handle.enabled = False 132 | recv_ready.set() 133 | pub_sock.send(b"test") 134 | _ = sub_sock.receive() 135 | assert not 
def random_sock():
    """Return a random 10-character socket name (uppercase letters and digits)."""
    return ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))

def random_bytes(length=1000):
    """Return `length` random bytes.

    Bug fix: the upper bound was `random.randrange(0xFF)`, which draws from
    0..254 and silently excludes the byte value 255; `randrange` stops one
    short of its argument, so the correct bound is 0x100.
    """
    return bytes([random.randrange(0x100) for _ in range(length)])

def zmq_sleep(t=1):
    """Sleep `t` seconds when running against the ZMQ backend.

    ZMQ sockets are slow joiners, so tests give them settling time; with the
    MSGQ backend (no ZMQ env var) this is a no-op.
    """
    if "ZMQ" in os.environ:
        time.sleep(t)
class TestPoller:
    """Poller integration tests: a background thread polls while the main
    thread publishes (see the module-level `poller` helper)."""

    def test_poll_once(self):
        context = msgq.Context()

        pub = msgq.PubSocket()
        pub.connect(context, SERVICE_NAME)

        with concurrent.futures.ThreadPoolExecutor() as e:
            # The poller thread subscribes and blocks in poll(10000).
            poll = e.submit(poller)

            time.sleep(0.1)  # Slow joiner syndrome

            # Send message
            pub.send(b"a")

            # Wait for poll result
            result = poll.result()

        del pub
        context.term()

        assert result == [b"a"]

    def test_poll_and_create_many_subscribers(self):
        context = msgq.Context()

        pub = msgq.PubSocket()
        pub.connect(context, SERVICE_NAME)

        with concurrent.futures.ThreadPoolExecutor() as e:
            poll = e.submit(poller)

            time.sleep(0.1)  # Slow joiner syndrome
            # Late-joining subscribers must not disturb the in-flight poll.
            c = msgq.Context()
            for _ in range(10):
                msgq.SubSocket().connect(c, SERVICE_NAME)

            time.sleep(0.1)

            # Send message
            pub.send(b"a")

            # Wait for poll result
            result = poll.result()

        del pub
        context.term()

        assert result == [b"a"]

    def test_multiple_publishers_exception(self):
        context = msgq.Context()

        # A second publisher on the same endpoint must raise on send.
        with pytest.raises(msgq.MultiplePublishersError):
            pub1 = msgq.PubSocket()
            pub1.connect(context, SERVICE_NAME)

            pub2 = msgq.PubSocket()
            pub2.connect(context, SERVICE_NAME)

            pub1.send(b"a")

        del pub1
        del pub2
        context.term()

    def test_multiple_messages(self):
        context = msgq.Context()

        pub = msgq.PubSocket()
        pub.connect(context, SERVICE_NAME)

        sub = msgq.SubSocket()
        sub.connect(context, SERVICE_NAME)

        time.sleep(0.1)  # Slow joiner

        # Messages of increasing length make ordering errors detectable.
        for i in range(1, 100):
            pub.send(b'a'*i)

        msg_seen = False
        i = 1
        while True:
            r = sub.receive(non_blocking=True)

            if r is not None:
                assert b'a'*i == r

                msg_seen = True
                i += 1

            if r is None and msg_seen:  # ZMQ sometimes receives nothing on the first receive
                break

        del pub
        del sub
        context.term()

    def test_conflate(self):
        context = msgq.Context()

        pub = msgq.PubSocket()
        pub.connect(context, SERVICE_NAME)

        sub = msgq.SubSocket()
        sub.connect(context, SERVICE_NAME, conflate=True)

        time.sleep(0.1)  # Slow joiner
        pub.send(b'a')
        pub.send(b'b')

        # Conflation keeps only the newest message.
        assert b'b' == sub.receive()

        del pub
        del sub
        context.term()
https://raw.githubusercontent.com/commaai/msgq/5483a02de303d40cb2632d59f3f3a54dabfb5965/msgq/visionipc/tests/__init__.py -------------------------------------------------------------------------------- /msgq/visionipc/tests/test_visionipc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import random 4 | import numpy as np 5 | from msgq.visionipc import VisionIpcServer, VisionIpcClient, VisionStreamType 6 | 7 | def zmq_sleep(t=1): 8 | if "ZMQ" in os.environ: 9 | time.sleep(t) 10 | 11 | 12 | class TestVisionIpc: 13 | 14 | def setup_vipc(self, name, *stream_types, num_buffers=1, width=100, height=100, conflate=False): 15 | self.server = VisionIpcServer(name) 16 | for stream_type in stream_types: 17 | self.server.create_buffers(stream_type, num_buffers, width, height) 18 | self.server.start_listener() 19 | 20 | if len(stream_types): 21 | self.client = VisionIpcClient(name, stream_types[0], conflate) 22 | assert self.client.connect(True) 23 | else: 24 | self.client = None 25 | 26 | zmq_sleep() 27 | return self.server, self.client 28 | 29 | def test_connect(self): 30 | self.setup_vipc("camerad", VisionStreamType.VISION_STREAM_ROAD) 31 | assert self.client.is_connected 32 | del self.client 33 | del self.server 34 | 35 | def test_available_streams(self): 36 | for k in range(4): 37 | stream_types = set(random.choices([x.value for x in VisionStreamType], k=k)) 38 | self.setup_vipc("camerad", *stream_types) 39 | available_streams = VisionIpcClient.available_streams("camerad", True) 40 | assert available_streams == stream_types 41 | del self.client 42 | del self.server 43 | 44 | def test_buffers(self): 45 | width, height, num_buffers = 100, 200, 5 46 | self.setup_vipc("camerad", VisionStreamType.VISION_STREAM_ROAD, num_buffers=num_buffers, width=width, height=height) 47 | assert self.client.width == width 48 | assert self.client.height == height 49 | assert self.client.buffer_len > 0 50 | assert 
self.client.num_buffers == num_buffers 51 | del self.client 52 | del self.server 53 | 54 | def test_send_single_buffer(self): 55 | self.setup_vipc("camerad", VisionStreamType.VISION_STREAM_ROAD) 56 | 57 | buf = np.zeros(self.client.buffer_len, dtype=np.uint8) 58 | buf.view('width = init_width; 5 | this->height = init_height; 6 | this->stride = init_stride; 7 | this->uv_offset = init_uv_offset; 8 | 9 | this->y = (uint8_t *)this->addr; 10 | this->uv = this->y + this->uv_offset; 11 | } 12 | 13 | 14 | uint64_t VisionBuf::get_frame_id() { 15 | return *frame_id; 16 | } 17 | 18 | void VisionBuf::set_frame_id(uint64_t id) { 19 | *frame_id = id; 20 | } 21 | -------------------------------------------------------------------------------- /msgq/visionipc/visionbuf.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "msgq/visionipc/visionipc.h" 4 | 5 | #define CL_USE_DEPRECATED_OPENCL_1_2_APIS 6 | #ifdef __APPLE__ 7 | #include 8 | #else 9 | #include 10 | #endif 11 | 12 | #define VISIONBUF_SYNC_FROM_DEVICE 0 13 | #define VISIONBUF_SYNC_TO_DEVICE 1 14 | 15 | enum VisionStreamType { 16 | VISION_STREAM_ROAD, 17 | VISION_STREAM_DRIVER, 18 | VISION_STREAM_WIDE_ROAD, 19 | 20 | VISION_STREAM_MAP, 21 | VISION_STREAM_MAX, 22 | }; 23 | 24 | class VisionBuf { 25 | public: 26 | size_t len = 0; 27 | size_t mmap_len = 0; 28 | void * addr = nullptr; 29 | uint64_t *frame_id; 30 | int fd = 0; 31 | 32 | size_t width = 0; 33 | size_t height = 0; 34 | size_t stride = 0; 35 | size_t uv_offset = 0; 36 | 37 | // YUV 38 | uint8_t * y = nullptr; 39 | uint8_t * uv = nullptr; 40 | 41 | // Visionipc 42 | uint64_t server_id = 0; 43 | size_t idx = 0; 44 | VisionStreamType type; 45 | 46 | // OpenCL 47 | cl_mem buf_cl = nullptr; 48 | cl_command_queue copy_q = nullptr; 49 | 50 | // ion 51 | int handle = 0; 52 | 53 | void allocate(size_t len); 54 | void import(); 55 | void init_cl(cl_device_id device_id, cl_context ctx); 56 | void init_yuv(size_t 
width, size_t height, size_t stride, size_t uv_offset); 57 | int sync(int dir); 58 | int free(); 59 | 60 | void set_frame_id(uint64_t id); 61 | uint64_t get_frame_id(); 62 | }; 63 | -------------------------------------------------------------------------------- /msgq/visionipc/visionbuf_cl.cc: -------------------------------------------------------------------------------- 1 | #include "msgq/visionipc/visionbuf.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | std::atomic offset = 0; 13 | 14 | static void *malloc_with_fd(size_t len, int *fd) { 15 | char full_path[0x100]; 16 | 17 | #ifdef __APPLE__ 18 | snprintf(full_path, sizeof(full_path)-1, "/tmp/visionbuf_%d_%d", getpid(), offset++); 19 | #else 20 | snprintf(full_path, sizeof(full_path)-1, "/dev/shm/visionbuf_%d_%d", getpid(), offset++); 21 | #endif 22 | 23 | *fd = open(full_path, O_RDWR | O_CREAT, 0664); 24 | assert(*fd >= 0); 25 | 26 | unlink(full_path); 27 | 28 | ftruncate(*fd, len); 29 | void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0); 30 | assert(addr != MAP_FAILED); 31 | 32 | return addr; 33 | } 34 | 35 | void VisionBuf::allocate(size_t length) { 36 | this->len = length; 37 | this->mmap_len = this->len + sizeof(uint64_t); 38 | this->addr = malloc_with_fd(this->mmap_len, &this->fd); 39 | this->frame_id = (uint64_t*)((uint8_t*)this->addr + this->len); 40 | } 41 | 42 | void VisionBuf::init_cl(cl_device_id device_id, cl_context ctx){ 43 | int err; 44 | 45 | this->copy_q = clCreateCommandQueue(ctx, device_id, 0, &err); 46 | assert(err == 0); 47 | 48 | this->buf_cl = clCreateBuffer(ctx, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, this->len, this->addr, &err); 49 | assert(err == 0); 50 | } 51 | 52 | 53 | void VisionBuf::import(){ 54 | assert(this->fd >= 0); 55 | this->addr = mmap(NULL, this->mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED, this->fd, 0); 56 | assert(this->addr != MAP_FAILED); 57 | 58 | this->frame_id = 
(uint64_t*)((uint8_t*)this->addr + this->len); 59 | } 60 | 61 | 62 | int VisionBuf::sync(int dir) { 63 | int err = 0; 64 | if (!this->buf_cl) return 0; 65 | 66 | if (dir == VISIONBUF_SYNC_FROM_DEVICE) { 67 | err = clEnqueueReadBuffer(this->copy_q, this->buf_cl, CL_FALSE, 0, this->len, this->addr, 0, NULL, NULL); 68 | } else { 69 | err = clEnqueueWriteBuffer(this->copy_q, this->buf_cl, CL_FALSE, 0, this->len, this->addr, 0, NULL, NULL); 70 | } 71 | 72 | if (err == 0){ 73 | err = clFinish(this->copy_q); 74 | } 75 | 76 | return err; 77 | } 78 | 79 | int VisionBuf::free() { 80 | int err = 0; 81 | if (this->buf_cl){ 82 | err = clReleaseMemObject(this->buf_cl); 83 | if (err != 0) return err; 84 | 85 | err = clReleaseCommandQueue(this->copy_q); 86 | if (err != 0) return err; 87 | } 88 | 89 | err = munmap(this->addr, this->mmap_len); 90 | if (err != 0) return err; 91 | 92 | err = close(this->fd); 93 | return err; 94 | } 95 | -------------------------------------------------------------------------------- /msgq/visionipc/visionbuf_ion.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | 17 | #include "msgq/visionipc/visionbuf.h" 18 | 19 | // keep trying if x gets interrupted by a signal 20 | #define HANDLE_EINTR(x) \ 21 | ({ \ 22 | decltype(x) ret; \ 23 | int try_cnt = 0; \ 24 | do { \ 25 | ret = (x); \ 26 | } while (ret == -1 && errno == EINTR && try_cnt++ < 100); \ 27 | ret; \ 28 | }) 29 | 30 | // just hard-code these for convenience 31 | // size_t device_page_size = 0; 32 | // clGetDeviceInfo(device_id, CL_DEVICE_PAGE_SIZE_QCOM, 33 | // sizeof(device_page_size), &device_page_size, 34 | // NULL); 35 | 36 | // size_t padding_cl = 0; 37 | // clGetDeviceInfo(device_id, CL_DEVICE_EXT_MEM_PADDING_IN_BYTES_QCOM, 38 | // sizeof(padding_cl), &padding_cl, 
39 | // NULL); 40 | #define DEVICE_PAGE_SIZE_CL 4096 41 | #define PADDING_CL 0 42 | 43 | struct IonFileHandle { 44 | IonFileHandle() { 45 | fd = open("/dev/ion", O_RDWR | O_NONBLOCK); 46 | assert(fd >= 0); 47 | } 48 | ~IonFileHandle() { 49 | close(fd); 50 | } 51 | int fd = -1; 52 | }; 53 | 54 | int ion_fd() { 55 | static IonFileHandle fh; 56 | return fh.fd; 57 | } 58 | 59 | void VisionBuf::allocate(size_t length) { 60 | struct ion_allocation_data ion_alloc = {0}; 61 | ion_alloc.len = length + PADDING_CL + sizeof(uint64_t); 62 | ion_alloc.align = 4096; 63 | ion_alloc.heap_id_mask = 1 << ION_IOMMU_HEAP_ID; 64 | ion_alloc.flags = ION_FLAG_CACHED; 65 | 66 | int err = HANDLE_EINTR(ioctl(ion_fd(), ION_IOC_ALLOC, &ion_alloc)); 67 | assert(err == 0); 68 | 69 | struct ion_fd_data ion_fd_data = {0}; 70 | ion_fd_data.handle = ion_alloc.handle; 71 | err = HANDLE_EINTR(ioctl(ion_fd(), ION_IOC_SHARE, &ion_fd_data)); 72 | assert(err == 0); 73 | 74 | void *mmap_addr = mmap(NULL, ion_alloc.len, 75 | PROT_READ | PROT_WRITE, 76 | MAP_SHARED, ion_fd_data.fd, 0); 77 | assert(mmap_addr != MAP_FAILED); 78 | 79 | memset(mmap_addr, 0, ion_alloc.len); 80 | 81 | this->len = length; 82 | this->mmap_len = ion_alloc.len; 83 | this->addr = mmap_addr; 84 | this->handle = ion_alloc.handle; 85 | this->fd = ion_fd_data.fd; 86 | this->frame_id = (uint64_t*)((uint8_t*)this->addr + this->len + PADDING_CL); 87 | } 88 | 89 | void VisionBuf::import(){ 90 | int err; 91 | assert(this->fd >= 0); 92 | 93 | // Get handle 94 | struct ion_fd_data fd_data = {0}; 95 | fd_data.fd = this->fd; 96 | err = HANDLE_EINTR(ioctl(ion_fd(), ION_IOC_IMPORT, &fd_data)); 97 | assert(err == 0); 98 | 99 | this->handle = fd_data.handle; 100 | this->addr = mmap(NULL, this->mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED, this->fd, 0); 101 | assert(this->addr != MAP_FAILED); 102 | 103 | this->frame_id = (uint64_t*)((uint8_t*)this->addr + this->len + PADDING_CL); 104 | } 105 | 106 | void VisionBuf::init_cl(cl_device_id device_id, 
cl_context ctx) { 107 | int err; 108 | 109 | assert(((uintptr_t)this->addr % DEVICE_PAGE_SIZE_CL) == 0); 110 | 111 | cl_mem_ion_host_ptr ion_cl = {0}; 112 | ion_cl.ext_host_ptr.allocation_type = CL_MEM_ION_HOST_PTR_QCOM; 113 | ion_cl.ext_host_ptr.host_cache_policy = CL_MEM_HOST_UNCACHED_QCOM; 114 | ion_cl.ion_filedesc = this->fd; 115 | ion_cl.ion_hostptr = this->addr; 116 | 117 | this->buf_cl = clCreateBuffer(ctx, 118 | CL_MEM_USE_HOST_PTR | CL_MEM_EXT_HOST_PTR_QCOM, 119 | this->len, &ion_cl, &err); 120 | assert(err == 0); 121 | } 122 | 123 | 124 | int VisionBuf::sync(int dir) { 125 | struct ion_flush_data flush_data = {0}; 126 | flush_data.handle = this->handle; 127 | flush_data.vaddr = this->addr; 128 | flush_data.offset = 0; 129 | flush_data.length = this->len; 130 | 131 | // ION_IOC_INV_CACHES ~= DMA_FROM_DEVICE 132 | // ION_IOC_CLEAN_CACHES ~= DMA_TO_DEVICE 133 | // ION_IOC_CLEAN_INV_CACHES ~= DMA_BIDIRECTIONAL 134 | 135 | struct ion_custom_data custom_data = {0}; 136 | 137 | assert(dir == VISIONBUF_SYNC_FROM_DEVICE || dir == VISIONBUF_SYNC_TO_DEVICE); 138 | custom_data.cmd = (dir == VISIONBUF_SYNC_FROM_DEVICE) ? 
139 | ION_IOC_INV_CACHES : ION_IOC_CLEAN_CACHES; 140 | 141 | custom_data.arg = (unsigned long)&flush_data; 142 | return HANDLE_EINTR(ioctl(ion_fd(), ION_IOC_CUSTOM, &custom_data)); 143 | } 144 | 145 | int VisionBuf::free() { 146 | int err = 0; 147 | 148 | if (this->buf_cl){ 149 | err = clReleaseMemObject(this->buf_cl); 150 | if (err != 0) return err; 151 | } 152 | 153 | err = munmap(this->addr, this->mmap_len); 154 | if (err != 0) return err; 155 | 156 | err = close(this->fd); 157 | if (err != 0) return err; 158 | 159 | struct ion_handle_data handle_data = {.handle = this->handle}; 160 | return HANDLE_EINTR(ioctl(ion_fd(), ION_IOC_FREE, &handle_data)); 161 | } 162 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #ifdef __APPLE__ 13 | #define getsocket() socket(AF_UNIX, SOCK_STREAM, 0) 14 | #else 15 | #define getsocket() socket(AF_UNIX, SOCK_SEQPACKET, 0) 16 | #endif 17 | 18 | #include "msgq/visionipc/visionipc.h" 19 | 20 | int ipc_connect(const char* socket_path) { 21 | int err; 22 | 23 | int sock = getsocket(); 24 | 25 | if (sock < 0) return -1; 26 | struct sockaddr_un addr = { 27 | .sun_family = AF_UNIX, 28 | }; 29 | snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", socket_path); 30 | err = connect(sock, (struct sockaddr*)&addr, sizeof(addr)); 31 | if (err != 0) { 32 | close(sock); 33 | return -1; 34 | } 35 | 36 | return sock; 37 | } 38 | 39 | int ipc_bind(const char* socket_path) { 40 | int err; 41 | 42 | unlink(socket_path); 43 | 44 | int sock = getsocket(); 45 | 46 | struct sockaddr_un addr = { 47 | .sun_family = AF_UNIX, 48 | }; 49 | snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", socket_path); 50 | err = bind(sock, (struct sockaddr *)&addr, sizeof(addr)); 51 | assert(err == 0); 
52 | 53 | err = listen(sock, 3); 54 | assert(err == 0); 55 | 56 | return sock; 57 | } 58 | 59 | 60 | int ipc_sendrecv_with_fds(bool send, int fd, void *buf, size_t buf_size, int* fds, int num_fds, 61 | int *out_num_fds) { 62 | char control_buf[CMSG_SPACE(sizeof(int) * num_fds)]; 63 | memset(control_buf, 0, CMSG_SPACE(sizeof(int) * num_fds)); 64 | 65 | struct iovec iov = { 66 | .iov_base = buf, 67 | .iov_len = buf_size, 68 | }; 69 | struct msghdr msg = { 70 | .msg_iov = &iov, 71 | .msg_iovlen = 1, 72 | }; 73 | 74 | if (num_fds > 0) { 75 | assert(fds); 76 | 77 | msg.msg_control = control_buf; 78 | msg.msg_controllen = CMSG_SPACE(sizeof(int) * num_fds); 79 | } 80 | 81 | if (send) { 82 | if (num_fds) { 83 | struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); 84 | assert(cmsg); 85 | cmsg->cmsg_level = SOL_SOCKET; 86 | cmsg->cmsg_type = SCM_RIGHTS; 87 | cmsg->cmsg_len = CMSG_LEN(sizeof(int) * num_fds); 88 | memcpy(CMSG_DATA(cmsg), fds, sizeof(int) * num_fds); 89 | } 90 | return sendmsg(fd, &msg, 0); 91 | } else { 92 | int r = recvmsg(fd, &msg, 0); 93 | if (r < 0) return r; 94 | 95 | int recv_fds = 0; 96 | if (msg.msg_controllen > 0) { 97 | struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); 98 | assert(cmsg); 99 | assert(cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS); 100 | recv_fds = (cmsg->cmsg_len - CMSG_LEN(0)); 101 | assert(recv_fds > 0 && (recv_fds % sizeof(int)) == 0); 102 | recv_fds /= sizeof(int); 103 | 104 | assert(fds && recv_fds <= num_fds); 105 | memcpy(fds, CMSG_DATA(cmsg), sizeof(int) * recv_fds); 106 | } 107 | 108 | if (msg.msg_flags) { 109 | for (int i=0; i 4 | #include 5 | 6 | 7 | int ipc_connect(const char* socket_path); 8 | int ipc_bind(const char* socket_path); 9 | int ipc_sendrecv_with_fds(bool send, int fd, void *buf, size_t buf_size, int* fds, int num_fds, 10 | int *out_num_fds); 11 | 12 | constexpr int VISIONIPC_MAX_FDS = 128; 13 | 14 | struct VisionIpcBufExtra { 15 | uint32_t frame_id; 16 | uint64_t timestamp_sof; 17 | uint64_t timestamp_eof; 
18 | bool valid; 19 | }; 20 | 21 | struct VisionIpcPacket { 22 | uint64_t server_id; 23 | size_t idx; 24 | struct VisionIpcBufExtra extra; 25 | }; 26 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc.pxd: -------------------------------------------------------------------------------- 1 | # distutils: language = c++ 2 | #cython: language_level=3 3 | 4 | from libcpp.string cimport string 5 | from libcpp.vector cimport vector 6 | from libcpp.set cimport set 7 | from libc.stdint cimport uint32_t, uint64_t 8 | from libcpp cimport bool, int 9 | 10 | cdef extern from "msgq/visionipc/visionbuf.h": 11 | struct _cl_device_id 12 | struct _cl_context 13 | struct _cl_mem 14 | 15 | ctypedef _cl_device_id * cl_device_id 16 | ctypedef _cl_context * cl_context 17 | ctypedef _cl_mem * cl_mem 18 | 19 | cdef enum VisionStreamType: 20 | pass 21 | 22 | cdef cppclass VisionBuf: 23 | void * addr 24 | int fd 25 | size_t len 26 | size_t width 27 | size_t height 28 | size_t stride 29 | size_t uv_offset 30 | size_t idx 31 | cl_mem buf_cl 32 | void set_frame_id(uint64_t id) 33 | 34 | cdef extern from "msgq/visionipc/visionipc.h": 35 | struct VisionIpcBufExtra: 36 | uint32_t frame_id 37 | uint64_t timestamp_sof 38 | uint64_t timestamp_eof 39 | bool valid 40 | 41 | cdef extern from "msgq/visionipc/visionipc_server.h": 42 | string get_endpoint_name(string, VisionStreamType) 43 | 44 | cdef cppclass VisionIpcServer: 45 | VisionIpcServer(string, void*, void*) 46 | void create_buffers(VisionStreamType, size_t, size_t, size_t) 47 | void create_buffers_with_sizes(VisionStreamType, size_t, size_t, size_t, size_t, size_t, size_t) 48 | VisionBuf * get_buffer(VisionStreamType) 49 | void send(VisionBuf *, VisionIpcBufExtra *, bool) 50 | void start_listener() 51 | 52 | cdef extern from "msgq/visionipc/visionipc_client.h": 53 | cdef cppclass VisionIpcClient: 54 | int num_buffers 55 | VisionBuf buffers[1] 56 | VisionIpcClient(string, VisionStreamType, 
bool, void*, void*) 57 | VisionBuf * recv(VisionIpcBufExtra *, int) 58 | bool connect(bool) 59 | bool is_connected() 60 | @staticmethod 61 | set[VisionStreamType] getAvailableStreams(string, bool) 62 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc_client.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include "msgq/visionipc/visionipc.h" 8 | #include "msgq/visionipc/visionipc_client.h" 9 | #include "msgq/visionipc/visionipc_server.h" 10 | #include "msgq/logger/logger.h" 11 | 12 | static int connect_to_vipc_server(const std::string &name, bool blocking) { 13 | const std::string ipc_path = get_ipc_path(name); 14 | int socket_fd = ipc_connect(ipc_path.c_str()); 15 | while (socket_fd < 0 && blocking) { 16 | std::cout << "VisionIpcClient connecting" << std::endl; 17 | std::this_thread::sleep_for(std::chrono::milliseconds(100)); 18 | socket_fd = ipc_connect(ipc_path.c_str()); 19 | } 20 | return socket_fd; 21 | } 22 | 23 | VisionIpcClient::VisionIpcClient(std::string name, VisionStreamType type, bool conflate, cl_device_id device_id, cl_context ctx) : name(name), type(type), device_id(device_id), ctx(ctx) { 24 | msg_ctx = Context::create(); 25 | sock = SubSocket::create(msg_ctx, get_endpoint_name(name, type), "127.0.0.1", conflate, false); 26 | 27 | poller = Poller::create(); 28 | poller->registerSocket(sock); 29 | } 30 | 31 | // Connect is not thread safe. 
Do not use the buffers while calling connect 32 | bool VisionIpcClient::connect(bool blocking){ 33 | connected = false; 34 | 35 | // Cleanup old buffers on reconnect 36 | for (size_t i = 0; i < num_buffers; i++){ 37 | if (buffers[i].free() != 0) { 38 | LOGE("Failed to free buffer %zu", i); 39 | } 40 | } 41 | 42 | num_buffers = 0; 43 | 44 | int socket_fd = connect_to_vipc_server(name, blocking); 45 | if (socket_fd < 0) { 46 | return false; 47 | } 48 | // Send stream type to server to request FDs 49 | int r = ipc_sendrecv_with_fds(true, socket_fd, &type, sizeof(type), nullptr, 0, nullptr); 50 | assert(r == sizeof(type)); 51 | 52 | // Get FDs 53 | int fds[VISIONIPC_MAX_FDS]; 54 | VisionBuf bufs[VISIONIPC_MAX_FDS]; 55 | r = ipc_sendrecv_with_fds(false, socket_fd, &bufs, sizeof(bufs), fds, VISIONIPC_MAX_FDS, &num_buffers); 56 | if (r < 0) { 57 | // only expected error is server shutting down 58 | assert(errno == ECONNRESET); 59 | close(socket_fd); 60 | return false; 61 | } 62 | 63 | assert(num_buffers >= 0); 64 | assert(r == sizeof(VisionBuf) * num_buffers); 65 | 66 | // Import buffers 67 | for (size_t i = 0; i < num_buffers; i++){ 68 | buffers[i] = bufs[i]; 69 | buffers[i].fd = fds[i]; 70 | buffers[i].import(); 71 | buffers[i].init_yuv(buffers[i].width, buffers[i].height, buffers[i].stride, buffers[i].uv_offset); 72 | 73 | if (device_id) buffers[i].init_cl(device_id, ctx); 74 | } 75 | 76 | close(socket_fd); 77 | connected = true; 78 | return true; 79 | } 80 | 81 | VisionBuf * VisionIpcClient::recv(VisionIpcBufExtra * extra, const int timeout_ms){ 82 | auto p = poller->poll(timeout_ms); 83 | 84 | if (!p.size()){ 85 | return nullptr; 86 | } 87 | 88 | Message * r = sock->receive(true); 89 | if (r == nullptr){ 90 | return nullptr; 91 | } 92 | 93 | // Get buffer 94 | assert(r->getSize() == sizeof(VisionIpcPacket)); 95 | VisionIpcPacket *packet = (VisionIpcPacket*)r->getData(); 96 | 97 | assert(packet->idx < num_buffers); 98 | VisionBuf * buf = &buffers[packet->idx]; 99 | 
100 | if (buf->server_id != packet->server_id){ 101 | connected = false; 102 | delete r; 103 | return nullptr; 104 | } 105 | 106 | if (extra) { 107 | *extra = packet->extra; 108 | } 109 | 110 | if (buf->sync(VISIONBUF_SYNC_TO_DEVICE) != 0) { 111 | LOGE("Failed to sync buffer"); 112 | } 113 | 114 | delete r; 115 | return buf; 116 | } 117 | 118 | std::set VisionIpcClient::getAvailableStreams(const std::string &name, bool blocking) { 119 | int socket_fd = connect_to_vipc_server(name, blocking); 120 | if (socket_fd < 0) { 121 | return {}; 122 | } 123 | // Send VISION_STREAM_MAX to server to request available streams 124 | int request = VISION_STREAM_MAX; 125 | int r = ipc_sendrecv_with_fds(true, socket_fd, &request, sizeof(request), nullptr, 0, nullptr); 126 | assert(r == sizeof(request)); 127 | 128 | VisionStreamType available_streams[VISION_STREAM_MAX] = {}; 129 | r = ipc_sendrecv_with_fds(false, socket_fd, &available_streams, sizeof(available_streams), nullptr, 0, nullptr); 130 | if (r < 0) { 131 | // only expected error is server shutting down 132 | assert(errno == ECONNRESET); 133 | close(socket_fd); 134 | return {}; 135 | } 136 | 137 | assert(r % sizeof(VisionStreamType) == 0); 138 | close(socket_fd); 139 | return std::set(available_streams, available_streams + r / sizeof(VisionStreamType)); 140 | } 141 | 142 | VisionIpcClient::~VisionIpcClient(){ 143 | for (size_t i = 0; i < num_buffers; i++){ 144 | if (buffers[i].free() != 0) { 145 | LOGE("Failed to free buffer %zu", i); 146 | } 147 | } 148 | 149 | delete sock; 150 | delete poller; 151 | delete msg_ctx; 152 | } 153 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc_client.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "msgq/ipc.h" 7 | #include "msgq/visionipc/visionbuf.h" 8 | 9 | 10 | class VisionIpcClient { 11 | private: 12 | std::string name; 13 | 
Context * msg_ctx; 14 | SubSocket * sock; 15 | Poller * poller; 16 | 17 | cl_device_id device_id = nullptr; 18 | cl_context ctx = nullptr; 19 | 20 | public: 21 | bool connected = false; 22 | VisionStreamType type; 23 | int num_buffers = 0; 24 | VisionBuf buffers[VISIONIPC_MAX_FDS]; 25 | VisionIpcClient(std::string name, VisionStreamType type, bool conflate, cl_device_id device_id=nullptr, cl_context ctx=nullptr); 26 | ~VisionIpcClient(); 27 | VisionBuf * recv(VisionIpcBufExtra * extra=nullptr, const int timeout_ms=100); 28 | bool connect(bool blocking=true); 29 | bool is_connected() { return connected; } 30 | static std::set getAvailableStreams(const std::string &name, bool blocking = true); 31 | }; 32 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc_pyx.pxd: -------------------------------------------------------------------------------- 1 | # distutils: language = c++ 2 | #cython: language_level=3 3 | 4 | from .visionipc cimport VisionBuf as cppVisionBuf 5 | from .visionipc cimport cl_device_id, cl_context 6 | 7 | cdef class CLContext: 8 | cdef cl_device_id device_id 9 | cdef cl_context context 10 | 11 | cdef class VisionBuf: 12 | cdef cppVisionBuf * buf 13 | 14 | @staticmethod 15 | cdef create(cppVisionBuf*) 16 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc_pyx.pyx: -------------------------------------------------------------------------------- 1 | # distutils: language = c++ 2 | # cython: c_string_encoding=ascii, language_level=3 3 | 4 | import sys 5 | import numpy as np 6 | cimport numpy as cnp 7 | from cython.view cimport array 8 | from libc.string cimport memcpy 9 | from libc.stdint cimport uint32_t, uint64_t 10 | from libcpp cimport bool 11 | from libcpp.string cimport string 12 | 13 | from .visionipc cimport VisionIpcServer as cppVisionIpcServer 14 | from .visionipc cimport VisionIpcClient as cppVisionIpcClient 15 | from .visionipc 
cimport VisionBuf as cppVisionBuf 16 | from .visionipc cimport VisionIpcBufExtra 17 | from .visionipc cimport get_endpoint_name as cpp_get_endpoint_name 18 | 19 | 20 | def get_endpoint_name(string name, VisionStreamType stream): 21 | return cpp_get_endpoint_name(name, stream).decode('utf-8') 22 | 23 | 24 | cpdef enum VisionStreamType: 25 | VISION_STREAM_ROAD 26 | VISION_STREAM_DRIVER 27 | VISION_STREAM_WIDE_ROAD 28 | VISION_STREAM_MAP 29 | 30 | 31 | cdef class VisionBuf: 32 | @staticmethod 33 | cdef create(cppVisionBuf * cbuf): 34 | buf = VisionBuf() 35 | buf.buf = cbuf 36 | return buf 37 | 38 | @property 39 | def data(self): 40 | return np.asarray( self.buf.addr) 41 | 42 | @property 43 | def width(self): 44 | return self.buf.width 45 | 46 | @property 47 | def height(self): 48 | return self.buf.height 49 | 50 | @property 51 | def stride(self): 52 | return self.buf.stride 53 | 54 | @property 55 | def uv_offset(self): 56 | return self.buf.uv_offset 57 | 58 | @property 59 | def idx(self): 60 | return self.buf.idx 61 | 62 | @property 63 | def fd(self): 64 | return self.buf.fd 65 | 66 | 67 | cdef class VisionIpcServer: 68 | cdef cppVisionIpcServer * server 69 | 70 | def __init__(self, string name): 71 | self.server = new cppVisionIpcServer(name, NULL, NULL) 72 | 73 | def create_buffers(self, VisionStreamType tp, size_t num_buffers, size_t width, size_t height): 74 | self.server.create_buffers(tp, num_buffers, width, height) 75 | 76 | def create_buffers_with_sizes(self, VisionStreamType tp, size_t num_buffers, size_t width, size_t height, size_t size, size_t stride, size_t uv_offset): 77 | self.server.create_buffers_with_sizes(tp, num_buffers, width, height, size, stride, uv_offset) 78 | 79 | def send(self, VisionStreamType tp, const unsigned char[:] data, uint32_t frame_id=0, uint64_t timestamp_sof=0, uint64_t timestamp_eof=0): 80 | cdef cppVisionBuf * buf = self.server.get_buffer(tp) 81 | 82 | # Populate buffer 83 | assert buf.len == len(data) 84 | memcpy(buf.addr, 
&data[0], len(data)) 85 | buf.set_frame_id(frame_id) 86 | 87 | cdef VisionIpcBufExtra extra 88 | extra.frame_id = frame_id 89 | extra.timestamp_sof = timestamp_sof 90 | extra.timestamp_eof = timestamp_eof 91 | 92 | self.server.send(buf, &extra, False) 93 | 94 | def start_listener(self): 95 | self.server.start_listener() 96 | 97 | def __dealloc__(self): 98 | del self.server 99 | 100 | 101 | cdef class VisionIpcClient: 102 | cdef cppVisionIpcClient * client 103 | cdef VisionIpcBufExtra extra 104 | 105 | def __cinit__(self, string name, VisionStreamType stream, bool conflate, CLContext context = None): 106 | if context: 107 | self.client = new cppVisionIpcClient(name, stream, conflate, context.device_id, context.context) 108 | else: 109 | self.client = new cppVisionIpcClient(name, stream, conflate, NULL, NULL) 110 | 111 | def __dealloc__(self): 112 | del self.client 113 | 114 | @property 115 | def width(self): 116 | return self.client.buffers[0].width if self.client.num_buffers else None 117 | 118 | @property 119 | def height(self): 120 | return self.client.buffers[0].height if self.client.num_buffers else None 121 | 122 | @property 123 | def stride(self): 124 | return self.client.buffers[0].stride if self.client.num_buffers else None 125 | 126 | @property 127 | def uv_offset(self): 128 | return self.client.buffers[0].uv_offset if self.client.num_buffers else None 129 | 130 | @property 131 | def buffer_len(self): 132 | return self.client.buffers[0].len if self.client.num_buffers else None 133 | 134 | @property 135 | def num_buffers(self): 136 | return self.client.num_buffers 137 | 138 | @property 139 | def frame_id(self): 140 | return self.extra.frame_id 141 | 142 | @property 143 | def timestamp_sof(self): 144 | return self.extra.timestamp_sof 145 | 146 | @property 147 | def timestamp_eof(self): 148 | return self.extra.timestamp_eof 149 | 150 | @property 151 | def valid(self): 152 | return self.extra.valid 153 | 154 | def recv(self, int timeout_ms=100): 155 | buf = 
self.client.recv(&self.extra, timeout_ms) 156 | if not buf: 157 | return None 158 | return VisionBuf.create(buf) 159 | 160 | def connect(self, bool blocking): 161 | return self.client.connect(blocking) 162 | 163 | def is_connected(self): 164 | return self.client.is_connected() 165 | 166 | @staticmethod 167 | def available_streams(string name, bool block): 168 | return cppVisionIpcClient.getAvailableStreams(name, block) 169 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc_server.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | #include "msgq/ipc.h" 12 | #include "msgq/visionipc/visionipc.h" 13 | #include "msgq/visionipc/visionipc_server.h" 14 | #include "msgq/logger/logger.h" 15 | 16 | std::string get_endpoint_name(std::string name, VisionStreamType type){ 17 | if (messaging_use_zmq()){ 18 | assert(name == "camerad" || name == "navd"); 19 | return std::to_string(9000 + static_cast(type)); 20 | } else { 21 | return "visionipc_" + name + "_" + std::to_string(type); 22 | } 23 | } 24 | 25 | std::string get_ipc_path(const std::string& name) { 26 | std::string path = "/tmp/"; 27 | if (char* prefix = std::getenv("OPENPILOT_PREFIX")) { 28 | path += std::string(prefix) + "_"; 29 | } 30 | return path + "visionipc_" + name; 31 | } 32 | 33 | VisionIpcServer::VisionIpcServer(std::string name, cl_device_id device_id, cl_context ctx) : name(name), device_id(device_id), ctx(ctx) { 34 | msg_ctx = Context::create(); 35 | 36 | std::random_device rd("/dev/urandom"); 37 | std::uniform_int_distribution distribution(0, std::numeric_limits::max()); 38 | server_id = distribution(rd); 39 | } 40 | 41 | void VisionIpcServer::create_buffers(VisionStreamType type, size_t num_buffers, size_t width, size_t height){ 42 | // TODO: assert that this type is not created yet 43 | 
assert(num_buffers < VISIONIPC_MAX_FDS); 44 | 45 | size_t size = 0; 46 | size_t stride = 0; 47 | size_t uv_offset = 0; 48 | 49 | size = width * height * 3 / 2; 50 | stride = width; 51 | uv_offset = width * height; 52 | 53 | create_buffers_with_sizes(type, num_buffers, width, height, size, stride, uv_offset); 54 | } 55 | 56 | void VisionIpcServer::create_buffers_with_sizes(VisionStreamType type, size_t num_buffers, size_t width, size_t height, size_t size, size_t stride, size_t uv_offset) { 57 | // Create map + alloc requested buffers 58 | for (size_t i = 0; i < num_buffers; i++){ 59 | VisionBuf* buf = new VisionBuf(); 60 | buf->allocate(size); 61 | buf->idx = i; 62 | buf->type = type; 63 | 64 | if (device_id) buf->init_cl(device_id, ctx); 65 | 66 | buf->init_yuv(width, height, stride, uv_offset); 67 | 68 | buffers[type].push_back(buf); 69 | } 70 | 71 | cur_idx[type] = 0; 72 | 73 | // Create msgq publisher for each of the `name` + type combos 74 | // TODO: compute port number directly if using zmq 75 | sockets[type] = PubSocket::create(msg_ctx, get_endpoint_name(name, type), false); 76 | } 77 | 78 | 79 | void VisionIpcServer::start_listener(){ 80 | listener_thread = std::thread(&VisionIpcServer::listener, this); 81 | } 82 | 83 | 84 | void VisionIpcServer::listener(){ 85 | std::cout << "Starting listener for: " << name << std::endl; 86 | 87 | const std::string ipc_path = get_ipc_path(name); 88 | int sock = ipc_bind(ipc_path.c_str()); 89 | assert(sock >= 0); 90 | 91 | while (!should_exit){ 92 | // Wait for incoming connection 93 | struct pollfd polls[1] = {{0}}; 94 | polls[0].fd = sock; 95 | polls[0].events = POLLIN; 96 | 97 | int ret = poll(polls, 1, 100); 98 | if (ret < 0) { 99 | if (errno == EINTR || errno == EAGAIN) continue; 100 | std::cout << "poll failed, stopping listener" << std::endl; 101 | break; 102 | } 103 | 104 | if (should_exit) break; 105 | if (!polls[0].revents) { 106 | continue; 107 | } 108 | 109 | // Handle incoming request 110 | int fd = 
accept(sock, NULL, NULL); 111 | assert(fd >= 0); 112 | 113 | VisionStreamType type = VisionStreamType::VISION_STREAM_MAX; 114 | int r = ipc_sendrecv_with_fds(false, fd, &type, sizeof(type), nullptr, 0, nullptr); 115 | assert(r == sizeof(type)); 116 | 117 | // send available stream types 118 | if (type == VisionStreamType::VISION_STREAM_MAX) { 119 | std::vector available_stream_types; 120 | for (auto& [stream_type, _] : buffers) { 121 | available_stream_types.push_back(stream_type); 122 | } 123 | r = ipc_sendrecv_with_fds(true, fd, available_stream_types.data(), available_stream_types.size() * sizeof(VisionStreamType), nullptr, 0, nullptr); 124 | assert(r == available_stream_types.size() * sizeof(VisionStreamType)); 125 | close(fd); 126 | continue; 127 | } 128 | 129 | if (buffers.count(type) <= 0) { 130 | std::cout << "got request for invalid buffer type: " << type << std::endl; 131 | close(fd); 132 | continue; 133 | } 134 | 135 | int fds[VISIONIPC_MAX_FDS]; 136 | int num_fds = buffers[type].size(); 137 | VisionBuf bufs[VISIONIPC_MAX_FDS]; 138 | 139 | for (int i = 0; i < num_fds; i++){ 140 | fds[i] = buffers[type][i]->fd; 141 | bufs[i] = *buffers[type][i]; 142 | 143 | // Remove some private openCL/ion metadata 144 | bufs[i].buf_cl = 0; 145 | bufs[i].copy_q = 0; 146 | bufs[i].handle = 0; 147 | 148 | bufs[i].server_id = server_id; 149 | } 150 | 151 | r = ipc_sendrecv_with_fds(true, fd, &bufs, sizeof(VisionBuf) * num_fds, fds, num_fds, nullptr); 152 | 153 | close(fd); 154 | } 155 | 156 | std::cout << "Stopping listener for: " << name << std::endl; 157 | close(sock); 158 | unlink(ipc_path.c_str()); 159 | } 160 | 161 | 162 | 163 | VisionBuf * VisionIpcServer::get_buffer(VisionStreamType type, int idx){ 164 | // Do we want to keep track if the buffer has been sent out yet and warn user? 
165 | assert(buffers.count(type)); 166 | auto b = buffers[type]; 167 | if (idx < 0) { 168 | idx = cur_idx[type]++ % b.size(); 169 | } else { 170 | assert(idx < b.size() && idx >= 0); 171 | cur_idx[type] = idx; 172 | } 173 | return b[idx]; 174 | } 175 | 176 | void VisionIpcServer::send(VisionBuf * buf, VisionIpcBufExtra * extra, bool sync){ 177 | if (sync) { 178 | if (buf->sync(VISIONBUF_SYNC_FROM_DEVICE) != 0) { 179 | LOGE("Failed to sync buffer"); 180 | } 181 | } 182 | assert(buffers.count(buf->type)); 183 | assert(buf->idx < buffers[buf->type].size()); 184 | 185 | // Send over correct msgq socket 186 | VisionIpcPacket packet = {0}; 187 | packet.server_id = server_id; 188 | packet.idx = buf->idx; 189 | packet.extra = *extra; 190 | 191 | sockets[buf->type]->send((char*)&packet, sizeof(packet)); 192 | } 193 | 194 | VisionIpcServer::~VisionIpcServer(){ 195 | should_exit = true; 196 | listener_thread.join(); 197 | 198 | // VisionBuf cleanup 199 | for (auto const& [type, buf] : buffers) { 200 | for (VisionBuf* b : buf){ 201 | if (b->free() != 0) { 202 | LOGE("Failed to free buffer"); 203 | } 204 | delete b; 205 | } 206 | } 207 | 208 | // Messaging cleanup 209 | for (auto const& [type, sock] : sockets) { 210 | delete sock; 211 | } 212 | delete msg_ctx; 213 | } 214 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc_server.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "msgq/ipc.h" 9 | #include "msgq/visionipc/visionbuf.h" 10 | 11 | std::string get_endpoint_name(std::string name, VisionStreamType type); 12 | std::string get_ipc_path(const std::string &name); 13 | 14 | class VisionIpcServer { 15 | private: 16 | cl_device_id device_id = nullptr; 17 | cl_context ctx = nullptr; 18 | uint64_t server_id; 19 | 20 | std::atomic should_exit = false; 21 | std::string name; 22 | std::thread 
listener_thread; 23 | 24 | std::map > cur_idx; 25 | std::map > buffers; 26 | 27 | Context * msg_ctx; 28 | std::map sockets; 29 | 30 | void listener(void); 31 | 32 | public: 33 | VisionIpcServer(std::string name, cl_device_id device_id=nullptr, cl_context ctx=nullptr); 34 | ~VisionIpcServer(); 35 | 36 | VisionBuf * get_buffer(VisionStreamType type, int idx = -1); 37 | 38 | void create_buffers(VisionStreamType type, size_t num_buffers, size_t width, size_t height); 39 | void create_buffers_with_sizes(VisionStreamType type, size_t num_buffers, size_t width, size_t height, size_t size, size_t stride, size_t uv_offset); 40 | void send(VisionBuf * buf, VisionIpcBufExtra * extra, bool sync=true); 41 | void start_listener(); 42 | }; 43 | -------------------------------------------------------------------------------- /msgq/visionipc/visionipc_tests.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "catch2/catch.hpp" 5 | 6 | #include "msgq/visionipc/visionipc_server.h" 7 | #include "msgq/visionipc/visionipc_client.h" 8 | 9 | 10 | static void zmq_sleep(int milliseconds=1000){ 11 | if (messaging_use_zmq()){ 12 | std::this_thread::sleep_for(std::chrono::milliseconds(milliseconds)); 13 | } 14 | } 15 | 16 | TEST_CASE("Connecting"){ 17 | VisionIpcServer server("camerad"); 18 | server.create_buffers(VISION_STREAM_ROAD, 1, 100, 100); 19 | server.start_listener(); 20 | 21 | VisionIpcClient client = VisionIpcClient("camerad", VISION_STREAM_ROAD, false); 22 | REQUIRE(client.connect()); 23 | 24 | REQUIRE(client.connected); 25 | } 26 | 27 | TEST_CASE("getAvailableStreams"){ 28 | VisionIpcServer server("camerad"); 29 | server.create_buffers(VISION_STREAM_ROAD, 1, 100, 100); 30 | server.create_buffers(VISION_STREAM_WIDE_ROAD, 1, 100, 100); 31 | server.start_listener(); 32 | auto available_streams = VisionIpcClient::getAvailableStreams("camerad"); 33 | REQUIRE(available_streams.size() == 2); 34 | 
REQUIRE(available_streams.count(VISION_STREAM_ROAD) == 1); 35 | REQUIRE(available_streams.count(VISION_STREAM_WIDE_ROAD) == 1); 36 | } 37 | 38 | TEST_CASE("Check buffers"){ 39 | size_t width = 100, height = 200, num_buffers = 5; 40 | VisionIpcServer server("camerad"); 41 | server.create_buffers(VISION_STREAM_ROAD, num_buffers, width, height); 42 | server.start_listener(); 43 | 44 | VisionIpcClient client = VisionIpcClient("camerad", VISION_STREAM_ROAD, false); 45 | REQUIRE(client.connect()); 46 | 47 | REQUIRE(client.buffers[0].width == width); 48 | REQUIRE(client.buffers[0].height == height); 49 | REQUIRE(client.buffers[0].len); 50 | REQUIRE(client.num_buffers == num_buffers); 51 | } 52 | 53 | TEST_CASE("Send single buffer"){ 54 | VisionIpcServer server("camerad"); 55 | server.create_buffers(VISION_STREAM_ROAD, 1, 100, 100); 56 | server.start_listener(); 57 | 58 | VisionIpcClient client = VisionIpcClient("camerad", VISION_STREAM_ROAD, false); 59 | REQUIRE(client.connect()); 60 | zmq_sleep(); 61 | 62 | VisionBuf * buf = server.get_buffer(VISION_STREAM_ROAD); 63 | REQUIRE(buf != nullptr); 64 | 65 | *((uint64_t*)buf->addr) = 1234; 66 | 67 | VisionIpcBufExtra extra = {0}; 68 | extra.frame_id = 1337; 69 | buf->set_frame_id(extra.frame_id); 70 | 71 | server.send(buf, &extra); 72 | 73 | VisionIpcBufExtra extra_recv = {0}; 74 | VisionBuf * recv_buf = client.recv(&extra_recv); 75 | REQUIRE(recv_buf != nullptr); 76 | REQUIRE(*(uint64_t*)recv_buf->addr == 1234); 77 | REQUIRE(extra_recv.frame_id == extra.frame_id); 78 | REQUIRE(recv_buf->get_frame_id() == extra.frame_id); 79 | } 80 | 81 | 82 | TEST_CASE("Test no conflate"){ 83 | VisionIpcServer server("camerad"); 84 | server.create_buffers(VISION_STREAM_ROAD, 1, 100, 100); 85 | server.start_listener(); 86 | 87 | VisionIpcClient client = VisionIpcClient("camerad", VISION_STREAM_ROAD, false); 88 | REQUIRE(client.connect()); 89 | zmq_sleep(); 90 | 91 | VisionBuf * buf = server.get_buffer(VISION_STREAM_ROAD); 92 | REQUIRE(buf != 
nullptr); 93 | 94 | VisionIpcBufExtra extra = {0}; 95 | extra.frame_id = 1; 96 | server.send(buf, &extra); 97 | extra.frame_id = 2; 98 | server.send(buf, &extra); 99 | 100 | VisionIpcBufExtra extra_recv = {0}; 101 | VisionBuf * recv_buf = client.recv(&extra_recv); 102 | REQUIRE(recv_buf != nullptr); 103 | REQUIRE(extra_recv.frame_id == 1); 104 | 105 | recv_buf = client.recv(&extra_recv); 106 | REQUIRE(recv_buf != nullptr); 107 | REQUIRE(extra_recv.frame_id == 2); 108 | } 109 | 110 | TEST_CASE("Test conflate"){ 111 | VisionIpcServer server("camerad"); 112 | server.create_buffers(VISION_STREAM_ROAD, 1, 100, 100); 113 | server.start_listener(); 114 | 115 | VisionIpcClient client = VisionIpcClient("camerad", VISION_STREAM_ROAD, true); 116 | REQUIRE(client.connect()); 117 | zmq_sleep(); 118 | 119 | VisionBuf * buf = server.get_buffer(VISION_STREAM_ROAD); 120 | REQUIRE(buf != nullptr); 121 | 122 | VisionIpcBufExtra extra = {0}; 123 | extra.frame_id = 1; 124 | server.send(buf, &extra); 125 | extra.frame_id = 2; 126 | server.send(buf, &extra); 127 | 128 | VisionIpcBufExtra extra_recv = {0}; 129 | VisionBuf * recv_buf = client.recv(&extra_recv); 130 | REQUIRE(recv_buf != nullptr); 131 | REQUIRE(extra_recv.frame_id == 2); 132 | 133 | recv_buf = client.recv(&extra_recv); 134 | REQUIRE(recv_buf == nullptr); 135 | } 136 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # https://beta.ruff.rs/docs/configuration/#using-pyprojecttoml 2 | [tool.ruff] 3 | lint.select = ["E", "F", "W", "PIE", "C4", "ISC", "RUF100", "A"] 4 | lint.ignore = ["W292", "E741", "E402", "C408", "ISC003"] 5 | lint.flake8-implicit-str-concat.allow-multiline=false 6 | 7 | line-length = 160 8 | target-version="py311" 9 | 10 | [tool.ruff.lint.flake8-tidy-imports.banned-api] 11 | "pytest.main".msg = "pytest.main requires special handling that is easy to mess up!" 
12 | "unittest".msg = "Use pytest" 13 | 14 | [tool.mypy] 15 | # third-party packages 16 | ignore_missing_imports=true 17 | 18 | # helpful warnings 19 | warn_redundant_casts=true 20 | warn_unreachable=true 21 | warn_unused_ignores=true 22 | 23 | # restrict dynamic typing 24 | warn_return_any=true 25 | check_untyped_defs=true 26 | 27 | [tool.pytest.ini_options] 28 | addopts = "--durations=10" 29 | testpaths = [ 30 | "msgq/tests", 31 | "msgq/visionipc/tests", 32 | ] 33 | -------------------------------------------------------------------------------- /site_scons/site_tools/cython.py: -------------------------------------------------------------------------------- 1 | import re 2 | import SCons 3 | from SCons.Action import Action 4 | from SCons.Scanner import Scanner 5 | 6 | pyx_from_import_re = re.compile(r'^from\s+(\S+)\s+cimport', re.M) 7 | pyx_import_re = re.compile(r'^cimport\s+(\S+)', re.M) 8 | cdef_import_re = re.compile(r'^cdef extern from\s+.(\S+).:', re.M) 9 | 10 | 11 | def pyx_scan(node, env, path, arg=None): 12 | contents = node.get_text_contents() 13 | 14 | # from cimport ... 
15 | matches = pyx_from_import_re.findall(contents) 16 | # cimport 17 | matches += pyx_import_re.findall(contents) 18 | 19 | # Modules can be either .pxd or .pyx files 20 | files = [m.replace('.', '/') + '.pxd' for m in matches] 21 | files += [m.replace('.', '/') + '.pyx' for m in matches] 22 | 23 | # cdef extern from 24 | files += cdef_import_re.findall(contents) 25 | 26 | # Handle relative imports 27 | cur_dir = str(node.get_dir()) 28 | files = [cur_dir + f if f.startswith('/') else f for f in files] 29 | 30 | # Filter out non-existing files (probably system imports) 31 | files = [f for f in files if env.File(f).exists()] 32 | return env.File(files) 33 | 34 | 35 | pyxscanner = Scanner(function=pyx_scan, skeys=['.pyx', '.pxd'], recursive=True) 36 | cythonAction = Action("$CYTHONCOM") 37 | 38 | 39 | def create_builder(env): 40 | try: 41 | cython = env['BUILDERS']['Cython'] 42 | except KeyError: 43 | cython = SCons.Builder.Builder( 44 | action=cythonAction, 45 | emitter={}, 46 | suffix=cython_suffix_emitter, 47 | single_source=1 48 | ) 49 | env.Append(SCANNERS=pyxscanner) 50 | env['BUILDERS']['Cython'] = cython 51 | return cython 52 | 53 | def cython_suffix_emitter(env, source): 54 | return "$CYTHONCFILESUFFIX" 55 | 56 | def generate(env): 57 | env["CYTHON"] = "cythonize" 58 | env["CYTHONCOM"] = "$CYTHON $CYTHONFLAGS $SOURCE" 59 | env["CYTHONCFILESUFFIX"] = ".cpp" 60 | 61 | c_file, _ = SCons.Tool.createCFileBuilders(env) 62 | 63 | c_file.suffix['.pyx'] = cython_suffix_emitter 64 | c_file.add_action('.pyx', cythonAction) 65 | 66 | c_file.suffix['.py'] = cython_suffix_emitter 67 | c_file.add_action('.py', cythonAction) 68 | 69 | create_builder(env) 70 | 71 | def exists(env): 72 | return True 73 | --------------------------------------------------------------------------------