├── .clang-format ├── .githooks ├── pre-commit └── readme-template.md ├── .github └── workflows │ ├── ci-conan.yml │ ├── ci-coverage.yml │ ├── ci-emscripten.yml │ ├── ci-fedora.yml │ ├── ci-macos.yml │ ├── ci-opensuse.yml │ ├── ci-ubuntu.yml │ └── ci-windows.yml ├── .gitignore ├── .gitmodules ├── CMakeLists.txt ├── LICENSE ├── Makefile ├── README.md ├── examples ├── CMakeLists.txt ├── coro_condition_variable.cpp ├── coro_event.cpp ├── coro_generator.cpp ├── coro_http_200_ok_server.cpp ├── coro_io_scheduler.cpp ├── coro_latch.cpp ├── coro_mutex.cpp ├── coro_queue.cpp ├── coro_ring_buffer.cpp ├── coro_semaphore.cpp ├── coro_shared_mutex.cpp ├── coro_sync_wait.cpp ├── coro_task.cpp ├── coro_tcp_echo_server.cpp ├── coro_thread_pool.cpp ├── coro_when_all.cpp └── coro_when_any.cpp ├── include └── coro │ ├── attribute.hpp │ ├── concepts │ ├── awaitable.hpp │ ├── buffer.hpp │ ├── executor.hpp │ ├── promise.hpp │ └── range_of.hpp │ ├── condition_variable.hpp │ ├── coro.hpp │ ├── default_executor.hpp │ ├── detail │ ├── poll_info.hpp │ ├── task_self_deleting.hpp │ ├── tl_expected.hpp │ └── void_value.hpp │ ├── event.hpp │ ├── expected.hpp │ ├── fd.hpp │ ├── generator.hpp │ ├── io_scheduler.hpp │ ├── latch.hpp │ ├── mutex.hpp │ ├── net │ ├── connect.hpp │ ├── dns │ │ └── resolver.hpp │ ├── hostname.hpp │ ├── ip_address.hpp │ ├── recv_status.hpp │ ├── send_status.hpp │ ├── socket.hpp │ ├── tcp │ │ ├── client.hpp │ │ └── server.hpp │ ├── tls │ │ ├── client.hpp │ │ ├── connection_status.hpp │ │ ├── context.hpp │ │ ├── recv_status.hpp │ │ ├── send_status.hpp │ │ └── server.hpp │ └── udp │ │ └── peer.hpp │ ├── poll.hpp │ ├── queue.hpp │ ├── ring_buffer.hpp │ ├── semaphore.hpp │ ├── shared_mutex.hpp │ ├── sync_wait.hpp │ ├── task.hpp │ ├── thread_pool.hpp │ ├── time.hpp │ ├── when_all.hpp │ └── when_any.hpp ├── libcoro.pc.in ├── src ├── condition_variable.cpp ├── default_executor.cpp ├── detail │ └── task_self_deleting.cpp ├── event.cpp ├── io_scheduler.cpp ├── mutex.cpp ├── net │ 
├── connect.cpp │ ├── dns │ │ └── resolver.cpp │ ├── ip_address.cpp │ ├── recv_status.cpp │ ├── send_status.cpp │ ├── socket.cpp │ ├── tcp │ │ ├── client.cpp │ │ └── server.cpp │ ├── tls │ │ ├── client.cpp │ │ ├── connection_status.cpp │ │ ├── context.cpp │ │ ├── recv_status.cpp │ │ ├── send_status.cpp │ │ └── server.cpp │ └── udp │ │ └── peer.cpp ├── poll.cpp ├── semaphore.cpp ├── sync_wait.cpp └── thread_pool.cpp └── test ├── CMakeLists.txt ├── bench.cpp ├── catch_amalgamated.cpp ├── catch_amalgamated.hpp ├── catch_extensions.cpp ├── catch_extensions.hpp ├── main.cpp ├── net ├── test_dns_resolver.cpp ├── test_ip_address.cpp ├── test_tcp_server.cpp ├── test_tls_server.cpp └── test_udp_peers.cpp ├── test_condition_variable.cpp ├── test_event.cpp ├── test_generator.cpp ├── test_io_scheduler.cpp ├── test_latch.cpp ├── test_mutex.cpp ├── test_queue.cpp ├── test_ring_buffer.cpp ├── test_semaphore.cpp ├── test_shared_mutex.cpp ├── test_sync_wait.cpp ├── test_task.cpp ├── test_thread_pool.cpp ├── test_when_all.cpp └── test_when_any.cpp /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | AccessModifierOffset: -4 3 | AlignAfterOpenBracket: AlwaysBreak 4 | AlignConsecutiveMacros: 'true' 5 | AlignConsecutiveAssignments: 'true' 6 | AlignConsecutiveDeclarations: 'true' 7 | AlignEscapedNewlines: Right 8 | AlignOperands: 'true' 9 | AlignTrailingComments: 'true' 10 | AllowAllArgumentsOnNextLine: 'true' 11 | AllowAllConstructorInitializersOnNextLine: 'false' 12 | AllowAllParametersOfDeclarationOnNextLine: 'true' 13 | AllowShortBlocksOnASingleLine: 'true' 14 | AllowShortCaseLabelsOnASingleLine: 'false' 15 | AllowShortFunctionsOnASingleLine: InlineOnly 16 | AllowShortIfStatementsOnASingleLine: Never 17 | AllowShortLambdasOnASingleLine: All 18 | AllowShortLoopsOnASingleLine: 'false' 19 | AlwaysBreakAfterReturnType: None 20 | AlwaysBreakBeforeMultilineStrings: 'true' 21 | AlwaysBreakTemplateDeclarations: 'Yes' 22 | 
BinPackArguments: 'false' 23 | BinPackParameters: 'false' 24 | BreakAfterJavaFieldAnnotations: 'true' 25 | BreakBeforeBinaryOperators: None 26 | BreakBeforeBraces: Allman 27 | BreakBeforeTernaryOperators: 'true' 28 | BreakConstructorInitializers: BeforeColon 29 | BreakInheritanceList: BeforeColon 30 | BreakStringLiterals: 'false' 31 | ColumnLimit: '120' 32 | CompactNamespaces: 'false' 33 | ConstructorInitializerAllOnOneLineOrOnePerLine: 'true' 34 | ConstructorInitializerIndentWidth: '4' 35 | ContinuationIndentWidth: '4' 36 | Cpp11BracedListStyle: 'true' 37 | FixNamespaceComments: 'true' 38 | IncludeBlocks: Preserve 39 | IndentCaseLabels: 'true' 40 | IndentPPDirectives: BeforeHash 41 | IndentWidth: '4' 42 | IndentWrappedFunctionNames: 'true' 43 | KeepEmptyLinesAtTheStartOfBlocks: 'false' 44 | Language: Cpp 45 | MaxEmptyLinesToKeep: '1' 46 | NamespaceIndentation: None 47 | PointerAlignment: Left 48 | ReflowComments: 'true' 49 | SortIncludes: 'true' 50 | SortUsingDeclarations: 'true' 51 | SpaceAfterCStyleCast: 'false' 52 | SpaceAfterLogicalNot: 'false' 53 | SpaceAfterTemplateKeyword: 'false' 54 | SpaceBeforeAssignmentOperators: 'true' 55 | SpaceBeforeCpp11BracedList: 'false' 56 | SpaceBeforeCtorInitializerColon: 'true' 57 | SpaceBeforeInheritanceColon: 'true' 58 | SpaceBeforeParens: ControlStatements 59 | SpaceBeforeRangeBasedForLoopColon: 'true' 60 | SpaceInEmptyParentheses: 'false' 61 | SpacesInAngles: 'false' 62 | SpacesInCStyleCastParentheses: 'false' 63 | SpacesInContainerLiterals: 'false' 64 | SpacesInParentheses: 'false' 65 | SpacesInSquareBrackets: 'false' 66 | Standard: Cpp11 67 | UseTab: Never 68 | ... 69 | -------------------------------------------------------------------------------- /.githooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FILE_EXTS=".c .h .cpp .hpp .cc .hh .cxx .tcc" 4 | 5 | # Determines if a file has the right extension to be clang-format'ed. 
6 | should_clang_format() { 7 | local filename=$(basename "$1") 8 | local extension=".${filename##*.}" 9 | local ext 10 | 11 | local result=0 12 | 13 | # Ignore the test/catch*.hpp file 14 | if [[ "$1" != *"catch"* ]]; then 15 | for ext in $FILE_EXTS; do 16 | # Otherwise, if the extension is in the array of extensions to reformat, echo 1. 17 | [[ "$ext" == "$extension" ]] && result=1 && break 18 | done 19 | fi 20 | 21 | echo $result 22 | } 23 | 24 | # Run the clang-format across the project's changed files. 25 | for file in $(git diff-index --cached --name-only HEAD); do 26 | if [ -f "${file}" ] && [ "$(should_clang_format "${file}")" != "0" ] ; then 27 | echo "clang-format ${file}" 28 | clang-format -i --style=file "${file}" 29 | git add "${file}" 30 | fi 31 | done 32 | 33 | # Update the README.md example code with the given macros. 34 | # template_contents=$(cat '.githooks/readme-template.md') 35 | cp .githooks/readme-template.md README.md 36 | 37 | # Disable patsub_replacement 38 | # 39 | # If the patsub_replacement shell option is enabled using shopt, 40 | # any unquoted instances of ‘&’ in string are replaced with the matching portion of pattern 41 | shopt -u patsub_replacement 42 | 43 | template_contents=$(cat 'README.md') 44 | example_contents=$(cat 'examples/coro_task.cpp') 45 | echo "${template_contents/\$\{EXAMPLE_CORO_TASK_CPP\}/$example_contents}" > README.md 46 | 47 | template_contents=$(cat 'README.md') 48 | example_contents=$(cat 'examples/coro_generator.cpp') 49 | echo "${template_contents/\$\{EXAMPLE_CORO_GENERATOR_CPP\}/$example_contents}" > README.md 50 | 51 | template_contents=$(cat 'README.md') 52 | example_contents=$(cat 'examples/coro_event.cpp') 53 | echo "${template_contents/\$\{EXAMPLE_CORO_EVENT_CPP\}/$example_contents}" > README.md 54 | 55 | template_contents=$(cat 'README.md') 56 | example_contents=$(cat 'examples/coro_latch.cpp') 57 | echo "${template_contents/\$\{EXAMPLE_CORO_LATCH_CPP\}/$example_contents}" > README.md 58 | 59 | 
template_contents=$(cat 'README.md') 60 | example_contents=$(cat 'examples/coro_mutex.cpp') 61 | echo "${template_contents/\$\{EXAMPLE_CORO_MUTEX_CPP\}/$example_contents}" > README.md 62 | 63 | template_contents=$(cat 'README.md') 64 | example_contents=$(cat 'examples/coro_thread_pool.cpp') 65 | echo "${template_contents/\$\{EXAMPLE_CORO_THREAD_POOL_CPP\}/$example_contents}" > README.md 66 | 67 | template_contents=$(cat 'README.md') 68 | example_contents=$(cat 'examples/coro_io_scheduler.cpp') 69 | echo "${template_contents/\$\{EXAMPLE_CORO_IO_SCHEDULER_CPP\}/$example_contents}" > README.md 70 | 71 | template_contents=$(cat 'README.md') 72 | example_contents=$(cat 'examples/coro_semaphore.cpp') 73 | echo "${template_contents/\$\{EXAMPLE_CORO_SEMAPHORE_CPP\}/$example_contents}" > README.md 74 | 75 | template_contents=$(cat 'README.md') 76 | example_contents=$(cat 'examples/coro_ring_buffer.cpp') 77 | echo "${template_contents/\$\{EXAMPLE_CORO_RING_BUFFER_CPP\}/$example_contents}" > README.md 78 | 79 | template_contents=$(cat 'README.md') 80 | example_contents=$(cat 'examples/coro_queue.cpp') 81 | echo "${template_contents/\$\{EXAMPLE_CORO_QUEUE_CPP\}/$example_contents}" > README.md 82 | 83 | template_contents=$(cat 'README.md') 84 | example_contents=$(cat 'examples/coro_condition_variable.cpp') 85 | echo "${template_contents/\$\{EXAMPLE_CORO_CONDITION_VARIABLE_CPP\}/$example_contents}" > README.md 86 | 87 | template_contents=$(cat 'README.md') 88 | example_contents=$(cat 'examples/coro_shared_mutex.cpp') 89 | echo "${template_contents/\$\{EXAMPLE_CORO_SHARED_MUTEX_CPP\}/$example_contents}" > README.md 90 | 91 | template_contents=$(cat 'README.md') 92 | example_contents=$(cat 'examples/coro_sync_wait.cpp') 93 | echo "${template_contents/\$\{EXAMPLE_CORO_SYNC_WAIT\}/$example_contents}" > README.md 94 | 95 | template_contents=$(cat 'README.md') 96 | example_contents=$(cat 'examples/coro_when_all.cpp') 97 | echo 
"${template_contents/\$\{EXAMPLE_CORO_WHEN_ALL\}/$example_contents}" > README.md 98 | 99 | template_contents=$(cat 'README.md') 100 | example_contents=$(cat 'examples/coro_when_any.cpp') 101 | echo "${template_contents/\$\{EXAMPLE_CORO_WHEN_ANY\}/$example_contents}" > README.md 102 | 103 | git add README.md 104 | -------------------------------------------------------------------------------- /.github/workflows/ci-conan.yml: -------------------------------------------------------------------------------- 1 | name: ci-conan 2 | 3 | on: [pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | ci-conan-gplusplus: 7 | name: ci-conan-g++-${{ matrix.gplusplus_version }}-shared-${{ matrix.shared }}-build-type-${{ matrix.build_type }} 8 | runs-on: ubuntu-latest 9 | strategy: 10 | matrix: 11 | gplusplus_version: [11] 12 | shared: ["False", "True"] 13 | build_type: ["Release", "Debug"] 14 | container: 15 | image: ubuntu:24.04 16 | env: 17 | TZ: America/New_York 18 | DEBIAN_FRONTEND: noninteractive 19 | steps: 20 | - name: Install System Dependencies 21 | run: | 22 | apt-get clean 23 | apt-get update 24 | apt install -y --no-install-recommends \ 25 | build-essential \ 26 | software-properties-common 27 | add-apt-repository ppa:ubuntu-toolchain-r/test 28 | apt-get install -y --no-install-recommends \ 29 | cmake \ 30 | git \ 31 | ninja-build \ 32 | g++-${{ matrix.gplusplus_version }} \ 33 | wget 34 | wget -q -O /tmp/conan.tar.gz https://github.com/conan-io/conan/releases/download/2.0.17/conan-linux-64.tar.gz 35 | tar -xvf /tmp/conan.tar.gz -C /usr/bin 36 | - name: Detect Conan profile 37 | run: | 38 | update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${{ matrix.gplusplus_version }} 100 39 | update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-${{ matrix.gplusplus_version }} 100 40 | update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${{ matrix.gplusplus_version }} 100 41 | update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${{ 
matrix.gplusplus_version }} 100 42 | conan profile detect 43 | - name: Checkout 44 | uses: actions/checkout@v4 45 | - name: Install Conan Dependencies 46 | run: | 47 | conan install -r conancenter \ 48 | --requires=openssl/3.2.0 \ 49 | --requires=c-ares/1.22.1 \ 50 | --requires=tl-expected/1.1.0 \ 51 | -g CMakeToolchain \ 52 | -g CMakeDeps \ 53 | -of "${GITHUB_WORKSPACE}/build/conan" \ 54 | --build=missing \ 55 | -s build_type=${{ matrix.build_type }} \ 56 | -s compiler.cppstd=20 \ 57 | -o "*/*:shared=${{ matrix.shared }}" 58 | - name: Build 59 | run: | 60 | cmake -S . -B "${GITHUB_WORKSPACE}/build" \ 61 | -GNinja \ 62 | -DCMAKE_CXX_STANDARD=20 \ 63 | -DCMAKE_TOOLCHAIN_FILE=build/conan/conan_toolchain.cmake \ 64 | -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \ 65 | -DLIBCORO_EXTERNAL_DEPENDENCIES=ON \ 66 | -DLIBCORO_FEATURE_NETWORKING=ON \ 67 | -DLIBCORO_FEATURE_TLS=ON \ 68 | -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.shared }} 69 | cmake --build "${GITHUB_WORKSPACE}/build" 70 | - name: Test 71 | run: | 72 | cd build 73 | ctest -VV 74 | -------------------------------------------------------------------------------- /.github/workflows/ci-coverage.yml: -------------------------------------------------------------------------------- 1 | name: ci-coverage 2 | 3 | on: [pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | ci-coverage: 7 | name: ci-coverage 8 | runs-on: ubuntu-latest 9 | container: 10 | image: ubuntu:22.04 11 | env: 12 | TZ: America/New_York 13 | DEBIAN_FRONTEND: noninteractive 14 | steps: 15 | - name: Install Dependencies 16 | run: | 17 | apt-get update 18 | apt-get -y upgrade 19 | apt install -y build-essential software-properties-common 20 | add-apt-repository ppa:ubuntu-toolchain-r/test 21 | apt-get install -y \ 22 | cmake \ 23 | curl \ 24 | git \ 25 | ninja-build \ 26 | g++ \ 27 | libssl-dev \ 28 | lcov 29 | - name: Checkout 30 | uses: actions/checkout@v4 31 | with: 32 | submodules: recursive 33 | - name: Build 34 | run: | 35 | mkdir Debug 36 | cd Debug 37 
| cmake \ 38 | -GNinja \ 39 | -DCMAKE_BUILD_TYPE=Debug \ 40 | -DCMAKE_C_COMPILER=gcc \ 41 | -DCMAKE_CXX_COMPILER=g++ \ 42 | -DLIBCORO_FEATURE_NETWORKING=ON \ 43 | -DLIBCORO_FEATURE_TLS=ON \ 44 | -DLIBCORO_CODE_COVERAGE=ON \ 45 | .. 46 | cmake --build . --config Debug 47 | - name: Coverage 48 | run: | 49 | cd Debug 50 | ctest --build-config Debug -VV 51 | gcov -o ./test/CMakeFiles/libcoro_test.dir/main.cpp.o ./test/libcoro_test 52 | lcov --include "*/include/coro/*" --include "*/src/*" --exclude "test/*" -o libcoro_tests.lcov -c -d . 53 | - name: Coveralls GitHub Action 54 | uses: coverallsapp/github-action@v2 55 | with: 56 | file: Debug/libcoro_tests.lcov 57 | format: lcov 58 | -------------------------------------------------------------------------------- /.github/workflows/ci-emscripten.yml: -------------------------------------------------------------------------------- 1 | name: ci-emscripten 2 | 3 | on: [pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | ci-emscripten-3_1_45: 7 | name: emscripten-3_1_45 8 | runs-on: ubuntu-latest 9 | container: 10 | image: ubuntu:22.04 11 | env: 12 | TZ: America/New_York 13 | DEBIAN_FRONTEND: noninteractive 14 | steps: 15 | - name: Install Dependencies 16 | run: | 17 | apt-get update 18 | apt-get -y upgrade 19 | apt install -y build-essential software-properties-common 20 | add-apt-repository ppa:ubuntu-toolchain-r/test 21 | apt-get install -y \ 22 | cmake \ 23 | git \ 24 | ninja-build \ 25 | nodejs 26 | - name: Checkout 27 | uses: actions/checkout@v4 28 | with: 29 | submodules: recursive 30 | # Must be done after checkout since it requires the directory structure to be in place. 31 | - name: Install emsdk 32 | run: | 33 | git clone https://github.com/emscripten-core/emsdk.git 34 | cd emsdk 35 | ./emsdk install 3.1.45 36 | ./emsdk activate 3.1.45 37 | - name: Build 38 | run: | 39 | cd emsdk 40 | . ./emsdk_env.sh 41 | cd .. 
42 | mkdir Release 43 | cd Release 44 | emcmake cmake \ 45 | -GNinja \ 46 | -DCMAKE_BUILD_TYPE=Release \ 47 | .. 48 | cmake --build . --config Release 49 | - name: Test 50 | run: | 51 | cd emsdk 52 | . ./emsdk_env.sh 53 | cd ../Release 54 | node --experimental-wasm-threads --experimental-wasm-bulk-memory ./test/libcoro_test.js 55 | -------------------------------------------------------------------------------- /.github/workflows/ci-fedora.yml: -------------------------------------------------------------------------------- 1 | name: ci-fedora 2 | 3 | on: [pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | ci-fedora-gplusplus: 7 | name: fedora-${{ matrix.fedora_version }} 8 | runs-on: ubuntu-latest 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | fedora_version: [37, 38, 39, 40] 13 | cxx_standard: [20, 23] 14 | libcoro_feature_networking: [ {enabled: ON, tls: ON} ] 15 | libcoro_build_shared_libs: [OFF] 16 | container: 17 | image: fedora:${{ matrix.fedora_version }} 18 | steps: 19 | - name: Install Dependencies 20 | run: | 21 | sudo dnf install -y \ 22 | cmake \ 23 | git \ 24 | ninja-build \ 25 | gcc-c++ \ 26 | openssl \ 27 | openssl-devel 28 | - name: Checkout 29 | uses: actions/checkout@v4 30 | with: 31 | submodules: recursive 32 | - name: Release 33 | run: | 34 | mkdir Release 35 | cd Release 36 | cmake \ 37 | -GNinja \ 38 | -DCMAKE_BUILD_TYPE=Release \ 39 | -DCMAKE_C_COMPILER=gcc \ 40 | -DCMAKE_CXX_COMPILER=g++ \ 41 | -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ 42 | -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ 43 | -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ 44 | -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \ 45 | .. 46 | cmake --build . 
--config Release 47 | - name: Test 48 | run: | 49 | cd Release 50 | ctest --build-config Release -VV 51 | -------------------------------------------------------------------------------- /.github/workflows/ci-macos.yml: -------------------------------------------------------------------------------- 1 | name: ci-macos 2 | 3 | on: [pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | macos: 7 | name: macos-15 8 | runs-on: macos-15 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | clang_version: [20] 13 | cxx_standard: [20, 23] 14 | libcoro_feature_networking: [{ enabled: OFF, tls: OFF }] 15 | libcoro_build_shared_libs: [OFF, ON] 16 | steps: 17 | - name: Install Dependencies 18 | run: | 19 | brew update 20 | brew install llvm@${{ matrix.clang_version }} 21 | brew install ninja 22 | - name: Checkout 23 | uses: actions/checkout@v4 24 | with: 25 | submodules: recursive 26 | - name: Release 27 | run: | 28 | brew --prefix llvm@${{ matrix.clang_version }} 29 | ls $(brew --prefix llvm@${{ matrix.clang_version }})/bin 30 | mkdir Release 31 | cd Release 32 | cmake \ 33 | -GNinja \ 34 | -DCMAKE_BUILD_TYPE=Release \ 35 | -DCMAKE_C_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \ 36 | -DCMAKE_CXX_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \ 37 | -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ 38 | -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ 39 | -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ 40 | -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \ 41 | .. 42 | cmake --build . 
--config Release 43 | - name: Test 44 | run: | 45 | cd Release 46 | ctest --build-config Release -VV 47 | -------------------------------------------------------------------------------- /.github/workflows/ci-opensuse.yml: -------------------------------------------------------------------------------- 1 | name: ci-opensuse 2 | 3 | on: [pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | build-opensuse-15: 7 | name: opensuse-15-g++ 8 | runs-on: ubuntu-latest 9 | strategy: 10 | matrix: 11 | gplusplus_version: [10] 12 | cxx_standard: [20] 13 | libcoro_feature_networking: [ {enabled: ON, tls: ON} ] 14 | container: 15 | image: opensuse/leap:15.6 16 | steps: 17 | - name: zypper 18 | run: | 19 | zypper install -y \ 20 | cmake \ 21 | git \ 22 | ninja \ 23 | gcc${{ matrix.gplusplus_version }} \ 24 | gcc${{ matrix.gplusplus_version }}-c++ \ 25 | openssl \ 26 | openssl-devel 27 | # Cannot run higher version of checkout, node isn't backwards compatible 28 | - name: Checkout 29 | uses: actions/checkout@v2 30 | with: 31 | submodules: recursive 32 | - name: Build 33 | run: | 34 | mkdir Release 35 | cd Release 36 | cmake \ 37 | -GNinja \ 38 | -DCMAKE_BUILD_TYPE=Release \ 39 | -DCMAKE_C_COMPILER=gcc-${{ matrix.gplusplus_version }} \ 40 | -DCMAKE_CXX_COMPILER=g++-${{ matrix.gplusplus_version }} \ 41 | -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ 42 | -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ 43 | -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ 44 | .. 45 | cmake --build . 
--config Release 46 | - name: Test 47 | run: | 48 | cd Release 49 | ctest --build-config Release -VV 50 | 51 | -------------------------------------------------------------------------------- /.github/workflows/ci-windows.yml: -------------------------------------------------------------------------------- 1 | name: ci-windows 2 | 3 | on: [pull_request, workflow_dispatch] 4 | 5 | jobs: 6 | ci-windows-2022: 7 | name: windows-2022 8 | runs-on: windows-latest 9 | strategy: 10 | matrix: 11 | cxx_standard: [20, 23] 12 | libcoro_build_shared_libs: [OFF, ON] 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | with: 17 | submodules: recursive 18 | - name: Build 19 | run: | 20 | mkdir Release 21 | cd Release 22 | cmake -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} .. 23 | cmake --build . --config Release 24 | - name: Test 25 | run: | 26 | cd Release 27 | ctest --build-config Release -VV 28 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Compiled Object files 5 | *.slo 6 | *.lo 7 | *.o 8 | *.obj 9 | 10 | # Precompiled Headers 11 | *.gch 12 | *.pch 13 | 14 | # Compiled Dynamic libraries 15 | *.so 16 | *.dylib 17 | *.dll 18 | 19 | # Fortran module files 20 | *.mod 21 | *.smod 22 | 23 | # Compiled Static libraries 24 | *.lai 25 | *.la 26 | *.a 27 | *.lib 28 | 29 | # Executables 30 | *.exe 31 | *.out 32 | *.app 33 | 34 | /build/ 35 | /Debug/ 36 | /RelWithDebInfo/ 37 | /Release/ 38 | /Testing/ 39 | 40 | /.vscode/ 41 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "vendor/c-ares/c-ares"] 2 | path = vendor/c-ares/c-ares 3 | url = https://github.com/c-ares/c-ares.git 4 | 
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Internal target for all build targets to call. 2 | # Cleans all build types. 3 | .PHONY: clean 4 | clean: 5 | rm -rf Debug 6 | rm -rf RelWithDebInfo 7 | rm -rf Release 8 | 9 | # Runs clang-format with the project's .clang-format. 10 | .PHONY: format 11 | format: 12 | # Include *.hpp|*.h|*.cpp but ignore catch lib as well as RelWithDebInfo|Release|Debug|build 13 | find . \( -name '*.hpp' -or -name '*.h' -or -name '*.cpp' \) \ 14 | -and -not -name '*catch*' \ 15 | -and -not -iwholename '*/RelWithDebInfo/*' \ 16 | -and -not -iwholename '*/Release/*' \ 17 | -and -not -iwholename '*/Debug/*' \ 18 | -and -not -iwholename '*/build/*' \ 19 | -and -not -iwholename '*/vendor/*' \ 20 | -exec clang-format -i --style=file {} \; 21 | -------------------------------------------------------------------------------- /examples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.12) 2 | project(libcoro_examples) 3 | 4 | if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU") 5 | set(LIBCORO_EXAMPLE_OPTIONS -fcoroutines -Wall -Wextra -pipe) 6 | elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") 7 | set(LIBCORO_EXAMPLE_OPTIONS -Wall -Wextra -pipe) 8 | elseif(MSVC) 9 | set(LIBCORO_EXAMPLE_OPTIONS /W4) 10 | else() 11 | message(FATAL_ERROR "Unsupported compiler.") 12 | endif() 13 | 14 | add_executable(coro_task coro_task.cpp) 15 | target_link_libraries(coro_task PUBLIC libcoro) 16 | target_compile_options(coro_task PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 17 | 18 | add_executable(coro_generator coro_generator.cpp) 19 | target_link_libraries(coro_generator PUBLIC libcoro) 20 | target_compile_options(coro_generator PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 21 | 22 | add_executable(coro_event coro_event.cpp) 23 | target_link_libraries(coro_event PUBLIC libcoro) 
24 | target_compile_options(coro_event PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 25 | 26 | if(LIBCORO_FEATURE_NETWORKING) 27 | add_executable(coro_latch coro_latch.cpp) 28 | target_link_libraries(coro_latch PUBLIC libcoro) 29 | target_compile_options(coro_latch PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 30 | endif() 31 | 32 | add_executable(coro_mutex coro_mutex.cpp) 33 | target_link_libraries(coro_mutex PUBLIC libcoro) 34 | target_compile_options(coro_mutex PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 35 | 36 | add_executable(coro_thread_pool coro_thread_pool.cpp) 37 | target_link_libraries(coro_thread_pool PUBLIC libcoro) 38 | target_compile_options(coro_thread_pool PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 39 | 40 | add_executable(coro_semaphore coro_semaphore.cpp) 41 | target_link_libraries(coro_semaphore PUBLIC libcoro) 42 | target_compile_options(coro_semaphore PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 43 | 44 | add_executable(coro_ring_buffer coro_ring_buffer.cpp) 45 | target_link_libraries(coro_ring_buffer PUBLIC libcoro) 46 | target_compile_options(coro_ring_buffer PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 47 | 48 | add_executable(coro_shared_mutex coro_shared_mutex.cpp) 49 | target_link_libraries(coro_shared_mutex PUBLIC libcoro) 50 | target_compile_options(coro_shared_mutex PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 51 | 52 | add_executable(coro_queue coro_queue.cpp) 53 | target_link_libraries(coro_queue PUBLIC libcoro) 54 | target_compile_options(coro_queue PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 55 | 56 | add_executable(coro_sync_wait coro_sync_wait.cpp) 57 | target_link_libraries(coro_sync_wait PUBLIC libcoro) 58 | target_compile_options(coro_sync_wait PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 59 | 60 | add_executable(coro_when_all coro_when_all.cpp) 61 | target_link_libraries(coro_when_all PUBLIC libcoro) 62 | target_compile_options(coro_when_all PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 63 | 64 | if(LIBCORO_FEATURE_NETWORKING) 65 | add_executable(coro_condition_variable coro_condition_variable.cpp) 66 | 
target_link_libraries(coro_condition_variable PUBLIC libcoro) 67 | target_compile_options(coro_condition_variable PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 68 | 69 | add_executable(coro_io_scheduler coro_io_scheduler.cpp) 70 | target_link_libraries(coro_io_scheduler PUBLIC libcoro) 71 | target_compile_options(coro_io_scheduler PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 72 | 73 | add_executable(coro_tcp_echo_server coro_tcp_echo_server.cpp) 74 | target_link_libraries(coro_tcp_echo_server PUBLIC libcoro) 75 | target_compile_options(coro_tcp_echo_server PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 76 | 77 | add_executable(coro_http_200_ok_server coro_http_200_ok_server.cpp) 78 | target_link_libraries(coro_http_200_ok_server PUBLIC libcoro) 79 | target_compile_options(coro_http_200_ok_server PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 80 | 81 | if(NOT EMSCRIPTEN) 82 | add_executable(coro_when_any coro_when_any.cpp) 83 | target_link_libraries(coro_when_any PUBLIC libcoro) 84 | target_compile_options(coro_when_any PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) 85 | endif() 86 | endif() 87 | -------------------------------------------------------------------------------- /examples/coro_condition_variable.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | auto scheduler = coro::io_scheduler::make_shared(); 7 | coro::condition_variable cv{}; 8 | coro::mutex m{}; 9 | std::atomic condition{0}; 10 | std::stop_source ss{}; 11 | 12 | auto make_waiter_task = [](std::shared_ptr scheduler, coro::condition_variable& cv, coro::mutex& m, std::stop_source& ss, std::atomic& condition, int64_t id) -> coro::task 13 | { 14 | co_await scheduler->schedule(); 15 | while (true) 16 | { 17 | // Consume the condition until a stop is requested. 
18 | auto lock = co_await m.scoped_lock(); 19 | auto ready = co_await cv.wait(lock, ss.get_token(), [&id, &condition]() -> bool 20 | { 21 | std::cerr << id << " predicate condition = " << condition << "\n"; 22 | return condition > 0; 23 | }); 24 | std::cerr << id << " waiter condition = " << condition << "\n"; 25 | 26 | if (ready) 27 | { 28 | // Handle event. 29 | 30 | // It is worth noting that because condition variables must hold the lock to wake up they are naturally serialized. 31 | // It is wise once the condition data is acquired the lock should be released and then spawn off any work into another task via the scheduler. 32 | condition--; 33 | lock.unlock(); 34 | // We'll yield for a bit to mimic work. 35 | co_await scheduler->yield_for(std::chrono::milliseconds{10}); 36 | } 37 | 38 | // Was this wake-up due to being stopped? 39 | if (ss.stop_requested()) 40 | { 41 | std::cerr << id << " ss.stop_requsted() co_return\n"; 42 | co_return; 43 | } 44 | } 45 | }; 46 | 47 | auto make_notifier_task = [](std::shared_ptr scheduler, coro::condition_variable& cv, coro::mutex& m, std::stop_source& ss, std::atomic& condition) -> coro::task 48 | { 49 | // To make this example more deterministic the notifier will wait between each notify event to showcase 50 | // how exactly the condition variable will behave with the condition in certain states and the notify_one or notify_all. 51 | co_await scheduler->schedule_after(std::chrono::milliseconds{50}); 52 | 53 | std::cerr << "cv.notify_one() condition = 0\n"; 54 | co_await cv.notify_one(); // Predicate will fail condition == 0. 55 | { 56 | // To guarantee the condition is 'updated' in the predicate it must be done behind the lock. 57 | auto lock = co_await m.scoped_lock(); 58 | condition++; 59 | } 60 | // Notifying does not need to hold the lock. 61 | std::cerr << "cv.notify_one() condition = 1\n"; 62 | co_await cv.notify_one(); // Predicate will pass condition == 1. 
63 | 64 | co_await scheduler->schedule_after(std::chrono::milliseconds{50}); 65 | { 66 | auto lock = co_await m.scoped_lock(); 67 | condition += 2; 68 | } 69 | std::cerr << "cv.notify_all() condition = 2\n"; 70 | co_await cv.notify_all(); // Predicates will pass condition == 2 then condition == 1. 71 | 72 | co_await scheduler->schedule_after(std::chrono::milliseconds{50}); 73 | { 74 | auto lock = co_await m.scoped_lock(); 75 | condition++; 76 | } 77 | std::cerr << "cv.notify_all() condition = 1\n"; 78 | co_await cv.notify_all(); // Predicates will pass condition == 1 then Predicate will not pass condition == 0. 79 | 80 | co_await scheduler->schedule_after(std::chrono::milliseconds{50}); 81 | { 82 | auto lock = co_await m.scoped_lock(); 83 | } 84 | std::cerr << "ss.request_stop()\n"; 85 | // To stop set the stop source to stop and then notify all waiters. 86 | ss.request_stop(); 87 | co_await cv.notify_all(); 88 | co_return; 89 | }; 90 | 91 | coro::sync_wait( 92 | coro::when_all( 93 | make_waiter_task(scheduler, cv, m, ss, condition, 0), 94 | make_waiter_task(scheduler, cv, m, ss, condition, 1), 95 | make_notifier_task(scheduler, cv, m, ss, condition))); 96 | 97 | return 0; 98 | } 99 | -------------------------------------------------------------------------------- /examples/coro_event.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | coro::event e; 7 | 8 | // These tasks will wait until the given event has been set before advancing. 9 | auto make_wait_task = [](const coro::event& e, uint64_t i) -> coro::task 10 | { 11 | std::cout << "task " << i << " is waiting on the event...\n"; 12 | co_await e; 13 | std::cout << "task " << i << " event triggered, now resuming.\n"; 14 | co_return; 15 | }; 16 | 17 | // This task will trigger the event allowing all waiting tasks to proceed. 
18 | auto make_set_task = [](coro::event& e) -> coro::task 19 | { 20 | std::cout << "set task is triggering the event\n"; 21 | e.set(); 22 | co_return; 23 | }; 24 | 25 | // Given more than a single task to synchronously wait on, use when_all() to execute all the 26 | // tasks concurrently on this thread and then sync_wait() for them all to complete. 27 | coro::sync_wait(coro::when_all(make_wait_task(e, 1), make_wait_task(e, 2), make_wait_task(e, 3), make_set_task(e))); 28 | } 29 | -------------------------------------------------------------------------------- /examples/coro_generator.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | auto task = [](uint64_t count_to) -> coro::task 7 | { 8 | // Create a generator function that will yield and incrementing 9 | // number each time its called. 10 | auto gen = []() -> coro::generator 11 | { 12 | uint64_t i = 0; 13 | while (true) 14 | { 15 | co_yield i; 16 | ++i; 17 | } 18 | }; 19 | 20 | // Generate the next number until its greater than count to. 21 | for (auto val : gen()) 22 | { 23 | std::cout << val << ", "; 24 | 25 | if (val >= count_to) 26 | { 27 | break; 28 | } 29 | } 30 | co_return; 31 | }; 32 | 33 | coro::sync_wait(task(100)); 34 | } 35 | -------------------------------------------------------------------------------- /examples/coro_http_200_ok_server.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | auto main() -> int 4 | { 5 | auto make_http_200_ok_server = [](std::shared_ptr scheduler) -> coro::task 6 | { 7 | auto make_on_connection_task = [](coro::net::tcp::client client) -> coro::task 8 | { 9 | std::string response = 10 | R"(HTTP/1.1 200 OK 11 | Content-Length: 0 12 | Connection: keep-alive 13 | 14 | )"; 15 | std::string buf(1024, '\0'); 16 | 17 | while (true) 18 | { 19 | // Wait for data to be available to read. 
20 | co_await client.poll(coro::poll_op::read); 21 | auto [rstatus, rspan] = client.recv(buf); 22 | switch (rstatus) 23 | { 24 | case coro::net::recv_status::ok: 25 | // Make sure the client socket can be written to. 26 | co_await client.poll(coro::poll_op::write); 27 | client.send(std::span{response}); 28 | break; 29 | case coro::net::recv_status::would_block: 30 | break; 31 | case coro::net::recv_status::closed: 32 | default: 33 | co_return; 34 | } 35 | } 36 | }; 37 | 38 | co_await scheduler->schedule(); 39 | coro::net::tcp::server server{scheduler, coro::net::tcp::server::options{.port = 8888}}; 40 | 41 | while (true) 42 | { 43 | // Wait for a new connection. 44 | auto pstatus = co_await server.poll(); 45 | switch (pstatus) 46 | { 47 | case coro::poll_status::event: 48 | { 49 | auto client = server.accept(); 50 | if (client.socket().is_valid()) 51 | { 52 | scheduler->spawn(make_on_connection_task(std::move(client))); 53 | } // else report error or something if the socket was invalid or could not be accepted. 
54 | } 55 | break; 56 | case coro::poll_status::error: 57 | case coro::poll_status::closed: 58 | case coro::poll_status::timeout: 59 | default: 60 | co_return; 61 | } 62 | } 63 | 64 | co_return; 65 | }; 66 | 67 | std::vector> workers{}; 68 | for (size_t i = 0; i < std::thread::hardware_concurrency(); ++i) 69 | { 70 | auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ 71 | .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); 72 | 73 | workers.push_back(make_http_200_ok_server(scheduler)); 74 | } 75 | 76 | coro::sync_wait(coro::when_all(std::move(workers))); 77 | } 78 | -------------------------------------------------------------------------------- /examples/coro_latch.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | // Complete worker tasks faster on a thread pool, using the io_scheduler version so the worker 7 | // tasks can yield for a specific amount of time to mimic difficult work. The pool is only 8 | // setup with a single thread to showcase yield_for(). 9 | auto tp = coro::io_scheduler::make_shared( 10 | coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); 11 | 12 | // This task will wait until the given latch setters have completed. 13 | auto make_latch_task = [](coro::latch& l) -> coro::task 14 | { 15 | // It seems like the dependent worker tasks could be created here, but in that case it would 16 | // be superior to simply do: `co_await coro::when_all(tasks);` 17 | // It is also important to note that the last dependent task will resume the waiting latch 18 | // task prior to actually completing -- thus the dependent task's frame could be destroyed 19 | // by the latch task completing before it gets a chance to finish after calling resume() on 20 | // the latch task! 
21 | 22 | std::cout << "latch task is now waiting on all children tasks...\n"; 23 | co_await l; 24 | std::cout << "latch task dependency tasks completed, resuming.\n"; 25 | co_return; 26 | }; 27 | 28 | // This task does 'work' and counts down on the latch when completed. The final child task to 29 | // complete will end up resuming the latch task when the latch's count reaches zero. 30 | auto make_worker_task = [](std::shared_ptr tp, coro::latch& l, int64_t i) -> coro::task 31 | { 32 | // Schedule the worker task onto the thread pool. 33 | co_await tp->schedule(); 34 | std::cout << "worker task " << i << " is working...\n"; 35 | // Do some expensive calculations, yield to mimic work...! Its also important to never use 36 | // std::this_thread::sleep_for() within the context of coroutines, it will block the thread 37 | // and other tasks that are ready to execute will be blocked. 38 | co_await tp->yield_for(std::chrono::milliseconds{i * 20}); 39 | std::cout << "worker task " << i << " is done, counting down on the latch\n"; 40 | l.count_down(); 41 | co_return; 42 | }; 43 | 44 | const int64_t num_tasks{5}; 45 | coro::latch l{num_tasks}; 46 | std::vector> tasks{}; 47 | 48 | // Make the latch task first so it correctly waits for all worker tasks to count down. 49 | tasks.emplace_back(make_latch_task(l)); 50 | for (int64_t i = 1; i <= num_tasks; ++i) 51 | { 52 | tasks.emplace_back(make_worker_task(tp, l, i)); 53 | } 54 | 55 | // Wait for all tasks to complete. 
56 | coro::sync_wait(coro::when_all(std::move(tasks))); 57 | } 58 | -------------------------------------------------------------------------------- /examples/coro_mutex.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}}; 7 | std::vector output{}; 8 | coro::mutex mutex; 9 | 10 | auto make_critical_section_task = 11 | [](coro::thread_pool& tp, coro::mutex& mutex, std::vector& output, uint64_t i) -> coro::task 12 | { 13 | co_await tp.schedule(); 14 | // To acquire a mutex lock co_await its lock() function. Upon acquiring the lock the 15 | // lock() function returns a coro::scoped_lock that holds the mutex and automatically 16 | // unlocks the mutex upon destruction. This behaves just like std::scoped_lock. 17 | { 18 | auto scoped_lock = co_await mutex.scoped_lock(); 19 | output.emplace_back(i); 20 | } // <-- scoped lock unlocks the mutex here. 21 | co_return; 22 | }; 23 | 24 | const size_t num_tasks{100}; 25 | std::vector> tasks{}; 26 | tasks.reserve(num_tasks); 27 | for (size_t i = 1; i <= num_tasks; ++i) 28 | { 29 | tasks.emplace_back(make_critical_section_task(tp, mutex, output, i)); 30 | } 31 | 32 | coro::sync_wait(coro::when_all(std::move(tasks))); 33 | 34 | // The output will be variable per run depending on how the tasks are picked up on the 35 | // thread pool workers. 
36 | for (const auto& value : output) 37 | { 38 | std::cout << value << ", "; 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /examples/coro_queue.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | const size_t iterations = 5; 7 | const size_t producers_count = 5; 8 | const size_t consumers_count = 2; 9 | 10 | coro::thread_pool tp{}; 11 | coro::queue q{}; 12 | coro::latch producers_done{producers_count}; 13 | coro::mutex m{}; /// Just for making the console prints look nice. 14 | 15 | auto make_producer_task = 16 | [iterations](coro::thread_pool& tp, coro::queue& q, coro::latch& pd) -> coro::task 17 | { 18 | co_await tp.schedule(); 19 | 20 | for (size_t i = 0; i < iterations; ++i) 21 | { 22 | co_await q.push(i); 23 | } 24 | 25 | pd.count_down(); // Notify the shutdown task this producer is complete. 26 | co_return; 27 | }; 28 | 29 | auto make_shutdown_task = [](coro::thread_pool& tp, coro::queue& q, coro::latch& pd) -> coro::task 30 | { 31 | // This task will wait for all the producers to complete and then for the 32 | // entire queue to be drained before shutting it down. 33 | co_await tp.schedule(); 34 | co_await pd; 35 | co_await q.shutdown_notify_waiters_drain(tp); 36 | co_return; 37 | }; 38 | 39 | auto make_consumer_task = [](coro::thread_pool& tp, coro::queue& q, coro::mutex& m) -> coro::task 40 | { 41 | co_await tp.schedule(); 42 | 43 | while (true) 44 | { 45 | auto expected = co_await q.pop(); 46 | if (!expected) 47 | { 48 | break; // coro::queue is shutting down 49 | } 50 | 51 | auto scoped_lock = co_await m.scoped_lock(); // Only used to make the output look nice. 
52 | std::cout << "consumed " << *expected << "\n"; 53 | } 54 | }; 55 | 56 | std::vector> tasks{}; 57 | 58 | for (size_t i = 0; i < producers_count; ++i) 59 | { 60 | tasks.push_back(make_producer_task(tp, q, producers_done)); 61 | } 62 | for (size_t i = 0; i < consumers_count; ++i) 63 | { 64 | tasks.push_back(make_consumer_task(tp, q, m)); 65 | } 66 | tasks.push_back(make_shutdown_task(tp, q, producers_done)); 67 | 68 | coro::sync_wait(coro::when_all(std::move(tasks))); 69 | } 70 | -------------------------------------------------------------------------------- /examples/coro_ring_buffer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | const size_t iterations = 100; 7 | const size_t consumers = 4; 8 | coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}}; 9 | coro::ring_buffer rb{}; 10 | coro::mutex m{}; 11 | 12 | std::vector> tasks{}; 13 | 14 | auto make_producer_task = 15 | [](coro::thread_pool& tp, coro::ring_buffer& rb, coro::mutex& m) -> coro::task 16 | { 17 | co_await tp.schedule(); 18 | 19 | for (size_t i = 1; i <= iterations; ++i) 20 | { 21 | co_await rb.produce(i); 22 | } 23 | 24 | // Now that the ring buffer is empty signal to all the consumers its time to stop. Note that 25 | // the stop signal works on producers as well, but this example only uses 1 producer. 
26 | { 27 | auto scoped_lock = co_await m.scoped_lock(); 28 | std::cerr << "\nproducer is sending shutdown signal with drain"; 29 | } 30 | co_await rb.shutdown_drain(tp); 31 | co_return; 32 | }; 33 | 34 | auto make_consumer_task = 35 | [](coro::thread_pool& tp, coro::ring_buffer& rb, coro::mutex& m, size_t id) -> coro::task 36 | { 37 | co_await tp.schedule(); 38 | 39 | while (true) 40 | { 41 | auto expected = co_await rb.consume(); 42 | auto scoped_lock = co_await m.scoped_lock(); // just for synchronizing std::cout/cerr 43 | if (!expected) 44 | { 45 | std::cerr << "\nconsumer " << id << " shutting down, stop signal received"; 46 | break; // while 47 | } 48 | else 49 | { 50 | auto item = std::move(*expected); 51 | std::cout << "(id=" << id << ", v=" << item << "), "; 52 | } 53 | 54 | // Mimic doing some work on the consumed value. 55 | co_await tp.yield(); 56 | } 57 | 58 | co_return; 59 | }; 60 | 61 | // Create N consumers 62 | for (size_t i = 0; i < consumers; ++i) 63 | { 64 | tasks.emplace_back(make_consumer_task(tp, rb, m, i)); 65 | } 66 | // Create 1 producer. 67 | tasks.emplace_back(make_producer_task(tp, rb, m)); 68 | 69 | // Wait for all the values to be produced and consumed through the ring buffer. 70 | coro::sync_wait(coro::when_all(std::move(tasks))); 71 | } 72 | -------------------------------------------------------------------------------- /examples/coro_semaphore.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | // Have more threads/tasks than the semaphore will allow for at any given point in time. 
7 | coro::thread_pool tp{coro::thread_pool::options{.thread_count = 8}}; 8 | coro::semaphore semaphore{2}; 9 | 10 | auto make_rate_limited_task = 11 | [](coro::thread_pool& tp, coro::semaphore& semaphore, uint64_t task_num) -> coro::task 12 | { 13 | co_await tp.schedule(); 14 | 15 | // This will only allow 2 tasks through at any given point in time, all other tasks will 16 | // await the resource to be available before proceeding. 17 | auto result = co_await semaphore.acquire(); 18 | if (result == coro::semaphore::acquire_result::acquired) 19 | { 20 | std::cout << task_num << ", "; 21 | semaphore.release(); 22 | } 23 | else 24 | { 25 | std::cout << task_num << " failed to acquire semaphore [" << coro::semaphore::to_string(result) << "],"; 26 | } 27 | co_return; 28 | }; 29 | 30 | const size_t num_tasks{100}; 31 | std::vector> tasks{}; 32 | for (size_t i = 1; i <= num_tasks; ++i) 33 | { 34 | tasks.emplace_back(make_rate_limited_task(tp, semaphore, i)); 35 | } 36 | 37 | coro::sync_wait(coro::when_all(std::move(tasks))); 38 | } 39 | -------------------------------------------------------------------------------- /examples/coro_shared_mutex.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | // Shared mutexes require an excutor type to be able to wake up multiple shared waiters when 7 | // there is an exclusive lock holder releasing the lock. This example uses a single thread 8 | // to also show the interleaving of coroutines acquiring the shared lock in shared and 9 | // exclusive mode as they resume and suspend in a linear manner. Ideally the thread pool 10 | // executor would have more than 1 thread to resume all shared waiters in parallel. 
11 | auto tp = std::make_shared(coro::thread_pool::options{.thread_count = 1}); 12 | coro::shared_mutex mutex{tp}; 13 | 14 | auto make_shared_task = [](std::shared_ptr tp, 15 | coro::shared_mutex& mutex, 16 | uint64_t i) -> coro::task 17 | { 18 | co_await tp->schedule(); 19 | { 20 | std::cerr << "shared task " << i << " lock_shared()\n"; 21 | auto scoped_lock = co_await mutex.lock_shared(); 22 | std::cerr << "shared task " << i << " lock_shared() acquired\n"; 23 | /// Immediately yield so the other shared tasks also acquire in shared state 24 | /// while this task currently holds the mutex in shared state. 25 | co_await tp->yield(); 26 | std::cerr << "shared task " << i << " unlock_shared()\n"; 27 | } 28 | co_return; 29 | }; 30 | 31 | auto make_exclusive_task = [](std::shared_ptr tp, 32 | coro::shared_mutex& mutex) -> coro::task 33 | { 34 | co_await tp->schedule(); 35 | 36 | std::cerr << "exclusive task lock()\n"; 37 | auto scoped_lock = co_await mutex.lock(); 38 | std::cerr << "exclusive task lock() acquired\n"; 39 | // Do the exclusive work.. 40 | std::cerr << "exclusive task unlock()\n"; 41 | co_return; 42 | }; 43 | 44 | // Create 3 shared tasks that will acquire the mutex in a shared state. 45 | const size_t num_tasks{3}; 46 | std::vector> tasks{}; 47 | for (size_t i = 1; i <= num_tasks; ++i) 48 | { 49 | tasks.emplace_back(make_shared_task(tp, mutex, i)); 50 | } 51 | // Create an exclusive task. 52 | tasks.emplace_back(make_exclusive_task(tp, mutex)); 53 | // Create 3 more shared tasks that will be blocked until the exclusive task completes. 
54 | for (size_t i = num_tasks + 1; i <= num_tasks * 2; ++i) 55 | { 56 | tasks.emplace_back(make_shared_task(tp, mutex, i)); 57 | } 58 | 59 | coro::sync_wait(coro::when_all(std::move(tasks))); 60 | } 61 | -------------------------------------------------------------------------------- /examples/coro_sync_wait.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | // This lambda will create a coro::task that returns a unit64_t. 7 | // It can be invoked many times with different arguments. 8 | auto make_task_inline = [](uint64_t x) -> coro::task { co_return x + x; }; 9 | 10 | // This will block the calling thread until the created task completes. 11 | // Since this task isn't scheduled on any coro::thread_pool or coro::io_scheduler 12 | // it will execute directly on the calling thread. 13 | auto result = coro::sync_wait(make_task_inline(5)); 14 | std::cout << "Inline Result = " << result << "\n"; 15 | 16 | // We'll make a 1 thread coro::thread_pool to demonstrate offloading the task's 17 | // execution to another thread. We'll pass the thread pool as a parameter so 18 | // the task can be scheduled. 19 | // Note that you will need to guarantee the thread pool outlives the coroutine. 20 | coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}}; 21 | 22 | auto make_task_offload = [](coro::thread_pool& tp, uint64_t x) -> coro::task 23 | { 24 | co_await tp.schedule(); // Schedules execution on the thread pool. 25 | co_return x + x; // This will execute on the thread pool. 26 | }; 27 | 28 | // This will still block the calling thread, but it will now offload to the 29 | // coro::thread_pool since the coroutine task is immediately scheduled. 
30 | result = coro::sync_wait(make_task_offload(tp, 10)); 31 | std::cout << "Offload Result = " << result << "\n"; 32 | } 33 | -------------------------------------------------------------------------------- /examples/coro_task.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | // Create a task that awaits the doubling of its given value and 7 | // then returns the result after adding 5. 8 | auto double_and_add_5_task = [](uint64_t input) -> coro::task 9 | { 10 | // Task that takes a value and doubles it. 11 | auto double_task = [](uint64_t x) -> coro::task { co_return x * 2; }; 12 | 13 | auto doubled = co_await double_task(input); 14 | co_return doubled + 5; 15 | }; 16 | 17 | auto output = coro::sync_wait(double_and_add_5_task(2)); 18 | std::cout << "Task1 output = " << output << "\n"; 19 | 20 | struct expensive_struct 21 | { 22 | std::string id{}; 23 | std::vector records{}; 24 | 25 | expensive_struct() = default; 26 | ~expensive_struct() = default; 27 | 28 | // Explicitly delete copy constructor and copy assign, force only moves! 29 | // While the default move constructors will work for this struct the example 30 | // inserts explicit print statements to show the task is moving the value 31 | // out correctly. 
32 | expensive_struct(const expensive_struct&) = delete; 33 | auto operator=(const expensive_struct&) -> expensive_struct& = delete; 34 | 35 | expensive_struct(expensive_struct&& other) : id(std::move(other.id)), records(std::move(other.records)) 36 | { 37 | std::cout << "expensive_struct() move constructor called\n"; 38 | } 39 | auto operator=(expensive_struct&& other) -> expensive_struct& 40 | { 41 | if (std::addressof(other) != this) 42 | { 43 | id = std::move(other.id); 44 | records = std::move(other.records); 45 | } 46 | std::cout << "expensive_struct() move assignment called\n"; 47 | return *this; 48 | } 49 | }; 50 | 51 | // Create a very large object and return it by moving the value so the 52 | // contents do not have to be copied out. 53 | auto move_output_task = []() -> coro::task 54 | { 55 | expensive_struct data{}; 56 | data.id = "12345678-1234-5678-9012-123456781234"; 57 | for (size_t i = 10'000; i < 100'000; ++i) 58 | { 59 | data.records.emplace_back(std::to_string(i)); 60 | } 61 | 62 | // Because the struct only has move contructors it will be forced to use 63 | // them, no need to explicitly std::move(data). 64 | co_return data; 65 | }; 66 | 67 | auto data = coro::sync_wait(move_output_task()); 68 | std::cout << data.id << " has " << data.records.size() << " records.\n"; 69 | 70 | // std::unique_ptr can also be used to return a larger object. 
71 | auto unique_ptr_task = []() -> coro::task> { co_return std::make_unique(42); }; 72 | 73 | auto answer_to_everything = coro::sync_wait(unique_ptr_task()); 74 | if (answer_to_everything != nullptr) 75 | { 76 | std::cout << "Answer to everything = " << *answer_to_everything << "\n"; 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /examples/coro_tcp_echo_server.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | auto main() -> int 4 | { 5 | auto make_tcp_echo_server = [](std::shared_ptr scheduler) -> coro::task 6 | { 7 | auto make_on_connection_task = [](coro::net::tcp::client client) -> coro::task 8 | { 9 | std::string buf(1024, '\0'); 10 | 11 | while (true) 12 | { 13 | // Wait for data to be available to read. 14 | co_await client.poll(coro::poll_op::read); 15 | auto [rstatus, rspan] = client.recv(buf); 16 | switch (rstatus) 17 | { 18 | case coro::net::recv_status::ok: 19 | // Make sure the client socket can be written to. 20 | co_await client.poll(coro::poll_op::write); 21 | client.send(std::span{rspan}); 22 | break; 23 | case coro::net::recv_status::would_block: 24 | break; 25 | case coro::net::recv_status::closed: 26 | default: 27 | co_return; 28 | } 29 | } 30 | }; 31 | 32 | co_await scheduler->schedule(); 33 | coro::net::tcp::server server{scheduler, coro::net::tcp::server::options{.port = 8888}}; 34 | 35 | while (true) 36 | { 37 | // Wait for a new connection. 38 | auto pstatus = co_await server.poll(); 39 | switch (pstatus) 40 | { 41 | case coro::poll_status::event: 42 | { 43 | auto client = server.accept(); 44 | if (client.socket().is_valid()) 45 | { 46 | scheduler->spawn(make_on_connection_task(std::move(client))); 47 | } // else report error or something if the socket was invalid or could not be accepted. 
48 | } 49 | break; 50 | case coro::poll_status::error: 51 | case coro::poll_status::closed: 52 | case coro::poll_status::timeout: 53 | default: 54 | co_return; 55 | } 56 | } 57 | 58 | co_return; 59 | }; 60 | 61 | std::vector> workers{}; 62 | for (size_t i = 0; i < std::thread::hardware_concurrency(); ++i) 63 | { 64 | auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ 65 | .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); 66 | 67 | workers.push_back(make_tcp_echo_server(scheduler)); 68 | } 69 | 70 | coro::sync_wait(coro::when_all(std::move(workers))); 71 | } 72 | -------------------------------------------------------------------------------- /examples/coro_thread_pool.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | int main() 6 | { 7 | coro::thread_pool tp{coro::thread_pool::options{ 8 | // By default all thread pools will create its thread count with the 9 | // std::thread::hardware_concurrency() as the number of worker threads in the pool, 10 | // but this can be changed via this thread_count option. This example will use 4. 11 | .thread_count = 4, 12 | // Upon starting each worker thread an optional lambda callback with the worker's 13 | // index can be called to make thread changes, perhaps priority or change the thread's 14 | // name. 15 | .on_thread_start_functor = [](std::size_t worker_idx) -> void 16 | { std::cout << "thread pool worker " << worker_idx << " is starting up.\n"; }, 17 | // Upon stopping each worker thread an optional lambda callback with the worker's 18 | // index can b called. 
19 | .on_thread_stop_functor = [](std::size_t worker_idx) -> void 20 | { std::cout << "thread pool worker " << worker_idx << " is shutting down.\n"; }}}; 21 | 22 | auto primary_task = [](coro::thread_pool& tp) -> coro::task 23 | { 24 | auto offload_task = [](coro::thread_pool& tp, uint64_t child_idx) -> coro::task 25 | { 26 | // Start by scheduling this offload worker task onto the thread pool. 27 | co_await tp.schedule(); 28 | // Now any code below this schedule() line will be executed on one of the thread pools 29 | // worker threads. 30 | 31 | // Mimic some expensive task that should be run on a background thread... 32 | std::random_device rd; 33 | std::mt19937 gen{rd()}; 34 | std::uniform_int_distribution<> d{0, 1}; 35 | 36 | size_t calculation{0}; 37 | for (size_t i = 0; i < 1'000'000; ++i) 38 | { 39 | calculation += d(gen); 40 | 41 | // Lets be nice and yield() to let other coroutines on the thread pool have some cpu 42 | // time. This isn't necessary but is illustrated to show how tasks can cooperatively 43 | // yield control at certain points of execution. Its important to never call the 44 | // std::this_thread::sleep_for() within the context of a coroutine, that will block 45 | // and other coroutines which are ready for execution from starting, always use yield() 46 | // or within the context of a coro::io_scheduler you can use yield_for(amount). 47 | if (i == 500'000) 48 | { 49 | std::cout << "Task " << child_idx << " is yielding()\n"; 50 | co_await tp.yield(); 51 | } 52 | } 53 | co_return calculation; 54 | }; 55 | 56 | const size_t num_children{10}; 57 | std::vector> child_tasks{}; 58 | child_tasks.reserve(num_children); 59 | for (size_t i = 0; i < num_children; ++i) 60 | { 61 | child_tasks.emplace_back(offload_task(tp, i)); 62 | } 63 | 64 | // Wait for the thread pool workers to process all child tasks. 65 | auto results = co_await coro::when_all(std::move(child_tasks)); 66 | 67 | // Sum up the results of the completed child tasks. 
68 | size_t calculation{0}; 69 | for (const auto& task : results) 70 | { 71 | calculation += task.return_value(); 72 | } 73 | co_return calculation; 74 | }; 75 | 76 | auto result = coro::sync_wait(primary_task(tp)); 77 | std::cout << "calculated thread pool result = " << result << "\n"; 78 | } 79 | -------------------------------------------------------------------------------- /examples/coro_when_all.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | // Create a thread pool to execute all the tasks in parallel. 7 | coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}}; 8 | // Create the task we want to invoke multiple times and execute in parallel on the thread pool. 9 | auto twice = [](coro::thread_pool& tp, uint64_t x) -> coro::task 10 | { 11 | co_await tp.schedule(); // Schedule onto the thread pool. 12 | co_return x + x; // Executed on the thread pool. 13 | }; 14 | 15 | // Make our tasks to execute, tasks can be passed in via a std::ranges::range type or var args. 16 | std::vector> tasks{}; 17 | for (std::size_t i = 0; i < 5; ++i) 18 | { 19 | tasks.emplace_back(twice(tp, i + 1)); 20 | } 21 | 22 | // Synchronously wait on this thread for the thread pool to finish executing all the tasks in parallel. 23 | auto results = coro::sync_wait(coro::when_all(std::move(tasks))); 24 | for (auto& result : results) 25 | { 26 | // If your task can throw calling return_value() will either return the result or re-throw the exception. 27 | try 28 | { 29 | std::cout << result.return_value() << "\n"; 30 | } 31 | catch (const std::exception& e) 32 | { 33 | std::cerr << e.what() << '\n'; 34 | } 35 | } 36 | 37 | // Use var args instead of a container as input to coro::when_all. 
38 | auto square = [](coro::thread_pool& tp, double x) -> coro::task 39 | { 40 | co_await tp.schedule(); 41 | co_return x* x; 42 | }; 43 | 44 | // Var args allows you to pass in tasks with different return types and returns 45 | // the result as a std::tuple. 46 | auto tuple_results = coro::sync_wait(coro::when_all(square(tp, 1.1), twice(tp, 10))); 47 | 48 | auto first = std::get<0>(tuple_results).return_value(); 49 | auto second = std::get<1>(tuple_results).return_value(); 50 | 51 | std::cout << "first: " << first << " second: " << second << "\n"; 52 | } 53 | -------------------------------------------------------------------------------- /examples/coro_when_any.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | int main() 5 | { 6 | // Create a scheduler to execute all tasks in parallel and also so we can 7 | // suspend a task to act like a timeout event. 8 | auto scheduler = coro::io_scheduler::make_shared(); 9 | 10 | // This task will behave like a long running task and will produce a valid result. 11 | auto make_long_running_task = [](std::shared_ptr scheduler, 12 | std::chrono::milliseconds execution_time) -> coro::task 13 | { 14 | // Schedule the task to execute in parallel. 15 | co_await scheduler->schedule(); 16 | // Fake doing some work... 17 | co_await scheduler->yield_for(execution_time); 18 | // Return the result. 19 | co_return 1; 20 | }; 21 | 22 | auto make_timeout_task = [](std::shared_ptr scheduler) -> coro::task 23 | { 24 | // Schedule a timer to be fired so we know the task timed out. 25 | co_await scheduler->schedule_after(std::chrono::milliseconds{100}); 26 | co_return -1; 27 | }; 28 | 29 | // Example showing the long running task completing first. 
30 | { 31 | std::vector> tasks{}; 32 | tasks.emplace_back(make_long_running_task(scheduler, std::chrono::milliseconds{50})); 33 | tasks.emplace_back(make_timeout_task(scheduler)); 34 | 35 | auto result = coro::sync_wait(coro::when_any(std::move(tasks))); 36 | std::cout << "result = " << result << "\n"; 37 | } 38 | 39 | // Example showing the long running task timing out. 40 | { 41 | std::vector> tasks{}; 42 | tasks.emplace_back(make_long_running_task(scheduler, std::chrono::milliseconds{500})); 43 | tasks.emplace_back(make_timeout_task(scheduler)); 44 | 45 | auto result = coro::sync_wait(coro::when_any(std::move(tasks))); 46 | std::cout << "result = " << result << "\n"; 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /include/coro/attribute.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // This is a GCC extension; define it only for GCC and compilers that emulate GCC. 4 | #if defined(__GNUC__) && !defined(__clang__) 5 | #define __ATTRIBUTE__(attr) __attribute__((attr)) 6 | #else 7 | #define __ATTRIBUTE__(attr) 8 | #endif 9 | -------------------------------------------------------------------------------- /include/coro/concepts/awaitable.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | namespace coro::concepts 9 | { 10 | 11 | template 12 | concept in_types = (std::same_as || ...); 13 | 14 | /** 15 | * This concept declares a type that is required to meet the c++20 coroutine operator co_await() 16 | * retun type. It requires the following three member functions: 17 | * await_ready() -> bool 18 | * await_suspend(std::coroutine_handle<>) -> void|bool|std::coroutine_handle<> 19 | * await_resume() -> decltype(auto) 20 | * Where the return type on await_resume is the requested return of the awaitable. 
21 | */ 22 | // clang-format off 23 | template 24 | concept awaiter = requires(type t, std::coroutine_handle<> c) 25 | { 26 | { t.await_ready() } -> std::same_as; 27 | { t.await_suspend(c) } -> in_types>; 28 | { t.await_resume() }; 29 | }; 30 | 31 | template 32 | concept member_co_await_awaitable = requires(type t) 33 | { 34 | { t.operator co_await() } -> awaiter; 35 | }; 36 | 37 | template 38 | concept global_co_await_awaitable = requires(type t) 39 | { 40 | { operator co_await(t) } -> awaiter; 41 | }; 42 | 43 | /** 44 | * This concept declares a type that can be operator co_await()'ed and returns an awaiter_type. 45 | */ 46 | template 47 | concept awaitable = member_co_await_awaitable || global_co_await_awaitable || awaiter; 48 | 49 | template 50 | concept awaiter_void = awaiter && requires(type t) 51 | { 52 | {t.await_resume()} -> std::same_as; 53 | }; 54 | 55 | template 56 | concept member_co_await_awaitable_void = requires(type t) 57 | { 58 | { t.operator co_await() } -> awaiter_void; 59 | }; 60 | 61 | template 62 | concept global_co_await_awaitable_void = requires(type t) 63 | { 64 | { operator co_await(t) } -> awaiter_void; 65 | }; 66 | 67 | template 68 | concept awaitable_void = member_co_await_awaitable_void || global_co_await_awaitable_void || awaiter_void; 69 | 70 | template 71 | struct awaitable_traits 72 | { 73 | }; 74 | 75 | template 76 | static auto get_awaiter(awaitable&& value) 77 | { 78 | if constexpr (member_co_await_awaitable) 79 | return std::forward(value).operator co_await(); 80 | else if constexpr (global_co_await_awaitable) 81 | return operator co_await(std::forward(value)); 82 | else if constexpr (awaiter) { 83 | return std::forward(value); 84 | } 85 | } 86 | 87 | template 88 | struct awaitable_traits 89 | { 90 | using awaiter_type = decltype(get_awaiter(std::declval())); 91 | using awaiter_return_type = decltype(std::declval().await_resume()); 92 | }; 93 | // clang-format on 94 | 95 | } // namespace coro::concepts 96 | 
-------------------------------------------------------------------------------- /include/coro/concepts/buffer.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | namespace coro::concepts 8 | { 9 | // clang-format off 10 | template 11 | concept const_buffer = requires(const type t) 12 | { 13 | { t.empty() } -> std::same_as; 14 | { t.data() } -> std::same_as; 15 | { t.size() } -> std::same_as; 16 | }; 17 | 18 | template 19 | concept mutable_buffer = requires(type t) 20 | { 21 | { t.empty() } -> std::same_as; 22 | { t.data() } -> std::same_as; 23 | { t.size() } -> std::same_as; 24 | }; 25 | // clang-format on 26 | 27 | } // namespace coro::concepts 28 | -------------------------------------------------------------------------------- /include/coro/concepts/executor.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/concepts/awaitable.hpp" 4 | #include "coro/fd.hpp" 5 | #include "coro/task.hpp" 6 | 7 | #ifdef LIBCORO_FEATURE_NETWORKING 8 | #include "coro/poll.hpp" 9 | #endif // #ifdef LIBCORO_FEATURE_NETWORKING 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | namespace coro::concepts 17 | { 18 | 19 | // clang-format off 20 | template 21 | concept executor = requires(executor_type e, std::coroutine_handle<> c) 22 | { 23 | { e.schedule() } -> coro::concepts::awaiter; 24 | { e.spawn(std::declval>()) } -> std::same_as; 25 | { e.yield() } -> coro::concepts::awaiter; 26 | { e.resume(c) } -> std::same_as; 27 | { e.size() } -> std::same_as; 28 | { e.empty() } -> std::same_as; 29 | { e.shutdown() } -> std::same_as; 30 | }; 31 | 32 | #ifdef LIBCORO_FEATURE_NETWORKING 33 | template 34 | concept io_executor = executor and requires(executor_type e, std::coroutine_handle<> c, fd_t fd, coro::poll_op op, std::chrono::milliseconds timeout) 35 | { 36 | { e.poll(fd, op, timeout) } -> 
std::same_as>; 37 | }; 38 | #endif // #ifdef LIBCORO_FEATURE_NETWORKING 39 | 40 | // clang-format on 41 | 42 | } // namespace coro::concepts 43 | -------------------------------------------------------------------------------- /include/coro/concepts/promise.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/concepts/awaitable.hpp" 4 | 5 | #include 6 | 7 | namespace coro::concepts 8 | { 9 | // clang-format off 10 | template 11 | concept promise = requires(type t) 12 | { 13 | { t.get_return_object() } -> std::convertible_to>; 14 | { t.initial_suspend() } -> awaiter; 15 | { t.final_suspend() } -> awaiter; 16 | { t.yield_value() } -> awaitable; 17 | } 18 | && requires(type t, return_type return_value) 19 | { 20 | requires std::same_as || 21 | std::same_as || 22 | requires { t.yield_value(return_value); }; 23 | }; 24 | // clang-format on 25 | 26 | } // namespace coro::concepts 27 | -------------------------------------------------------------------------------- /include/coro/concepts/range_of.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | namespace coro::concepts 7 | { 8 | /** 9 | * Concept to require that the range contains a specific type of value. 10 | */ 11 | template 12 | concept range_of = std::ranges::range && std::is_same_v>; 13 | 14 | /** 15 | * Concept to require that a sized range contains a specific type of value. 
16 | */ 17 | template 18 | concept sized_range_of = std::ranges::sized_range && std::is_same_v>; 19 | 20 | } // namespace coro::concepts 21 | -------------------------------------------------------------------------------- /include/coro/coro.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/concepts/awaitable.hpp" 4 | #include "coro/concepts/buffer.hpp" 5 | #include "coro/concepts/executor.hpp" 6 | #include "coro/concepts/promise.hpp" 7 | #include "coro/concepts/range_of.hpp" 8 | 9 | #include "coro/expected.hpp" 10 | 11 | #ifdef LIBCORO_FEATURE_NETWORKING 12 | #include "coro/io_scheduler.hpp" 13 | #include "coro/net/dns/resolver.hpp" 14 | #include "coro/net/tcp/client.hpp" 15 | #include "coro/net/tcp/server.hpp" 16 | #include "coro/poll.hpp" 17 | #ifdef LIBCORO_FEATURE_TLS 18 | #include "coro/net/tls/client.hpp" 19 | #include "coro/net/tls/connection_status.hpp" 20 | #include "coro/net/tls/context.hpp" 21 | #include "coro/net/tls/server.hpp" 22 | #endif 23 | #include "coro/net/connect.hpp" 24 | #include "coro/net/hostname.hpp" 25 | #include "coro/net/ip_address.hpp" 26 | #include "coro/net/recv_status.hpp" 27 | #include "coro/net/send_status.hpp" 28 | #include "coro/net/socket.hpp" 29 | #include "coro/net/udp/peer.hpp" 30 | #endif 31 | 32 | #include "coro/condition_variable.hpp" 33 | #include "coro/event.hpp" 34 | #include "coro/default_executor.hpp" 35 | #include "coro/generator.hpp" 36 | #include "coro/latch.hpp" 37 | #include "coro/mutex.hpp" 38 | #include "coro/queue.hpp" 39 | #include "coro/ring_buffer.hpp" 40 | #include "coro/semaphore.hpp" 41 | #include "coro/shared_mutex.hpp" 42 | #include "coro/sync_wait.hpp" 43 | #include "coro/task.hpp" 44 | #include "coro/thread_pool.hpp" 45 | #include "coro/time.hpp" 46 | #include "coro/when_all.hpp" 47 | #include "coro/when_any.hpp" 48 | -------------------------------------------------------------------------------- 
/include/coro/default_executor.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #ifdef LIBCORO_FEATURE_NETWORKING 4 | #include "coro/io_scheduler.hpp" 5 | #else 6 | #include "coro/thread_pool.hpp" 7 | #endif 8 | 9 | namespace coro::default_executor 10 | { 11 | 12 | /** 13 | * Set up default coro::thread_pool::options before constructing a single instance of coro::thread_pool in 14 | * coro::default_executor::executor() 15 | * @param thread_pool_options thread_pool options 16 | */ 17 | void set_executor_options(thread_pool::options thread_pool_options); 18 | 19 | /** 20 | * Get default coro::thread_pool 21 | */ 22 | std::shared_ptr executor(); 23 | 24 | #ifdef LIBCORO_FEATURE_NETWORKING 25 | /** 26 | * Set up default coro::io_scheduler::options before constructing a single instance of coro::io_scheduler in 27 | * coro::default_executor::io_executor() 28 | * @param io_scheduler_options io_scheduler options 29 | */ 30 | void set_io_executor_options(io_scheduler::options io_scheduler_options); 31 | 32 | /** 33 | * Get default coro::io_scheduler 34 | */ 35 | std::shared_ptr io_executor(); 36 | #endif 37 | 38 | /** 39 | * Get the perfect default executor 40 | * 41 | * This executor is ideal as a default argument in a library, 42 | * in a place where thread_pool functionality is sufficient, 43 | * but you don't want to have two executor instances per application for the same thing, 44 | * one thread_pool and one io_scheduler. 
 */
inline auto perfect()
{
    // Resolved at compile time: prefer the io_scheduler when networking is
    // compiled in, otherwise fall back to the plain thread_pool executor.
#ifdef LIBCORO_FEATURE_NETWORKING
    return io_executor();
#else
    return executor();
#endif
}

} // namespace coro::default_executor

// ---- /include/coro/detail/poll_info.hpp ----
#pragma once

#include "coro/fd.hpp"
#include "coro/poll.hpp"
#include "coro/time.hpp"

// NOTE(review): the standard include names below were lost in this dump; they
// are inferred from usage in this header (std::atomic_thread_fence,
// std::coroutine_handle, std::multimap, std::optional) — confirm upstream.
#include <atomic>
#include <coroutine>
#include <map>
#include <optional>

namespace coro::detail
{
/**
 * Poll Info encapsulates everything about a poll operation for the event as well as its paired
 * timeout. This is important since coroutines that are waiting on an event or timeout do not
 * immediately execute, they are re-scheduled onto the thread pool, so it's possible its pair
 * event or timeout also triggers while the coroutine is still waiting to resume. This means that
 * the first one to happen, the event itself or its timeout, needs to disable the other pair item
 * prior to resuming the coroutine.
 *
 * Finally, it's also important to note that the event and its paired timeout could happen during
 * the same epoll_wait and possibly trigger the coroutine to start twice. Only one can win, so the
 * first one processed sets m_processed to true and any subsequent events in the same epoll batch
 * are effectively discarded.
26 | */ 27 | struct poll_info 28 | { 29 | using timed_events = std::multimap; 30 | 31 | poll_info() = default; 32 | ~poll_info() = default; 33 | 34 | poll_info(const poll_info&) = delete; 35 | poll_info(poll_info&&) = delete; 36 | auto operator=(const poll_info&) -> poll_info& = delete; 37 | auto operator=(poll_info&&) -> poll_info& = delete; 38 | 39 | struct poll_awaiter 40 | { 41 | explicit poll_awaiter(poll_info& pi) noexcept : m_pi(pi) {} 42 | 43 | auto await_ready() const noexcept -> bool { return false; } 44 | auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> void 45 | { 46 | m_pi.m_awaiting_coroutine = awaiting_coroutine; 47 | std::atomic_thread_fence(std::memory_order::release); 48 | } 49 | auto await_resume() noexcept -> coro::poll_status { return m_pi.m_poll_status; } 50 | 51 | poll_info& m_pi; 52 | }; 53 | 54 | auto operator co_await() noexcept -> poll_awaiter { return poll_awaiter{*this}; } 55 | 56 | /// The file descriptor being polled on. This is needed so that if the timeout occurs first then 57 | /// the event loop can immediately disable the event within epoll. 58 | fd_t m_fd{-1}; 59 | /// The timeout's position in the timeout map. A poll() with no timeout or yield() this is empty. 60 | /// This is needed so that if the event occurs first then the event loop can immediately disable 61 | /// the timeout within epoll. 62 | std::optional m_timer_pos{std::nullopt}; 63 | /// The awaiting coroutine for this poll info to resume upon event or timeout. 64 | std::coroutine_handle<> m_awaiting_coroutine; 65 | /// The status of the poll operation. 66 | coro::poll_status m_poll_status{coro::poll_status::error}; 67 | /// Did the timeout and event trigger at the same time on the same epoll_wait call? 68 | /// Once this is set to true all future events on this poll info are null and void. 
69 | bool m_processed{false}; 70 | }; 71 | 72 | } // namespace coro::detail 73 | -------------------------------------------------------------------------------- /include/coro/detail/task_self_deleting.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/task.hpp" 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | namespace coro::detail 10 | { 11 | 12 | class task_self_deleting; 13 | 14 | class promise_self_deleting 15 | { 16 | public: 17 | promise_self_deleting(); 18 | ~promise_self_deleting(); 19 | 20 | promise_self_deleting(const promise_self_deleting&) = delete; 21 | promise_self_deleting(promise_self_deleting&&); 22 | auto operator=(const promise_self_deleting&) -> promise_self_deleting& = delete; 23 | auto operator=(promise_self_deleting&&) -> promise_self_deleting&; 24 | 25 | auto get_return_object() -> task_self_deleting; 26 | auto initial_suspend() -> std::suspend_always; 27 | auto final_suspend() noexcept -> std::suspend_never; 28 | auto return_void() noexcept -> void; 29 | auto unhandled_exception() -> void; 30 | 31 | auto executor_size(std::atomic& task_container_size) -> void; 32 | 33 | private: 34 | /** 35 | * The executor m_size member to decrement upon the coroutine completing. 36 | */ 37 | std::atomic* m_executor_size{nullptr}; 38 | }; 39 | 40 | /** 41 | * This task will self delete upon completing. This is useful for usecase that the lifetime of the 42 | * coroutine cannot be determined and it needs to 'self' delete. This is achieved by returning 43 | * std::suspend_never from the promise::final_suspend which then based on the spec tells the 44 | * coroutine to delete itself. This means any classes that use this task cannot have owning 45 | * pointers or relationships to this class and must not use it past its completion. 46 | * 47 | * This class is currently only used by coro::task_container and will decrement its 48 | * m_size internal count when the coroutine completes. 
49 | */ 50 | class task_self_deleting 51 | { 52 | public: 53 | using promise_type = promise_self_deleting; 54 | 55 | explicit task_self_deleting(promise_self_deleting& promise); 56 | ~task_self_deleting(); 57 | 58 | task_self_deleting(const task_self_deleting&) = delete; 59 | task_self_deleting(task_self_deleting&&); 60 | auto operator=(const task_self_deleting&) -> task_self_deleting& = delete; 61 | auto operator=(task_self_deleting&&) -> task_self_deleting&; 62 | 63 | auto promise() -> promise_self_deleting& { return *m_promise; } 64 | auto handle() -> std::coroutine_handle 65 | { 66 | return std::coroutine_handle::from_promise(*m_promise); 67 | } 68 | 69 | auto resume() -> bool 70 | { 71 | auto h = handle(); 72 | if (!h.done()) 73 | { 74 | h.resume(); 75 | } 76 | return !h.done(); 77 | } 78 | 79 | private: 80 | promise_self_deleting* m_promise{nullptr}; 81 | }; 82 | 83 | /** 84 | * Turns a coro::task into a self deleting task (detached). 85 | */ 86 | auto make_task_self_deleting(coro::task user_task) -> task_self_deleting; 87 | 88 | } // namespace coro::detail 89 | -------------------------------------------------------------------------------- /include/coro/detail/void_value.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | namespace coro::detail 4 | { 5 | struct void_value 6 | { 7 | }; 8 | 9 | } // namespace coro::detail 10 | -------------------------------------------------------------------------------- /include/coro/event.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/concepts/executor.hpp" 4 | 5 | #include 6 | #include 7 | 8 | namespace coro 9 | { 10 | enum class resume_order_policy 11 | { 12 | /// Last in first out, this is the default policy and will execute the fastest 13 | /// if you do not need the first waiter to execute first upon the event being set. 
    lifo,
    /// First in first out, this policy has an extra overhead to reverse the order of
    /// the waiters but will guarantee the ordering is fifo.
    fifo
};

/**
 * Event is a manually triggered thread safe signal that can be co_await()'ed by multiple awaiters.
 * Each awaiter should co_await the event and upon the event being set each awaiter will have their
 * coroutine resumed.
 *
 * The event can be manually reset to the un-set state to be re-used.
 * \code
t1: coro::event e;
...
t2: func(coro::event& e) { ... co_await e; ... }
...
t1: do_work();
t1: e.set();
...
t2: resume()
 * \endcode
 */
class event
{
public:
    struct awaiter
    {
        /**
         * @param e The event to wait for it to be set.
         */
        awaiter(const event& e) noexcept : m_event(e) {}

        /**
         * @return True if the event is already set, otherwise false to suspend this coroutine.
         */
        auto await_ready() const noexcept -> bool { return m_event.is_set(); }

        /**
         * Adds this coroutine to the list of awaiters in a thread safe fashion. If the event
         * is set while attempting to add this coroutine to the awaiters then this will return false
         * to resume execution immediately.
         * @return False if the event is already set, otherwise true to suspend this coroutine.
         */
        auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool;

        /**
         * Nothing to do on resume.
         */
        auto await_resume() noexcept {}

        /// Reference to the event that this awaiter is waiting on.
        const event& m_event;
        /// The awaiting continuation coroutine handle.
        std::coroutine_handle<> m_awaiting_coroutine;
        /// The next awaiter in line for this event, nullptr if this is the end.
70 | awaiter* m_next{nullptr}; 71 | }; 72 | 73 | /** 74 | * Creates an event with the given initial state of being set or not set. 75 | * @param initially_set By default all events start as not set, but if needed this parameter can 76 | * set the event to already be triggered. 77 | */ 78 | explicit event(bool initially_set = false) noexcept; 79 | ~event() = default; 80 | 81 | event(const event&) = delete; 82 | event(event&&) = delete; 83 | auto operator=(const event&) -> event& = delete; 84 | auto operator=(event&&) -> event& = delete; 85 | 86 | /** 87 | * @return True if this event is currently in the set state. 88 | */ 89 | auto is_set() const noexcept -> bool { return m_state.load(std::memory_order::acquire) == this; } 90 | 91 | /** 92 | * Sets this event and resumes all awaiters. Note that all waiters will be resumed onto this 93 | * thread of execution. 94 | * @param policy The order in which the waiters should be resumed, defaults to LIFO since it 95 | * is more efficient, FIFO requires reversing the order of the waiters first. 96 | */ 97 | auto set(resume_order_policy policy = resume_order_policy::lifo) noexcept -> void; 98 | 99 | /** 100 | * Sets this event and resumes all awaiters onto the given executor. This will distribute 101 | * the waiters across the executor's threads. 102 | */ 103 | template 104 | auto set(executor_type& e, resume_order_policy policy = resume_order_policy::lifo) noexcept -> void 105 | { 106 | void* old_value = m_state.exchange(this, std::memory_order::acq_rel); 107 | if (old_value != this) 108 | { 109 | // If FIFO has been requsted then reverse the order upon resuming. 
110 | if (policy == resume_order_policy::fifo) 111 | { 112 | old_value = reverse(static_cast(old_value)); 113 | } 114 | // else lifo nothing to do 115 | 116 | auto* waiters = static_cast(old_value); 117 | while (waiters != nullptr) 118 | { 119 | auto* next = waiters->m_next; 120 | e.resume(waiters->m_awaiting_coroutine); 121 | waiters = next; 122 | } 123 | } 124 | } 125 | 126 | /** 127 | * @return An awaiter struct to suspend and resume this coroutine for when the event is set. 128 | */ 129 | auto operator co_await() const noexcept -> awaiter { return awaiter(*this); } 130 | 131 | /** 132 | * Resets the event from set to not set so it can be re-used. If the event is not currently 133 | * set then this function has no effect. 134 | */ 135 | auto reset() noexcept -> void; 136 | 137 | protected: 138 | /// For access to m_state. 139 | friend struct awaiter; 140 | /// The state of the event, nullptr is not set with zero awaiters. Set to an awaiter* there are 141 | /// coroutines awaiting the event to be set, and set to this the event has triggered. 142 | /// 1) nullptr == not set 143 | /// 2) awaiter* == linked list of awaiters waiting for the event to trigger. 144 | /// 3) this == The event is triggered and all awaiters are resumed. 145 | mutable std::atomic m_state; 146 | 147 | private: 148 | /** 149 | * Reverses the set of waiters from LIFO->FIFO and returns the new head. 
150 | */ 151 | auto reverse(awaiter* head) -> awaiter*; 152 | }; 153 | 154 | } // namespace coro 155 | -------------------------------------------------------------------------------- /include/coro/expected.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #if (__cpp_lib_expected) 4 | #include 5 | namespace coro 6 | { 7 | template 8 | using expected = std::expected; 9 | 10 | template 11 | using unexpected = std::unexpected; 12 | } // namespace coro 13 | #else 14 | #include "coro/detail/tl_expected.hpp" 15 | namespace coro 16 | { 17 | template 18 | using expected = tl::expected; 19 | 20 | template 21 | using unexpected = tl::unexpected; 22 | } // namespace coro 23 | #endif 24 | -------------------------------------------------------------------------------- /include/coro/fd.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | namespace coro 4 | { 5 | using fd_t = int; 6 | 7 | } // namespace coro 8 | -------------------------------------------------------------------------------- /include/coro/generator.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | namespace coro 9 | { 10 | template 11 | class generator; 12 | 13 | namespace detail 14 | { 15 | template 16 | class generator_promise 17 | { 18 | public: 19 | using value_type = std::remove_reference_t; 20 | using reference_type = std::conditional_t, T, T&>; 21 | using pointer_type = value_type*; 22 | 23 | generator_promise() = default; 24 | 25 | auto get_return_object() noexcept -> generator; 26 | 27 | auto initial_suspend() const { return std::suspend_always{}; } 28 | 29 | auto final_suspend() const noexcept(true) { return std::suspend_always{}; } 30 | 31 | template::value, int> = 0> 32 | auto yield_value(std::remove_reference_t& value) noexcept 33 | { 34 | m_value = 
std::addressof(value); 35 | return std::suspend_always{}; 36 | } 37 | 38 | auto yield_value(std::remove_reference_t&& value) noexcept 39 | { 40 | m_value = std::addressof(value); 41 | return std::suspend_always{}; 42 | } 43 | 44 | auto unhandled_exception() -> void { m_exception = std::current_exception(); } 45 | 46 | auto return_void() noexcept -> void {} 47 | 48 | auto value() const noexcept -> reference_type { return static_cast(*m_value); } 49 | 50 | template 51 | auto await_transform(U&& value) -> std::suspend_never = delete; 52 | 53 | auto rethrow_if_exception() -> void 54 | { 55 | if (m_exception) 56 | { 57 | std::rethrow_exception(m_exception); 58 | } 59 | } 60 | 61 | private: 62 | pointer_type m_value{nullptr}; 63 | std::exception_ptr m_exception; 64 | }; 65 | 66 | struct generator_sentinel 67 | { 68 | }; 69 | 70 | template 71 | class generator_iterator 72 | { 73 | using coroutine_handle = std::coroutine_handle>; 74 | 75 | public: 76 | using iterator_category = std::input_iterator_tag; 77 | using difference_type = std::ptrdiff_t; 78 | using value_type = typename generator_promise::value_type; 79 | using reference = typename generator_promise::reference_type; 80 | using pointer = typename generator_promise::pointer_type; 81 | 82 | generator_iterator() noexcept {} 83 | 84 | explicit generator_iterator(coroutine_handle coroutine) noexcept : m_coroutine(coroutine) {} 85 | 86 | friend auto operator==(const generator_iterator& it, generator_sentinel) noexcept -> bool 87 | { 88 | return it.m_coroutine == nullptr || it.m_coroutine.done(); 89 | } 90 | 91 | friend auto operator!=(const generator_iterator& it, generator_sentinel s) noexcept -> bool { return !(it == s); } 92 | 93 | friend auto operator==(generator_sentinel s, const generator_iterator& it) noexcept -> bool { return (it == s); } 94 | 95 | friend auto operator!=(generator_sentinel s, const generator_iterator& it) noexcept -> bool { return it != s; } 96 | 97 | generator_iterator& operator++() 98 | { 99 | 
m_coroutine.resume(); 100 | if (m_coroutine.done()) 101 | { 102 | m_coroutine.promise().rethrow_if_exception(); 103 | } 104 | 105 | return *this; 106 | } 107 | 108 | auto operator++(int) -> void { (void)operator++(); } 109 | 110 | reference operator*() const noexcept { return m_coroutine.promise().value(); } 111 | 112 | pointer operator->() const noexcept { return std::addressof(operator*()); } 113 | 114 | private: 115 | coroutine_handle m_coroutine{nullptr}; 116 | }; 117 | 118 | } // namespace detail 119 | 120 | template 121 | class generator : public std::ranges::view_base 122 | { 123 | public: 124 | using promise_type = detail::generator_promise; 125 | using iterator = detail::generator_iterator; 126 | using sentinel = detail::generator_sentinel; 127 | 128 | generator() noexcept : m_coroutine(nullptr) {} 129 | 130 | generator(const generator&) = delete; 131 | generator(generator&& other) noexcept : m_coroutine(other.m_coroutine) { other.m_coroutine = nullptr; } 132 | 133 | auto operator=(const generator&) = delete; 134 | auto operator=(generator&& other) noexcept -> generator& 135 | { 136 | m_coroutine = other.m_coroutine; 137 | other.m_coroutine = nullptr; 138 | 139 | return *this; 140 | } 141 | 142 | ~generator() 143 | { 144 | if (m_coroutine) 145 | { 146 | m_coroutine.destroy(); 147 | } 148 | } 149 | 150 | auto begin() -> iterator 151 | { 152 | if (m_coroutine != nullptr) 153 | { 154 | m_coroutine.resume(); 155 | if (m_coroutine.done()) 156 | { 157 | m_coroutine.promise().rethrow_if_exception(); 158 | } 159 | } 160 | 161 | return iterator{m_coroutine}; 162 | } 163 | 164 | auto end() noexcept -> sentinel { return sentinel{}; } 165 | 166 | private: 167 | friend class detail::generator_promise; 168 | 169 | explicit generator(std::coroutine_handle coroutine) noexcept : m_coroutine(coroutine) {} 170 | 171 | std::coroutine_handle m_coroutine; 172 | }; 173 | 174 | namespace detail 175 | { 176 | template 177 | auto generator_promise::get_return_object() noexcept -> 
generator 178 | { 179 | return generator{std::coroutine_handle>::from_promise(*this)}; 180 | } 181 | 182 | } // namespace detail 183 | 184 | } // namespace coro 185 | -------------------------------------------------------------------------------- /include/coro/latch.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/event.hpp" 4 | #include "coro/thread_pool.hpp" 5 | 6 | #include 7 | 8 | namespace coro 9 | { 10 | /** 11 | * The latch is thread safe counter to wait for 1 or more other tasks to complete, they signal their 12 | * completion by calling `count_down()` on the latch and upon the latch counter reaching zero the 13 | * coroutine `co_await`ing the latch then resumes execution. 14 | * 15 | * This is useful for spawning many worker tasks to complete either a computationally complex task 16 | * across a thread pool of workers, or waiting for many asynchronous results like http requests 17 | * to complete. 18 | */ 19 | class latch 20 | { 21 | public: 22 | /** 23 | * Creates a latch with the given count of tasks to wait to complete. 24 | * @param count The number of tasks to wait to complete, if this is zero or negative then the 25 | * latch starts 'completed' immediately and execution is resumed with no suspension. 26 | */ 27 | latch(std::int64_t count) noexcept : m_count(count), m_event(count <= 0) {} 28 | 29 | latch(const latch&) = delete; 30 | latch(latch&&) = delete; 31 | auto operator=(const latch&) -> latch& = delete; 32 | auto operator=(latch&&) -> latch& = delete; 33 | 34 | /** 35 | * @return True if the latch has been counted down to zero. 36 | */ 37 | auto is_ready() const noexcept -> bool { return m_event.is_set(); } 38 | 39 | /** 40 | * @return The number of tasks this latch is still waiting to complete. 
41 | */ 42 | auto remaining() const noexcept -> std::size_t { return m_count.load(std::memory_order::acquire); } 43 | 44 | /** 45 | * If the latch counter goes to zero then the task awaiting the latch is resumed. 46 | * @param n The number of tasks to complete towards the latch, defaults to 1. 47 | */ 48 | auto count_down(std::int64_t n = 1) noexcept -> void 49 | { 50 | if (m_count.fetch_sub(n, std::memory_order::acq_rel) <= n) 51 | { 52 | m_event.set(); 53 | } 54 | } 55 | 56 | /** 57 | * If the latch counter goes to zero then the task awaiting the latch is resumed on the given 58 | * thread pool. 59 | * @param tp The thread pool to schedule the task that is waiting on the latch on. 60 | * @param n The number of tasks to complete towards the latch, defaults to 1. 61 | */ 62 | template 63 | auto count_down(executor_type& executor, std::int64_t n = 1) noexcept -> void 64 | { 65 | if (m_count.fetch_sub(n, std::memory_order::acq_rel) <= n) 66 | { 67 | m_event.set(executor); 68 | } 69 | } 70 | 71 | auto operator co_await() const noexcept -> event::awaiter { return m_event.operator co_await(); } 72 | 73 | private: 74 | /// The number of tasks to wait for completion before triggering the event to resume. 75 | std::atomic m_count; 76 | /// The event to trigger when the latch counter reaches zero, this resumes the coroutine that 77 | /// is co_await'ing on the latch. 
78 | event m_event; 79 | }; 80 | 81 | } // namespace coro 82 | -------------------------------------------------------------------------------- /include/coro/mutex.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/task.hpp" 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace coro 11 | { 12 | class mutex; 13 | class scoped_lock; 14 | class condition_variable; 15 | 16 | namespace detail 17 | { 18 | 19 | struct lock_operation_base 20 | { 21 | explicit lock_operation_base(coro::mutex& m) : m_mutex(m) {} 22 | virtual ~lock_operation_base() = default; 23 | 24 | lock_operation_base(const lock_operation_base&) = delete; 25 | lock_operation_base(lock_operation_base&&) = delete; 26 | auto operator=(const lock_operation_base&) -> lock_operation_base& = delete; 27 | auto operator=(lock_operation_base&&) -> lock_operation_base& = delete; 28 | 29 | auto await_ready() const noexcept -> bool; 30 | auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool; 31 | 32 | protected: 33 | friend class coro::mutex; 34 | 35 | coro::mutex& m_mutex; 36 | std::coroutine_handle<> m_awaiting_coroutine; 37 | lock_operation_base* m_next{nullptr}; 38 | }; 39 | 40 | template 41 | struct lock_operation : public lock_operation_base 42 | { 43 | explicit lock_operation(coro::mutex& m) : lock_operation_base(m) {} 44 | ~lock_operation() override = default; 45 | 46 | lock_operation(const lock_operation&) = delete; 47 | lock_operation(lock_operation&&) = delete; 48 | auto operator=(const lock_operation&) -> lock_operation& = delete; 49 | auto operator=(lock_operation&&) -> lock_operation& = delete; 50 | 51 | auto await_resume() noexcept -> return_type 52 | { 53 | if constexpr (std::is_same_v) 54 | { 55 | return scoped_lock{m_mutex}; 56 | } 57 | else 58 | { 59 | return; 60 | } 61 | } 62 | }; 63 | 64 | } // namespace detail 65 | 66 | /** 67 | * A scoped RAII lock holder similar to 
std::unique_lock. 68 | */ 69 | class scoped_lock 70 | { 71 | friend class coro::mutex; 72 | friend class coro::condition_variable; // cv.wait() functions need to be able do unlock and re-lock 73 | 74 | public: 75 | enum class lock_strategy 76 | { 77 | /// The lock is already acquired, adopt it as the new owner. 78 | adopt 79 | }; 80 | 81 | explicit scoped_lock(class coro::mutex& m, lock_strategy strategy = lock_strategy::adopt) : m_mutex(&m) 82 | { 83 | // Future -> support acquiring the lock? Not sure how to do that without being able to 84 | // co_await in the constructor. 85 | (void)strategy; 86 | } 87 | 88 | /** 89 | * Unlocks the mutex upon this shared lock destructing. 90 | */ 91 | ~scoped_lock(); 92 | 93 | scoped_lock(const scoped_lock&) = delete; 94 | scoped_lock(scoped_lock&& other) 95 | : m_mutex(std::exchange(other.m_mutex, nullptr)) {} 96 | auto operator=(const scoped_lock&) -> scoped_lock& = delete; 97 | auto operator=(scoped_lock&& other) noexcept -> scoped_lock& 98 | { 99 | if (std::addressof(other) != this) 100 | { 101 | m_mutex = std::exchange(other.m_mutex, nullptr); 102 | } 103 | return *this; 104 | } 105 | 106 | /** 107 | * Unlocks the scoped lock prior to it going out of scope. 108 | */ 109 | auto unlock() -> void; 110 | 111 | private: 112 | class coro::mutex* m_mutex{nullptr}; 113 | }; 114 | 115 | class mutex 116 | { 117 | public: 118 | explicit mutex() noexcept : m_state(const_cast(unlocked_value())) {} 119 | ~mutex() = default; 120 | 121 | mutex(const mutex&) = delete; 122 | mutex(mutex&&) = delete; 123 | auto operator=(const mutex&) -> mutex& = delete; 124 | auto operator=(mutex&&) -> mutex& = delete; 125 | 126 | /** 127 | * @brief To acquire the mutex's lock co_await this function. Upon acquiring the lock it returns a coro::scoped_lock 128 | * which will hold the mutex until the coro::scoped_lock destructs. 129 | * @return A co_await'able operation to acquire the mutex. 
130 | */ 131 | [[nodiscard]] auto scoped_lock() -> detail::lock_operation { return detail::lock_operation{*this}; } 132 | 133 | /** 134 | * @brief Locks the mutex. 135 | * 136 | * @return detail::lock_operation 137 | */ 138 | [[nodiscard]] auto lock() -> detail::lock_operation { return detail::lock_operation{*this}; } 139 | 140 | /** 141 | * Attempts to lock the mutex. 142 | * @return True if the mutex lock was acquired, otherwise false. 143 | */ 144 | [[nodiscard]] auto try_lock() -> bool; 145 | 146 | /** 147 | * Releases the mutex's lock. 148 | */ 149 | auto unlock() -> void; 150 | 151 | private: 152 | friend struct detail::lock_operation_base; 153 | 154 | /// unlocked -> state == unlocked_value() 155 | /// locked but empty waiter list == nullptr 156 | /// locked with waiters == lock_operation* 157 | std::atomic m_state; 158 | 159 | /// A list of grabbed internal waiters that are only accessed by the unlock()'er. 160 | detail::lock_operation_base* m_internal_waiters{nullptr}; 161 | 162 | /// Inactive value, this cannot be nullptr since we want nullptr to signify that the mutex 163 | /// is locked but there are zero waiters, this makes it easy to CAS new waiters into the 164 | /// m_state linked list. 165 | auto unlocked_value() const noexcept -> const void* { return &m_state; } 166 | }; 167 | 168 | } // namespace coro 169 | -------------------------------------------------------------------------------- /include/coro/net/connect.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace coro::net 6 | { 7 | enum class connect_status 8 | { 9 | /// The connection has been established. 10 | connected, 11 | /// The given ip address could not be parsed or is invalid. 12 | invalid_ip_address, 13 | /// The connection operation timed out. 14 | timeout, 15 | /// There was an error, use errno to get more information on the specific error. 
16 | error 17 | }; 18 | 19 | /** 20 | * @param status String representation of the connection status. 21 | * @throw std::logic_error If provided an invalid connect_status enum value. 22 | */ 23 | auto to_string(const connect_status& status) -> const std::string&; 24 | 25 | } // namespace coro::net 26 | -------------------------------------------------------------------------------- /include/coro/net/hostname.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace coro::net 6 | { 7 | class hostname 8 | { 9 | public: 10 | hostname() = default; 11 | explicit hostname(std::string hn) : m_hostname(std::move(hn)) {} 12 | hostname(const hostname&) = default; 13 | hostname(hostname&&) = default; 14 | auto operator=(const hostname&) noexcept -> hostname& = default; 15 | auto operator=(hostname&&) noexcept -> hostname& = default; 16 | ~hostname() = default; 17 | 18 | auto data() const -> const std::string& { return m_hostname; } 19 | 20 | auto operator<=>(const hostname& other) const { return m_hostname <=> other.m_hostname; } 21 | 22 | private: 23 | std::string m_hostname; 24 | }; 25 | 26 | } // namespace coro::net 27 | -------------------------------------------------------------------------------- /include/coro/net/ip_address.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace coro::net 11 | { 12 | enum class domain_t : int 13 | { 14 | ipv4 = AF_INET, 15 | ipv6 = AF_INET6 16 | }; 17 | 18 | auto to_string(domain_t domain) -> const std::string&; 19 | 20 | class ip_address 21 | { 22 | public: 23 | static const constexpr size_t ipv4_len{4}; 24 | static const constexpr size_t ipv6_len{16}; 25 | 26 | ip_address() = default; 27 | ip_address(std::span binary_address, domain_t domain = domain_t::ipv4) : m_domain(domain) 28 | { 29 | if (m_domain == 
domain_t::ipv4 && binary_address.size() > ipv4_len) 30 | { 31 | throw std::runtime_error{"coro::net::ip_address provided binary ip address is too long"}; 32 | } 33 | else if (binary_address.size() > ipv6_len) 34 | { 35 | throw std::runtime_error{"coro::net::ip_address provided binary ip address is too long"}; 36 | } 37 | 38 | std::copy(binary_address.begin(), binary_address.end(), m_data.begin()); 39 | } 40 | ip_address(const ip_address&) = default; 41 | ip_address(ip_address&&) = default; 42 | auto operator=(const ip_address&) noexcept -> ip_address& = default; 43 | auto operator=(ip_address&&) noexcept -> ip_address& = default; 44 | ~ip_address() = default; 45 | 46 | auto domain() const -> domain_t { return m_domain; } 47 | auto data() const -> std::span 48 | { 49 | if (m_domain == domain_t::ipv4) 50 | { 51 | return std::span{m_data.data(), ipv4_len}; 52 | } 53 | else 54 | { 55 | return std::span{m_data.data(), ipv6_len}; 56 | } 57 | } 58 | 59 | static auto from_string(const std::string& address, domain_t domain = domain_t::ipv4) -> ip_address 60 | { 61 | ip_address addr{}; 62 | addr.m_domain = domain; 63 | 64 | auto success = inet_pton(static_cast(addr.m_domain), address.data(), addr.m_data.data()); 65 | if (success != 1) 66 | { 67 | throw std::runtime_error{"coro::net::ip_address faild to convert from string"}; 68 | } 69 | 70 | return addr; 71 | } 72 | 73 | auto to_string() const -> std::string 74 | { 75 | std::string output; 76 | if (m_domain == domain_t::ipv4) 77 | { 78 | output.resize(INET_ADDRSTRLEN, '\0'); 79 | } 80 | else 81 | { 82 | output.resize(INET6_ADDRSTRLEN, '\0'); 83 | } 84 | 85 | auto success = inet_ntop(static_cast(m_domain), m_data.data(), output.data(), output.length()); 86 | if (success != nullptr) 87 | { 88 | auto len = strnlen(success, output.length()); 89 | output.resize(len); 90 | } 91 | else 92 | { 93 | throw std::runtime_error{"coro::net::ip_address failed to convert to string representation"}; 94 | } 95 | 96 | return output; 97 | } 98 
| 99 | auto operator<=>(const ip_address& other) const = default; 100 | 101 | private: 102 | domain_t m_domain{domain_t::ipv4}; 103 | std::array m_data{}; 104 | }; 105 | 106 | } // namespace coro::net 107 | -------------------------------------------------------------------------------- /include/coro/net/recv_status.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | namespace coro::net 8 | { 9 | enum class recv_status : int64_t 10 | { 11 | ok = 0, 12 | /// The peer closed the socket. 13 | closed = -1, 14 | /// The udp socket has not been bind()'ed to a local port. 15 | udp_not_bound = -2, 16 | try_again = EAGAIN, 17 | // Note: that only the tcp::client will return this, a tls::client returns the specific ssl_would_block_* status'. 18 | would_block = EWOULDBLOCK, 19 | bad_file_descriptor = EBADF, 20 | connection_refused = ECONNREFUSED, 21 | memory_fault = EFAULT, 22 | interrupted = EINTR, 23 | invalid_argument = EINVAL, 24 | no_memory = ENOMEM, 25 | not_connected = ENOTCONN, 26 | not_a_socket = ENOTSOCK, 27 | }; 28 | 29 | auto to_string(recv_status status) -> const std::string&; 30 | 31 | } // namespace coro::net 32 | -------------------------------------------------------------------------------- /include/coro/net/send_status.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | namespace coro::net 7 | { 8 | enum class send_status : int64_t 9 | { 10 | ok = 0, 11 | closed = -1, 12 | permission_denied = EACCES, 13 | try_again = EAGAIN, 14 | would_block = EWOULDBLOCK, 15 | already_in_progress = EALREADY, 16 | bad_file_descriptor = EBADF, 17 | connection_reset = ECONNRESET, 18 | no_peer_address = EDESTADDRREQ, 19 | memory_fault = EFAULT, 20 | interrupted = EINTR, 21 | is_connection = EISCONN, 22 | message_size = EMSGSIZE, 23 | output_queue_full = ENOBUFS, 24 | no_memory = ENOMEM, 25 
| not_connected = ENOTCONN, 26 | not_a_socket = ENOTSOCK, 27 | operationg_not_supported = EOPNOTSUPP, 28 | pipe_closed = EPIPE, 29 | }; 30 | 31 | } // namespace coro::net 32 | -------------------------------------------------------------------------------- /include/coro/net/socket.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/net/ip_address.hpp" 4 | #include "coro/poll.hpp" 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | 14 | namespace coro::net 15 | { 16 | class socket 17 | { 18 | public: 19 | enum class type_t 20 | { 21 | /// udp datagram socket 22 | udp, 23 | /// tcp streaming socket 24 | tcp 25 | }; 26 | 27 | enum class blocking_t 28 | { 29 | /// This socket should block on system calls. 30 | yes, 31 | /// This socket should not block on system calls. 32 | no 33 | }; 34 | 35 | struct options 36 | { 37 | /// The domain for the socket. 38 | domain_t domain; 39 | /// The type of socket. 40 | type_t type; 41 | /// If the socket should be blocking or non-blocking. 42 | blocking_t blocking; 43 | }; 44 | 45 | static auto type_to_os(type_t type) -> int; 46 | 47 | socket() = default; 48 | explicit socket(int fd) : m_fd(fd) {} 49 | 50 | socket(const socket& other) : m_fd(dup(other.m_fd)) {} 51 | socket(socket&& other) : m_fd(std::exchange(other.m_fd, -1)) {} 52 | auto operator=(const socket& other) noexcept -> socket&; 53 | auto operator=(socket&& other) noexcept -> socket&; 54 | 55 | ~socket() { close(); } 56 | 57 | /** 58 | * This function returns true if the socket's file descriptor is a valid number, however it does 59 | * not imply if the socket is still usable. 60 | * @return True if the socket file descriptor is > 0. 61 | */ 62 | auto is_valid() const -> bool { return m_fd != -1; } 63 | 64 | /** 65 | * @param block Sets the socket to the given blocking mode. 
66 | */ 67 | auto blocking(blocking_t block) -> bool; 68 | 69 | /** 70 | * @param how Shuts the socket down with the given operations. 71 | * @param Returns true if the sockets given operations were shutdown. 72 | */ 73 | auto shutdown(poll_op how = poll_op::read_write) -> bool; 74 | 75 | /** 76 | * Closes the socket and sets this socket to an invalid state. 77 | */ 78 | auto close() -> void; 79 | 80 | /** 81 | * @return The native handle (file descriptor) for this socket. 82 | */ 83 | auto native_handle() const -> int { return m_fd; } 84 | 85 | private: 86 | int m_fd{-1}; 87 | }; 88 | 89 | /** 90 | * Creates a socket with the given socket options, this typically is used for creating sockets to 91 | * use within client objects, e.g. tcp::client and udp::client. 92 | * @param opts See socket::options for more details. 93 | */ 94 | auto make_socket(const socket::options& opts) -> socket; 95 | 96 | /** 97 | * Creates a socket that can accept connections or packets with the given socket options, address, 98 | * port and backlog. This is used for creating sockets to use within server objects, e.g. 99 | * tcp::server and udp::server. 100 | * @param opts See socket::options for more details 101 | * @param address The ip address to bind to. If the type of socket is tcp then it will also listen. 102 | * @param port The port to bind to. 103 | * @param backlog If the type of socket is tcp then the backlog of connections to allow. Does nothing 104 | * for udp types. 
105 | */ 106 | auto make_accept_socket( 107 | const socket::options& opts, const net::ip_address& address, uint16_t port, int32_t backlog = 128) -> socket; 108 | 109 | } // namespace coro::net 110 | -------------------------------------------------------------------------------- /include/coro/net/tcp/server.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/net/ip_address.hpp" 4 | #include "coro/net/socket.hpp" 5 | #include "coro/net/tcp/client.hpp" 6 | #include "coro/task.hpp" 7 | 8 | #include 9 | #include 10 | 11 | namespace coro 12 | { 13 | class io_scheduler; 14 | } // namespace coro 15 | 16 | namespace coro::net::tcp 17 | { 18 | 19 | class server 20 | { 21 | public: 22 | struct options 23 | { 24 | /// The ip address for the tcp server to bind and listen on. 25 | net::ip_address address{net::ip_address::from_string("0.0.0.0")}; 26 | /// The port for the tcp server to bind and listen on. 27 | uint16_t port{8080}; 28 | /// The kernel backlog of connections to buffer. 29 | int32_t backlog{128}; 30 | }; 31 | 32 | explicit server( 33 | std::shared_ptr scheduler, 34 | options opts = options{ 35 | .address = net::ip_address::from_string("0.0.0.0"), 36 | .port = 8080, 37 | .backlog = 128, 38 | }); 39 | 40 | server(const server&) = delete; 41 | server(server&& other); 42 | auto operator=(const server&) -> server& = delete; 43 | auto operator=(server&& other) -> server&; 44 | ~server() = default; 45 | 46 | /** 47 | * Polls for new incoming tcp connections. 48 | * @param timeout How long to wait for a new connection before timing out, zero waits indefinitely. 49 | * @return The result of the poll, 'event' means the poll was successful and there is at least 1 50 | * connection ready to be accepted. 
51 | */ 52 | auto poll(std::chrono::milliseconds timeout = std::chrono::milliseconds{0}) -> coro::task 53 | { 54 | return m_io_scheduler->poll(m_accept_socket, coro::poll_op::read, timeout); 55 | } 56 | 57 | /** 58 | * Accepts an incoming tcp client connection. On failure the tls clients socket will be set to 59 | * and invalid state, use the socket.is_value() to verify the client was correctly accepted. 60 | * @return The newly connected tcp client connection. 61 | */ 62 | auto accept() -> coro::net::tcp::client; 63 | 64 | private: 65 | friend client; 66 | /// The io scheduler for awaiting new connections. 67 | std::shared_ptr m_io_scheduler{nullptr}; 68 | /// The bind and listen options for this server. 69 | options m_options; 70 | /// The socket for accepting new tcp connections on. 71 | net::socket m_accept_socket{-1}; 72 | }; 73 | 74 | } // namespace coro::net::tcp 75 | -------------------------------------------------------------------------------- /include/coro/net/tls/connection_status.hpp: -------------------------------------------------------------------------------- 1 | #ifdef LIBCORO_FEATURE_TLS 2 | 3 | #pragma once 4 | 5 | #include 6 | 7 | namespace coro::net::tls 8 | { 9 | enum class connection_status 10 | { 11 | /// The tls connection was successful. 12 | connected, 13 | /// The connection hasn't been established yet, use connect() prior to the handshake(). 14 | not_connected, 15 | /// The connection needs a coro::net::tls::context to perform the handshake. 16 | context_required, 17 | /// The internal ssl memory alocation failed. 18 | resource_allocation_failed, 19 | /// Attempting to set the connections ssl socket/file descriptor failed. 20 | set_fd_failure, 21 | /// The handshake had an error. 22 | handshake_failed, 23 | /// The connection timed out. 24 | timeout, 25 | /// An error occurred while polling for read or write operations on the socket. 26 | poll_error, 27 | /// The socket was unexpectedly closed while attempting the handshake. 
28 | unexpected_close, 29 | /// The given ip address could not be parsed or is invalid. 30 | invalid_ip_address, 31 | /// There was an unrecoverable error, use errno to get more information on the specific error. 32 | error 33 | }; 34 | 35 | auto to_string(connection_status status) -> const std::string&; 36 | 37 | } // namespace coro::net::tls 38 | 39 | #endif // #ifdef LIBCORO_FEATURE_TLS 40 | -------------------------------------------------------------------------------- /include/coro/net/tls/context.hpp: -------------------------------------------------------------------------------- 1 | #ifdef LIBCORO_FEATURE_TLS 2 | 3 | #pragma once 4 | 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace coro::net::tls 14 | { 15 | class client; 16 | 17 | enum class tls_file_type : int 18 | { 19 | /// The file is of type ASN1 20 | asn1 = SSL_FILETYPE_ASN1, 21 | /// The file is of type PEM 22 | pem = SSL_FILETYPE_PEM 23 | }; 24 | 25 | enum class verify_peer_t : int 26 | { 27 | yes, 28 | no 29 | }; 30 | 31 | /** 32 | * TLS context, used with client or server types to provide secure connections. 33 | */ 34 | class context 35 | { 36 | public: 37 | /** 38 | * Creates a context with no certificate and no private key, maybe useful for testing. 39 | * @param verify_peer Should the peer be verified? Defaults to true. 40 | */ 41 | explicit context(verify_peer_t verify_peer = verify_peer_t::yes); 42 | 43 | /** 44 | * Creates a context with the given certificate and the given private key. 45 | * @param certificate The location of the certificate file. 46 | * @param certificate_type See `tls_file_type`. 47 | * @param private_key The location of the private key file. 48 | * @param private_key_type See `tls_file_type`. 49 | * @param verify_perr Should the peer be verified? Defaults to true. 
50 | */ 51 | context( 52 | std::filesystem::path certificate, 53 | tls_file_type certificate_type, 54 | std::filesystem::path private_key, 55 | tls_file_type private_key_type, 56 | verify_peer_t verify_peer = verify_peer_t::yes); 57 | ~context(); 58 | 59 | private: 60 | SSL_CTX* m_ssl_ctx{nullptr}; 61 | 62 | /// The following classes use the underlying SSL_CTX* object for performing SSL functions. 63 | friend client; 64 | 65 | auto native_handle() -> SSL_CTX* { return m_ssl_ctx; } 66 | auto native_handle() const -> const SSL_CTX* { return m_ssl_ctx; } 67 | }; 68 | 69 | } // namespace coro::net::tls 70 | 71 | #endif // #ifdef LIBCORO_FEATURE_TLS 72 | -------------------------------------------------------------------------------- /include/coro/net/tls/recv_status.hpp: -------------------------------------------------------------------------------- 1 | #ifdef LIBCORO_FEATURE_TLS 2 | 3 | #pragma once 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace coro::net::tls 11 | { 12 | 13 | enum class recv_status : int64_t 14 | { 15 | ok = SSL_ERROR_NONE, 16 | // The user provided an 0 length buffer. 17 | buffer_is_empty = -3, 18 | timeout = -4, 19 | /// The peer closed the socket. 
20 | closed = SSL_ERROR_ZERO_RETURN, 21 | error = SSL_ERROR_SSL, 22 | want_read = SSL_ERROR_WANT_READ, 23 | want_write = SSL_ERROR_WANT_WRITE, 24 | want_connect = SSL_ERROR_WANT_CONNECT, 25 | want_accept = SSL_ERROR_WANT_ACCEPT, 26 | want_x509_lookup = SSL_ERROR_WANT_X509_LOOKUP, 27 | error_syscall = SSL_ERROR_SYSCALL, 28 | 29 | }; 30 | 31 | auto to_string(recv_status status) -> const std::string&; 32 | 33 | } // namespace coro::net::tls 34 | 35 | #endif // #ifdef LIBCORO_FEATURE_TLS -------------------------------------------------------------------------------- /include/coro/net/tls/send_status.hpp: -------------------------------------------------------------------------------- 1 | #ifdef LIBCORO_FEATURE_TLS 2 | 3 | #pragma once 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace coro::net::tls 11 | { 12 | 13 | enum class send_status : int64_t 14 | { 15 | ok = SSL_ERROR_NONE, 16 | // The user provided an 0 length buffer. 17 | buffer_is_empty = -3, 18 | // The operation timed out. 19 | timeout = -4, 20 | /// The peer closed the socket. 
21 | closed = SSL_ERROR_ZERO_RETURN, 22 | error = SSL_ERROR_SSL, 23 | want_read = SSL_ERROR_WANT_READ, 24 | want_write = SSL_ERROR_WANT_WRITE, 25 | want_connect = SSL_ERROR_WANT_CONNECT, 26 | want_accept = SSL_ERROR_WANT_ACCEPT, 27 | want_x509_lookup = SSL_ERROR_WANT_X509_LOOKUP, 28 | error_syscall = SSL_ERROR_SYSCALL, 29 | 30 | }; 31 | 32 | auto to_string(send_status status) -> const std::string&; 33 | 34 | } // namespace coro::net::tls 35 | 36 | #endif // #ifdef LIBCORO_FEATURE_TLS -------------------------------------------------------------------------------- /include/coro/net/tls/server.hpp: -------------------------------------------------------------------------------- 1 | #ifdef LIBCORO_FEATURE_TLS 2 | 3 | #pragma once 4 | 5 | #include "coro/net/ip_address.hpp" 6 | #include "coro/net/socket.hpp" 7 | #include "coro/net/tls/client.hpp" 8 | #include "coro/task.hpp" 9 | 10 | #include 11 | #include 12 | 13 | namespace coro 14 | { 15 | class io_scheduler; 16 | } // namespace coro 17 | 18 | namespace coro::net::tls 19 | { 20 | class context; 21 | 22 | class server 23 | { 24 | public: 25 | struct options 26 | { 27 | /// The ip address for the tls server to bind and listen on. 28 | net::ip_address address{net::ip_address::from_string("0.0.0.0")}; 29 | /// The port for the tls server to bind and listen on. 30 | uint16_t port{8080}; 31 | /// The kernel backlog of connections to buffer. 32 | int32_t backlog{128}; 33 | }; 34 | 35 | explicit server( 36 | std::shared_ptr scheduler, 37 | std::shared_ptr tls_ctx, 38 | options opts = options{ 39 | .address = net::ip_address::from_string("0.0.0.0"), 40 | .port = 8080, 41 | .backlog = 128, 42 | }); 43 | 44 | server(const server&) = delete; 45 | server(server&& other); 46 | auto operator=(const server&) -> server& = delete; 47 | auto operator=(server&& other) -> server&; 48 | ~server() = default; 49 | 50 | /** 51 | * Polls for new incoming tcp connections. 
52 | * @param timeout How long to wait for a new connection before timing out, zero waits indefinitely. 53 | * @return The result of the poll, 'event' means the poll was successful and there is at least 1 54 | * connection ready to be accepted. 55 | */ 56 | auto poll(std::chrono::milliseconds timeout = std::chrono::milliseconds{0}) -> coro::task 57 | { 58 | return m_io_scheduler->poll(m_accept_socket, coro::poll_op::read, timeout); 59 | } 60 | 61 | /** 62 | * Accepts an incoming tcp client connection. On failure the tcp clients socket will be set to 63 | * and invalid state, use the socket.is_value() to verify the client was correctly accepted. 64 | * @param timeout The timeout to complete the TLS handshake. 65 | * @return The newly connected tcp client connection. 66 | */ 67 | auto accept(std::chrono::milliseconds timeout = std::chrono::seconds{30}) -> coro::task; 68 | 69 | private: 70 | /// The io scheduler for awaiting new connections. 71 | std::shared_ptr m_io_scheduler{nullptr}; 72 | // The tls context. 73 | std::shared_ptr m_tls_ctx{nullptr}; 74 | /// The bind and listen options for this server. 75 | options m_options; 76 | /// The socket for accepting new tcp connections on. 
77 | net::socket m_accept_socket{-1}; 78 | }; 79 | 80 | } // namespace coro::net::tls 81 | 82 | #endif // #ifdef LIBCORO_FEATURE_TLS 83 | -------------------------------------------------------------------------------- /include/coro/net/udp/peer.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "coro/concepts/buffer.hpp" 4 | #include "coro/io_scheduler.hpp" 5 | #include "coro/net/ip_address.hpp" 6 | #include "coro/net/recv_status.hpp" 7 | #include "coro/net/send_status.hpp" 8 | #include "coro/net/socket.hpp" 9 | #include "coro/task.hpp" 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | namespace coro 16 | { 17 | class io_scheduler; 18 | } // namespace coro 19 | 20 | namespace coro::net::udp 21 | { 22 | class peer 23 | { 24 | public: 25 | struct info 26 | { 27 | /// The ip address of the peer. 28 | net::ip_address address{net::ip_address::from_string("127.0.0.1")}; 29 | /// The port of the peer. 30 | uint16_t port{8080}; 31 | 32 | auto operator<=>(const info& other) const = default; 33 | }; 34 | 35 | /** 36 | * Creates a udp peer that can send packets but not receive them. This udp peer will not explicitly 37 | * bind to a local ip+port. 38 | */ 39 | explicit peer(std::shared_ptr scheduler, net::domain_t domain = net::domain_t::ipv4); 40 | 41 | /** 42 | * Creates a udp peer that can send and receive packets. This peer will bind to the given ip_port. 43 | */ 44 | explicit peer(std::shared_ptr scheduler, const info& bind_info); 45 | 46 | peer(const peer&) = default; 47 | peer(peer&&) = default; 48 | auto operator=(const peer&) noexcept -> peer& = default; 49 | auto operator=(peer&&) noexcept -> peer& = default; 50 | ~peer() = default; 51 | 52 | /** 53 | * @param op The poll operation to perform on the udp socket. Note that if this is a send only 54 | * udp socket (did not bind) then polling for read will not work. 55 | * @param timeout The timeout for the poll operation to be ready. 
56 | * @return The result status of the poll operation. 57 | */ 58 | auto poll(poll_op op, std::chrono::milliseconds timeout = std::chrono::milliseconds{0}) 59 | -> coro::task 60 | { 61 | co_return co_await m_io_scheduler->poll(m_socket, op, timeout); 62 | } 63 | 64 | /** 65 | * @param peer_info The peer to send the data to. 66 | * @param buffer The data to send. 67 | * @return The status of send call and a span view of any data that wasn't sent. This data if 68 | * un-sent will correspond to bytes at the end of the given buffer. 69 | */ 70 | template 71 | auto sendto(const info& peer_info, const buffer_type& buffer) -> std::pair> 72 | { 73 | if (buffer.empty()) 74 | { 75 | return {send_status::ok, std::span{}}; 76 | } 77 | 78 | sockaddr_in peer{}; 79 | peer.sin_family = static_cast(peer_info.address.domain()); 80 | peer.sin_port = htons(peer_info.port); 81 | peer.sin_addr = *reinterpret_cast(peer_info.address.data().data()); 82 | 83 | socklen_t peer_len{sizeof(peer)}; 84 | 85 | auto bytes_sent = ::sendto( 86 | m_socket.native_handle(), buffer.data(), buffer.size(), 0, reinterpret_cast(&peer), peer_len); 87 | 88 | if (bytes_sent >= 0) 89 | { 90 | return {send_status::ok, std::span{buffer.data() + bytes_sent, buffer.size() - bytes_sent}}; 91 | } 92 | else 93 | { 94 | return {static_cast(errno), std::span{}}; 95 | } 96 | } 97 | 98 | /** 99 | * @param buffer The buffer to receive data into. 100 | * @return The receive status, if ok then also the peer who sent the data and the data. 101 | * The span view of the data will be set to the size of the received data, this will 102 | * always start at the beggining of the buffer but depending on how large the data was 103 | * it might not fill the entire buffer. 104 | */ 105 | template 106 | auto recvfrom(buffer_type&& buffer) -> std::tuple> 107 | { 108 | // The user must bind locally to be able to receive packets. 
109 | if (!m_bound) 110 | { 111 | return {recv_status::udp_not_bound, peer::info{}, std::span{}}; 112 | } 113 | 114 | sockaddr_in peer{}; 115 | socklen_t peer_len{sizeof(peer)}; 116 | 117 | auto bytes_read = ::recvfrom( 118 | m_socket.native_handle(), buffer.data(), buffer.size(), 0, reinterpret_cast(&peer), &peer_len); 119 | 120 | if (bytes_read < 0) 121 | { 122 | return {static_cast(errno), peer::info{}, std::span{}}; 123 | } 124 | 125 | std::span ip_addr_view{ 126 | reinterpret_cast(&peer.sin_addr.s_addr), 127 | sizeof(peer.sin_addr.s_addr), 128 | }; 129 | 130 | return { 131 | recv_status::ok, 132 | peer::info{ 133 | .address = net::ip_address{ip_addr_view, static_cast(peer.sin_family)}, 134 | .port = ntohs(peer.sin_port)}, 135 | std::span{buffer.data(), static_cast(bytes_read)}}; 136 | } 137 | 138 | private: 139 | /// The scheduler that will drive this udp client. 140 | std::shared_ptr m_io_scheduler; 141 | /// The udp socket. 142 | net::socket m_socket{-1}; 143 | /// Did the user request this udp socket is bound locally to receive packets? 144 | bool m_bound{false}; 145 | }; 146 | 147 | } // namespace coro::net::udp 148 | -------------------------------------------------------------------------------- /include/coro/poll.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | namespace coro 7 | { 8 | enum class poll_op : uint64_t 9 | { 10 | /// Poll for read operations. 11 | read = EPOLLIN, 12 | /// Poll for write operations. 13 | write = EPOLLOUT, 14 | /// Poll for read and write operations. 
15 | read_write = EPOLLIN | EPOLLOUT 16 | }; 17 | 18 | inline auto poll_op_readable(poll_op op) -> bool 19 | { 20 | return (static_cast(op) & EPOLLIN); 21 | } 22 | 23 | inline auto poll_op_writeable(poll_op op) -> bool 24 | { 25 | return (static_cast(op) & EPOLLOUT); 26 | } 27 | 28 | auto to_string(poll_op op) -> const std::string&; 29 | 30 | enum class poll_status 31 | { 32 | /// The poll operation was was successful. 33 | event, 34 | /// The poll operation timed out. 35 | timeout, 36 | /// The file descriptor had an error while polling. 37 | error, 38 | /// The file descriptor has been closed by the remote or an internal error/close. 39 | closed 40 | }; 41 | 42 | auto to_string(poll_status status) -> const std::string&; 43 | 44 | } // namespace coro 45 | -------------------------------------------------------------------------------- /include/coro/semaphore.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | namespace coro 12 | { 13 | class semaphore 14 | { 15 | public: 16 | enum class acquire_result 17 | { 18 | acquired, 19 | semaphore_stopped 20 | }; 21 | 22 | static CORO_EXPORT std::string acquire_result_acquired; 23 | static CORO_EXPORT std::string acquire_result_semaphore_stopped; 24 | static CORO_EXPORT std::string acquire_result_unknown; 25 | 26 | static auto to_string(acquire_result ar) -> const std::string& 27 | { 28 | switch (ar) 29 | { 30 | case acquire_result::acquired: 31 | return acquire_result_acquired; 32 | case acquire_result::semaphore_stopped: 33 | return acquire_result_semaphore_stopped; 34 | } 35 | 36 | return acquire_result_unknown; 37 | } 38 | 39 | explicit semaphore(std::ptrdiff_t least_max_value_and_starting_value); 40 | explicit semaphore(std::ptrdiff_t least_max_value, std::ptrdiff_t starting_value); 41 | ~semaphore(); 42 | 43 | semaphore(const semaphore&) = delete; 44 | 
semaphore(semaphore&&) = delete; 45 | 46 | auto operator=(const semaphore&) noexcept -> semaphore& = delete; 47 | auto operator=(semaphore&&) noexcept -> semaphore& = delete; 48 | 49 | class acquire_operation 50 | { 51 | public: 52 | explicit acquire_operation(semaphore& s); 53 | 54 | auto await_ready() const noexcept -> bool; 55 | auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool; 56 | auto await_resume() const -> acquire_result; 57 | 58 | private: 59 | friend semaphore; 60 | 61 | semaphore& m_semaphore; 62 | std::coroutine_handle<> m_awaiting_coroutine; 63 | acquire_operation* m_next{nullptr}; 64 | }; 65 | 66 | auto release() -> void; 67 | 68 | /** 69 | * Acquires a resource from the semaphore, if the semaphore has no resources available then 70 | * this will wait until a resource becomes available. 71 | */ 72 | [[nodiscard]] auto acquire() -> acquire_operation { return acquire_operation{*this}; } 73 | 74 | /** 75 | * Attemtps to acquire a resource if there is any resources available. 76 | * @return True if the acquire operation was able to acquire a resource. 77 | */ 78 | auto try_acquire() -> bool; 79 | 80 | /** 81 | * @return The maximum number of resources the semaphore can contain. 82 | */ 83 | auto max() const noexcept -> std::ptrdiff_t { return m_least_max_value; } 84 | 85 | /** 86 | * The current number of resources available in this semaphore. 87 | */ 88 | auto value() const noexcept -> std::ptrdiff_t { return m_counter.load(std::memory_order::relaxed); } 89 | 90 | /** 91 | * Stops the semaphore and will notify all release/acquire waiters to wake up in a failed state. 92 | * Once this is set it cannot be un-done and all future oprations on the semaphore will fail. 
93 | */ 94 | auto notify_waiters() noexcept -> void; 95 | 96 | private: 97 | friend class release_operation; 98 | friend class acquire_operation; 99 | 100 | const std::ptrdiff_t m_least_max_value; 101 | std::atomic m_counter; 102 | 103 | std::mutex m_waiter_mutex{}; 104 | acquire_operation* m_acquire_waiters{nullptr}; 105 | 106 | std::atomic m_notify_all_set{false}; 107 | }; 108 | 109 | } // namespace coro 110 | -------------------------------------------------------------------------------- /include/coro/time.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace coro 6 | { 7 | using clock = std::chrono::steady_clock; 8 | using time_point = clock::time_point; 9 | } // namespace coro 10 | -------------------------------------------------------------------------------- /libcoro.pc.in: -------------------------------------------------------------------------------- 1 | prefix="@CMAKE_INSTALL_PREFIX@" 2 | libdir="${prefix}/lib" 3 | includedir="${prefix}/include" 4 | 5 | Name: @PROJECT_NAME@ 6 | Description: @CMAKE_PROJECT_DESCRIPTION@ 7 | Version: @PROJECT_VERSION@ 8 | Cflags: -I${includedir} 9 | Libs: -L${libdir} -l@target1@ 10 | -------------------------------------------------------------------------------- /src/default_executor.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/default_executor.hpp" 2 | #include 3 | #include 4 | 5 | static const auto s_initialization_check_interval = std::chrono::milliseconds(1); 6 | 7 | static coro::thread_pool::options s_default_executor_options; 8 | static std::atomic s_default_executor = {nullptr}; 9 | static std::shared_ptr s_default_executor_shared; 10 | static const auto s_default_executor_initializing = reinterpret_cast(&s_default_executor); 11 | 12 | #ifdef LIBCORO_FEATURE_NETWORKING 13 | static coro::io_scheduler::options s_default_io_executor_options; 14 | static std::atomic 
s_default_io_executor = {nullptr}; 15 | static std::shared_ptr s_default_io_executor_shared; 16 | static const auto s_default_io_executor_initializing = reinterpret_cast(&s_default_io_executor); 17 | #endif 18 | 19 | void coro::default_executor::set_executor_options(thread_pool::options thread_pool_options) 20 | { 21 | s_default_executor_options = thread_pool_options; 22 | } 23 | 24 | std::shared_ptr coro::default_executor::executor() 25 | { 26 | do 27 | { 28 | auto result = s_default_executor.load(std::memory_order::acquire); 29 | while (result == s_default_executor_initializing) 30 | { 31 | std::this_thread::sleep_for(s_initialization_check_interval); 32 | result = s_default_executor.load(std::memory_order::acquire); 33 | } 34 | 35 | if (result) 36 | { 37 | return result->shared_from_this(); 38 | } 39 | 40 | if (s_default_executor.compare_exchange_strong( 41 | result, s_default_executor_initializing, std::memory_order::release, std::memory_order::acquire)) 42 | { 43 | break; 44 | } 45 | } while (true); 46 | 47 | s_default_executor_shared = std::make_shared(s_default_executor_options); 48 | s_default_executor.store(s_default_executor_shared.get(), std::memory_order::release); 49 | return s_default_executor_shared; 50 | } 51 | 52 | #ifdef LIBCORO_FEATURE_NETWORKING 53 | void coro::default_executor::set_io_executor_options(io_scheduler::options io_scheduler_options) 54 | { 55 | s_default_io_executor_options = io_scheduler_options; 56 | } 57 | 58 | std::shared_ptr coro::default_executor::io_executor() 59 | { 60 | do 61 | { 62 | auto result = s_default_io_executor.load(std::memory_order::acquire); 63 | while (result == s_default_io_executor_initializing) 64 | { 65 | std::this_thread::sleep_for(s_initialization_check_interval); 66 | result = s_default_io_executor.load(std::memory_order::acquire); 67 | } 68 | 69 | if (result) 70 | { 71 | return result->shared_from_this(); 72 | } 73 | 74 | if (s_default_io_executor.compare_exchange_strong( 75 | result, 
s_default_io_executor_initializing, std::memory_order::release, std::memory_order::acquire)) 76 | { 77 | break; 78 | } 79 | } while (true); 80 | 81 | s_default_io_executor_shared = coro::io_scheduler::make_shared(s_default_io_executor_options); 82 | s_default_io_executor.store(s_default_io_executor_shared.get(), std::memory_order::release); 83 | return s_default_io_executor_shared; 84 | } 85 | #endif 86 | -------------------------------------------------------------------------------- /src/detail/task_self_deleting.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/detail/task_self_deleting.hpp" 2 | 3 | #include 4 | 5 | namespace coro::detail 6 | { 7 | 8 | promise_self_deleting::promise_self_deleting() 9 | { 10 | } 11 | 12 | promise_self_deleting::~promise_self_deleting() 13 | { 14 | } 15 | 16 | promise_self_deleting::promise_self_deleting(promise_self_deleting&& other) 17 | : m_executor_size(std::exchange(other.m_executor_size, nullptr)) 18 | { 19 | } 20 | 21 | auto promise_self_deleting::operator=(promise_self_deleting&& other) -> promise_self_deleting& 22 | { 23 | if (std::addressof(other) != this) 24 | { 25 | m_executor_size = std::exchange(other.m_executor_size, nullptr); 26 | } 27 | 28 | return *this; 29 | } 30 | 31 | auto promise_self_deleting::get_return_object() -> task_self_deleting 32 | { 33 | return task_self_deleting{*this}; 34 | } 35 | 36 | auto promise_self_deleting::initial_suspend() -> std::suspend_always 37 | { 38 | return std::suspend_always{}; 39 | } 40 | 41 | auto promise_self_deleting::final_suspend() noexcept -> std::suspend_never 42 | { 43 | // Notify the task_container that this coroutine has completed. 44 | if (m_executor_size != nullptr) 45 | { 46 | m_executor_size->fetch_sub(1, std::memory_order::release); 47 | } 48 | 49 | // By not suspending this lets the coroutine destroy itself. 
50 | return std::suspend_never{}; 51 | } 52 | 53 | auto promise_self_deleting::return_void() noexcept -> void 54 | { 55 | // no-op 56 | } 57 | 58 | auto promise_self_deleting::unhandled_exception() -> void 59 | { 60 | // The user cannot access the promise anyways, ignore the exception. 61 | } 62 | 63 | auto promise_self_deleting::executor_size(std::atomic& executor_size) -> void 64 | { 65 | m_executor_size = &executor_size; 66 | } 67 | 68 | task_self_deleting::task_self_deleting(promise_self_deleting& promise) : m_promise(&promise) 69 | { 70 | } 71 | 72 | task_self_deleting::~task_self_deleting() 73 | { 74 | } 75 | 76 | task_self_deleting::task_self_deleting(task_self_deleting&& other) : m_promise(other.m_promise) 77 | { 78 | } 79 | 80 | auto task_self_deleting::operator=(task_self_deleting&& other) -> task_self_deleting& 81 | { 82 | if (std::addressof(other) != this) 83 | { 84 | m_promise = other.m_promise; 85 | } 86 | 87 | return *this; 88 | } 89 | 90 | auto make_task_self_deleting(coro::task user_task) -> task_self_deleting 91 | { 92 | co_await user_task; 93 | co_return; 94 | } 95 | 96 | } // namespace coro::detail 97 | -------------------------------------------------------------------------------- /src/event.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/event.hpp" 2 | #include "coro/thread_pool.hpp" 3 | 4 | namespace coro 5 | { 6 | event::event(bool initially_set) noexcept : m_state((initially_set) ? static_cast(this) : nullptr) 7 | { 8 | } 9 | 10 | auto event::set(resume_order_policy policy) noexcept -> void 11 | { 12 | // Exchange the state to this, if the state was previously not this, then traverse the list 13 | // of awaiters and resume their coroutines. 14 | void* old_value = m_state.exchange(this, std::memory_order::acq_rel); 15 | if (old_value != this) 16 | { 17 | // If FIFO has been requsted then reverse the order upon resuming. 
18 | if (policy == resume_order_policy::fifo) 19 | { 20 | old_value = reverse(static_cast(old_value)); 21 | } 22 | // else lifo nothing to do 23 | 24 | auto* waiters = static_cast(old_value); 25 | while (waiters != nullptr) 26 | { 27 | auto* next = waiters->m_next; 28 | waiters->m_awaiting_coroutine.resume(); 29 | waiters = next; 30 | } 31 | } 32 | } 33 | 34 | auto event::reverse(awaiter* curr) -> awaiter* 35 | { 36 | if (curr == nullptr || curr->m_next == nullptr) 37 | { 38 | return curr; 39 | } 40 | 41 | awaiter* prev = nullptr; 42 | awaiter* next = nullptr; 43 | while (curr != nullptr) 44 | { 45 | next = curr->m_next; 46 | curr->m_next = prev; 47 | prev = curr; 48 | curr = next; 49 | } 50 | 51 | return prev; 52 | } 53 | 54 | auto event::awaiter::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool 55 | { 56 | const void* const set_state = &m_event; 57 | 58 | m_awaiting_coroutine = awaiting_coroutine; 59 | 60 | // This value will update if other threads write to it via acquire. 61 | void* old_value = m_event.m_state.load(std::memory_order::acquire); 62 | do 63 | { 64 | // Resume immediately if already in the set state. 
65 | if (old_value == set_state) 66 | { 67 | return false; 68 | } 69 | 70 | m_next = static_cast(old_value); 71 | } while (!m_event.m_state.compare_exchange_weak( 72 | old_value, this, std::memory_order::release, std::memory_order::acquire)); 73 | 74 | return true; 75 | } 76 | 77 | auto event::reset() noexcept -> void 78 | { 79 | void* old_value = this; 80 | m_state.compare_exchange_strong(old_value, nullptr, std::memory_order::acquire); 81 | } 82 | 83 | } // namespace coro 84 | -------------------------------------------------------------------------------- /src/mutex.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/mutex.hpp" 2 | 3 | #include 4 | 5 | namespace coro 6 | { 7 | namespace detail 8 | { 9 | auto lock_operation_base::await_ready() const noexcept -> bool 10 | { 11 | if (m_mutex.try_lock()) 12 | { 13 | // Since there is no mutex acquired, insert a memory fence to act like it. 14 | std::atomic_thread_fence(std::memory_order::acquire); 15 | return true; 16 | } 17 | return false; 18 | } 19 | 20 | auto lock_operation_base::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool 21 | { 22 | m_awaiting_coroutine = awaiting_coroutine; 23 | void* current = m_mutex.m_state.load(std::memory_order::acquire); 24 | void* new_value; 25 | 26 | const void* unlocked_value = m_mutex.unlocked_value(); 27 | do 28 | { 29 | if (current == unlocked_value) 30 | { 31 | // If the current value is 'unlocked' then attempt to lock it. 32 | new_value = nullptr; 33 | } 34 | else 35 | { 36 | // If the current value is a waiting lock operation, or nullptr, set our next to that 37 | // lock op and attempt to set ourself as the head of the waiter list. 
38 | m_next = static_cast(current); 39 | new_value = static_cast(this); 40 | } 41 | } while (!m_mutex.m_state.compare_exchange_weak(current, new_value, std::memory_order::acq_rel)); 42 | 43 | // Don't suspend if the state went from unlocked -> locked with zero waiters. 44 | if (current == unlocked_value) 45 | { 46 | std::atomic_thread_fence(std::memory_order::acquire); 47 | m_awaiting_coroutine = nullptr; // nothing to await later since this doesn't suspend 48 | return false; 49 | } 50 | 51 | return true; 52 | } 53 | 54 | } // namespace detail 55 | 56 | scoped_lock::~scoped_lock() 57 | { 58 | unlock(); 59 | } 60 | 61 | auto scoped_lock::unlock() -> void 62 | { 63 | if (m_mutex != nullptr) 64 | { 65 | std::atomic_thread_fence(std::memory_order::acq_rel); 66 | m_mutex->unlock(); 67 | m_mutex = nullptr; 68 | } 69 | } 70 | 71 | auto mutex::try_lock() -> bool 72 | { 73 | void* expected = const_cast(unlocked_value()); 74 | return m_state.compare_exchange_strong(expected, nullptr, std::memory_order::acq_rel, std::memory_order::relaxed); 75 | } 76 | 77 | auto mutex::unlock() -> void 78 | { 79 | if (m_internal_waiters == nullptr) 80 | { 81 | void* current = m_state.load(std::memory_order::relaxed); 82 | if (current == nullptr) 83 | { 84 | // If there are no internal waiters and there are no atomic waiters, attempt to set the 85 | // mutex as unlocked. 86 | if (m_state.compare_exchange_strong( 87 | current, 88 | const_cast(unlocked_value()), 89 | std::memory_order::release, 90 | std::memory_order::relaxed)) 91 | { 92 | return; // The mutex is now unlocked with zero waiters. 93 | } 94 | // else we failed to unlock, someone added themself as a waiter. 95 | } 96 | 97 | // There are waiters on the atomic list, acquire them and update the state for all others. 
98 | m_internal_waiters = static_cast(m_state.exchange(nullptr, std::memory_order::acq_rel)); 99 | 100 | // Should internal waiters be reversed to allow for true FIFO, or should they be resumed 101 | // in this reverse order to maximum throuhgput? If this list ever gets 'long' the reversal 102 | // will take some time, but it might guarantee better latency across waiters. This LIFO 103 | // middle ground on the atomic waiters means the best throughput at the cost of the first 104 | // waiter possibly having added latency based on the queue length of waiters. Either way 105 | // incurs a cost but this way for short lists will most likely be faster even though it 106 | // isn't completely fair. 107 | } 108 | 109 | // assert m_internal_waiters != nullptr 110 | 111 | detail::lock_operation_base* to_resume = m_internal_waiters; 112 | m_internal_waiters = m_internal_waiters->m_next; 113 | to_resume->m_awaiting_coroutine.resume(); 114 | } 115 | 116 | } // namespace coro 117 | -------------------------------------------------------------------------------- /src/net/connect.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/connect.hpp" 2 | 3 | #include 4 | 5 | namespace coro::net 6 | { 7 | const static std::string connect_status_connected{"connected"}; 8 | const static std::string connect_status_invalid_ip_address{"invalid_ip_address"}; 9 | const static std::string connect_status_timeout{"timeout"}; 10 | const static std::string connect_status_error{"error"}; 11 | 12 | auto to_string(const connect_status& status) -> const std::string& 13 | { 14 | switch (status) 15 | { 16 | case connect_status::connected: 17 | return connect_status_connected; 18 | case connect_status::invalid_ip_address: 19 | return connect_status_invalid_ip_address; 20 | case connect_status::timeout: 21 | return connect_status_timeout; 22 | case connect_status::error: 23 | return connect_status_error; 24 | } 25 | 26 | throw 
std::logic_error{"Invalid/unknown connect status."}; 27 | } 28 | 29 | } // namespace coro::net 30 | -------------------------------------------------------------------------------- /src/net/dns/resolver.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/dns/resolver.hpp" 2 | 3 | namespace coro::net::dns 4 | { 5 | uint64_t m_ares_count{0}; 6 | std::mutex m_ares_mutex{}; 7 | } // namespace coro::net::dns 8 | -------------------------------------------------------------------------------- /src/net/ip_address.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/ip_address.hpp" 2 | 3 | namespace coro::net 4 | { 5 | static std::string domain_ipv4{"ipv4"}; 6 | static std::string domain_ipv6{"ipv6"}; 7 | 8 | auto to_string(domain_t domain) -> const std::string& 9 | { 10 | switch (domain) 11 | { 12 | case domain_t::ipv4: 13 | return domain_ipv4; 14 | case domain_t::ipv6: 15 | return domain_ipv6; 16 | } 17 | throw std::runtime_error{"coro::net::to_string(domain_t) unknown domain"}; 18 | } 19 | 20 | } // namespace coro::net 21 | -------------------------------------------------------------------------------- /src/net/recv_status.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/recv_status.hpp" 2 | 3 | namespace coro::net 4 | { 5 | static const std::string recv_status_ok{"ok"}; 6 | static const std::string recv_status_closed{"closed"}; 7 | static const std::string recv_status_udp_not_bound{"udp_not_bound"}; 8 | static const std::string recv_status_would_block{"would_block"}; 9 | static const std::string recv_status_bad_file_descriptor{"bad_file_descriptor"}; 10 | static const std::string recv_status_connection_refused{"connection_refused"}; 11 | static const std::string recv_status_memory_fault{"memory_fault"}; 12 | static const std::string recv_status_interrupted{"interrupted"}; 13 | static const 
std::string recv_status_invalid_argument{"invalid_argument"}; 14 | static const std::string recv_status_no_memory{"no_memory"}; 15 | static const std::string recv_status_not_connected{"not_connected"}; 16 | static const std::string recv_status_not_a_socket{"not_a_socket"}; 17 | static const std::string recv_status_unknown{"unknown"}; 18 | 19 | auto to_string(recv_status status) -> const std::string& 20 | { 21 | switch (status) 22 | { 23 | case recv_status::ok: 24 | return recv_status_ok; 25 | case recv_status::closed: 26 | return recv_status_closed; 27 | case recv_status::udp_not_bound: 28 | return recv_status_udp_not_bound; 29 | // case recv_status::try_again: return recv_status_try_again; 30 | case recv_status::would_block: 31 | return recv_status_would_block; 32 | case recv_status::bad_file_descriptor: 33 | return recv_status_bad_file_descriptor; 34 | case recv_status::connection_refused: 35 | return recv_status_connection_refused; 36 | case recv_status::memory_fault: 37 | return recv_status_memory_fault; 38 | case recv_status::interrupted: 39 | return recv_status_interrupted; 40 | case recv_status::invalid_argument: 41 | return recv_status_invalid_argument; 42 | case recv_status::no_memory: 43 | return recv_status_no_memory; 44 | case recv_status::not_connected: 45 | return recv_status_not_connected; 46 | case recv_status::not_a_socket: 47 | return recv_status_not_a_socket; 48 | } 49 | 50 | return recv_status_unknown; 51 | } 52 | 53 | } // namespace coro::net 54 | -------------------------------------------------------------------------------- /src/net/send_status.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/send_status.hpp" 2 | 3 | namespace coro::net 4 | { 5 | } // namespace coro::net 6 | -------------------------------------------------------------------------------- /src/net/socket.cpp: -------------------------------------------------------------------------------- 1 | #include 
"coro/net/socket.hpp" 2 | 3 | namespace coro::net 4 | { 5 | auto socket::type_to_os(type_t type) -> int 6 | { 7 | switch (type) 8 | { 9 | case type_t::udp: 10 | return SOCK_DGRAM; 11 | case type_t::tcp: 12 | return SOCK_STREAM; 13 | default: 14 | throw std::runtime_error{"Unknown socket::type_t."}; 15 | } 16 | } 17 | 18 | auto socket::operator=(const socket& other) noexcept -> socket& 19 | { 20 | this->close(); 21 | this->m_fd = dup(other.m_fd); 22 | return *this; 23 | } 24 | 25 | auto socket::operator=(socket&& other) noexcept -> socket& 26 | { 27 | if (std::addressof(other) != this) 28 | { 29 | m_fd = std::exchange(other.m_fd, -1); 30 | } 31 | 32 | return *this; 33 | } 34 | 35 | auto socket::blocking(blocking_t block) -> bool 36 | { 37 | if (m_fd < 0) 38 | { 39 | return false; 40 | } 41 | 42 | int flags = fcntl(m_fd, F_GETFL, 0); 43 | if (flags == -1) 44 | { 45 | return false; 46 | } 47 | 48 | // Add or subtract non-blocking flag. 49 | flags = (block == blocking_t::yes) ? flags & ~O_NONBLOCK : (flags | O_NONBLOCK); 50 | 51 | return (fcntl(m_fd, F_SETFL, flags) == 0); 52 | } 53 | 54 | auto socket::shutdown(poll_op how) -> bool 55 | { 56 | if (m_fd != -1) 57 | { 58 | int h{0}; 59 | switch (how) 60 | { 61 | case poll_op::read: 62 | h = SHUT_RD; 63 | break; 64 | case poll_op::write: 65 | h = SHUT_WR; 66 | break; 67 | case poll_op::read_write: 68 | h = SHUT_RDWR; 69 | break; 70 | } 71 | 72 | return (::shutdown(m_fd, h) == 0); 73 | } 74 | return false; 75 | } 76 | 77 | auto socket::close() -> void 78 | { 79 | if (m_fd != -1) 80 | { 81 | ::close(m_fd); 82 | m_fd = -1; 83 | } 84 | } 85 | 86 | auto make_socket(const socket::options& opts) -> socket 87 | { 88 | socket s{::socket(static_cast(opts.domain), socket::type_to_os(opts.type), 0)}; 89 | if (s.native_handle() < 0) 90 | { 91 | throw std::runtime_error{"Failed to create socket."}; 92 | } 93 | 94 | if (opts.blocking == socket::blocking_t::no) 95 | { 96 | if (s.blocking(socket::blocking_t::no) == false) 97 | { 98 | 
throw std::runtime_error{"Failed to set socket to non-blocking mode."}; 99 | } 100 | } 101 | 102 | return s; 103 | } 104 | 105 | auto make_accept_socket(const socket::options& opts, const net::ip_address& address, uint16_t port, int32_t backlog) 106 | -> socket 107 | { 108 | socket s = make_socket(opts); 109 | 110 | int sock_opt{1}; 111 | if (setsockopt(s.native_handle(), SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &sock_opt, sizeof(sock_opt)) < 0) 112 | { 113 | throw std::runtime_error{"Failed to setsockopt(SO_REUSEADDR | SO_REUSEPORT)"}; 114 | } 115 | 116 | sockaddr_in server{}; 117 | server.sin_family = static_cast(opts.domain); 118 | server.sin_port = htons(port); 119 | server.sin_addr = *reinterpret_cast(address.data().data()); 120 | 121 | if (bind(s.native_handle(), (struct sockaddr*)&server, sizeof(server)) < 0) 122 | { 123 | throw std::runtime_error{"Failed to bind."}; 124 | } 125 | 126 | if (opts.type == socket::type_t::tcp) 127 | { 128 | if (listen(s.native_handle(), backlog) < 0) 129 | { 130 | throw std::runtime_error{"Failed to listen."}; 131 | } 132 | } 133 | 134 | return s; 135 | } 136 | 137 | } // namespace coro::net 138 | -------------------------------------------------------------------------------- /src/net/tcp/client.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/tcp/client.hpp" 2 | 3 | namespace coro::net::tcp 4 | { 5 | using namespace std::chrono_literals; 6 | 7 | client::client(std::shared_ptr scheduler, options opts) 8 | : m_io_scheduler(std::move(scheduler)), 9 | m_options(std::move(opts)), 10 | m_socket(net::make_socket( 11 | net::socket::options{m_options.address.domain(), net::socket::type_t::tcp, net::socket::blocking_t::no})) 12 | { 13 | if (m_io_scheduler == nullptr) 14 | { 15 | throw std::runtime_error{"tcp::client cannot have nullptr io_scheduler"}; 16 | } 17 | } 18 | 19 | client::client(std::shared_ptr scheduler, net::socket socket, options opts) 20 | : 
m_io_scheduler(std::move(scheduler)), 21 | m_options(std::move(opts)), 22 | m_socket(std::move(socket)), 23 | m_connect_status(connect_status::connected) 24 | { 25 | // io_scheduler is assumed good since it comes from a tcp::server. 26 | 27 | // Force the socket to be non-blocking. 28 | m_socket.blocking(coro::net::socket::blocking_t::no); 29 | } 30 | 31 | client::client(const client& other) 32 | : m_io_scheduler(other.m_io_scheduler), 33 | m_options(other.m_options), 34 | m_socket(other.m_socket), 35 | m_connect_status(other.m_connect_status) 36 | { 37 | } 38 | 39 | client::client(client&& other) 40 | : m_io_scheduler(std::move(other.m_io_scheduler)), 41 | m_options(std::move(other.m_options)), 42 | m_socket(std::move(other.m_socket)), 43 | m_connect_status(std::exchange(other.m_connect_status, std::nullopt)) 44 | { 45 | } 46 | 47 | client::~client() 48 | { 49 | } 50 | 51 | auto client::operator=(const client& other) noexcept -> client& 52 | { 53 | if (std::addressof(other) != this) 54 | { 55 | m_io_scheduler = other.m_io_scheduler; 56 | m_options = other.m_options; 57 | m_socket = other.m_socket; 58 | m_connect_status = other.m_connect_status; 59 | } 60 | return *this; 61 | } 62 | 63 | auto client::operator=(client&& other) noexcept -> client& 64 | { 65 | if (std::addressof(other) != this) 66 | { 67 | m_io_scheduler = std::move(other.m_io_scheduler); 68 | m_options = std::move(other.m_options); 69 | m_socket = std::move(other.m_socket); 70 | m_connect_status = std::exchange(other.m_connect_status, std::nullopt); 71 | } 72 | return *this; 73 | } 74 | 75 | auto client::connect(std::chrono::milliseconds timeout) -> coro::task 76 | { 77 | // Only allow the user to connect per tcp client once, if they need to re-connect they should 78 | // make a new tcp::client. 79 | if (m_connect_status.has_value()) 80 | { 81 | co_return m_connect_status.value(); 82 | } 83 | 84 | // This enforces the connection status is aways set on the client object upon returning. 
85 | auto return_value = [this](connect_status s) -> connect_status 86 | { 87 | m_connect_status = s; 88 | return s; 89 | }; 90 | 91 | sockaddr_in server{}; 92 | server.sin_family = static_cast(m_options.address.domain()); 93 | server.sin_port = htons(m_options.port); 94 | server.sin_addr = *reinterpret_cast(m_options.address.data().data()); 95 | 96 | auto cret = ::connect(m_socket.native_handle(), reinterpret_cast(&server), sizeof(server)); 97 | if (cret == 0) 98 | { 99 | co_return return_value(connect_status::connected); 100 | } 101 | else if (cret == -1) 102 | { 103 | // If the connect is happening in the background poll for write on the socket to trigger 104 | // when the connection is established. 105 | if (errno == EAGAIN || errno == EINPROGRESS) 106 | { 107 | auto pstatus = co_await m_io_scheduler->poll(m_socket, poll_op::write, timeout); 108 | if (pstatus == poll_status::event) 109 | { 110 | int result{0}; 111 | socklen_t result_length{sizeof(result)}; 112 | if (getsockopt(m_socket.native_handle(), SOL_SOCKET, SO_ERROR, &result, &result_length) < 0) 113 | { 114 | std::cerr << "connect failed to getsockopt after write poll event\n"; 115 | } 116 | 117 | if (result == 0) 118 | { 119 | co_return return_value(connect_status::connected); 120 | } 121 | } 122 | else if (pstatus == poll_status::timeout) 123 | { 124 | co_return return_value(connect_status::timeout); 125 | } 126 | } 127 | } 128 | 129 | co_return return_value(connect_status::error); 130 | } 131 | 132 | } // namespace coro::net::tcp 133 | -------------------------------------------------------------------------------- /src/net/tcp/server.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/tcp/server.hpp" 2 | 3 | #include "coro/io_scheduler.hpp" 4 | 5 | namespace coro::net::tcp 6 | { 7 | server::server(std::shared_ptr scheduler, options opts) 8 | : m_io_scheduler(std::move(scheduler)), 9 | m_options(std::move(opts)), 10 | 
m_accept_socket(net::make_accept_socket( 11 | net::socket::options{net::domain_t::ipv4, net::socket::type_t::tcp, net::socket::blocking_t::no}, 12 | m_options.address, 13 | m_options.port, 14 | m_options.backlog)) 15 | { 16 | if (m_io_scheduler == nullptr) 17 | { 18 | throw std::runtime_error{"tcp::server cannot have a nullptr io_scheduler"}; 19 | } 20 | } 21 | 22 | server::server(server&& other) 23 | : m_io_scheduler(std::move(other.m_io_scheduler)), 24 | m_options(std::move(other.m_options)), 25 | m_accept_socket(std::move(other.m_accept_socket)) 26 | { 27 | } 28 | 29 | auto server::operator=(server&& other) -> server& 30 | { 31 | if (std::addressof(other) != this) 32 | { 33 | m_io_scheduler = std::move(other.m_io_scheduler); 34 | m_options = std::move(other.m_options); 35 | m_accept_socket = std::move(other.m_accept_socket); 36 | } 37 | return *this; 38 | } 39 | 40 | auto server::accept() -> coro::net::tcp::client 41 | { 42 | sockaddr_in client{}; 43 | constexpr const int len = sizeof(struct sockaddr_in); 44 | net::socket s{::accept( 45 | m_accept_socket.native_handle(), 46 | reinterpret_cast(&client), 47 | const_cast(reinterpret_cast(&len)))}; 48 | 49 | std::span ip_addr_view{ 50 | reinterpret_cast(&client.sin_addr.s_addr), 51 | sizeof(client.sin_addr.s_addr), 52 | }; 53 | 54 | return tcp::client{ 55 | m_io_scheduler, 56 | std::move(s), 57 | client::options{ 58 | .address = net::ip_address{ip_addr_view, static_cast(client.sin_family)}, 59 | .port = ntohs(client.sin_port), 60 | }}; 61 | }; 62 | 63 | } // namespace coro::net::tcp 64 | -------------------------------------------------------------------------------- /src/net/tls/connection_status.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/tls/connection_status.hpp" 2 | 3 | namespace coro::net::tls 4 | { 5 | static const std::string connection_status_connected = {"connected"}; 6 | static const std::string connection_status_not_connected = 
{"not_connected"}; 7 | static const std::string connection_status_context_required = {"context_required"}; 8 | static const std::string connection_status_resource_allocation_failed = {"resource_allocation_failed"}; 9 | static const std::string connection_status_set_fd_failure = {"set_fd_failure"}; 10 | static const std::string connection_status_handshake_failed = {"handshake_failed"}; 11 | static const std::string connection_status_timeout = {"timeout"}; 12 | static const std::string connection_status_poll_error = {"poll_error"}; 13 | static const std::string connection_status_unexpected_close = {"unexpected_close"}; 14 | static const std::string connection_status_invalid_ip_address = {"invalid_ip_address"}; 15 | static const std::string connection_status_error = {"error"}; 16 | static const std::string connection_status_unknown = {"unknown"}; 17 | 18 | auto to_string(connection_status status) -> const std::string& 19 | { 20 | switch (status) 21 | { 22 | case connection_status::connected: 23 | return connection_status_connected; 24 | case connection_status::not_connected: 25 | return connection_status_not_connected; 26 | case connection_status::context_required: 27 | return connection_status_context_required; 28 | case connection_status::resource_allocation_failed: 29 | return connection_status_resource_allocation_failed; 30 | case connection_status::set_fd_failure: 31 | return connection_status_set_fd_failure; 32 | case connection_status::handshake_failed: 33 | return connection_status_handshake_failed; 34 | case connection_status::timeout: 35 | return connection_status_timeout; 36 | case connection_status::poll_error: 37 | return connection_status_poll_error; 38 | case connection_status::unexpected_close: 39 | return connection_status_unexpected_close; 40 | case connection_status::invalid_ip_address: 41 | return connection_status_invalid_ip_address; 42 | case connection_status::error: 43 | return connection_status_error; 44 | default: 45 | return 
connection_status_unknown; 46 | } 47 | } 48 | 49 | } // namespace coro::net::tls -------------------------------------------------------------------------------- /src/net/tls/context.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/tls/context.hpp" 2 | 3 | #include 4 | 5 | namespace coro::net::tls 6 | { 7 | static uint64_t g_tls_context_count{0}; 8 | static std::mutex g_tls_context_mutex{}; 9 | 10 | context::context(verify_peer_t verify_peer) 11 | { 12 | { 13 | std::scoped_lock g{g_tls_context_mutex}; 14 | if (g_tls_context_count == 0) 15 | { 16 | #if defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x10100000L 17 | OPENSSL_init_ssl(0, nullptr); 18 | #else 19 | SSL_library_init(); 20 | #endif 21 | } 22 | ++g_tls_context_count; 23 | } 24 | 25 | #if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x10100000L 26 | m_ssl_ctx = SSL_CTX_new(TLS_method()); 27 | #else 28 | m_ssl_ctx = SSL_CTX_new(SSLv23_method()); 29 | #endif 30 | if (m_ssl_ctx == nullptr) 31 | { 32 | throw std::runtime_error{"Failed to initialize OpenSSL Context object."}; 33 | } 34 | 35 | // Disable SSLv3 36 | SSL_CTX_set_options(m_ssl_ctx, SSL_OP_ALL | SSL_OP_NO_SSLv3); 37 | // Abort handshake if certificate verification fails. 38 | if (verify_peer == verify_peer_t::yes) 39 | { 40 | SSL_CTX_set_verify(m_ssl_ctx, SSL_VERIFY_PEER, NULL); 41 | } 42 | // Set the minimum TLS version, as of this TLSv1.1 or earlier are deprecated. 
43 | SSL_CTX_set_min_proto_version(m_ssl_ctx, TLS1_2_VERSION); 44 | } 45 | 46 | context::context( 47 | std::filesystem::path certificate, 48 | tls_file_type certificate_type, 49 | std::filesystem::path private_key, 50 | tls_file_type private_key_type, 51 | verify_peer_t verify_peer) 52 | : context(verify_peer) 53 | { 54 | if (auto r = SSL_CTX_use_certificate_file(m_ssl_ctx, certificate.c_str(), static_cast(certificate_type)); 55 | r != 1) 56 | { 57 | throw std::runtime_error{"Failed to load certificate file " + certificate.string()}; 58 | } 59 | 60 | if (auto r = SSL_CTX_use_PrivateKey_file(m_ssl_ctx, private_key.c_str(), static_cast(private_key_type)); 61 | r != 1) 62 | { 63 | throw std::runtime_error{"Failed to load private key file " + private_key.string()}; 64 | } 65 | 66 | if (auto r = SSL_CTX_check_private_key(m_ssl_ctx); r != 1) 67 | { 68 | throw std::runtime_error{"Certificate and private key do not match."}; 69 | } 70 | } 71 | 72 | context::~context() 73 | { 74 | if (m_ssl_ctx != nullptr) 75 | { 76 | SSL_CTX_free(m_ssl_ctx); 77 | m_ssl_ctx = nullptr; 78 | } 79 | } 80 | 81 | } // namespace coro::net::tls 82 | -------------------------------------------------------------------------------- /src/net/tls/recv_status.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace coro::net::tls 4 | { 5 | 6 | static std::string recv_status_ok{"ok"}; 7 | static std::string recv_status_buffer_is_empty{"buffer_is_empty"}; 8 | static std::string recv_status_timeout{"timeout"}; 9 | static std::string recv_status_closed{"closed"}; 10 | static std::string recv_status_error{"error"}; 11 | static std::string recv_status_want_read{"want_read"}; 12 | static std::string recv_status_want_write{"want_write"}; 13 | static std::string recv_status_want_connect{"want_connect"}; 14 | static std::string recv_status_want_accept{"want_accept"}; 15 | static std::string recv_status_want_x509_lookup{"want_x509_lookup"}; 16 | static 
std::string recv_status_error_syscall{"error_syscall"}; 17 | static std::string recv_status_unknown{"unknown"}; 18 | 19 | auto to_string(recv_status status) -> const std::string& 20 | { 21 | switch (status) 22 | { 23 | case recv_status::ok: 24 | return recv_status_ok; 25 | case recv_status::buffer_is_empty: 26 | return recv_status_buffer_is_empty; 27 | case recv_status::timeout: 28 | return recv_status_timeout; 29 | case recv_status::closed: 30 | return recv_status_closed; 31 | case recv_status::error: 32 | return recv_status_error; 33 | case recv_status::want_read: 34 | return recv_status_want_read; 35 | case recv_status::want_write: 36 | return recv_status_want_write; 37 | case recv_status::want_connect: 38 | return recv_status_want_connect; 39 | case recv_status::want_accept: 40 | return recv_status_want_accept; 41 | case recv_status::want_x509_lookup: 42 | return recv_status_want_x509_lookup; 43 | case recv_status::error_syscall: 44 | return recv_status_error_syscall; 45 | } 46 | 47 | return recv_status_unknown; 48 | } 49 | 50 | } // namespace coro::net::tls 51 | -------------------------------------------------------------------------------- /src/net/tls/send_status.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace coro::net::tls 4 | { 5 | 6 | static std::string send_status_ok{"ok"}; 7 | static std::string send_status_buffer_is_empty{"buffer_is_empty"}; 8 | static std::string send_status_timeout{"timeout"}; 9 | static std::string send_status_closed{"closed"}; 10 | static std::string send_status_error{"error"}; 11 | static std::string send_status_want_read{"want_read"}; 12 | static std::string send_status_want_write{"want_write"}; 13 | static std::string send_status_want_connect{"want_connect"}; 14 | static std::string send_status_want_accept{"want_accept"}; 15 | static std::string send_status_want_x509_lookup{"want_x509_lookup"}; 16 | static std::string 
send_status_error_syscall{"error_syscall"}; 17 | static std::string send_status_unknown{"unknown"}; 18 | 19 | auto to_string(send_status status) -> const std::string& 20 | { 21 | switch (status) 22 | { 23 | case send_status::ok: 24 | return send_status_ok; 25 | case send_status::buffer_is_empty: 26 | return send_status_buffer_is_empty; 27 | case send_status::timeout: 28 | return send_status_timeout; 29 | case send_status::closed: 30 | return send_status_closed; 31 | case send_status::error: 32 | return send_status_error; 33 | case send_status::want_read: 34 | return send_status_want_read; 35 | case send_status::want_write: 36 | return send_status_want_write; 37 | case send_status::want_connect: 38 | return send_status_want_connect; 39 | case send_status::want_accept: 40 | return send_status_want_accept; 41 | case send_status::want_x509_lookup: 42 | return send_status_want_x509_lookup; 43 | case send_status::error_syscall: 44 | return send_status_error_syscall; 45 | } 46 | 47 | return send_status_unknown; 48 | } 49 | 50 | } // namespace coro::net::tls 51 | -------------------------------------------------------------------------------- /src/net/tls/server.cpp: -------------------------------------------------------------------------------- 1 | #ifdef LIBCORO_FEATURE_TLS 2 | 3 | #include "coro/net/tls/server.hpp" 4 | 5 | #include "coro/io_scheduler.hpp" 6 | 7 | namespace coro::net::tls 8 | { 9 | server::server(std::shared_ptr scheduler, std::shared_ptr tls_ctx, options opts) 10 | : m_io_scheduler(std::move(scheduler)), 11 | m_tls_ctx(std::move(tls_ctx)), 12 | m_options(std::move(opts)), 13 | m_accept_socket(net::make_accept_socket( 14 | net::socket::options{net::domain_t::ipv4, net::socket::type_t::tcp, net::socket::blocking_t::no}, 15 | m_options.address, 16 | m_options.port, 17 | m_options.backlog)) 18 | { 19 | if (m_io_scheduler == nullptr) 20 | { 21 | throw std::runtime_error{"tls::server cannot have a nullptr io_scheduler"}; 22 | } 23 | 24 | if (m_tls_ctx == 
nullptr) 25 | { 26 | throw std::runtime_error{"tls::server cannot have a nullptr tls_ctx"}; 27 | } 28 | } 29 | 30 | server::server(server&& other) 31 | : m_io_scheduler(std::move(other.m_io_scheduler)), 32 | m_tls_ctx(std::move(other.m_tls_ctx)), 33 | m_options(std::move(other.m_options)), 34 | m_accept_socket(std::move(other.m_accept_socket)) 35 | { 36 | } 37 | 38 | auto server::operator=(server&& other) -> server& 39 | { 40 | if (std::addressof(other) != this) 41 | { 42 | m_io_scheduler = std::move(other.m_io_scheduler); 43 | m_tls_ctx = std::move(other.m_tls_ctx); 44 | m_options = std::move(other.m_options); 45 | m_accept_socket = std::move(other.m_accept_socket); 46 | } 47 | return *this; 48 | } 49 | 50 | auto server::accept(std::chrono::milliseconds timeout) -> coro::task 51 | { 52 | sockaddr_in client{}; 53 | constexpr const int len = sizeof(struct sockaddr_in); 54 | net::socket s{::accept( 55 | m_accept_socket.native_handle(), 56 | reinterpret_cast(&client), 57 | const_cast(reinterpret_cast(&len)))}; 58 | 59 | std::span ip_addr_view{ 60 | reinterpret_cast(&client.sin_addr.s_addr), 61 | sizeof(client.sin_addr.s_addr), 62 | }; 63 | 64 | auto tls_client = tls::client{ 65 | m_io_scheduler, 66 | m_tls_ctx, 67 | std::move(s), 68 | tls::client::options{ 69 | .address = net::ip_address{ip_addr_view, static_cast(client.sin_family)}, 70 | .port = ntohs(client.sin_port), 71 | }}; 72 | 73 | auto hstatus = co_await tls_client.handshake(timeout); 74 | (void)hstatus; // user must check result. 
75 | co_return std::move(tls_client); 76 | }; 77 | 78 | } // namespace coro::net::tls 79 | 80 | #endif // #ifdef LIBCORO_FEATURE_TLS 81 | -------------------------------------------------------------------------------- /src/net/udp/peer.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/net/udp/peer.hpp" 2 | 3 | namespace coro::net::udp 4 | { 5 | peer::peer(std::shared_ptr scheduler, net::domain_t domain) 6 | : m_io_scheduler(std::move(scheduler)), 7 | m_socket(net::make_socket(net::socket::options{domain, net::socket::type_t::udp, net::socket::blocking_t::no})) 8 | { 9 | } 10 | 11 | peer::peer(std::shared_ptr scheduler, const info& bind_info) 12 | : m_io_scheduler(std::move(scheduler)), 13 | m_socket(net::make_accept_socket( 14 | net::socket::options{bind_info.address.domain(), net::socket::type_t::udp, net::socket::blocking_t::no}, 15 | bind_info.address, 16 | bind_info.port)), 17 | m_bound(true) 18 | { 19 | } 20 | 21 | } // namespace coro::net::udp 22 | -------------------------------------------------------------------------------- /src/poll.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace coro 4 | { 5 | 6 | static const std::string poll_unknown{"unknown"}; 7 | 8 | static const std::string poll_op_read{"read"}; 9 | static const std::string poll_op_write{"write"}; 10 | static const std::string poll_op_read_write{"read_write"}; 11 | 12 | auto to_string(poll_op op) -> const std::string& 13 | { 14 | switch (op) 15 | { 16 | case poll_op::read: 17 | return poll_op_read; 18 | case poll_op::write: 19 | return poll_op_write; 20 | case poll_op::read_write: 21 | return poll_op_read_write; 22 | default: 23 | return poll_unknown; 24 | } 25 | } 26 | 27 | static const std::string poll_status_event{"event"}; 28 | static const std::string poll_status_timeout{"timeout"}; 29 | static const std::string poll_status_error{"error"}; 30 | static const std::string 
poll_status_closed{"closed"}; 31 | 32 | auto to_string(poll_status status) -> const std::string& 33 | { 34 | switch (status) 35 | { 36 | case poll_status::event: 37 | return poll_status_event; 38 | case poll_status::timeout: 39 | return poll_status_timeout; 40 | case poll_status::error: 41 | return poll_status_error; 42 | case poll_status::closed: 43 | return poll_status_closed; 44 | default: 45 | return poll_unknown; 46 | } 47 | } 48 | 49 | } // namespace coro 50 | -------------------------------------------------------------------------------- /src/semaphore.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/semaphore.hpp" 2 | 3 | namespace coro 4 | { 5 | using namespace std::string_literals; 6 | 7 | std::string semaphore::acquire_result_acquired = "acquired"s; 8 | std::string semaphore::acquire_result_semaphore_stopped = "semaphore_stopped"s; 9 | std::string semaphore::acquire_result_unknown = "unknown"s; 10 | 11 | semaphore::semaphore(std::ptrdiff_t least_max_value_and_starting_value) 12 | : semaphore(least_max_value_and_starting_value, least_max_value_and_starting_value) 13 | { 14 | } 15 | 16 | semaphore::semaphore(std::ptrdiff_t least_max_value, std::ptrdiff_t starting_value) 17 | : m_least_max_value(least_max_value), 18 | m_counter(starting_value <= least_max_value ? 
starting_value : least_max_value) 19 | { 20 | } 21 | 22 | semaphore::~semaphore() 23 | { 24 | notify_waiters(); 25 | } 26 | 27 | semaphore::acquire_operation::acquire_operation(semaphore& s) : m_semaphore(s) 28 | { 29 | } 30 | 31 | auto semaphore::acquire_operation::await_ready() const noexcept -> bool 32 | { 33 | if (m_semaphore.m_notify_all_set.load(std::memory_order::relaxed)) 34 | { 35 | return true; 36 | } 37 | return m_semaphore.try_acquire(); 38 | } 39 | 40 | auto semaphore::acquire_operation::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool 41 | { 42 | std::unique_lock lk{m_semaphore.m_waiter_mutex}; 43 | if (m_semaphore.m_notify_all_set.load(std::memory_order::relaxed)) 44 | { 45 | return false; 46 | } 47 | 48 | if (m_semaphore.try_acquire()) 49 | { 50 | return false; 51 | } 52 | 53 | if (m_semaphore.m_acquire_waiters == nullptr) 54 | { 55 | m_semaphore.m_acquire_waiters = this; 56 | } 57 | else 58 | { 59 | // This is LIFO, but semaphores are not meant to be fair. 60 | 61 | // Set our next to the current head. 62 | m_next = m_semaphore.m_acquire_waiters; 63 | // Set the semaphore head to this. 64 | m_semaphore.m_acquire_waiters = this; 65 | } 66 | 67 | m_awaiting_coroutine = awaiting_coroutine; 68 | return true; 69 | } 70 | 71 | auto semaphore::acquire_operation::await_resume() const -> acquire_result 72 | { 73 | if (m_semaphore.m_notify_all_set.load(std::memory_order::relaxed)) 74 | { 75 | return acquire_result::semaphore_stopped; 76 | } 77 | return acquire_result::acquired; 78 | } 79 | 80 | auto semaphore::release() -> void 81 | { 82 | // It seems like the atomic counter could be incremented, but then resuming a waiter could have 83 | // a race between a new acquirer grabbing the just incremented resource value from us. So its 84 | // best to check if there are any waiters first, and transfer owernship of the resource thats 85 | // being released directly to the waiter to avoid this problem. 
86 | 87 | std::unique_lock lk{m_waiter_mutex}; 88 | if (m_acquire_waiters != nullptr) 89 | { 90 | acquire_operation* to_resume = m_acquire_waiters; 91 | m_acquire_waiters = m_acquire_waiters->m_next; 92 | lk.unlock(); 93 | 94 | // This will transfer ownership of the resource to the resumed waiter. 95 | to_resume->m_awaiting_coroutine.resume(); 96 | } 97 | else 98 | { 99 | // Normally would be release but within a lock use releaxed. 100 | m_counter.fetch_add(1, std::memory_order::relaxed); 101 | } 102 | } 103 | 104 | auto semaphore::try_acquire() -> bool 105 | { 106 | // Optimistically grab the resource. 107 | auto previous = m_counter.fetch_sub(1, std::memory_order::acq_rel); 108 | if (previous <= 0) 109 | { 110 | // If it wasn't available undo the acquisition. 111 | m_counter.fetch_add(1, std::memory_order::release); 112 | return false; 113 | } 114 | return true; 115 | } 116 | 117 | auto semaphore::notify_waiters() noexcept -> void 118 | { 119 | m_notify_all_set.exchange(true, std::memory_order::release); 120 | while (true) 121 | { 122 | std::unique_lock lk{m_waiter_mutex}; 123 | if (m_acquire_waiters != nullptr) 124 | { 125 | acquire_operation* to_resume = m_acquire_waiters; 126 | m_acquire_waiters = m_acquire_waiters->m_next; 127 | lk.unlock(); 128 | 129 | to_resume->m_awaiting_coroutine.resume(); 130 | } 131 | else 132 | { 133 | break; 134 | } 135 | } 136 | } 137 | 138 | } // namespace coro 139 | -------------------------------------------------------------------------------- /src/sync_wait.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/sync_wait.hpp" 2 | 3 | namespace coro::detail 4 | { 5 | sync_wait_event::sync_wait_event(bool initially_set) : m_set(initially_set) 6 | { 7 | } 8 | 9 | auto sync_wait_event::set() noexcept -> void 10 | { 11 | // issue-270 100~ task's on a thread_pool within sync_wait(when_all(tasks)) can cause a deadlock/hang if using 12 | // release/acquire or even seq_cst. 
13 | { 14 | std::unique_lock lk{m_mutex}; 15 | m_set.exchange(true, std::memory_order::seq_cst); 16 | } 17 | m_cv.notify_all(); 18 | } 19 | 20 | auto sync_wait_event::reset() noexcept -> void 21 | { 22 | m_set.exchange(false, std::memory_order::seq_cst); 23 | } 24 | 25 | auto sync_wait_event::wait() noexcept -> void 26 | { 27 | std::unique_lock lk{m_mutex}; 28 | m_cv.wait(lk, [this] { return m_set.load(std::memory_order::seq_cst); }); 29 | } 30 | 31 | } // namespace coro::detail 32 | -------------------------------------------------------------------------------- /src/thread_pool.cpp: -------------------------------------------------------------------------------- 1 | #include "coro/thread_pool.hpp" 2 | #include "coro/detail/task_self_deleting.hpp" 3 | 4 | namespace coro 5 | { 6 | thread_pool::operation::operation(thread_pool& tp) noexcept : m_thread_pool(tp) 7 | { 8 | } 9 | 10 | auto thread_pool::operation::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> void 11 | { 12 | m_awaiting_coroutine = awaiting_coroutine; 13 | m_thread_pool.schedule_impl(m_awaiting_coroutine); 14 | 15 | // void return on await_suspend suspends the _this_ coroutine, which is now scheduled on the 16 | // thread pool and returns control to the caller. They could be sync_wait'ing or go do 17 | // something else while this coroutine gets picked up by the thread pool. 
18 | } 19 | 20 | thread_pool::thread_pool(options opts) : m_opts(std::move(opts)) 21 | { 22 | m_threads.reserve(m_opts.thread_count); 23 | 24 | for (uint32_t i = 0; i < m_opts.thread_count; ++i) 25 | { 26 | m_threads.emplace_back([this, i]() { executor(i); }); 27 | } 28 | } 29 | 30 | thread_pool::~thread_pool() 31 | { 32 | shutdown(); 33 | } 34 | 35 | auto thread_pool::schedule() -> operation 36 | { 37 | m_size.fetch_add(1, std::memory_order::release); 38 | if (!m_shutdown_requested.load(std::memory_order::acquire)) 39 | { 40 | return operation{*this}; 41 | } 42 | else 43 | { 44 | m_size.fetch_sub(1, std::memory_order::release); 45 | throw std::runtime_error("coro::thread_pool is shutting down, unable to schedule new tasks."); 46 | } 47 | } 48 | 49 | auto thread_pool::spawn(coro::task&& task) noexcept -> bool 50 | { 51 | m_size.fetch_add(1, std::memory_order::release); 52 | auto wrapper_task = detail::make_task_self_deleting(std::move(task)); 53 | wrapper_task.promise().executor_size(m_size); 54 | return resume(wrapper_task.handle()); 55 | } 56 | 57 | auto thread_pool::resume(std::coroutine_handle<> handle) noexcept -> bool 58 | { 59 | if (handle == nullptr || handle.done()) 60 | { 61 | return false; 62 | } 63 | 64 | m_size.fetch_add(1, std::memory_order::release); 65 | if (m_shutdown_requested.load(std::memory_order::acquire)) 66 | { 67 | m_size.fetch_sub(1, std::memory_order::release); 68 | return false; 69 | } 70 | 71 | schedule_impl(handle); 72 | return true; 73 | } 74 | 75 | auto thread_pool::shutdown() noexcept -> void 76 | { 77 | // Only allow shutdown to occur once. 78 | if (m_shutdown_requested.exchange(true, std::memory_order::acq_rel) == false) 79 | { 80 | { 81 | // There is a race condition if we are not holding the lock with the executors 82 | // to always receive this. std::jthread stop token works without this properly. 
83 | std::unique_lock lk{m_wait_mutex}; 84 | m_wait_cv.notify_all(); 85 | } 86 | 87 | for (auto& thread : m_threads) 88 | { 89 | if (thread.joinable()) 90 | { 91 | thread.join(); 92 | } 93 | } 94 | } 95 | } 96 | 97 | auto thread_pool::executor(std::size_t idx) -> void 98 | { 99 | if (m_opts.on_thread_start_functor != nullptr) 100 | { 101 | m_opts.on_thread_start_functor(idx); 102 | } 103 | 104 | // Process until shutdown is requested. 105 | while (!m_shutdown_requested.load(std::memory_order::acquire)) 106 | { 107 | std::unique_lock lk{m_wait_mutex}; 108 | m_wait_cv.wait(lk, [&]() { return !m_queue.empty() || m_shutdown_requested.load(std::memory_order::acquire); }); 109 | 110 | if (m_queue.empty()) 111 | { 112 | continue; 113 | } 114 | 115 | auto handle = m_queue.front(); 116 | m_queue.pop_front(); 117 | lk.unlock(); 118 | 119 | // Release the lock while executing the coroutine. 120 | handle.resume(); 121 | m_size.fetch_sub(1, std::memory_order::release); 122 | } 123 | 124 | // Process until there are no ready tasks left. 125 | while (m_size.load(std::memory_order::acquire) > 0) 126 | { 127 | std::unique_lock lk{m_wait_mutex}; 128 | // m_size will only drop to zero once all executing coroutines are finished 129 | // but the queue could be empty for threads that finished early. 130 | if (m_queue.empty()) 131 | { 132 | break; 133 | } 134 | 135 | auto handle = m_queue.front(); 136 | m_queue.pop_front(); 137 | lk.unlock(); 138 | 139 | // Release the lock while executing the coroutine. 
140 | handle.resume(); 141 | m_size.fetch_sub(1, std::memory_order::release); 142 | } 143 | 144 | if (m_opts.on_thread_stop_functor != nullptr) 145 | { 146 | m_opts.on_thread_stop_functor(idx); 147 | } 148 | } 149 | 150 | auto thread_pool::schedule_impl(std::coroutine_handle<> handle) noexcept -> void 151 | { 152 | if (handle == nullptr || handle.done()) 153 | { 154 | return; 155 | } 156 | 157 | { 158 | std::scoped_lock lk{m_wait_mutex}; 159 | m_queue.emplace_back(handle); 160 | m_wait_cv.notify_one(); 161 | } 162 | } 163 | 164 | } // namespace coro 165 | -------------------------------------------------------------------------------- /test/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.12) 2 | project(libcoro_test) 3 | 4 | set(LIBCORO_TEST_SOURCE_FILES 5 | test_condition_variable.cpp 6 | test_event.cpp 7 | test_generator.cpp 8 | test_latch.cpp 9 | test_mutex.cpp 10 | test_ring_buffer.cpp 11 | test_queue.cpp 12 | test_semaphore.cpp 13 | test_shared_mutex.cpp 14 | test_sync_wait.cpp 15 | test_task.cpp 16 | test_thread_pool.cpp 17 | test_when_all.cpp 18 | 19 | catch_amalgamated.hpp catch_amalgamated.cpp 20 | catch_extensions.hpp catch_extensions.cpp 21 | ) 22 | 23 | if(NOT EMSCRIPTEN) 24 | list(APPEND LIBCORO_TEST_SOURCE_FILES 25 | test_when_any.cpp 26 | ) 27 | endif() 28 | 29 | if(LIBCORO_FEATURE_NETWORKING) 30 | list(APPEND LIBCORO_TEST_SOURCE_FILES 31 | net/test_ip_address.cpp 32 | ) 33 | 34 | # These tests require coro::io_scheduler 35 | list(APPEND LIBCORO_TEST_SOURCE_FILES 36 | net/test_dns_resolver.cpp 37 | net/test_tcp_server.cpp 38 | net/test_tls_server.cpp 39 | net/test_udp_peers.cpp 40 | ) 41 | endif() 42 | 43 | if(LIBCORO_FEATURE_NETWORKING) 44 | list(APPEND LIBCORO_TEST_SOURCE_FILES 45 | bench.cpp 46 | test_io_scheduler.cpp 47 | ) 48 | endif() 49 | 50 | add_executable(${PROJECT_NAME} main.cpp ${LIBCORO_TEST_SOURCE_FILES}) 51 | target_compile_features(${PROJECT_NAME} 
PRIVATE cxx_std_20) 52 | target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) 53 | target_link_libraries(${PROJECT_NAME} PRIVATE libcoro) 54 | 55 | if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU") 56 | target_compile_options(${PROJECT_NAME} PRIVATE 57 | $<$:-std=c++20> 58 | $<$:-fcoroutines> 59 | $<$:-fconcepts> 60 | $<$:-fexceptions> 61 | $<$:-Wall> 62 | $<$:-Wextra> 63 | $<$:-pipe> 64 | ) 65 | elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") 66 | target_compile_options(${PROJECT_NAME} PRIVATE 67 | $<$:-std=c++20> 68 | $<$:-fexceptions> 69 | $<$:-Wall> 70 | $<$:-Wextra> 71 | $<$:-pipe> 72 | ) 73 | elseif(MSVC) 74 | target_compile_options(${PROJECT_NAME} PRIVATE /W4) 75 | else() 76 | message(FATAL_ERROR "Unsupported compiler.") 77 | endif() 78 | 79 | if(LIBCORO_CODE_COVERAGE) 80 | target_link_libraries(${PROJECT_NAME} PRIVATE gcov) 81 | target_compile_options(${PROJECT_NAME} PRIVATE --coverage) 82 | endif() 83 | 84 | add_test(NAME libcoro_tests COMMAND ${PROJECT_NAME}) 85 | set_tests_properties(libcoro_tests PROPERTIES ENVIRONMENT_MODIFICATION "PATH=path_list_prepend:$<$:$>") 86 | -------------------------------------------------------------------------------- /test/catch_extensions.cpp: -------------------------------------------------------------------------------- 1 | #include "catch_extensions.hpp" 2 | 3 | std::mutex g_catch2_thread_safe_mutex = std::mutex{}; 4 | -------------------------------------------------------------------------------- /test/catch_extensions.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | extern std::mutex g_catch2_thread_safe_mutex; 6 | #define REQUIRE_THREAD_SAFE(expr) {std::lock_guard lock_guard{g_catch2_thread_safe_mutex}; REQUIRE(expr);} 7 | -------------------------------------------------------------------------------- /test/main.cpp: -------------------------------------------------------------------------------- 1 | #define 
/**
 * This structure invokes a constructor to set up global test settings that are
 * needed prior to executing the tests, and cleans them up afterwards.
 */
struct test_setup_networking
{
    test_setup_networking()
    {
        // Ignore SIGPIPE; the library should handle broken pipes gracefully.
        signal(SIGPIPE, SIG_IGN);

#ifdef LIBCORO_FEATURE_TLS
        // SSL/TLS tests expect a localhost cert.pem and key.pem to exist in the
        // directory the tests run from, so generate them up front.
        auto unused = system(
            "openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -subj '/CN=localhost' -nodes");
        (void)unused;
#endif
    }

    ~test_setup_networking()
    {
#ifdef LIBCORO_FEATURE_TLS
        // Remove the temporary key.pem and cert.pem files.
        auto unused = system("rm key.pem cert.pem");
        (void)unused;
#endif
    }
};

static test_setup_networking g_test_setup_networking{};
TEST_CASE("net::ip_address from_string() ipv4")
{
    // Each scope round-trips a dotted-quad string and verifies the raw bytes.
    {
        auto ip_addr = coro::net::ip_address::from_string("127.0.0.1");
        REQUIRE(ip_addr.to_string() == "127.0.0.1");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv4);
        std::array<uint8_t, 4> expected{127, 0, 0, 1};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }

    {
        auto ip_addr = coro::net::ip_address::from_string("255.255.0.0");
        REQUIRE(ip_addr.to_string() == "255.255.0.0");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv4);
        std::array<uint8_t, 4> expected{255, 255, 0, 0};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }
}
REQUIRE(ip_addr.to_string() == "123:4567:89ab:cdef:123:4567:89ab:cdef"); 35 | REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6); 36 | std::array expected{ 37 | 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}; 38 | REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin())); 39 | } 40 | 41 | { 42 | auto ip_addr = coro::net::ip_address::from_string("::", coro::net::domain_t::ipv6); 43 | REQUIRE(ip_addr.to_string() == "::"); 44 | REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6); 45 | std::array expected{}; 46 | REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin())); 47 | } 48 | 49 | { 50 | auto ip_addr = coro::net::ip_address::from_string("::1", coro::net::domain_t::ipv6); 51 | REQUIRE(ip_addr.to_string() == "::1"); 52 | REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6); 53 | std::array expected{ 54 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; 55 | REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin())); 56 | } 57 | 58 | { 59 | auto ip_addr = coro::net::ip_address::from_string("1::1", coro::net::domain_t::ipv6); 60 | REQUIRE(ip_addr.to_string() == "1::1"); 61 | REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6); 62 | std::array expected{ 63 | 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; 64 | REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin())); 65 | } 66 | 67 | { 68 | auto ip_addr = coro::net::ip_address::from_string("1::", coro::net::domain_t::ipv6); 69 | REQUIRE(ip_addr.to_string() == "1::"); 70 | REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6); 71 | std::array expected{ 72 | 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 73 | REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin())); 74 | } 75 | } 76 | 77 | #endif //# 
LIBCORO_FEATURE_NETWORKING -------------------------------------------------------------------------------- /test/net/test_tls_server.cpp: -------------------------------------------------------------------------------- 1 | #include "catch_amalgamated.hpp" 2 | 3 | #ifdef LIBCORO_FEATURE_NETWORKING 4 | #ifdef LIBCORO_FEATURE_TLS 5 | 6 | #include 7 | 8 | #include 9 | 10 | TEST_CASE("tls_server hello world server", "[tls_server]") 11 | { 12 | auto scheduler = coro::io_scheduler::make_shared( 13 | coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); 14 | 15 | const std::string client_msg = "Hello world from TLS client!"; 16 | const std::string server_msg = "Hello world from TLS server!!"; 17 | 18 | auto make_client_task = [](std::shared_ptr scheduler, 19 | const std::string& client_msg, 20 | const std::string& server_msg) -> coro::task 21 | { 22 | co_await scheduler->schedule(); 23 | 24 | coro::net::tls::client client{ 25 | scheduler, std::make_shared(coro::net::tls::verify_peer_t::no)}; 26 | 27 | std::cerr << "client.connect()\n"; 28 | auto cstatus = co_await client.connect(); 29 | REQUIRE(cstatus == coro::net::tls::connection_status::connected); 30 | std::cerr << "client.connected\n"; 31 | 32 | std::cerr << "client.send()\n"; 33 | auto [sstatus, remaining] = co_await client.send(client_msg); 34 | REQUIRE(sstatus == coro::net::tls::send_status::ok); 35 | REQUIRE(remaining.empty()); 36 | 37 | std::string response; 38 | response.resize(256, '\0'); 39 | 40 | std::cerr << "client.recv()\n"; 41 | auto [rstatus, rspan] = co_await client.recv(response); 42 | REQUIRE(rstatus == coro::net::tls::recv_status::ok); 43 | REQUIRE(rspan.size() == server_msg.size()); 44 | response.resize(rspan.size()); 45 | 46 | REQUIRE(response == server_msg); 47 | std::cerr << "client received message: " << response << "\n"; 48 | 49 | std::cerr << "client finished\n"; 50 | co_return; 51 | }; 52 | 53 | auto make_server_task = [](std::shared_ptr scheduler, 54 | const 
std::string& client_msg, 55 | const std::string& server_msg) -> coro::task 56 | { 57 | co_await scheduler->schedule(); 58 | 59 | coro::net::tls::server server{ 60 | scheduler, 61 | std::make_shared( 62 | "cert.pem", coro::net::tls::tls_file_type::pem, "key.pem", coro::net::tls::tls_file_type::pem)}; 63 | 64 | std::cerr << "server.poll()\n"; 65 | auto pstatus = co_await server.poll(); 66 | REQUIRE(pstatus == coro::poll_status::event); 67 | 68 | std::cerr << "server.accept()\n"; 69 | auto client = co_await server.accept(); 70 | REQUIRE(client.socket().is_valid()); 71 | 72 | std::string buffer; 73 | buffer.resize(256, '\0'); 74 | std::cerr << "server client.recv()\n"; 75 | auto [rstatus, rspan] = co_await client.recv(buffer); 76 | REQUIRE(rstatus == coro::net::tls::recv_status::ok); 77 | REQUIRE(rspan.size() == client_msg.size()); 78 | buffer.resize(rspan.size()); 79 | REQUIRE(buffer == client_msg); 80 | std::cerr << "server received message: " << buffer << "\n"; 81 | 82 | std::cerr << "server client.send()\n"; 83 | auto [sstatus, remaining] = co_await client.send(server_msg); 84 | REQUIRE(sstatus == coro::net::tls::send_status::ok); 85 | REQUIRE(remaining.empty()); 86 | 87 | std::cerr << "server finished\n"; 88 | co_return; 89 | }; 90 | 91 | coro::sync_wait(coro::when_all( 92 | make_server_task(scheduler, client_msg, server_msg), make_client_task(scheduler, client_msg, server_msg))); 93 | } 94 | 95 | #endif // LIBCORO_FEATURE_TLS 96 | #endif // LIBCORO_FEATURE_NETWORKING 97 | -------------------------------------------------------------------------------- /test/net/test_udp_peers.cpp: -------------------------------------------------------------------------------- 1 | #include "catch_amalgamated.hpp" 2 | 3 | #ifdef LIBCORO_FEATURE_NETWORKING 4 | 5 | #include 6 | 7 | TEST_CASE("udp one way") 8 | { 9 | const std::string msg{"aaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbcccccccccccccccccc"}; 10 | 11 | auto scheduler = coro::io_scheduler::make_shared( 12 | 
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); 13 | 14 | auto make_send_task = [](std::shared_ptr scheduler, const std::string& msg) -> coro::task 15 | { 16 | co_await scheduler->schedule(); 17 | coro::net::udp::peer peer{scheduler}; 18 | coro::net::udp::peer::info peer_info{}; 19 | 20 | auto [sstatus, remaining] = peer.sendto(peer_info, msg); 21 | REQUIRE(sstatus == coro::net::send_status::ok); 22 | REQUIRE(remaining.empty()); 23 | 24 | co_return; 25 | }; 26 | 27 | auto make_recv_task = [](std::shared_ptr scheduler, const std::string& msg) -> coro::task 28 | { 29 | co_await scheduler->schedule(); 30 | coro::net::udp::peer::info self_info{.address = coro::net::ip_address::from_string("0.0.0.0")}; 31 | 32 | coro::net::udp::peer self{scheduler, self_info}; 33 | 34 | auto pstatus = co_await self.poll(coro::poll_op::read); 35 | REQUIRE(pstatus == coro::poll_status::event); 36 | 37 | std::string buffer(64, '\0'); 38 | auto [rstatus, peer_info, rspan] = self.recvfrom(buffer); 39 | REQUIRE(rstatus == coro::net::recv_status::ok); 40 | REQUIRE(peer_info.address == coro::net::ip_address::from_string("127.0.0.1")); 41 | // The peer's port will be randomly picked by the kernel since it wasn't bound. 
42 | REQUIRE(rspan.size() == msg.size()); 43 | buffer.resize(rspan.size()); 44 | REQUIRE(buffer == msg); 45 | 46 | co_return; 47 | }; 48 | 49 | coro::sync_wait(coro::when_all(make_recv_task(scheduler, msg), make_send_task(scheduler, msg))); 50 | } 51 | 52 | TEST_CASE("udp echo peers") 53 | { 54 | const std::string peer1_msg{"Hello from peer1!"}; 55 | const std::string peer2_msg{"Hello from peer2!!"}; 56 | 57 | auto scheduler = coro::io_scheduler::make_shared( 58 | coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); 59 | 60 | auto make_peer_task = [](std::shared_ptr scheduler, 61 | uint16_t my_port, 62 | uint16_t peer_port, 63 | bool send_first, 64 | const std::string my_msg, 65 | const std::string peer_msg) -> coro::task 66 | { 67 | co_await scheduler->schedule(); 68 | coro::net::udp::peer::info my_info{.address = coro::net::ip_address::from_string("0.0.0.0"), .port = my_port}; 69 | coro::net::udp::peer::info peer_info{ 70 | .address = coro::net::ip_address::from_string("127.0.0.1"), .port = peer_port}; 71 | 72 | coro::net::udp::peer me{scheduler, my_info}; 73 | 74 | if (send_first) 75 | { 76 | // Send my message to my peer first. 77 | auto [sstatus, remaining] = me.sendto(peer_info, my_msg); 78 | REQUIRE(sstatus == coro::net::send_status::ok); 79 | REQUIRE(remaining.empty()); 80 | } 81 | else 82 | { 83 | // Poll for my peers message first. 84 | auto pstatus = co_await me.poll(coro::poll_op::read); 85 | REQUIRE(pstatus == coro::poll_status::event); 86 | 87 | std::string buffer(64, '\0'); 88 | auto [rstatus, recv_peer_info, rspan] = me.recvfrom(buffer); 89 | REQUIRE(rstatus == coro::net::recv_status::ok); 90 | REQUIRE(recv_peer_info == peer_info); 91 | REQUIRE(rspan.size() == peer_msg.size()); 92 | buffer.resize(rspan.size()); 93 | REQUIRE(buffer == peer_msg); 94 | } 95 | 96 | if (send_first) 97 | { 98 | // I sent first so now I need to await my peer's message. 
99 | auto pstatus = co_await me.poll(coro::poll_op::read); 100 | REQUIRE(pstatus == coro::poll_status::event); 101 | 102 | std::string buffer(64, '\0'); 103 | auto [rstatus, recv_peer_info, rspan] = me.recvfrom(buffer); 104 | REQUIRE(rstatus == coro::net::recv_status::ok); 105 | REQUIRE(recv_peer_info == peer_info); 106 | REQUIRE(rspan.size() == peer_msg.size()); 107 | buffer.resize(rspan.size()); 108 | REQUIRE(buffer == peer_msg); 109 | } 110 | else 111 | { 112 | auto [sstatus, remaining] = me.sendto(peer_info, my_msg); 113 | REQUIRE(sstatus == coro::net::send_status::ok); 114 | REQUIRE(remaining.empty()); 115 | } 116 | 117 | co_return; 118 | }; 119 | 120 | coro::sync_wait(coro::when_all( 121 | make_peer_task(scheduler, 8081, 8080, false, peer2_msg, peer1_msg), 122 | make_peer_task(scheduler, 8080, 8081, true, peer1_msg, peer2_msg))); 123 | } 124 | 125 | #endif // LIBCORO_FEATURE_NETWORKING 126 | -------------------------------------------------------------------------------- /test/test_generator.cpp: -------------------------------------------------------------------------------- 1 | #include "catch_amalgamated.hpp" 2 | 3 | #include 4 | 5 | TEST_CASE("generator single yield", "[generator]") 6 | { 7 | const std::string msg{"Hello World Generator!"}; 8 | auto func = [](const std::string& msg) -> coro::generator { co_yield std::string{msg}; }; 9 | 10 | for (const auto& v : func(msg)) 11 | { 12 | REQUIRE(v == msg); 13 | } 14 | } 15 | 16 | TEST_CASE("generator infinite incrementing integer yield", "[generator]") 17 | { 18 | constexpr const int64_t max = 1024; 19 | 20 | auto func = []() -> coro::generator 21 | { 22 | int64_t i{0}; 23 | while (true) 24 | { 25 | ++i; 26 | co_yield i; 27 | } 28 | }; 29 | 30 | int64_t v{1}; 31 | for (const auto& v_1 : func()) 32 | { 33 | REQUIRE(v == v_1); 34 | ++v; 35 | 36 | if (v > max) 37 | { 38 | break; 39 | } 40 | } 41 | } 42 | 43 | TEST_CASE("generator satisfies view concept for compatibility with std::views::take") 44 | { 45 | auto 
counter = size_t{0}; 46 | auto natural = [](size_t n) mutable -> coro::generator 47 | { 48 | while (true) 49 | co_yield ++n; 50 | }; 51 | auto nat = natural(counter); 52 | static_assert(std::ranges::view, "does not satisfy view concept"); 53 | SECTION("Count the items") 54 | { 55 | for (auto&& n : natural(counter) | std::views::take(5)) 56 | { 57 | ++counter; 58 | REQUIRE(n == counter); 59 | } 60 | REQUIRE(counter == 5); 61 | } 62 | SECTION("Not supported when std::ranges::view is satisfied, see issue 261") 63 | { 64 | /// the following may fail to compile to prevent loss of items in the std::views:take: 65 | /* 66 | for (auto&& n : nat | std::views::take(3)) { 67 | ++counter; 68 | REQUIRE(n == counter); // expect 1, 2, 3 69 | } 70 | for (auto&& n : nat | std::views::take(3)) { 71 | ++counter; 72 | REQUIRE(n == counter); // expect 4, 5, 6 (4 may get lost if view is not enabled) 73 | } 74 | */ 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /test/test_latch.cpp: -------------------------------------------------------------------------------- 1 | #include "catch_amalgamated.hpp" 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | TEST_CASE("latch count=0", "[latch]") 9 | { 10 | coro::latch l{0}; 11 | 12 | auto make_task = [](coro::latch& l) -> coro::task 13 | { 14 | co_await l; 15 | co_return 42; 16 | }; 17 | 18 | auto task = make_task(l); 19 | 20 | task.resume(); 21 | REQUIRE(task.is_ready()); // The latch never waits due to zero count. 
22 | REQUIRE(task.promise().result() == 42); 23 | } 24 | 25 | TEST_CASE("latch count=1", "[latch]") 26 | { 27 | coro::latch l{1}; 28 | 29 | auto make_task = [](coro::latch& l) -> coro::task 30 | { 31 | auto workers = l.remaining(); 32 | co_await l; 33 | co_return workers; 34 | }; 35 | 36 | auto task = make_task(l); 37 | 38 | task.resume(); 39 | REQUIRE_FALSE(task.is_ready()); 40 | 41 | l.count_down(); 42 | REQUIRE(task.is_ready()); 43 | REQUIRE(task.promise().result() == 1); 44 | } 45 | 46 | TEST_CASE("latch count=1 count_down=5", "[latch]") 47 | { 48 | coro::latch l{1}; 49 | 50 | auto make_task = [](coro::latch& l) -> coro::task 51 | { 52 | auto workers = l.remaining(); 53 | co_await l; 54 | co_return workers; 55 | }; 56 | 57 | auto task = make_task(l); 58 | 59 | task.resume(); 60 | REQUIRE_FALSE(task.is_ready()); 61 | 62 | l.count_down(5); 63 | REQUIRE(task.is_ready()); 64 | REQUIRE(task.promise().result() == 1); 65 | } 66 | 67 | TEST_CASE("latch count=5 count_down=1 x5", "[latch]") 68 | { 69 | coro::latch l{5}; 70 | 71 | auto make_task = [](coro::latch& l) -> coro::task 72 | { 73 | auto workers = l.remaining(); 74 | co_await l; 75 | co_return workers; 76 | }; 77 | 78 | auto task = make_task(l); 79 | 80 | task.resume(); 81 | REQUIRE_FALSE(task.is_ready()); 82 | 83 | l.count_down(1); 84 | REQUIRE_FALSE(task.is_ready()); 85 | l.count_down(1); 86 | REQUIRE_FALSE(task.is_ready()); 87 | l.count_down(1); 88 | REQUIRE_FALSE(task.is_ready()); 89 | l.count_down(1); 90 | REQUIRE_FALSE(task.is_ready()); 91 | l.count_down(1); 92 | REQUIRE(task.is_ready()); 93 | REQUIRE(task.promise().result() == 5); 94 | } 95 | 96 | TEST_CASE("latch count=5 count_down=5", "[latch]") 97 | { 98 | coro::latch l{5}; 99 | 100 | auto make_task = [](coro::latch& l) -> coro::task 101 | { 102 | auto workers = l.remaining(); 103 | co_await l; 104 | co_return workers; 105 | }; 106 | 107 | auto task = make_task(l); 108 | 109 | task.resume(); 110 | REQUIRE_FALSE(task.is_ready()); 111 | 112 | 
l.count_down(5); 113 | REQUIRE(task.is_ready()); 114 | REQUIRE(task.promise().result() == 5); 115 | } 116 | -------------------------------------------------------------------------------- /test/test_mutex.cpp: -------------------------------------------------------------------------------- 1 | #include "catch_amalgamated.hpp" 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | TEST_CASE("mutex single waiter not locked", "[mutex]") 10 | { 11 | std::vector output; 12 | 13 | coro::mutex m; 14 | 15 | auto make_emplace_task = [](coro::mutex& m, std::vector& output) -> coro::task 16 | { 17 | std::cerr << "Acquiring lock\n"; 18 | { 19 | auto scoped_lock = co_await m.scoped_lock(); 20 | REQUIRE_FALSE(m.try_lock()); 21 | std::cerr << "lock acquired, emplacing back 1\n"; 22 | output.emplace_back(1); 23 | std::cerr << "coroutine done\n"; 24 | } 25 | 26 | // The scoped lock should release the lock upon destructing. 27 | REQUIRE(m.try_lock()); 28 | REQUIRE_FALSE(m.try_lock()); 29 | m.unlock(); 30 | 31 | co_return; 32 | }; 33 | 34 | coro::sync_wait(make_emplace_task(m, output)); 35 | 36 | REQUIRE(m.try_lock()); 37 | m.unlock(); 38 | 39 | REQUIRE(output.size() == 1); 40 | REQUIRE(output[0] == 1); 41 | } 42 | 43 | TEST_CASE("mutex many waiters until event", "[mutex]") 44 | { 45 | std::atomic value{0}; 46 | std::vector> tasks; 47 | 48 | coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}}; 49 | 50 | coro::mutex m; // acquires and holds the lock until the event is triggered 51 | coro::event e; // triggers the blocking thread to release the lock 52 | 53 | auto make_task = 54 | [](coro::thread_pool& tp, coro::mutex& m, std::atomic& value, uint64_t id) -> coro::task 55 | { 56 | co_await tp.schedule(); 57 | std::cerr << "id = " << id << " waiting to acquire the lock\n"; 58 | auto scoped_lock = co_await m.scoped_lock(); 59 | 60 | // Should always be locked upon acquiring the locks. 
61 | REQUIRE_FALSE(m.try_lock()); 62 | 63 | std::cerr << "id = " << id << " lock acquired\n"; 64 | value.fetch_add(1, std::memory_order::relaxed); 65 | std::cerr << "id = " << id << " coroutine done\n"; 66 | co_return; 67 | }; 68 | 69 | auto make_block_task = [](coro::thread_pool& tp, coro::mutex& m, coro::event& e) -> coro::task 70 | { 71 | co_await tp.schedule(); 72 | std::cerr << "block task acquiring lock\n"; 73 | auto scoped_lock = co_await m.scoped_lock(); 74 | REQUIRE_FALSE(m.try_lock()); 75 | std::cerr << "block task acquired lock, waiting on event\n"; 76 | co_await e; 77 | co_return; 78 | }; 79 | 80 | auto make_set_task = [](coro::thread_pool& tp, coro::event& e) -> coro::task 81 | { 82 | co_await tp.schedule(); 83 | std::cerr << "set task setting event\n"; 84 | e.set(); 85 | co_return; 86 | }; 87 | 88 | // Grab mutex so all threads block. 89 | tasks.emplace_back(make_block_task(tp, m, e)); 90 | 91 | // Create N tasks that attempt to lock the mutex. 92 | for (uint64_t i = 1; i <= 4; ++i) 93 | { 94 | tasks.emplace_back(make_task(tp, m, value, i)); 95 | } 96 | 97 | tasks.emplace_back(make_set_task(tp, e)); 98 | 99 | coro::sync_wait(coro::when_all(std::move(tasks))); 100 | 101 | REQUIRE(value == 4); 102 | } 103 | 104 | TEST_CASE("mutex scoped_lock unlock prior to scope exit", "[mutex]") 105 | { 106 | coro::mutex m; 107 | 108 | auto make_task = [](coro::mutex& m) -> coro::task 109 | { 110 | { 111 | auto lk = co_await m.scoped_lock(); 112 | REQUIRE_FALSE(m.try_lock()); 113 | lk.unlock(); 114 | REQUIRE(m.try_lock()); 115 | } 116 | co_return; 117 | }; 118 | 119 | coro::sync_wait(make_task(m)); 120 | } 121 | 122 | TEST_CASE("mutex lock", "[mutex]") 123 | { 124 | coro::mutex m; 125 | 126 | auto make_task = [](coro::mutex& m) -> coro::task 127 | { 128 | { 129 | co_await m.lock(); 130 | REQUIRE_FALSE(m.try_lock()); 131 | m.unlock(); 132 | REQUIRE(m.try_lock()); 133 | } 134 | co_return; 135 | }; 136 | 137 | coro::sync_wait(make_task(m)); 138 | } 139 | 
--------------------------------------------------------------------------------