├── .git-blame-ignore-revs
├── .github
│   ├── labeler.yml
│   ├── release-drafter.yml
│   └── workflows
│       ├── ci.yml
│       ├── rebase-cmd-dispatch.yml
│       ├── rebase-cmd.yml
│       ├── scala-steward.yml
│       └── test-report.yml
├── .gitignore
├── .readthedocs.yaml
├── .scala-steward.conf
├── .scalafmt.conf
├── LICENSE
├── README.md
├── build.sbt
├── core
│   └── src
│       ├── main
│       │   └── scala
│       │       └── ox
│       │           ├── Chunk.scala
│       │           ├── ErrorMode.scala
│       │           ├── Ox.scala
│       │           ├── OxApp.scala
│       │           ├── channels
│       │           │   ├── BufferCapacity.scala
│       │           │   ├── Channel.scala
│       │           │   ├── ChannelClosed.scala
│       │           │   ├── ChannelClosedUnion.scala
│       │           │   ├── SourceCompanionOps.scala
│       │           │   ├── SourceDrainOps.scala
│       │           │   ├── SourceOps.scala
│       │           │   ├── actor.scala
│       │           │   ├── forkPropagate.scala
│       │           │   └── select.scala
│       │           ├── collections.scala
│       │           ├── control.scala
│       │           ├── either.scala
│       │           ├── flow
│       │           │   ├── Flow.scala
│       │           │   ├── FlowCompanionIOOps.scala
│       │           │   ├── FlowCompanionOps.scala
│       │           │   ├── FlowCompanionReactiveOps.scala
│       │           │   ├── FlowIOOps.scala
│       │           │   ├── FlowOps.scala
│       │           │   ├── FlowReactiveOps.scala
│       │           │   ├── FlowRunOps.scala
│       │           │   ├── FlowTextOps.scala
│       │           │   └── internal
│       │           │       ├── WeightedHeap.scala
│       │           │       └── groupByImpl.scala
│       │           ├── fork.scala
│       │           ├── inScopeRunner.scala
│       │           ├── internal
│       │           │   ├── ScopeContext.scala
│       │           │   └── ThreadHerd.scala
│       │           ├── local.scala
│       │           ├── oxThreadFactory.scala
│       │           ├── par.scala
│       │           ├── race.scala
│       │           ├── resilience
│       │           │   ├── AdaptiveRetry.scala
│       │           │   ├── CircuitBreaker.scala
│       │           │   ├── CircuitBreakerConfig.scala
│       │           │   ├── CircuitBreakerStateMachine.scala
│       │           │   ├── DurationRateLimiterAlgorithm.scala
│       │           │   ├── RateLimiter.scala
│       │           │   ├── RateLimiterAlgorithm.scala
│       │           │   ├── ResultPolicy.scala
│       │           │   ├── RetryConfig.scala
│       │           │   ├── StartTimeRateLimiterAlgorithm.scala
│       │           │   ├── TokenBucket.scala
│       │           │   └── retry.scala
│       │           ├── resource.scala
│       │           ├── scheduling
│       │           │   ├── Jitter.scala
│       │           │   ├── RepeatConfig.scala
│       │           │   ├── Schedule.scala
│       │           │   ├── repeat.scala
│       │           │   └── scheduled.scala
│       │           ├── supervised.scala
│       │           ├── unsupervised.scala
│       │           └── util.scala
│       └── test
│           └── scala
│               └── ox
│                   ├── AppErrorTest.scala
│                   ├── CancelTest.scala
│                   ├── CollectParTest.scala
│                   ├── ControlTest.scala
│                   ├── EitherTest.scala
│                   ├── ExceptionTest.scala
│                   ├── FilterParTest.scala
│                   ├── ForeachParTest.scala
│                   ├── ForkTest.scala
│                   ├── LocalTest.scala
│                   ├── MapParTest.scala
│                   ├── OxAppTest.scala
│                   ├── ParTest.scala
│                   ├── RaceTest.scala
│                   ├── ResourceTest.scala
│                   ├── SupervisedTest.scala
│                   ├── UtilTest.scala
│                   ├── channels
│                   │   ├── ActorTest.scala
│                   │   ├── ChannelTest.scala
│                   │   ├── SourceOpsEmptyTest.scala
│                   │   ├── SourceOpsFactoryMethodsTest.scala
│                   │   ├── SourceOpsFailedTest.scala
│                   │   ├── SourceOpsForeachTest.scala
│                   │   ├── SourceOpsFutureSourceTest.scala
│                   │   ├── SourceOpsFutureTest.scala
│                   │   ├── SourceOpsTest.scala
│                   │   └── SourceOpsTransformTest.scala
│                   ├── flow
│                   │   ├── FlowCompanionIOOpsTest.scala
│                   │   ├── FlowCompanionOpsTest.scala
│                   │   ├── FlowIOOpsTest.scala
│                   │   ├── FlowOpsAlsoToTapTest.scala
│                   │   ├── FlowOpsAlsoToTest.scala
│                   │   ├── FlowOpsBufferTest.scala
│                   │   ├── FlowOpsCollectTest.scala
│                   │   ├── FlowOpsConcatPrependTest.scala
│                   │   ├── FlowOpsConcatTest.scala
│                   │   ├── FlowOpsDebounceByTest.scala
│                   │   ├── FlowOpsDebounceTest.scala
│                   │   ├── FlowOpsDrainTest.scala
│                   │   ├── FlowOpsDropTest.scala
│                   │   ├── FlowOpsEmptyTest.scala
│                   │   ├── FlowOpsFactoryMethodsTest.scala
│                   │   ├── FlowOpsFailedTest.scala
│                   │   ├── FlowOpsFilterTest.scala
│                   │   ├── FlowOpsFlatMapTest.scala
│                   │   ├── FlowOpsFlattenParTest.scala
│                   │   ├── FlowOpsFlattenTest.scala
│                   │   ├── FlowOpsFoldTest.scala
│                   │   ├── FlowOpsForeachTest.scala
│                   │   ├── FlowOpsFutureSourceTest.scala
│                   │   ├── FlowOpsFutureTest.scala
│                   │   ├── FlowOpsGroupByTest.scala
│                   │   ├── FlowOpsGroupedTest.scala
│                   │   ├── FlowOpsInterleaveAllTest.scala
│                   │   ├── FlowOpsInterleaveTest.scala
│                   │   ├── FlowOpsIntersperseTest.scala
│                   │   ├── FlowOpsLastOptionTest.scala
│                   │   ├── FlowOpsLastTest.scala
│                   │   ├── FlowOpsMapConcatTest.scala
│                   │   ├── FlowOpsMapParTest.scala
│                   │   ├── FlowOpsMapParUnorderedTest.scala
│                   │   ├── FlowOpsMapStatefulConcatTest.scala
│                   │   ├── FlowOpsMapStatefulTest.scala
│                   │   ├── FlowOpsMapTest.scala
│                   │   ├── FlowOpsMapUsingSinkTest.scala
│                   │   ├── FlowOpsMergeTest.scala
│                   │   ├── FlowOpsOnCompleteTest.scala
│                   │   ├── FlowOpsOrElseTest.scala
│                   │   ├── FlowOpsPipeToTest.scala
│                   │   ├── FlowOpsReduceTest.scala
│                   │   ├── FlowOpsRepeatEvalTest.scala
│                   │   ├── FlowOpsRunToChannelTest.scala
│                   │   ├── FlowOpsSampleTest.scala
│                   │   ├── FlowOpsScanTest.scala
│                   │   ├── FlowOpsSlidingTest.scala
│                   │   ├── FlowOpsTakeLastTest.scala
│                   │   ├── FlowOpsTakeTest.scala
│                   │   ├── FlowOpsTakeWhileTest.scala
│                   │   ├── FlowOpsTapTest.scala
│                   │   ├── FlowOpsThrottleTest.scala
│                   │   ├── FlowOpsTickTest.scala
│                   │   ├── FlowOpsTimeoutTest.scala
│                   │   ├── FlowOpsUsingSink.scala
│                   │   ├── FlowOpsZipAllTest.scala
│                   │   ├── FlowOpsZipTest.scala
│                   │   ├── FlowOpsZipWithIndexTest.scala
│                   │   ├── FlowTextOpsTest.scala
│                   │   ├── internal
│                   │   │   └── WeightedHeapTest.scala
│                   │   └── reactive
│                   │       ├── FlowPublisherPekkoTest.scala
│                   │       └── FlowPublisherTckTest.scala
│                   ├── resilience
│                   │   ├── AfterAttemptTest.scala
│                   │   ├── BackoffRetryTest.scala
│                   │   ├── CircuitBreakerStateMachineTest.scala
│                   │   ├── CircuitBreakerTest.scala
│                   │   ├── FixedIntervalRetryTest.scala
│                   │   ├── ImmediateRetryTest.scala
│                   │   ├── RateLimiterInterfaceTest.scala
│                   │   ├── RateLimiterTest.scala
│                   │   └── ScheduleFallingBackRetryTest.scala
│                   ├── scheduling
│                   │   ├── FixedRateRepeatTest.scala
│                   │   ├── ImmediateRepeatTest.scala
│                   │   └── JitterTest.scala
│                   └── util
│                       ├── ElapsedTime.scala
│                       ├── MaxCounter.scala
│                       └── Trail.scala
├── cron
│   └── src
│       ├── main
│       │   └── scala
│       │       └── ox
│       │           └── scheduling
│       │               └── cron
│       │                   └── CronSchedule.scala
│       └── test
│           └── scala
│               └── ox
│                   └── scheduling
│                       └── cron
│                           └── CronScheduleTest.scala
├── doc
│   ├── .gitignore
│   ├── .python-version
│   ├── Makefile
│   ├── _static
│   │   └── state-diagram-cb.svg
│   ├── adr
│   │   ├── 0001-error-propagation-in-channels.md
│   │   ├── 0002-retries.md
│   │   ├── 0003-why-source-operators-do-not-throw.md
│   │   ├── 0004-channels-safe-unsafe-operations.md
│   │   ├── 0005-application-errors.md
│   │   ├── 0006-actors.md
│   │   ├── 0007-supervised-unsupervised-scopes.md
│   │   └── 0008-scheduled-repeat-retry.md
│   ├── basics
│   │   ├── direct-style.md
│   │   └── error-handling.md
│   ├── conf.py
│   ├── flake.lock
│   ├── flake.nix
│   ├── high-level-concurrency
│   │   ├── collections.md
│   │   ├── par.md
│   │   ├── race.md
│   │   └── timeout.md
│   ├── index.md
│   ├── info
│   │   ├── community-support.md
│   │   ├── dependency.md
│   │   └── scope.md
│   ├── integrations
│   │   ├── cron4s.md
│   │   ├── kafka.md
│   │   ├── mdc-logback.md
│   │   └── otel-context.md
│   ├── make.bat
│   ├── other
│   │   ├── best-practices.md
│   │   ├── compare-funeff.md
│   │   ├── compare-gears.md
│   │   ├── dictionary.md
│   │   ├── links.md
│   │   ├── performance.md
│   │   └── stability.md
│   ├── requirements.txt
│   ├── scheduling
│   │   ├── repeat.md
│   │   ├── retries.md
│   │   └── scheduled.md
│   ├── streaming
│   │   ├── backpressure.md
│   │   ├── channels.md
│   │   ├── errors.md
│   │   ├── flows.md
│   │   ├── index.md
│   │   ├── io.md
│   │   ├── selecting-from-channels.md
│   │   └── transforming-channels.md
│   ├── structured-concurrency
│   │   ├── error-handling-scopes.md
│   │   ├── fork-join.md
│   │   ├── fork-local.md
│   │   ├── index.md
│   │   └── interruptions.md
│   ├── tour.md
│   ├── utils
│   │   ├── actors.md
│   │   ├── circuit-breaker.md
│   │   ├── control-flow.md
│   │   ├── oxapp.md
│   │   ├── rate-limiter.md
│   │   ├── resources.md
│   │   └── utility.md
│   └── watch.sh
├── examples
│   └── src
│       └── test
│           ├── resources
│           │   └── logback.xml
│           └── scala
│               └── ox
│                   ├── crawler
│                   │   ├── Crawler.scala
│                   │   ├── Http.scala
│                   │   └── test
│                   │       ├── CrawlerTest.scala
│                   │       └── CrawlerTestData.scala
│                   ├── main.scala
│                   ├── ratelimiter
│                   │   ├── RateLimiter.scala
│                   │   ├── RateLimiterQueue.scala
│                   │   └── test
│                   │       └── RateLimiterTest.scala
│                   ├── sockets
│                   │   ├── Router.scala
│                   │   ├── socket.scala
│                   │   └── test
│                   │       └── RouterTest.scala
│                   └── supervise
│                       ├── Broadcast.scala
│                       ├── model.scala
│                       └── test
│                           └── BroadcastTest.scala
├── flow-reactive-streams
│   └── src
│       └── main
│           └── scala
│               └── ox
│                   └── flow
│                       └── reactive
│                           └── flowReactiveStreamsExtension.scala
├── generated-doc
│   └── out
│       ├── .gitignore
│       ├── .python-version
│       ├── Makefile
│       ├── _static
│       │   └── state-diagram-cb.svg
│       ├── adr
│       │   ├── 0001-error-propagation-in-channels.md
│       │   ├── 0002-retries.md
│       │   ├── 0003-why-source-operators-do-not-throw.md
│       │   ├── 0004-channels-safe-unsafe-operations.md
│       │   ├── 0005-application-errors.md
│       │   ├── 0006-actors.md
│       │   ├── 0007-supervised-unsupervised-scopes.md
│       │   └── 0008-scheduled-repeat-retry.md
│       ├── basics
│       │   ├── direct-style.md
│       │   └── error-handling.md
│       ├── conf.py
│       ├── flake.lock
│       ├── flake.nix
│       ├── high-level-concurrency
│       │   ├── collections.md
│       │   ├── par.md
│       │   ├── race.md
│       │   └── timeout.md
│       ├── index.md
│       ├── info
│       │   ├── community-support.md
│       │   ├── dependency.md
│       │   └── scope.md
│       ├── integrations
│       │   ├── cron4s.md
│       │   ├── kafka.md
│       │   ├── mdc-logback.md
│       │   └── otel-context.md
│       ├── make.bat
│       ├── other
│       │   ├── best-practices.md
│       │   ├── compare-funeff.md
│       │   ├── compare-gears.md
│       │   ├── dictionary.md
│       │   ├── links.md
│       │   ├── performance.md
│       │   └── stability.md
│       ├── requirements.txt
│       ├── scheduling
│       │   ├── repeat.md
│       │   ├── retries.md
│       │   └── scheduled.md
│       ├── streaming
│       │   ├── backpressure.md
│       │   ├── channels.md
│       │   ├── errors.md
│       │   ├── flows.md
│       │   ├── index.md
│       │   ├── io.md
│       │   ├── selecting-from-channels.md
│       │   └── transforming-channels.md
│       ├── structured-concurrency
│       │   ├── error-handling-scopes.md
│       │   ├── fork-join.md
│       │   ├── fork-local.md
│       │   ├── index.md
│       │   └── interruptions.md
│       ├── tour.md
│       ├── utils
│       │   ├── actors.md
│       │   ├── circuit-breaker.md
│       │   ├── control-flow.md
│       │   ├── oxapp.md
│       │   ├── rate-limiter.md
│       │   ├── resources.md
│       │   └── utility.md
│       └── watch.sh
├── kafka
│   ├── docker-tests
│   │   └── docker-compose.yml
│   └── src
│       ├── main
│       │   └── scala
│       │       └── ox
│       │           └── kafka
│       │               ├── ConsumerSettings.scala
│       │               ├── KafkaConsumerWrapper.scala
│       │               ├── KafkaDrain.scala
│       │               ├── KafkaFlow.scala
│       │               ├── KafkaStage.scala
│       │               ├── ProducerSettings.scala
│       │               ├── ReceivedMessage.scala
│       │               ├── kafkaOffsetCommit.scala
│       │               └── package.scala
│       └── test
│           ├── resources
│           │   └── logback.xml
│           └── scala
│               └── ox
│                   └── kafka
│                       ├── KafkaTest.scala
│                       └── manual
│                           ├── pekko
│                           │   ├── publishPekko.scala
│                           │   └── transferPekko.scala
│                           ├── publish.scala
│                           ├── transfer.scala
│                           └── util.scala
├── mdc-logback
│   └── src
│       ├── main
│       │   └── scala
│       │       └── ox
│       │           └── logback
│       │               └── InheritableMDC.scala
│       └── test
│           └── scala
│               └── ox
│                   └── logback
│                       └── InheritableMDCTest.scala
├── otel-context
│   └── src
│       └── main
│           └── scala
│               └── ox
│                   └── otel
│                       └── context
│                           └── PropagatingVirtualThreadFactory.scala
└── project
    ├── build.properties
    └── plugins.sbt
/.git-blame-ignore-revs:
--------------------------------------------------------------------------------
1 | # Scala Steward: Reformat with scalafmt 3.7.17
2 | e988553eda72e5b4ea0760ff5e95bf0af12e9921
3 |
4 | # Scala Steward: Reformat with scalafmt 3.8.1
5 | 5c6ea70f91018a2c0d16116c82cab92ee4d507f0
6 |
7 | # Scala Steward: Reformat with scalafmt 3.8.2
8 | b73b3c3f8fcaaa70595cd267f7242b0a0c345039
9 |
10 | # Scala Steward: Reformat with scalafmt 3.8.3
11 | 9cc844f181b6f41a327bbff0c18a8c6621532ef1
12 |
13 | # Scala Steward: Reformat with scalafmt 3.8.5
14 | 49688a6ac2f4d2b05006524970e3758aa84002c0
15 |
--------------------------------------------------------------------------------
/.github/labeler.yml:
--------------------------------------------------------------------------------
1 | version: 1
2 | labels:
3 | - label: "automerge"
4 | authors: ["softwaremill-ci"]
5 | files:
6 | - "build.sbt"
7 | - "project/build.properties"
8 | - "project/Versions.scala"
9 | - "project/plugins.sbt"
10 | - label: "dependency"
11 | authors: ["softwaremill-ci"]
12 | files:
13 | - "build.sbt"
14 | - "project/build.properties"
15 | - "project/Versions.scala"
16 | - "project/plugins.sbt"
17 |
--------------------------------------------------------------------------------
/.github/release-drafter.yml:
--------------------------------------------------------------------------------
1 | template: |
2 | ## What’s Changed
3 |
4 | $CHANGES
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | pull_request:
4 | branches: [ master ]
5 | push:
6 | branches: [ master ]
7 | tags: [ v* ]
8 | jobs:
9 | ci:
10 | runs-on: ubuntu-24.04
11 | strategy:
12 | fail-fast: false
13 | matrix:
14 | java: [ "21", "24" ]
15 | steps:
16 | - name: Checkout
17 | uses: actions/checkout@v4
18 | - name: Set up JDK
19 | uses: actions/setup-java@v4
20 | with:
21 | distribution: 'temurin'
22 | java-version: ${{ matrix.java }}
23 | cache: 'sbt'
24 | - uses: sbt/setup-sbt@v1
25 | - name: Check formatting
26 | run: sbt -v scalafmtCheckAll
27 | - name: Compile
28 | run: sbt -v compile
29 | - name: Compile documentation
30 | run: sbt -v compileDocumentation
31 | - name: Test
32 | run: sbt -v test
33 | - uses: actions/upload-artifact@v4 # upload test results
34 | if: success() || failure() # run this step even if previous step failed
35 | with:
36 | name: 'tests-results-java-${{ matrix.java }}'
37 | path: '**/test-reports/TEST*.xml'
38 |
39 | publish:
40 | uses: softwaremill/github-actions-workflows/.github/workflows/publish-release.yml@main
41 | needs: [ci]
42 | if: github.event_name != 'pull_request' && (startsWith(github.ref, 'refs/tags/v'))
43 | secrets: inherit
44 | with:
45 | java-version: '21'
46 |
47 | label:
48 | # only for PRs by softwaremill-ci
49 | if: github.event.pull_request.user.login == 'softwaremill-ci'
50 | uses: softwaremill/github-actions-workflows/.github/workflows/label.yml@main
51 |
52 | auto-merge:
53 | # only for PRs by softwaremill-ci
54 | if: github.event.pull_request.user.login == 'softwaremill-ci'
55 | needs: [ ci, label ]
56 | uses: softwaremill/github-actions-workflows/.github/workflows/auto-merge.yml@main
--------------------------------------------------------------------------------
/.github/workflows/rebase-cmd-dispatch.yml:
--------------------------------------------------------------------------------
1 | # On any issue comment, this workflow looks for '/rebase' in the comment body; on a match, it dispatches a command
2 | # with event type 'rebase-command', which triggers the 'rebase-command' workflow that performs the rebase operation.
3 | name: Slash Command Dispatch
4 | on:
5 | issue_comment:
6 | types: [created]
7 | jobs:
8 | rebase-cmd-dispatch:
9 | uses: softwaremill/github-actions-workflows/.github/workflows/rebase-cmd-dispatch.yml@main
10 | secrets:
11 | repo-github-token: ${{ secrets.REPO_GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/rebase-cmd.yml:
--------------------------------------------------------------------------------
1 | name: rebase-command
2 | on:
3 | repository_dispatch:
4 | types: [rebase-command]
5 | jobs:
6 | rebase:
7 | uses: softwaremill/github-actions-workflows/.github/workflows/rebase-cmd.yml@main
8 | secrets:
9 | repo-github-token: ${{ secrets.REPO_GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/scala-steward.yml:
--------------------------------------------------------------------------------
1 | name: Scala Steward
2 |
3 | # This workflow will launch at 00:00 every day
4 | on:
5 | schedule:
6 | - cron: '0 0 * * *'
7 | workflow_dispatch:
8 |
9 | jobs:
10 | scala-steward:
11 | uses: softwaremill/github-actions-workflows/.github/workflows/scala-steward.yml@main
12 | secrets:
13 | repo-github-token: ${{secrets.REPO_GITHUB_TOKEN}}
14 | with:
15 | java-version: '21'
--------------------------------------------------------------------------------
/.github/workflows/test-report.yml:
--------------------------------------------------------------------------------
1 | name: 'Test Report'
2 | on:
3 | workflow_run:
4 | workflows: ['CI']
5 | types:
6 | - completed
7 |
8 | permissions:
9 | contents: read
10 | actions: read
11 | checks: write
12 |
13 | jobs:
14 | test-report:
15 | uses: softwaremill/github-actions-workflows/.github/workflows/test-report.yml@main
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | *.log
3 |
4 | .cache
5 | .history
6 | .env/
7 | .lib/
8 | dist/*
9 | target/
10 | lib_managed/
11 | src_managed/
12 | project/boot/
13 | project/plugins/project/
14 |
15 | .idea*
16 | .bsp
17 | .metals
18 | .vscode
19 | .bloop
20 | metals.sbt
21 |
22 | /notes.md
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | sphinx:
4 | configuration: generated-doc/out/conf.py
5 |
6 | python:
7 | install:
8 | - requirements: generated-doc/out/requirements.txt
9 |
10 | build:
11 | os: ubuntu-22.04
12 | tools:
13 | python: "3.12"
14 |
--------------------------------------------------------------------------------
/.scala-steward.conf:
--------------------------------------------------------------------------------
1 | updates.pin = [
2 | { groupId = "org.scala-lang", artifactId = "scala3-library", version = "3.3." }
3 | ]
4 |
--------------------------------------------------------------------------------
/.scalafmt.conf:
--------------------------------------------------------------------------------
1 | version = 3.9.4
2 | maxColumn = 140
3 | runner.dialect = scala3
4 |
5 | rewrite.scala3 {
6 | convertToNewSyntax = true
7 | removeOptionalBraces.enabled = true
8 | insertEndMarkerMinLines = 8
9 | }
--------------------------------------------------------------------------------
/core/src/main/scala/ox/channels/BufferCapacity.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | /** Used to determine the capacity of buffers when new channels are created by channel- or flow-transforming operations, such as
4 |   * [[Source.map]], [[Flow.buffer]], [[Flow.runToChannel]]. If no given instance is in scope, the default of 16 is used.
5 | */
6 | opaque type BufferCapacity = Int
7 |
8 | extension (c: BufferCapacity) def toInt: Int = c
9 |
10 | object BufferCapacity:
11 | def apply(c: Int): BufferCapacity = c
12 | def newChannel[T](using BufferCapacity): Channel[T] = Channel.withCapacity[T](summon[BufferCapacity])
13 | given default: BufferCapacity = 16
14 |
--------------------------------------------------------------------------------
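
A minimal usage sketch for the above: bringing a custom `BufferCapacity` given into scope overrides the default of 16 for operations that create buffered channels (that `Flow.buffer()` picks up the given is an assumption based on the doc comment above; `fromValues`, `buffer` and `runToList` are used as in the tests in this repo):

    import ox.channels.BufferCapacity
    import ox.flow.Flow

    @main def bufferCapacityExample(): Unit =
      // channels created by buffering operations in this scope get 32-element buffers
      given BufferCapacity = BufferCapacity(32)
      println(Flow.fromValues(1, 2, 3).buffer().runToList()) // List(1, 2, 3)
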
/core/src/main/scala/ox/channels/ChannelClosed.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import com.softwaremill.jox.{ChannelDone as JChannelDone, ChannelError as JChannelError}
4 |
5 | /** Returned by channel methods (e.g. [[Source.receiveOrClosed]], [[Sink.sendOrClosed]], [[selectOrClosed]]) when the channel is closed. */
6 | sealed trait ChannelClosed:
7 | def toThrowable: Throwable = this match
8 | case ChannelClosed.Error(reason) => ChannelClosedException.Error(reason)
9 | case ChannelClosed.Done => ChannelClosedException.Done()
10 |
11 | object ChannelClosed:
12 | case class Error(reason: Throwable) extends ChannelClosed
13 | case object Done extends ChannelClosed
14 |
15 | private[ox] def fromJoxOrT[T](joxResult: AnyRef): T | ChannelClosed = fromJox(joxResult).asInstanceOf[T | ChannelClosed]
16 | private[ox] def fromJoxOrUnit(joxResult: AnyRef): Unit | ChannelClosed =
17 | if joxResult == null then () else fromJox(joxResult).asInstanceOf[ChannelClosed]
18 |
19 | private def fromJox(joxResult: AnyRef): AnyRef | ChannelClosed =
20 | joxResult match
21 | case _: JChannelDone => Done
22 | case e: JChannelError => Error(e.cause())
23 | case _ => joxResult
24 | end ChannelClosed
25 |
26 | enum ChannelClosedException(cause: Option[Throwable]) extends Exception(cause.orNull):
27 | case Error(cause: Throwable) extends ChannelClosedException(Some(cause))
28 | case Done() extends ChannelClosedException(None)
29 |
--------------------------------------------------------------------------------
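
A sketch of consuming the union-typed results described above: matching on the value returned by `receiveOrClosed()` (channel constructors as used in the tests elsewhere in this repo):

    import ox.channels.{Channel, ChannelClosed}

    @main def channelClosedExample(): Unit =
      val c = Channel.buffered[Int](1)
      c.send(1)
      c.done()
      // receiveOrClosed() returns Int | ChannelClosed; match on the union
      c.receiveOrClosed() match
        case ChannelClosed.Done     => println("channel done")
        case ChannelClosed.Error(t) => println(s"channel failed: $t")
        case v: Int                 => println(s"received: $v") // prints: received: 1
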
/core/src/main/scala/ox/channels/ChannelClosedUnion.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import scala.util.{Failure, Success, Try}
4 |
5 | /** Extension methods on union types which include [[ChannelClosed]]. */
6 | object ChannelClosedUnion:
7 |
8 | extension [T](v: T | ChannelClosed)
9 | inline def map[U](f: T => U): U | ChannelClosed = v match
10 | case ChannelClosed.Done => ChannelClosed.Done
11 | case e: ChannelClosed.Error => e
12 | case t: T @unchecked => f(t)
13 |
14 | /** Throw a [[ChannelClosedException]] if the provided value represents a closed channel (one of [[ChannelClosed]] values). */
15 | inline def orThrow: T = v match
16 | case c: ChannelClosed => throw c.toThrowable
17 | case t: T @unchecked => t
18 |
19 | inline def toEither: Either[ChannelClosed, T] = v match
20 | case c: ChannelClosed => Left(c)
21 | case t: T @unchecked => Right(t)
22 |
23 | inline def toTry: Try[T] = v match
24 | case c: ChannelClosed => Failure(c.toThrowable)
25 | case t: T @unchecked => Success(t)
26 |
27 | inline def isValue: Boolean = v match
28 | case _: ChannelClosed => false
29 | case _: T @unchecked => true
30 | end extension
31 |
32 | extension [T](v: T | ChannelClosed.Error)(using DummyImplicit)
33 | inline def mapUnlessError[U](f: T => U): U | ChannelClosed.Error = v match
34 | case e: ChannelClosed.Error => e
35 | case t: T @unchecked => f(t)
36 | end ChannelClosedUnion
37 |
--------------------------------------------------------------------------------
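
A sketch of the extensions above in use, converting union-typed results to `Either` (channel API as in the tests in this repo):

    import ox.channels.{Channel, ChannelClosed}
    import ox.channels.ChannelClosedUnion.*

    @main def channelClosedUnionExample(): Unit =
      val c = Channel.buffered[Int](1)
      c.send(42)
      c.done()
      println(c.receiveOrClosed().toEither) // Right(42)
      println(c.receiveOrClosed().toEither) // Left(Done)
      // .orThrow would instead rethrow closure as a ChannelClosedException
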
/core/src/main/scala/ox/channels/forkPropagate.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import ox.*
4 |
5 | /** Fork the given computation, propagating any exceptions to the given sink. The propagated exceptions are not rethrown. The fork is run
6 | * only for its side effects, and the result is discarded (can't be joined, same as [[forkDiscard]]).
7 | *
8 | * Designed to be used in stream operators.
9 | *
10 | * @see
11 | * ADR#1, ADR#3, implementation note in [[SourceOps]].
12 | */
13 | def forkPropagate[T](propagateExceptionsTo: Sink[?])(f: => Unit)(using OxUnsupervised): Unit =
14 | forkUnsupervised:
15 | try f
16 | catch case t: Throwable => propagateExceptionsTo.errorOrClosed(t).discard
17 | .discard
18 |
--------------------------------------------------------------------------------
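
A sketch of the intended use in a stream operator: the background fork propagates failures to the output channel instead of rethrowing them. The operator itself (`mapToChannel`) is illustrative, composed from APIs defined elsewhere in this repo:

    import ox.*
    import ox.channels.*

    // illustrative operator: maps source elements into a new channel, in a background fork
    def mapToChannel[A, B](source: Source[A])(f: A => B)(using OxUnsupervised): Source[B] =
      val target = Channel.buffered[B](16)
      forkPropagate(target):
        // an exception thrown by f closes `target` with an error, rather than being rethrown
        repeatWhile:
          source.receiveOrClosed() match
            case ChannelClosed.Done     => target.done(); false
            case e: ChannelClosed.Error => target.errorOrClosed(e.reason).discard; false
            case a: A @unchecked        => target.send(f(a)); true
      target
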
/core/src/main/scala/ox/control.scala:
--------------------------------------------------------------------------------
1 | package ox
2 |
3 | import java.util.concurrent.locks.LockSupport
4 |
5 | /** Repeat evaluating `f` forever. */
6 | inline def forever(inline f: Unit): Nothing =
7 | while true do f
8 | throw new RuntimeException("can't get here")
9 |
10 | /** Repeat evaluating `f` while it evaluates to `true`. */
11 | inline def repeatWhile(inline f: Boolean): Unit =
12 | var loop = true
13 | while loop do loop = f
14 |
15 | /** Repeat evaluating `f` until it evaluates to `true`. */
16 | inline def repeatUntil(inline f: Boolean): Unit =
17 | var loop = true
18 | while loop do loop = !f
19 |
20 | /** Blocks the current thread indefinitely, until it is interrupted. */
21 | inline def never: Nothing = forever {
22 | LockSupport.park()
23 | if Thread.interrupted() then throw new InterruptedException()
24 | }
25 |
26 | /** Checks if the current thread is interrupted. Useful in compute-intensive code, which wants to cooperate in the cancellation protocol,
27 | * e.g. when run in a [[supervised]] scope.
28 | *
29 | * @throws InterruptedException
30 | * if the current thread is interrupted.
31 | */
32 | inline def checkInterrupt(): Unit =
33 | if Thread.interrupted() then throw new InterruptedException()
34 |
--------------------------------------------------------------------------------
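
Sketches of the looping helpers defined above:

    import ox.{repeatUntil, repeatWhile}

    @main def controlExample(): Unit =
      var i = 0
      // runs the body while it evaluates to true
      repeatWhile {
        i += 1
        i < 3
      }
      println(i) // 3

      var j = 0
      // runs the body until it evaluates to true
      repeatUntil {
        j += 1
        j == 5
      }
      println(j) // 5
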
/core/src/main/scala/ox/flow/FlowCompanionIOOps.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import ox.*
4 |
5 | import java.io.IOException
6 | import java.io.InputStream
7 | import java.nio.ByteBuffer
8 | import java.nio.channels.FileChannel
9 | import java.nio.file.Files
10 | import java.nio.file.Path
11 | import java.nio.file.StandardOpenOption
12 |
13 | trait FlowCompanionIOOps:
14 | this: FlowCompanionOps =>
15 |
16 | /** Converts a [[java.io.InputStream]] into a `Flow[Chunk[Byte]]`.
17 | *
18 | * @param is
19 | * an `InputStream` to read bytes from.
20 | * @param chunkSize
21 | * maximum number of bytes to read from the underlying `InputStream` before emitting a new chunk.
22 | */
23 | def fromInputStream(is: InputStream, chunkSize: Int = 1024): Flow[Chunk[Byte]] = usingEmitInline: emit =>
24 | try
25 | repeatWhile:
26 | val buf = new Array[Byte](chunkSize)
27 | val readBytes = is.read(buf)
28 | if readBytes == -1 then false
29 | else
30 | if readBytes > 0 then emit.apply(if readBytes == chunkSize then Chunk.fromArray(buf) else Chunk.fromArray(buf.take(readBytes)))
31 | true
32 | finally is.close()
33 | end fromInputStream
34 |
35 | /** Creates a flow that emits byte chunks read from a file.
36 | *
37 | * @param path
38 | * path of the file to read from.
39 | * @param chunkSize
40 | * maximum number of bytes to read from the file before emitting a new chunk.
41 | */
42 |
43 | def fromFile(path: Path, chunkSize: Int = 1024): Flow[Chunk[Byte]] = usingEmitInline: emit =>
44 | if Files.isDirectory(path) then throw new IOException(s"Path $path is a directory")
45 | val jFileChannel =
46 | try FileChannel.open(path, StandardOpenOption.READ)
47 | catch
48 | case _: UnsupportedOperationException =>
49 | // Some file systems don't support file channels
50 | Files.newByteChannel(path, StandardOpenOption.READ)
51 |
52 | try
53 | repeatWhile:
54 | val buf = ByteBuffer.allocate(chunkSize)
55 | val readBytes = jFileChannel.read(buf)
56 | if readBytes < 0 then false
57 | else
58 | if readBytes > 0 then emit.apply(Chunk.fromArray(if readBytes == chunkSize then buf.array else buf.array.take(readBytes)))
59 | true
60 | finally jFileChannel.close()
61 | end try
62 | end fromFile
63 | end FlowCompanionIOOps
64 |
--------------------------------------------------------------------------------
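
A sketch of streaming a file with `fromFile` and summing the chunk sizes (a minimal example, assuming a `runFold` terminal operation from `FlowRunOps` and a `size` method on `Chunk`, neither of which is reproduced in this dump):

    import ox.flow.Flow
    import java.nio.file.Path

    @main def fromFileExample(): Unit =
      // reads the file in chunks of up to 8 KiB; the file is closed when the flow completes
      val byteCount = Flow
        .fromFile(Path.of("build.sbt"), chunkSize = 8192)
        .runFold(0L)((acc, chunk) => acc + chunk.size)
      println(s"read $byteCount bytes")
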
/core/src/main/scala/ox/inScopeRunner.scala:
--------------------------------------------------------------------------------
1 | package ox
2 |
3 | import ox.channels.ActorRef
4 |
5 | /** Returns a concurrency-scope-specific runner, which allows scheduling of functions to be run within the current concurrency scope, from
6 | * the context of arbitrary threads (not necessarily threads that are part of the current concurrency scope).
7 | *
8 | * Usage: obtain a runner from within a concurrency scope, while on a fork/thread that is managed by the concurrency scope. Then, pass that
9 | * runner to the external library. It can then schedule functions (e.g. create forks) to be run within the concurrency scope from arbitrary
10 | * threads, as long as the concurrency scope isn't complete.
11 | *
12 | * Execution is scheduled through an [[Actor]], which is lazily created, and bound to an [[Ox]] instance. The functions are run serially,
13 | * hence they should not block. Any exceptions thrown by the functions will cause the entire concurrency scope to end.
14 | *
15 | * This method should **only** be used when integrating Ox with libraries that manage concurrency on their own, and which run callbacks on
16 | * a managed thread pool. The logic executed by the third-party library should be entirely contained within the lifetime of this
17 | * concurrency scope. The sole purpose of this method is to enable running scope-aware logic from threads **other** than Ox-managed.
18 | *
19 | * Use with care!
20 | *
21 | * @see
22 | * [[InScopeRunner.async]]
23 | */
24 | def inScopeRunner()(using Ox): InScopeRunner = InScopeRunner(summon[Ox].runInScopeActor)
25 |
26 | /** @see
27 | * inScopeRunner
28 | */
29 | class InScopeRunner(runInScope: ActorRef[RunInScope]):
30 | /** Runs the given function asynchronously, in the scope of the [[Ox]] concurrency scope in which this runner was created.
31 | *
32 |   * `f` should not block and should return promptly, so as not to obstruct execution of other scheduled functions. Typically, it should
33 |   * start a background fork. Any exceptions thrown by `f` will cause the entire scope to end.
34 | */
35 | def async(f: Ox ?=> Unit): Unit = runInScope.tell(_.apply(f))
36 | end InScopeRunner
37 |
--------------------------------------------------------------------------------
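
A sketch of the integration pattern described above: a callback-based library invokes callbacks on its own thread pool, and the callback uses the runner to start scope-managed forks. `ExternalLib` is hypothetical; `inScopeRunner`, `async` and `forkDiscard` are as defined in this repo:

    import ox.*

    // hypothetical callback-based library, invoking callbacks on its own threads
    trait ExternalLib:
      def onEvent(callback: String => Unit): Unit

    def integrate(lib: ExternalLib)(using Ox): Unit =
      val runner = inScopeRunner()
      lib.onEvent: event =>
        // runs on the library's thread: schedule scope-aware logic, returning promptly
        runner.async:
          forkDiscard:
            println(s"handling $event in a scope-managed fork")
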
/core/src/main/scala/ox/internal/ScopeContext.scala:
--------------------------------------------------------------------------------
1 | package ox.internal
2 |
3 | import ox.OxUnsupervised
4 | import ox.ForkLocalMap
5 |
6 | /** Should only ever be updated when starting a new scope, for the duration of the scope's lifetime. Used to verify that forks are properly
7 |   * started within a running concurrency scope, on a thread that is part of some scope in the tree.
8 | */
9 | private[ox] val currentScope: ThreadLocal[OxUnsupervised] = new ThreadLocal[OxUnsupervised]()
10 |
11 | private[ox] def currentLocals: ForkLocalMap =
12 | val scope = currentScope.get()
13 | if scope == null then ForkLocalMap(Map.empty) else scope.locals
14 |
--------------------------------------------------------------------------------
/core/src/main/scala/ox/oxThreadFactory.scala:
--------------------------------------------------------------------------------
1 | package ox
2 |
3 | import java.util.concurrent.ThreadFactory
4 |
5 | private var customThreadFactory: ThreadFactory = _
6 |
7 | /** @see [[oxThreadFactory]] */
8 | def setOxThreadFactory(tf: ThreadFactory): Unit =
9 | customThreadFactory = tf
10 | if !oxThreadFactory.eq(customThreadFactory) then
11 | throw new RuntimeException("The thread factory was already used before setting a custom one!")
12 |
13 | /** The thread factory that is used to create threads in Ox scopes ([[supervised]], [[unsupervised]] etc.). Should be set once at the start
14 | * of the application, before any scopes or forks are created, using [[setOxThreadFactory]].
15 | *
16 | * @see
17 | * [[OxApp.Settings]]
18 | */
19 | lazy val oxThreadFactory: ThreadFactory =
20 | val custom = customThreadFactory
21 | if custom == null then Thread.ofVirtual().factory() else custom
22 |
--------------------------------------------------------------------------------
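
A sketch of installing a custom factory before any scopes are created, as required above; the wrapping factory (which logs thread starts) is illustrative:

    import ox.*
    import java.util.concurrent.ThreadFactory

    @main def customThreadFactoryApp(): Unit =
      val base: ThreadFactory = Thread.ofVirtual().factory()
      val logging: ThreadFactory = runnable =>
        base.newThread { () =>
          println(s"starting: ${Thread.currentThread()}")
          runnable.run()
        }
      // must happen before the first scope/fork, otherwise setOxThreadFactory throws
      setOxThreadFactory(logging)
      supervised:
        fork(println("hello from a fork")).join()
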
/core/src/main/scala/ox/resilience/RateLimiterAlgorithm.scala:
--------------------------------------------------------------------------------
1 | package ox.resilience
2 |
3 | /** Determines the algorithm to use for the rate limiter. */
4 | trait RateLimiterAlgorithm:
5 |
6 | /** Acquires a permit to execute the operation. This method should block until a permit is available. */
7 | final def acquire(): Unit =
8 | acquire(1)
9 |
10 | /** Acquires the given number of permits to execute the operation. This method should block until the permits are available. */
11 | def acquire(permits: Int): Unit
12 |
13 | /** Tries to acquire a permit to execute the operation. This method should not block. */
14 | final def tryAcquire(): Boolean =
15 | tryAcquire(1)
16 |
17 | /** Tries to acquire permits to execute the operation. This method should not block. */
18 | def tryAcquire(permits: Int): Boolean
19 |
20 | /** Updates the internal state of the rate limiter to check whether new operations can be accepted. */
21 | def update(): Unit
22 |
23 | /** Returns the time in nanoseconds that needs to elapse until the next update. It should not modify internal state. */
24 | def getNextUpdate: Long
25 |
26 | /** Runs the operation, allowing the algorithm to take into account its duration, if needed. */
27 | final def runOperation[T](operation: => T): T = runOperation(operation, 1)
28 |
29 | /** Runs the operation, allowing the algorithm to take into account its duration, if needed. */
30 | def runOperation[T](operation: => T, permits: Int): T
31 |
32 | end RateLimiterAlgorithm
33 |
--------------------------------------------------------------------------------
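
A minimal sketch of an implementation of the trait above: a fixed-window limiter backed by a semaphore, refilled on every `update()`. This is deliberately simplified; the algorithms shipped in this package handle timing and fairness more carefully:

    import ox.resilience.RateLimiterAlgorithm
    import java.util.concurrent.Semaphore

    class FixedWindow(maxOps: Int, windowNanos: Long) extends RateLimiterAlgorithm:
      private val semaphore = new Semaphore(maxOps)
      @volatile private var windowStart = System.nanoTime()

      def acquire(permits: Int): Unit = semaphore.acquire(permits)
      def tryAcquire(permits: Int): Boolean = semaphore.tryAcquire(permits)

      def update(): Unit =
        // a new window starts: top the permits back up to maxOps
        semaphore.release(maxOps - semaphore.availablePermits())
        windowStart = System.nanoTime()

      def getNextUpdate: Long = windowStart + windowNanos - System.nanoTime()

      // this simple algorithm doesn't take operation duration into account
      def runOperation[T](operation: => T, permits: Int): T = operation
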
/core/src/main/scala/ox/resilience/ResultPolicy.scala:
--------------------------------------------------------------------------------
1 | package ox.resilience
2 |
3 | /** A policy that allows customizing when a non-erroneous result is considered successful and when an error is worth retrying (which allows
4 | * for failing fast on certain errors).
5 | *
6 | * @param isSuccess
7 | * A function that determines whether a non-erroneous result is considered successful. By default, every non-erroneous result is
8 | * considered successful.
9 | * @param isWorthRetrying
10 | * A function that determines whether an error is worth retrying. By default, all errors are retried.
11 | * @tparam E
12 | * The error type of the operation. For operations returning a `T` or a `Try[T]`, this is fixed to `Throwable`. For operations returning
13 | * an `Either[E, T]`, this can be any `E`.
14 | * @tparam T
15 | * The successful result type for the operation.
16 | */
17 | case class ResultPolicy[E, T](isSuccess: T => Boolean = (_: T) => true, isWorthRetrying: E => Boolean = (_: E) => true)
18 |
19 | object ResultPolicy:
20 | /** A policy that considers every non-erroneous result successful and retries on any error. */
21 | def default[E, T]: ResultPolicy[E, T] = ResultPolicy()
22 |
23 | /** A policy that customizes when a non-erroneous result is considered successful, and retries all errors
24 | *
25 | * @param isSuccess
26 | * A predicate that indicates whether a non-erroneous result is considered successful.
27 | */
28 | def successfulWhen[E, T](isSuccess: T => Boolean): ResultPolicy[E, T] = ResultPolicy(isSuccess = isSuccess)
29 |
30 | /** A policy that customizes which errors are retried, and considers every non-erroneous result successful
31 | * @param isWorthRetrying
32 | * A predicate that indicates whether an erroneous result should be retried.
33 | */
34 | def retryWhen[E, T](isWorthRetrying: E => Boolean): ResultPolicy[E, T] = ResultPolicy(isWorthRetrying = isWorthRetrying)
35 |
36 | /** A policy that considers every non-erroneous result successful and never retries any error, i.e. fails fast */
37 | def neverRetry[E, T]: ResultPolicy[E, T] = ResultPolicy(isWorthRetrying = _ => false)
38 | end ResultPolicy
39 |
--------------------------------------------------------------------------------
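
Sketches of the factory methods above; such policies would typically then be set on a `RetryConfig`:

    import ox.resilience.ResultPolicy
    import java.io.IOException

    // retry only I/O errors, failing fast on anything else
    val retryOnlyIO: ResultPolicy[Throwable, String] =
      ResultPolicy.retryWhen(_.isInstanceOf[IOException])

    // additionally treat an empty response as unsuccessful (and thus retryable)
    val nonEmptyResponse: ResultPolicy[Throwable, String] =
      ResultPolicy.successfulWhen(_.nonEmpty)
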
/core/src/main/scala/ox/resilience/TokenBucket.scala:
--------------------------------------------------------------------------------
1 | package ox.resilience
2 |
3 | import java.util.concurrent.Semaphore
4 |
5 | /** Used by the leaky bucket rate limiter & [[AdaptiveRetry]], to limit the rate of operations. */
6 | case class TokenBucket(bucketSize: Int, initSize: Option[Int] = None):
7 | private val semaphore = Semaphore(initSize.getOrElse(bucketSize))
8 |
9 | def tryAcquire(permits: Int): Boolean =
10 | semaphore.tryAcquire(permits)
11 |
12 | def acquire(permits: Int): Unit =
13 | semaphore.acquire(permits)
14 |
15 | def release(permits: Int): Unit =
16 | val availablePermits = semaphore.availablePermits()
17 | val toRelease = if availablePermits + permits >= bucketSize then bucketSize - availablePermits else permits
18 | semaphore.release(toRelease)
19 |
20 | end TokenBucket
21 |
--------------------------------------------------------------------------------
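
A sketch of the bucket in isolation, showing that `release` never overfills past `bucketSize` (the capping logic in the source above):

    import ox.resilience.TokenBucket

    @main def tokenBucketExample(): Unit =
      val bucket = TokenBucket(bucketSize = 2)
      println(bucket.tryAcquire(1)) // true - 1 token left
      println(bucket.tryAcquire(2)) // false - not enough tokens
      bucket.release(5)             // capped: refills to at most bucketSize
      println(bucket.tryAcquire(2)) // true
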
/core/src/main/scala/ox/scheduling/Jitter.scala:
--------------------------------------------------------------------------------
1 | package ox.scheduling
2 |
3 | enum Jitter:
4 | /** Full jitter, i.e. the delay is a random value between 0 and the interval. */
5 | case Full
6 |
7 | /** Equal jitter, i.e. the delay is half of the interval plus a random value between 0 and the other half. */
8 | case Equal
9 | end Jitter
10 |
--------------------------------------------------------------------------------
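
A sketch of what the two variants mean for a concrete interval; this mirrors the definitions in the doc comments above, not the library's internal computation:

    import ox.scheduling.Jitter
    import scala.concurrent.duration.*
    import scala.util.Random

    def jittered(interval: FiniteDuration, jitter: Jitter): FiniteDuration = jitter match
      case Jitter.Full => Random.between(0L, interval.toMillis + 1).millis // 0..interval
      case Jitter.Equal =>
        val half = interval.toMillis / 2
        (half + Random.between(0L, half + 1)).millis // half..interval
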
/core/src/main/scala/ox/scheduling/RepeatConfig.scala:
--------------------------------------------------------------------------------
1 | package ox.scheduling
2 |
3 | /** A config that defines how to repeat an operation.
4 | *
5 | * [[Schedule]] provides the interval between subsequent invocations, which guarantees that the next operation will start no sooner than
6 |   * the specified duration after the previous operation has started. If the previous operation takes longer than the interval, the next
7 | * operation will start immediately after the previous one has finished.
8 | *
9 | * It is a special case of [[ScheduledConfig]] with [[ScheduledConfig.sleepMode]] always set to [[SleepMode.StartToStart]] and a
10 | * [[ScheduledConfig.afterAttempt]] callback which checks if the result was successful.
11 | *
12 | * @param schedule
13 | * The schedule which determines the intervals between invocations and number of attempts to execute the operation.
14 | * @param shouldContinueOnResult
15 | * A function that determines whether to continue the loop after a success. The function receives the value that was emitted by the last
16 |   * invocation. Defaults to `_ => true`.
17 | * @tparam E
18 | * The error type of the operation. For operations returning a `T` or a `Try[T]`, this is fixed to `Throwable`. For operations returning
19 | * an `Either[E, T]`, this can be any `E`.
20 | * @tparam T
21 | * The successful result type for the operation.
22 | */
23 | case class RepeatConfig[E, T](
24 | schedule: Schedule,
25 | shouldContinueOnResult: T => Boolean = (_: T) => true
26 | ):
27 | def schedule(newSchedule: Schedule): RepeatConfig[E, T] = copy(schedule = newSchedule)
28 |
29 | def shouldContinueOnResult(newShouldContinueOnResult: T => Boolean): RepeatConfig[E, T] =
30 | copy(shouldContinueOnResult = newShouldContinueOnResult)
31 |
32 | def toScheduledConfig: ScheduledConfig[E, T] =
33 | val afterAttempt: (Int, Either[E, T]) => ScheduleStop = (_, attempt) =>
34 | attempt match
35 | case Left(_) => ScheduleStop.Yes
36 | case Right(value) => ScheduleStop(!shouldContinueOnResult(value))
37 |
38 | ScheduledConfig(schedule, afterAttempt, SleepMode.StartToStart)
39 | end toScheduledConfig
40 | end RepeatConfig
41 |
--------------------------------------------------------------------------------
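
A sketch of constructing a config and passing it to the `repeat` entry point from `repeat.scala`. Note that `Schedule.fixedInterval` and the exact `repeat` signature are assumptions here, since neither file is reproduced in this dump:

    import ox.scheduling.*
    import scala.concurrent.duration.*

    // repeat at ~100ms start-to-start intervals, stopping once the operation returns 0;
    // Schedule.fixedInterval and the repeat signature are assumed
    @main def repeatExample(): Unit =
      val config = RepeatConfig[Throwable, Int](Schedule.fixedInterval(100.millis))
        .shouldContinueOnResult(_ != 0)
      val last = repeat(config)(scala.util.Random.nextInt(5))
      println(s"stopped with: $last")
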
/core/src/test/scala/ox/AppErrorTest.scala:
--------------------------------------------------------------------------------
1 | package ox
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 |
6 | import java.util.concurrent.Semaphore
7 |
8 | import scala.concurrent.duration.*
9 |
10 | class AppErrorTest extends AnyFlatSpec with Matchers:
11 | "supervisedError" should "return the app error from the main body" in {
12 | supervisedError(EitherMode[Int])(Left(10)) shouldBe Left(10)
13 | }
14 |
15 | it should "return success from the main body" in {
16 | supervisedError(EitherMode[Int])(Right("ok")) shouldBe Right("ok")
17 | }
18 |
19 | it should "return the app error returned by a failing fork" in {
20 | supervisedError(EitherMode[Int]) { forkUserError(Left(10)).discard; Right(()) } shouldBe Left(10)
21 | }
22 |
23 | it should "return success from the main body if a fork is successful" in {
24 | supervisedError(EitherMode[Int]) { forkUserError(Right("ok")).discard; Right(()) } shouldBe Right(())
25 | }
26 |
27 | it should "interrupt other forks if one fails" in {
28 | val s = Semaphore(0)
29 | supervisedError(EitherMode[Int]) {
30 | forkUser {
31 | s.acquire() // will never complete
32 | }.discard
33 | forkUser {
34 | s.acquire() // will never complete
35 | }.discard
36 | forkUserError {
37 | sleep(100.millis)
38 | Left(-1)
39 | }.discard
40 | Right(())
41 | } shouldBe Left(-1)
42 | }
43 | end AppErrorTest
44 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/ForeachParTest.scala:
--------------------------------------------------------------------------------
1 | package ox
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.util.{MaxCounter, Trail}
6 |
7 | import scala.List
8 | import scala.collection.IterableFactory
9 | import scala.collection.immutable.Iterable
10 | import scala.concurrent.duration.*
11 |
12 | class ForeachParTest extends AnyFlatSpec with Matchers:
13 | "foreachPar" should "run computations in parallel" in {
14 | val InputElements = 17
15 | val TransformationMillis = 100.millis
16 | val trail = new Trail()
17 |
18 | val input = 0 to InputElements
19 | def transformation(i: Int): Unit =
20 | sleep(TransformationMillis)
21 | trail.add(i.toString)
22 |
23 | val start = System.currentTimeMillis()
24 | input.to(Iterable).foreachPar(5)(transformation)
25 | val end = System.currentTimeMillis()
26 |
27 | trail.get should contain theSameElementsAs input.map(_.toString)
28 | (end - start) should be < (InputElements * TransformationMillis.toMillis)
29 | }
30 |
31 | it should "run not more computations than limit" in {
32 | val Parallelism = 5
33 |
34 | val input = 1 to 158
35 |
36 | val maxCounter = MaxCounter()
37 |
38 | def transformation(i: Int) =
39 | maxCounter.increment().discard
40 | sleep(10.millis)
41 | maxCounter.decrement()
42 |
43 | input.to(Iterable).foreachPar(Parallelism)(transformation)
44 |
45 | maxCounter.max should be <= Parallelism
46 | }
47 |
48 |   it should "interrupt other computations if one fails" in {
49 | val InputElements = 18
50 | val TransformationMillis = 100.millis
51 | val trail = Trail()
52 |
53 | val input = 0 to InputElements
54 |
55 | def transformation(i: Int) =
56 | if i == 4 then
57 | trail.add("exception")
58 | throw new Exception("boom")
59 | else
60 | sleep(TransformationMillis)
61 | trail.add("transformation")
62 | i + 1
63 |
64 | try input.to(Iterable).foreachPar(5)(transformation)
65 | catch case e: Exception if e.getMessage == "boom" => trail.add("catch")
66 |
67 | sleep(300.millis)
68 | trail.add("all done")
69 |
70 | trail.get shouldBe Vector("exception", "catch", "all done")
71 | }
72 | end ForeachParTest
73 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/UtilTest.scala:
--------------------------------------------------------------------------------
1 | package ox
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.util.Trail
6 |
7 | class UtilTest extends AnyFlatSpec with Matchers:
8 | "discard" should "do nothing" in {
9 | val t = Trail()
10 | def f(): Int =
11 | t.add("in f")
12 | 42
13 |
14 | f().discard shouldBe ()
15 | t.get shouldBe Vector("in f")
16 | }
17 |
18 | "tapException" should "run the callback when an exception is thrown" in {
19 | val t = Trail()
20 | def f(): Int = throw new RuntimeException("boom!")
21 |
22 | try f().tapException(e => t.add(s"in callback: ${e.getMessage}"))
23 | catch case e: RuntimeException => t.add(s"in catch: ${e.getMessage}")
24 |
25 | t.get shouldBe Vector("in callback: boom!", "in catch: boom!")
26 | }
27 |
28 | it should "not run the callback when no exception is thrown" in {
29 | val t = Trail()
30 | def f(): Int = 42
31 |
32 | try
33 | t.add(f().tapException(e => t.add(s"in callback: ${e.getMessage}")).toString)
34 | t.add("after")
35 | catch case e: RuntimeException => t.add(s"in catch: ${e.getMessage}")
36 |
37 | t.get shouldBe Vector("42", "after")
38 | }
39 |
40 | it should "suppress any additional exceptions" in {
41 | val t = Trail()
42 |
43 | def f(): Int = throw new RuntimeException("boom!")
44 |
45 | try f().tapException(_ => throw new RuntimeException("boom boom!"))
46 | catch case e: RuntimeException => t.add(s"in catch: ${e.getMessage} ${e.getSuppressed.length}")
47 |
48 | t.get shouldBe Vector("in catch: boom! 1")
49 | }
50 |
51 | "pipe" should "work" in {
52 | (1 + 2).pipe(_ * 2) shouldBe 6
53 | }
54 |
55 | "tap" should "work" in {
56 | val t = Trail()
57 | {
58 | t.add("Adding")
59 | 1 + 2
60 | }.tap(v => t.add(s"Got: $v")) shouldBe 3
61 | t.get shouldBe Vector("Adding", "Got: 3")
62 | }
63 | end UtilTest
64 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/ActorTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | import scala.concurrent.duration.*
8 | import java.util.concurrent.atomic.AtomicBoolean
9 |
10 | class ActorTest extends AnyFlatSpec with Matchers:
11 |
12 | trait Test1:
13 | def f(x: Int): Long
14 |
15 | it should "invoke methods on the actor" in supervised {
16 | var state = 0L
17 | val logic = new Test1:
18 | override def f(x: Int): Long =
19 | state += x
20 | state
21 |
22 | val ref = Actor.create(logic)
23 |
24 | ref.ask(_.f(10)) shouldBe 10
25 | ref.ask(_.f(20)) shouldBe 30
26 | }
27 |
28 | it should "protect the internal state of the actor" in supervised {
29 | var state = 0L
30 | val logic = new Test1:
31 | override def f(x: Int): Long =
32 | state += x
33 | state
34 |
35 | val ref = Actor.create(logic)
36 |
37 | val outer = 1000
38 | val inner = 1000
39 |
40 | val forks = for (i <- 1 to outer) yield fork {
41 | for j <- 1 to inner do ref.ask(_.f(1))
42 | }
43 | forks.foreach(_.join())
44 |
45 | ref.ask(_.f(0)) shouldBe outer.toLong * inner
46 | }
47 |
48 | it should "run the close callback before re-throwing the exception" in {
49 | val isClosed = new AtomicBoolean(false)
50 | val thrown = the[RuntimeException] thrownBy {
51 | supervised {
52 | var state = 0L
53 | val logic = new Test1:
54 | override def f(x: Int): Long =
55 | state += x
56 | if state > 2 then throw new RuntimeException("too much")
57 | state
58 |
59 | val ref = Actor.create(logic, Some(_ => isClosed.set(true)))
60 |
61 | ref.ask(_.f(5))
62 | }
63 | }
64 |
65 | thrown.getMessage shouldBe "too much"
66 | isClosed.get() shouldBe true
67 | }
68 |
69 | it should "end the scope when an exception is thrown when handling .tell" in {
70 | val thrown = the[RuntimeException] thrownBy {
71 | supervised {
72 | val logic = new Test1:
73 | override def f(x: Int): Long = throw new RuntimeException("boom")
74 |
75 | val ref = Actor.create(logic)
76 | ref.tell(_.f(5).discard)
77 | sleep(1.second)
78 | }
79 | }
80 |
81 | thrown.getMessage shouldBe "boom"
82 | }
83 | end ActorTest
84 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/SourceOpsEmptyTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class SourceOpsEmptyTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "Source.empty"
10 |
11 | it should "be done" in supervised {
12 | Source.empty.isClosedForReceive shouldBe true
13 | }
14 |
15 | it should "be empty" in supervised {
16 | Source.empty.toList shouldBe empty
17 | }
18 | end SourceOpsEmptyTest
19 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/SourceOpsFactoryMethodsTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class SourceOpsFactoryMethodsTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "Source factory methods"
10 |
11 | it should "create a source from a fork" in {
12 | supervised {
13 | val f = fork(1)
14 | val c = Source.fromFork(f)
15 | c.toList shouldBe List(1)
16 | }
17 | }
18 | end SourceOpsFactoryMethodsTest
19 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/SourceOpsFailedTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class SourceOpsFailedTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "Source.failed"
10 |
11 | it should "fail on receive" in supervised {
12 | // when
13 | val s = Source.failed(RuntimeException("boom"))
14 |
15 | // then
16 | s.receiveOrClosed() should matchPattern { case ChannelClosed.Error(reason) if reason.getMessage == "boom" => }
17 | }
18 |
19 | it should "be in error" in supervised {
20 | Source.failed(RuntimeException("boom")).isClosedForReceive shouldBe true
21 | }
22 | end SourceOpsFailedTest
23 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/SourceOpsForeachTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class SourceOpsForeachTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "Source.foreach"
10 |
11 | it should "iterate over a source" in {
12 | val c = Channel.buffered[Int](10)
13 | c.sendOrClosed(1).discard
14 | c.sendOrClosed(2).discard
15 | c.sendOrClosed(3).discard
16 | c.doneOrClosed().discard
17 |
18 | var r: List[Int] = Nil
19 | c.foreach(v => r = v :: r)
20 |
21 | r shouldBe List(3, 2, 1)
22 | }
23 |
24 | it should "iterate over a source using for-syntax" in {
25 | val c = Channel.buffered[Int](10)
26 | c.sendOrClosed(1).discard
27 | c.sendOrClosed(2).discard
28 | c.sendOrClosed(3).discard
29 | c.doneOrClosed().discard
30 |
31 | var r: List[Int] = Nil
32 | for v <- c
33 | do r = v :: r
34 |
35 | r shouldBe List(3, 2, 1)
36 | }
37 |
38 | it should "convert source to a list" in {
39 | val c = Channel.buffered[Int](10)
40 | c.sendOrClosed(1).discard
41 | c.sendOrClosed(2).discard
42 | c.sendOrClosed(3).discard
43 | c.doneOrClosed().discard
44 |
45 | c.toList shouldBe List(1, 2, 3)
46 | }
47 | end SourceOpsForeachTest
48 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/SourceOpsFutureSourceTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | import scala.concurrent.Future
8 |
9 | class SourceOpsFutureSourceTest extends AnyFlatSpec with Matchers:
10 | import scala.concurrent.ExecutionContext.Implicits.global
11 |
12 | behavior of "SourceOps.futureSource"
13 |
14 | it should "return the original future failure when future fails" in supervised {
15 | val failure = new RuntimeException("future failed")
16 | Source.fromFutureSource(Future.failed(failure)).receiveOrClosed() shouldBe ChannelClosed.Error(failure)
17 | }
18 |
19 | it should "return the original future failure when future fails with ExecutionException" in supervised {
20 | // according to https://docs.scala-lang.org/overviews/core/futures.html#exceptions
21 | // the InterruptedException is one of the exceptions wrapped in ExecutionException
22 | val failure = new InterruptedException("future interrupted")
23 | Source.fromFutureSource(Future.failed(failure)).receiveOrClosed() shouldBe ChannelClosed.Error(failure)
24 | }
25 |
26 | it should "return future's source values" in supervised {
27 | Source.fromFutureSource(Future.successful(Source.fromValues(1, 2))).toList shouldBe List(1, 2)
28 | }
29 | end SourceOpsFutureSourceTest
30 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/SourceOpsFutureTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | import scala.concurrent.Future
8 |
9 | class SourceOpsFutureTest extends AnyFlatSpec with Matchers:
10 | import scala.concurrent.ExecutionContext.Implicits.global
11 |
12 | behavior of "Source.future"
13 |
14 | it should "return the original future failure when future fails" in supervised {
15 | val failure = new RuntimeException("future failed")
16 | Source.fromFuture(Future.failed(failure)).receiveOrClosed() shouldBe ChannelClosed.Error(failure)
17 | }
18 |
19 | it should "return the original future failure when future fails with ExecutionException" in supervised {
20 | // according to https://docs.scala-lang.org/overviews/core/futures.html#exceptions
21 | // the InterruptedException is one of the exceptions wrapped in ExecutionException
22 | val failure = new InterruptedException("future interrupted")
23 | Source.fromFuture(Future.failed(failure)).receiveOrClosed() shouldBe ChannelClosed.Error(failure)
24 | }
25 |
26 | it should "return future value" in supervised {
27 | Source.fromFuture(Future.successful(1)).toList shouldBe List(1)
28 | }
29 | end SourceOpsFutureTest
30 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/SourceOpsTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.*
7 |
8 | import java.util.concurrent.atomic.AtomicInteger
9 |
10 | class SourceOpsTest extends AnyFlatSpec with Matchers with Eventually:
11 |
12 | it should "pipe one source to another" in {
13 | supervised {
14 | val c1 = Source.fromValues(1, 2, 3)
15 | val c2 = Channel.rendezvous[Int]
16 |
17 | forkDiscard {
18 | c1.pipeTo(c2, propagateDone = false)
19 | c2.done()
20 | }
21 |
22 | c2.toList shouldBe List(1, 2, 3)
23 | }
24 | }
25 |
26 | it should "pipe one source to another (with done propagation)" in {
27 | supervised {
28 | val c1 = Source.fromValues(1, 2, 3)
29 | val c2 = Channel.rendezvous[Int]
30 |
31 | forkDiscard {
32 | c1.pipeTo(c2, propagateDone = true)
33 | }
34 |
35 | c2.toList shouldBe List(1, 2, 3)
36 | }
37 | }
38 |
39 | it should "tap over a source" in {
40 | supervised {
41 | val sum = new AtomicInteger()
42 | Source.fromValues(1, 2, 3).tap(v => sum.addAndGet(v).discard).toList shouldBe List(1, 2, 3)
43 | sum.get() shouldBe 6
44 | }
45 | }
46 | end SourceOpsTest
47 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/channels/SourceOpsTransformTest.scala:
--------------------------------------------------------------------------------
1 | package ox.channels
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class SourceOpsTransformTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "Source.transform"
10 |
11 | it should "transform a source using a simple map" in {
12 | val c = Channel.buffered[Int](10)
13 | c.send(1)
14 | c.send(2)
15 | c.send(3)
16 | c.done()
17 |
18 | supervised {
19 | c.transform(_.map(_ * 2)).toList shouldBe List(2, 4, 6)
20 | }
21 | }
22 |
23 | it should "transform a source using a complex chain of operations" in {
24 | val c = Channel.buffered[Int](10)
25 | c.send(1)
26 | c.send(2)
27 | c.send(3)
28 | c.send(4)
29 | c.done()
30 |
31 | supervised {
32 | c.transform(_.drop(2).flatMap(i => List(i, i + 1, i + 2)).filter(_ % 2 == 0)).toList shouldBe List(4, 4, 6)
33 | }
34 | }
35 |
36 | it should "transform an infinite source" in {
37 | val c = Channel.rendezvous[Int]
38 | supervised {
39 | fork {
40 | var i = 0
41 | while true do
42 | c.send(i)
43 | i += 1
44 | }.discard
45 |
46 | val s = c.transform(_.filter(_ % 2 == 0).flatMap(i => List(i, i + 1)))
47 | s.receive() shouldBe 0
48 | s.receive() shouldBe 1
49 | s.receive() shouldBe 2
50 | }
51 | }
52 |
53 | it should "transform an infinite source (stress test)" in {
54 | for _ <- 1 to 1000 do // this nicely demonstrated two race conditions
55 | val c = Channel.rendezvous[Int]
56 | supervised {
57 | fork {
58 | var i = 0
59 | while true do
60 | c.send(i)
61 | i += 1
62 | }.discard
63 |
64 | val s = c.transform(x => x)
65 | s.receive() shouldBe 0
66 | }
67 | }
68 | end SourceOpsTransformTest
69 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowCompanionOpsTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | class FlowCompanionOpsTest {
4 | // fromValues, fromIterable etc. can be run multiple times
5 | }
6 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsAlsoToTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | import scala.util.{Failure, Try}
8 | import ox.channels.Channel
9 | import ox.channels.ChannelClosedException
10 |
11 | class FlowOpsAlsoToTest extends AnyFlatSpec with Matchers:
12 |
13 | behavior of "alsoTo"
14 |
15 | it should "send to both sinks" in:
16 | val c = Channel.withCapacity[Int](10)
17 | Flow.fromValues(1, 2, 3).alsoTo(c).runToList() shouldBe List(1, 2, 3)
18 | c.toList shouldBe List(1, 2, 3)
19 |
20 | it should "send to both sinks and not hang when other sink is rendezvous channel" in supervised:
21 | val c = Channel.rendezvous[Int]
22 | val f = fork(c.toList)
23 | Flow.fromValues(1, 2, 3, 4, 5).alsoTo(c).runToList() shouldBe List(1, 2, 3, 4, 5)
24 | f.join() shouldBe List(1, 2, 3, 4, 5)
25 |
26 | it should "close main flow when other closes" in supervised:
27 | val c = Channel.rendezvous[Int]
28 | forkDiscard:
29 | c.receiveOrClosed().discard
30 | c.receiveOrClosed().discard
31 | c.receiveOrClosed().discard
32 | c.doneOrClosed().discard
33 | // a send() from the main thread might be waiting - we need to consume that, and only then the main thread
34 | // will discover that the channel is closed
35 | c.receiveOrClosed().discard
36 |
37 | a[ChannelClosedException.Done] shouldBe thrownBy(Flow.fromIterable(1 to 100).alsoTo(c).runToList())
38 |
39 | it should "close main flow with error when other errors" in supervised:
40 | val c = Channel.withCapacity[Int](1)
41 | val f = fork:
42 | c.receiveOrClosed().discard
43 | c.receiveOrClosed().discard
44 | c.receiveOrClosed().discard
45 | c.errorOrClosed(new IllegalStateException)
46 |
47 | Try(Flow.fromIterable(1 to 100).alsoTo(c).runToList()) shouldBe a[Failure[IllegalStateException]]
48 | f.join()
49 |
50 | it should "close other channel with error when main errors" in supervised:
51 | val other = Channel.rendezvous[Int]
52 | val forkOther = fork(Try(other.toList))
53 | a[RuntimeException] shouldBe thrownBy(
54 | Flow.fromIterable(1 to 100).concat(Flow.failed(new IllegalStateException)).alsoTo(other).runToList()
55 | )
56 |
57 | forkOther.join() shouldBe a[Failure[IllegalStateException]]
58 | end FlowOpsAlsoToTest
59 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsBufferTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.channels.ChannelClosedException
6 |
7 | class FlowOpsBufferTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "buffer"
10 |
11 | it should "work with a single async boundary" in:
12 | Flow.fromValues(1, 2, 3).buffer().runToList() shouldBe List(1, 2, 3)
13 |
14 | it should "work with multiple async boundaries" in:
15 | Flow.fromValues(1, 2, 3).buffer().map(_ + 1).buffer().map(_ * 10).buffer().runToList() shouldBe List(20, 30, 40)
16 |
17 | it should "propagate errors" in:
18 | intercept[ChannelClosedException.Error] {
19 | Flow.fromValues(1, 2, 3).map(_ => throw new IllegalStateException).buffer().runToList()
20 | }.getCause() shouldBe a[IllegalStateException]
21 | end FlowOpsBufferTest
22 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsCollectTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsCollectTest extends AnyFlatSpec with Matchers:
8 | behavior of "collect"
9 |
10 | it should "collect over a source" in:
11 | val c = Flow.fromValues(1 to 10: _*)
12 |
13 | val s = c.collect:
14 | case i if i % 2 == 0 => i * 10
15 |
16 | s.runToList() shouldBe (2 to 10 by 2).map(_ * 10)
17 | end FlowOpsCollectTest
18 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsConcatPrependTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsConcatPrependTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "concat"
10 |
11 | it should "concat other source" in:
12 | Flow.fromValues(1, 2, 3).concat(Flow.fromValues(4, 5, 6)).runToList() shouldBe List(1, 2, 3, 4, 5, 6)
13 |
14 | behavior of "prepend"
15 |
16 | it should "prepend other source" in:
17 | Flow.fromValues(1, 2, 3).prepend(Flow.fromValues(4, 5, 6)).runToList() shouldBe List(4, 5, 6, 1, 2, 3)
18 | end FlowOpsConcatPrependTest
19 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsConcatTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import java.util.concurrent.atomic.AtomicBoolean
7 | import ox.discard
8 |
9 | class FlowOpsConcatTest extends AnyFlatSpec with Matchers with Eventually:
10 |
11 | it should "concatenate flows" in:
12 | val s1 = Flow.fromValues("a", "b", "c")
13 | val s2 = Flow.fromValues("d", "e", "f")
14 | val s3 = Flow.fromValues("g", "h", "i")
15 |
16 | val s = Flow.concat(List(s1, s2, s3))
17 |
18 | s.runToList() shouldBe List("a", "b", "c", "d", "e", "f", "g", "h", "i")
19 |
20 | it should "not evaluate subsequent flows if there's a failure" in:
21 | val evaluated = new AtomicBoolean(false)
22 | val f = Flow
23 | .failed(new IllegalStateException)
24 | .concat(Flow.usingEmit(emit =>
25 | evaluated.set(true)
26 | emit(1)
27 | ))
28 |
29 | intercept[IllegalStateException](f.runToList()).discard
30 | evaluated.get() shouldBe false
31 | end FlowOpsConcatTest
32 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsDebounceByTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsDebounceByTest extends AnyFlatSpec with Matchers:
8 | behavior of "debounceBy"
9 |
10 | it should "not debounce if applied on an empty flow" in:
11 | val c = Flow.empty[Int]
12 | val s = c.debounceBy(_ * 2)
13 | s.runToList() shouldBe List.empty
14 |
15 | it should "not debounce if applied on a flow containing only distinct f(value)" in:
16 | val c = Flow.fromValues(1 to 10: _*)
17 | val s = c.debounceBy(_ * 2)
18 | s.runToList() shouldBe (1 to 10)
19 |
20 | it should "debounce if applied on a flow containing repeating f(value)" in:
21 | val c = Flow.fromValues(1, 1, 2, 3, 4, 4, 5)
22 | val s = c.debounceBy(_ * 2)
23 | s.runToList() shouldBe (1 to 5)
24 |
25 | it should "debounce subsequent numbers with the same parity" in:
26 | val c = Flow.fromValues(1, 1, 1, 2, 4, 3, 7, 4, 5)
27 | val s = c.debounceBy(_ % 2 == 0)
28 | s.runToList() shouldBe List(1, 2, 3, 4, 5)
29 |
30 | end FlowOpsDebounceByTest
31 |
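As an illustrative model only (not the library's implementation), the debounceBy semantics pinned down above amount to dropping consecutive elements whose f(value) equals that of the previous element:

    def debounceByModel[T, U](xs: List[T])(f: T => U): List[T] =
      xs.foldLeft(List.empty[T]) { (acc, x) =>
        acc match
          case prev :: _ if f(prev) == f(x) => acc // same key as the previous element: drop
          case _                            => x :: acc
      }.reverse

    debounceByModel(List(1, 1, 2, 3, 4, 4, 5))(_ * 2)            // List(1, 2, 3, 4, 5)
    debounceByModel(List(1, 1, 1, 2, 4, 3, 7, 4, 5))(_ % 2 == 0) // List(1, 2, 3, 4, 5)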
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsDebounceTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsDebounceTest extends AnyFlatSpec with Matchers:
8 | behavior of "debounce"
9 |
10 | it should "not debounce if applied on an empty flow" in:
11 | val c = Flow.empty[Int]
12 | val s = c.debounce
13 | s.runToList() shouldBe List.empty
14 |
15 | it should "not debounce if applied on a flow containing only distinct values" in:
16 | val c = Flow.fromValues(1 to 10: _*)
17 | val s = c.debounce
18 | s.runToList() shouldBe (1 to 10)
19 |
20 | it should "debounce if applied on a flow containing only repeating values" in:
21 | val c = Flow.fromValues(1, 1, 1, 1, 1)
22 | val s = c.debounce
23 | s.runToList() shouldBe List(1)
24 |
25 | it should "debounce if applied on a flow containing repeating elements" in:
26 | val c = Flow.fromValues(1, 1, 2, 3, 4, 4, 5)
27 | val s = c.debounce
28 | s.runToList() shouldBe (1 to 5)
29 |
30 | end FlowOpsDebounceTest
31 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsDrainTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 | import java.util.concurrent.atomic.AtomicInteger
7 |
8 | class FlowOpsDrainTest extends AnyFlatSpec with Matchers:
9 | behavior of "drain"
10 |
11 | it should "drain all elements" in:
12 | val f = Flow.fromValues(1, 2, 3)
13 | f.drain().runToList() shouldBe List.empty
14 |
15 | it should "run any side-effects that are part of the flow" in:
16 | val c = new AtomicInteger(0)
17 | val f = Flow.fromValues(1, 2, 3).tap(_ => c.incrementAndGet().discard)
18 | f.drain().runDrain()
19 | c.get() shouldBe 3
20 |
21 | it should "merge with another flow" in:
22 | val c = new AtomicInteger(0)
23 | val f1 = Flow.fromValues(1, 2, 3).tap(_ => c.incrementAndGet().discard)
24 | val f2 = Flow.fromValues(4, 5, 6).tap(_ => c.incrementAndGet().discard)
25 | f1.drain().merge(f2).runToList() shouldBe List(4, 5, 6)
26 | c.get() shouldBe 6
27 | end FlowOpsDrainTest
28 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsDropTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsDropTest extends AnyFlatSpec with Matchers:
8 | behavior of "drop"
9 |
10 | it should "not drop from the empty flow" in:
11 | val s = Flow.empty[Int]
12 | s.drop(1).runToList() shouldBe List.empty
13 |
14 | it should "drop elements from the source" in:
15 | val s = Flow.fromValues(1, 2, 3)
16 | s.drop(2).runToList() shouldBe List(3)
17 |
18 | it should "return an empty source when more elements than the source's length are dropped" in:
19 | val s = Flow.fromValues(1, 2)
20 | s.drop(3).runToList() shouldBe List.empty
21 |
22 | it should "not drop when 'n == 0'" in:
23 | val s = Flow.fromValues(1, 2, 3)
24 | s.drop(0).runToList() shouldBe List(1, 2, 3)
25 | end FlowOpsDropTest
26 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsEmptyTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsEmptyTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "empty"
10 |
11 | it should "be empty" in:
12 | Flow.empty.runToList() shouldBe empty
13 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsFactoryMethodsTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsFactoryMethodsTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "factory methods"
10 |
11 | it should "create a flow from a fork" in supervised:
12 | val f = fork(1)
13 | val c = Flow.fromFork(f)
14 | c.runToList() shouldBe List(1)
15 |
16 | it should "create an iterating flow" in:
17 | val c = Flow.iterate(1)(_ + 1)
18 | c.take(3).runToList() shouldBe List(1, 2, 3)
19 |
20 | it should "unfold a function" in:
21 | val c = Flow.unfold(0)(i => if i < 3 then Some((i, i + 1)) else None)
22 | c.runToList() shouldBe List(0, 1, 2)
23 |
24 | it should "produce a range" in:
25 | Flow.range(1, 5, 1).runToList() shouldBe List(1, 2, 3, 4, 5)
26 | Flow.range(1, 5, 2).runToList() shouldBe List(1, 3, 5)
27 | Flow.range(1, 11, 3).runToList() shouldBe List(1, 4, 7, 10)
28 | end FlowOpsFactoryMethodsTest
29 |
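As a mental model (an analogy, not a statement about the implementation), Flow.iterate and Flow.unfold behave here like their LazyList counterparts from the standard library:

    LazyList.iterate(1)(_ + 1).take(3).toList                                // List(1, 2, 3)
    LazyList.unfold(0)(i => if i < 3 then Some((i, i + 1)) else None).toList // List(0, 1, 2)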
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsFailedTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 | import ox.channels.ChannelClosed
7 |
8 | class FlowOpsFailedTest extends AnyFlatSpec with Matchers:
9 |
10 | behavior of "failed"
11 |
12 | it should "fail on receive" in supervised:
13 | // when
14 | val s = Flow.failed(RuntimeException("boom"))
15 |
16 | // then
17 | s.runToChannel().receiveOrClosed() should matchPattern:
18 | case ChannelClosed.Error(reason) if reason.getMessage == "boom" =>
19 |
20 | end FlowOpsFailedTest
21 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsFilterTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsFilterTest extends AnyFlatSpec with Matchers:
8 | behavior of "filter"
9 |
10 | it should "not filter anything from the empty flow" in:
11 | val c = Flow.empty[Int]
12 | val s = c.filter(_ % 2 == 0)
13 | s.runToList() shouldBe List.empty
14 |
15 | it should "filter out everything if no element meets 'f'" in:
16 | val c = Flow.fromValues(1 to 10: _*)
17 | val s = c.filter(_ => false)
18 | s.runToList() shouldBe List.empty
19 |
20 | it should "not filter anything if all the elements meet 'f'" in:
21 | val c = Flow.fromValues(1 to 10: _*)
22 | val s = c.filter(_ => true)
23 | s.runToList() shouldBe (1 to 10)
24 |
25 | it should "filter out elements that don't meet 'f'" in:
26 | val c = Flow.fromValues(1 to 10: _*)
27 | val s = c.filter(_ % 2 == 0)
28 | s.runToList() shouldBe (2 to 10 by 2)
29 |
30 | end FlowOpsFilterTest
31 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsFlatMapTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsFlatMapTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "flatMap"
10 |
11 | it should "flatten simple flows" in:
12 | Flow.fromValues(10, 20, 30).flatMap(v => Flow.fromValues(v + 1, v + 2)).runToList() shouldBe List(11, 12, 21, 22, 31, 32)
13 |
14 | it should "propagate errors" in:
15 | the[RuntimeException] thrownBy {
16 | Flow
17 | .fromValues(1, 2, 3)
18 | .flatMap(v => if v == 2 then Flow.failed(new RuntimeException("boom!")) else Flow.fromValues(v + 1, v + 2))
19 | .runToList()
20 | } should have message "boom!"
21 | end FlowOpsFlatMapTest
22 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsFlattenTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.OptionValues
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.*
7 |
8 | class FlowOpsFlattenTest extends AnyFlatSpec with Matchers with OptionValues:
9 |
10 | behavior of "flatten"
11 |
12 | it should "flatten nested flows" in:
13 | Flow.fromValues(Flow.fromValues(10, 20), Flow.fromValues(30, 40)).flatten.runToList() shouldBe List(10, 20, 30, 40)
14 | end FlowOpsFlattenTest
15 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsFoldTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsFoldTest extends AnyFlatSpec with Matchers:
8 | behavior of "fold"
9 |
10 | it should "throw an exception for a failed flow" in:
11 | the[IllegalStateException] thrownBy:
12 | Flow
13 | .failed[Int](new IllegalStateException())
14 | .runFold(0)((acc, n) => acc + n)
15 |
16 | it should "throw exception thrown in `f` when `f` throws" in:
17 | the[RuntimeException] thrownBy {
18 | Flow
19 | .fromValues(1)
20 | .runFold(0)((_, _) => throw new RuntimeException("Function `f` is broken"))
21 | } should have message "Function `f` is broken"
22 |
23 | it should "return `zero` value from fold on the empty source" in:
24 | Flow.empty[Int].runFold(0)((acc, n) => acc + n) shouldBe 0
25 |
26 | it should "return the folded value from fold on a non-empty source" in:
27 | Flow.fromValues(1, 2).runFold(0)((acc, n) => acc + n) shouldBe 3
28 | end FlowOpsFoldTest
29 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsForeachTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsForeachTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "foreach"
10 |
11 | it should "iterate over a flow" in:
12 | val c = Flow.fromValues(1, 2, 3)
13 |
14 | var r: List[Int] = Nil
15 | c.runForeach(v => r = v :: r)
16 |
17 | r shouldBe List(3, 2, 1)
18 |
19 | it should "convert flow to a list" in:
20 | val c = Flow.fromValues(1, 2, 3)
21 |
22 | c.runToList() shouldBe List(1, 2, 3)
23 | end FlowOpsForeachTest
24 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsFutureSourceTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | import scala.concurrent.Future
8 |
9 | class FlowOpsFutureSourceTest extends AnyFlatSpec with Matchers:
10 | behavior of "futureSource"
11 |
12 | it should "return the original future failure when future fails" in:
13 | val failure = new RuntimeException("future failed")
14 | the[RuntimeException] thrownBy {
15 | Flow.fromFutureSource(Future.failed(failure)).runToList()
16 | } shouldBe failure
17 |
18 | it should "return future's source values" in supervised:
19 | Flow.fromFutureSource(Future.successful(Flow.fromValues(1, 2).runToChannel())).runToList() shouldBe List(1, 2)
20 | end FlowOpsFutureSourceTest
21 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsFutureTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | import scala.concurrent.Future
8 |
9 | class FlowOpsFutureTest extends AnyFlatSpec with Matchers:
10 | behavior of "future"
11 |
12 | it should "return the original future failure when future fails" in:
13 | val failure = new RuntimeException("future failed")
14 | the[RuntimeException] thrownBy {
15 | Flow.fromFuture(Future.failed(failure)).runToList()
16 | } shouldBe failure
17 |
18 | it should "return future value" in:
19 | Flow.fromFuture(Future.successful(1)).runToList() shouldBe List(1)
20 | end FlowOpsFutureTest
21 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsInterleaveAllTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsInterleaveAllTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "interleaveAll"
10 |
11 | it should "interleave no sources" in:
12 | val s = Flow.interleaveAll(List.empty)
13 | s.runToList() shouldBe empty
14 |
15 | it should "interleave a single flow" in:
16 | val c = Flow.fromValues(1, 2, 3)
17 | val s = Flow.interleaveAll(List(c))
18 | s.runToList() shouldBe List(1, 2, 3)
19 |
20 | it should "interleave multiple flows" in:
21 | val c1 = Flow.fromValues(1, 2, 3, 4, 5, 6, 7, 8)
22 | val c2 = Flow.fromValues(10, 20, 30)
23 | val c3 = Flow.fromValues(100, 200, 300, 400, 500)
24 |
25 | val s = Flow.interleaveAll(List(c1, c2, c3))
26 |
27 | s.runToList() shouldBe List(1, 10, 100, 2, 20, 200, 3, 30, 300, 4, 400, 5, 500, 6, 7, 8)
28 |
29 | it should "interleave multiple flows using custom segment size" in:
30 | val c1 = Flow.fromValues(1, 2, 3, 4, 5, 6, 7, 8)
31 | val c2 = Flow.fromValues(10, 20, 30)
32 | val c3 = Flow.fromValues(100, 200, 300, 400, 500)
33 |
34 | val s = Flow.interleaveAll(List(c1, c2, c3), segmentSize = 2)
35 |
36 | s.runToList() shouldBe List(1, 2, 10, 20, 100, 200, 3, 4, 30, 300, 400, 5, 6, 500, 7, 8)
37 |
38 | it should "interleave multiple flows using custom segment size and complete eagerly" in:
39 | val c1 = Flow.fromValues(1, 2, 3, 4, 5, 6, 7, 8)
40 | val c2 = Flow.fromValues(10, 20, 30)
41 | val c3 = Flow.fromValues(100, 200, 300, 400, 500)
42 |
43 | val s = Flow.interleaveAll(List(c1, c2, c3), segmentSize = 2, eagerComplete = true)
44 |
45 | s.runToList() shouldBe List(1, 2, 10, 20, 100, 200, 3, 4, 30)
46 | end FlowOpsInterleaveAllTest
47 |
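The expected outputs above can be reproduced by a simple list-based model: round-robin over the non-exhausted sources, emitting up to segmentSize elements per turn, and stopping everything as soon as any source completes when eagerComplete is set. This sketches the observable semantics only, not the channel-based implementation:

    def interleaveAllModel[T](sources: List[List[T]], segmentSize: Int = 1, eagerComplete: Boolean = false): List[T] =
      val out = List.newBuilder[T]
      var active = sources.filter(_.nonEmpty)
      var done = false
      while !done && active.nonEmpty do
        var remaining = List.empty[List[T]]
        val it = active.iterator
        while !done && it.hasNext do
          val (segment, rest) = it.next().splitAt(segmentSize)
          out ++= segment
          if rest.isEmpty then done = eagerComplete // a source completed: stop everything if eager
          else remaining = rest :: remaining
        active = remaining.reverse
      out.result()
    end interleaveAllModel

    interleaveAllModel(List(List(1, 2, 3), List(10, 20))) // List(1, 10, 2, 20, 3)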
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsIntersperseTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsIntersperseTest extends AnyFlatSpec with Matchers:
8 | behavior of "Flow.intersperse"
9 |
10 | it should "intersperse with inject only over an empty source" in supervised {
11 | val f = Flow.empty[String]
12 | f.intersperse(", ").runToList() shouldBe List.empty
13 | }
14 |
15 | it should "intersperse with inject only over a source with one element" in supervised {
16 | val f = Flow.fromValues("foo")
17 | f.intersperse(", ").runToList() shouldBe List("foo")
18 | }
19 |
20 | it should "intersperse with inject only over a source with multiple elements" in supervised {
21 | val f = Flow.fromValues("foo", "bar")
22 | f.intersperse(", ").runToList() shouldBe List("foo", ", ", "bar")
23 | }
24 |
25 | it should "intersperse with start, inject and end over an empty source" in supervised {
26 | val f = Flow.empty[String]
27 | f.intersperse("[", ", ", "]").runToList() shouldBe List("[", "]")
28 | }
29 |
30 | it should "intersperse with start, inject and end over a source with one element" in supervised {
31 | val f = Flow.fromValues("foo")
32 | f.intersperse("[", ", ", "]").runToList() shouldBe List("[", "foo", "]")
33 | }
34 |
35 | it should "intersperse with start, inject and end over a source with multiple elements" in supervised {
36 | val f = Flow.fromValues("foo", "bar")
37 | f.intersperse("[", ", ", "]").runToList() shouldBe List("[", "foo", ", ", "bar", "]")
38 | }
39 | end FlowOpsIntersperseTest
40 |
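The bracketed expectations above all follow one shape; a sketch on plain lists (illustrative only):

    def intersperseModel[T](xs: List[T], start: T, inject: T, end: T): List[T] =
      (start +: xs.flatMap(x => List(inject, x)).drop(1)) :+ end
    // inject-only variant: xs.flatMap(x => List(inject, x)).drop(1)

    intersperseModel(List("foo", "bar"), "[", ", ", "]") // List("[", "foo", ", ", "bar", "]")
    intersperseModel(List.empty[String], "[", ", ", "]") // List("[", "]")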
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsLastOptionTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.OptionValues
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.*
7 |
8 | class FlowOpsLastOptionTest extends AnyFlatSpec with Matchers with OptionValues:
9 |
10 | behavior of "lastOption"
11 |
12 | it should "return None for the empty flow" in:
13 | Flow.empty[Int].runLastOption() shouldBe None
14 |
15 | it should "return Some for a non-empty flow" in:
16 | val s = Flow.fromValues(1, 2, 3)
17 | s.runLastOption() shouldBe Some(3)
18 |
19 | it should "throw the exception (with its message) with which the flow failed" in:
20 | the[RuntimeException] thrownBy {
21 | Flow
22 | .failed(new RuntimeException("source is broken"))
23 | .runLastOption()
24 | } should have message "source is broken"
25 |
26 | end FlowOpsLastOptionTest
27 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsLastTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsLastTest extends AnyFlatSpec with Matchers:
8 | behavior of "last"
9 |
10 | it should "throw NoSuchElementException for the empty source" in:
11 | the[NoSuchElementException] thrownBy {
12 | Flow.empty[Int].runLast()
13 | } should have message "cannot obtain last element from an empty source"
14 |
15 | it should "throw the exception (with its message) with which the flow failed" in:
16 | the[RuntimeException] thrownBy {
17 | Flow
18 | .failed(new RuntimeException("source is broken"))
19 | .runLast()
20 | } should have message "source is broken"
21 |
22 | it should "return last element for the non-empty source" in:
23 | Flow.fromValues(1, 2).runLast() shouldBe 2
24 |
25 | end FlowOpsLastTest
26 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsMapConcatTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 | import ox.channels.BufferCapacity
7 | import ox.channels.ChannelClosed
8 |
9 | class FlowOpsMapConcatTest extends AnyFlatSpec with Matchers:
10 |
11 | behavior of "mapConcat"
12 |
13 | it should "unfold iterables" in:
14 | val c = Flow.fromValues(List("a", "b", "c"), List("d", "e"), List("f"))
15 | val s = c.mapConcat(identity)
16 | s.runToList() shouldBe List("a", "b", "c", "d", "e", "f")
17 |
18 | it should "transform elements" in:
19 | val c = Flow.fromValues("ab", "cd")
20 | val s = c.mapConcat { str => str.toList }
21 |
22 | s.runToList() shouldBe List('a', 'b', 'c', 'd')
23 |
24 | it should "handle empty lists" in:
25 | val c = Flow.fromValues(List.empty, List("a"), List.empty, List("b", "c"))
26 | val s = c.mapConcat(identity)
27 |
28 | s.runToList() shouldBe List("a", "b", "c")
29 |
30 | it should "propagate errors in the mapping function" in:
31 | // given
32 | val flow = Flow.fromValues(List("a"), List("b", "c"), List("error here"))
33 |
34 | // when
35 | val flow2 = flow.mapConcat { element =>
36 | if element != List("error here") then element
37 | else throw new RuntimeException("boom")
38 | }
39 |
40 | // then
41 | supervised:
42 | given BufferCapacity = BufferCapacity(0) // so that the error isn't created too early
43 | val s = flow2.runToChannel()
44 | s.receive() shouldBe "a"
45 | s.receive() shouldBe "b"
46 | s.receive() shouldBe "c"
47 | s.receiveOrClosed() should matchPattern:
48 | case ChannelClosed.Error(reason) if reason.getMessage == "boom" =>
49 | end FlowOpsMapConcatTest
50 |
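The given BufferCapacity above sizes the internal channel that runToChannel creates, which bounds how far the producer runs ahead of the consumer - presumably why, with capacity 0, the error is not reached before "a".."c" have been received. The same pattern in isolation (a sketch; the values are illustrative):

    import ox.*
    import ox.channels.BufferCapacity
    import ox.flow.Flow

    supervised:
      given BufferCapacity = BufferCapacity(0) // keep the producer in lock-step with the consumer
      val ch = Flow.fromValues(1, 2, 3).runToChannel()
      ch.receive() // 1 - produced only as it is consumed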
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsMapTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsMapTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "map"
10 |
11 | it should "map over a source" in:
12 | val c = Flow.fromValues(1, 2, 3)
13 | val s = c.map(_ * 10)
14 | s.runToList() shouldBe List(10, 20, 30)
15 |
16 | it should "map over a source using for-syntax" in:
17 | val c = Flow.fromValues(1, 2, 3)
18 | val s =
19 | for v <- c
20 | yield v * 2
21 | s.runToList() shouldBe List(2, 4, 6)
22 |
23 | end FlowOpsMapTest
24 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsMapUsingSinkTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsMapUsingSinkTest extends AnyFlatSpec with Matchers:
8 |
9 | behavior of "mapUsingSink"
10 |
11 | it should "map over a source, using emit" in:
12 | val c = Flow.fromValues(1, 2, 3)
13 | val s = c.mapUsingEmit(v =>
14 | emit =>
15 | emit(v + 1)
16 | emit(v * 10)
17 | )
18 | s.runToList() shouldBe List(2, 10, 3, 20, 4, 30)
19 |
20 | it should "propagate errors" in:
21 | val c = Flow.fromValues(1, 2, 3)
22 | val s = c.mapUsingEmit(_ => _ => throw new IllegalStateException)
23 | intercept[IllegalStateException](s.runToList())
24 | end FlowOpsMapUsingSinkTest
25 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsOnCompleteTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 | import java.util.concurrent.atomic.AtomicBoolean
7 |
8 | class FlowOpsEnsureTest extends AnyFlatSpec with Matchers:
9 | behavior of "ensure.onComplete"
10 |
11 | it should "run in case of success" in:
12 | val didRun = new AtomicBoolean(false)
13 | val f = Flow.fromValues(1, 2, 3).onComplete(didRun.set(true))
14 |
15 | didRun.get() shouldBe false
16 | f.runDrain()
17 | didRun.get() shouldBe true
18 |
19 | it should "run in case of error" in:
20 | val didRun = new AtomicBoolean(false)
21 | val f = Flow.fromValues(1, 2, 3).concat(Flow.failed(new RuntimeException)).onComplete(didRun.set(true))
22 |
23 | didRun.get() shouldBe false
24 | intercept[RuntimeException](f.runDrain()).discard
25 | didRun.get() shouldBe true
26 |
27 | behavior of "ensure.onDone"
28 |
29 | it should "run in case of success" in:
30 | val didRun = new AtomicBoolean(false)
31 | val f = Flow.fromValues(1, 2, 3).onDone(didRun.set(true))
32 |
33 | didRun.get() shouldBe false
34 | f.runDrain()
35 | didRun.get() shouldBe true
36 |
37 | it should "not run in case of error" in:
38 | val didRun = new AtomicBoolean(false)
39 | val f = Flow.fromValues(1, 2, 3).concat(Flow.failed(new RuntimeException)).onDone(didRun.set(true))
40 |
41 | didRun.get() shouldBe false
42 | intercept[RuntimeException](f.runDrain()).discard
43 | didRun.get() shouldBe false
44 |
45 | behavior of "ensure.onError"
46 |
47 | it should "not run in case of success" in:
48 | val didRun = new AtomicBoolean(false)
49 | val f = Flow.fromValues(1, 2, 3).onError(_ => didRun.set(true))
50 |
51 | didRun.get() shouldBe false
52 | f.runDrain()
53 | didRun.get() shouldBe false
54 |
55 | it should "run in case of error" in:
56 | val didRun = new AtomicBoolean(false)
57 | val f = Flow.fromValues(1, 2, 3).concat(Flow.failed(new RuntimeException)).onError(_ => didRun.set(true))
58 |
59 | didRun.get() shouldBe false
60 | intercept[RuntimeException](f.runDrain()).discard
61 | didRun.get() shouldBe true
62 | end FlowOpsEnsureTest
63 |
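Read together, the tests above pin down the semantics of the three callbacks; a compact summary (sketch only):

    Flow.fromValues(1, 2, 3)
      .onComplete(println("runs on success and on failure"))
      .onDone(println("runs only on successful completion"))
      .onError(e => println(s"runs only on failure: $e"))
      .runDrain()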
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsOrElseTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 | import ox.channels.ChannelClosed
7 |
8 | class FlowOpsOrElseTest extends AnyFlatSpec with Matchers:
9 | behavior of "orElse"
10 |
11 | it should "emit elements only from the original source when it is not empty" in:
12 | Flow.fromValues(1).orElse(Flow.fromValues(2, 3)).runToList() shouldBe List(1)
13 |
14 | it should "emit elements only from the alternative source when the original source is created empty" in:
15 | Flow.empty.orElse(Flow.fromValues(2, 3)).runToList() shouldBe List(2, 3)
16 |
17 | it should "emit elements only from the alternative source when the original source is empty" in:
18 | Flow.fromValues[Int]().orElse(Flow.fromValues(2, 3)).runToList() shouldBe List(2, 3)
19 |
20 | it should "return failed source when the original source is failed" in supervised:
21 | val failure = new RuntimeException()
22 | Flow.failed(failure).orElse(Flow.fromValues(2, 3)).runToChannel().receiveOrClosed() shouldBe ChannelClosed.Error(failure)
23 | end FlowOpsOrElseTest
24 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsPipeToTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.*
7 |
8 | import ox.channels.Channel
9 |
10 | class FlowOpsPipeToTest extends AnyFlatSpec with Matchers with Eventually:
11 |
12 | it should "pipe one source to another" in supervised:
13 | val c1 = Flow.fromValues(1, 2, 3)
14 | val c2 = Channel.rendezvous[Int]
15 |
16 | forkDiscard:
17 | c1.runPipeToSink(c2, propagateDone = false)
18 | c2.done()
19 |
20 | c2.toList shouldBe List(1, 2, 3)
21 |
22 | it should "pipe one source to another (with done propagation)" in supervised:
23 | val c1 = Flow.fromValues(1, 2, 3)
24 | val c2 = Channel.rendezvous[Int]
25 |
26 | forkDiscard:
27 | c1.runPipeToSink(c2, propagateDone = true)
28 |
29 | c2.toList shouldBe List(1, 2, 3)
30 | end FlowOpsPipeToTest
31 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsReduceTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsReduceTest extends AnyFlatSpec with Matchers:
8 | behavior of "reduce"
9 |
10 | it should "throw NoSuchElementException for reduce over the empty source" in:
11 | the[NoSuchElementException] thrownBy {
12 | Flow.empty[Int].runReduce(_ + _)
13 | } should have message "cannot reduce an empty flow"
14 |
15 | it should "throw exception thrown in `f` when `f` throws" in:
16 | the[RuntimeException] thrownBy {
17 | Flow
18 | .fromValues(1, 2)
19 | .runReduce((_, _) => throw new RuntimeException("Function `f` is broken"))
20 | } should have message "Function `f` is broken"
21 |
22 | it should "return first element from reduce over the single element source" in:
23 | Flow.fromValues(1).runReduce(_ + _) shouldBe 1
24 |
25 | it should "run reduce over a non-empty source" in:
26 | Flow.fromValues(1, 2).runReduce(_ + _) shouldBe 3
27 | end FlowOpsReduceTest
28 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsRepeatEvalTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 |
6 | import java.util.concurrent.ConcurrentHashMap
7 | import scala.jdk.CollectionConverters.*
8 |
9 | class FlowOpsRepeatEvalTest extends AnyFlatSpec with Matchers:
10 | behavior of "repeatEval"
11 |
12 | it should "evaluate the element before each send" in:
13 | var i = 0
14 | val s = Flow.repeatEval {
15 | i += 1
16 | i
17 | }
18 | s.take(3).runToList() shouldBe List(1, 2, 3)
19 |
20 | it should "evaluate the element before each send, as long as it's defined" in:
21 | var i = 0
22 | val evaluated = ConcurrentHashMap.newKeySet[Int]()
23 | val s = Flow.repeatEvalWhileDefined {
24 | i += 1
25 | evaluated.add(i)
26 | if i < 5 then Some(i) else None
27 | }
28 | s.runToList() shouldBe List(1, 2, 3, 4)
29 | evaluated.asScala shouldBe Set(1, 2, 3, 4, 5)
30 | end FlowOpsRepeatEvalTest
31 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsRunToChannelTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 | import ox.channels.ChannelClosed
7 | import ox.channels.Channel
8 |
9 | class FlowOpsRunToChannelTest extends AnyFlatSpec with Matchers:
10 | behavior of "runToChannel"
11 |
12 | it should "receive the elements in the flow" in supervised:
13 | val ch = Flow.fromValues(1, 2).runToChannel()
14 | ch.receive() shouldBe 1
15 | ch.receive() shouldBe 2
16 | ch.receiveOrClosed() shouldBe ChannelClosed.Done
17 |
18 | it should "return the original source when running a source-backed flow" in supervised:
19 | val ch = Channel.buffered[Int](16)
20 | ch.send(1)
21 | ch.send(2)
22 | ch.send(3)
23 |
24 | val ch2 = Flow.fromSource(ch).runToChannel()
25 | ch.eq(ch2) shouldBe true // checking if the optimization is in place
26 |
27 | ch2.receive() shouldBe 1
28 | end FlowOpsRunToChannelTest
29 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsSampleTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsSampleTest extends AnyFlatSpec with Matchers:
8 | behavior of "sample"
9 |
10 | it should "not sample anything from an empty flow" in:
11 | val c = Flow.empty[Int]
12 | val s = c.sample(5)
13 | s.runToList() shouldBe List.empty
14 |
15 | it should "not sample anything when 'n == 0'" in:
16 | val c = Flow.fromValues(1 to 10: _*)
17 | val s = c.sample(0)
18 | s.runToList() shouldBe List.empty
19 |
20 | it should "sample every element of the flow when 'n == 1'" in:
21 | val c = Flow.fromValues(1 to 10: _*)
22 | val n = 1
23 | val s = c.sample(n)
24 | s.runToList() shouldBe (n to 10 by n)
25 |
26 | it should "sample every nth element of the flow" in:
27 | val c = Flow.fromValues(1 to 10: _*)
28 | val n = 3
29 | val s = c.sample(n)
30 | s.runToList() shouldBe (n to 10 by n)
31 |
32 | end FlowOpsSampleTest
33 |
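As a list-based model of the expectations above (illustrative only), sample(n) keeps every n-th element and drops the rest:

    def sampleModel[T](xs: List[T], n: Int): List[T] =
      if n <= 0 then Nil
      else xs.zipWithIndex.collect { case (v, i) if (i + 1) % n == 0 => v }

    sampleModel((1 to 10).toList, 3) // List(3, 6, 9)
    sampleModel((1 to 10).toList, 1) // List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)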
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsScanTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsScanTest extends AnyFlatSpec with Matchers:
8 | behavior of "scan"
9 |
10 | it should "scan the empty flow" in:
11 | val flow: Flow[Int] = Flow.empty
12 | val scannedFlow = flow.scan(0)((acc, el) => acc + el)
13 | scannedFlow.runToList() shouldBe List(0)
14 |
15 | it should "scan a flow of summed Int" in:
16 | val flow = Flow.fromValues(1 to 10: _*)
17 | val scannedFlow = flow.scan(0)((acc, el) => acc + el)
18 | scannedFlow.runToList() shouldBe List(0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55)
19 |
20 | it should "scan a flow of multiplied Int" in:
21 | val flow = Flow.fromValues(1 to 10: _*)
22 | val scannedFlow = flow.scan(1)((acc, el) => acc * el)
23 | scannedFlow.runToList() shouldBe List(1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800)
24 |
25 | it should "scan a flow of concatenated String" in:
26 | val flow = Flow.fromValues("f", "l", "o", "w")
27 | val scannedFlow = flow.scan("my")((acc, el) => acc + el)
28 | scannedFlow.runToList() shouldBe List("my", "myf", "myfl", "myflo", "myflow")
29 |
30 | end FlowOpsScanTest
31 |
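scan mirrors the standard library's scanLeft - the zero element is emitted first, then each intermediate accumulator - which is exactly what the expected lists encode:

    (1 to 10).scanLeft(0)(_ + _).toList
    // List(0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55)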
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsSlidingTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.*
7 | import ox.channels.ChannelClosed
8 |
9 | class FlowOpsSlidingTest extends AnyFlatSpec with Matchers with Eventually:
10 |
11 | behavior of "sliding"
12 |
13 | it should "create sliding windows for n = 2 and step = 1" in:
14 | Flow.fromValues(1, 2, 3, 4).sliding(2).runToList() shouldBe List(List(1, 2), List(2, 3), List(3, 4))
15 |
16 | it should "create sliding windows for n = 3 and step = 1" in:
17 | Flow.fromValues(1, 2, 3, 4, 5).sliding(3).runToList() shouldBe List(List(1, 2, 3), List(2, 3, 4), List(3, 4, 5))
18 |
19 | it should "create sliding windows for n = 2 and step = 2" in:
20 | Flow.fromValues(1, 2, 3, 4, 5).sliding(2, step = 2).runToList() shouldBe List(List(1, 2), List(3, 4), List(5))
21 |
22 | it should "create sliding windows for n = 3 and step = 2" in:
23 | Flow.fromValues(1, 2, 3, 4, 5, 6).sliding(3, step = 2).runToList() shouldBe List(List(1, 2, 3), List(3, 4, 5), List(5, 6))
24 |
25 | it should "create sliding windows for n = 1 and step = 2" in:
26 | Flow.fromValues(1, 2, 3, 4, 5).sliding(1, step = 2).runToList() shouldBe List(List(1), List(3), List(5))
27 |
28 | it should "create sliding windows for n = 2 and step = 3" in:
29 | Flow.fromValues(1, 2, 3, 4, 5, 6).sliding(2, step = 3).runToList() shouldBe List(List(1, 2), List(4, 5))
30 |
31 | it should "create sliding windows for n = 2 and step = 3 (with 1 element remaining in the end)" in:
32 | Flow.fromValues(1, 2, 3, 4, 5, 6, 7).sliding(2, step = 3).runToList() shouldBe List(List(1, 2), List(4, 5), List(7))
33 |
34 | it should "return failed source when the original source is failed" in supervised:
35 | val failure = new RuntimeException()
36 | Flow.failed[Long](failure).sliding(1, 2).runToChannel().receiveOrClosed() shouldBe ChannelClosed.Error(failure)
37 | end FlowOpsSlidingTest
38 |
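The windowing above matches the standard library's sliding(size, step) on collections, including the partial trailing window - a handy cross-check:

    List(1, 2, 3, 4).sliding(2).toList             // List(List(1, 2), List(2, 3), List(3, 4))
    List(1, 2, 3, 4, 5, 6, 7).sliding(2, 3).toList // List(List(1, 2), List(4, 5), List(7))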
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsTakeLastTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 |
6 | class FlowOpsTakeLastTest extends AnyFlatSpec with Matchers:
7 | behavior of "takeLast"
8 |
9 | it should "throw the original exception for a source that failed without a message" in:
10 | the[IllegalStateException] thrownBy {
11 | Flow
12 | .failed[Int](new IllegalStateException())
13 | .runTakeLast(1)
14 | }
15 |
16 | it should "fail to takeLast when n < 0" in:
17 | the[IllegalArgumentException] thrownBy {
18 | Flow.empty[Int].runTakeLast(-1)
19 | } should have message "requirement failed: n must be >= 0"
20 |
21 | it should "return empty list for the empty source" in:
22 | Flow.empty[Int].runTakeLast(1) shouldBe List.empty
23 |
24 | it should "return empty list when n == 0 and list is not empty" in:
25 | Flow.fromValues(1).runTakeLast(0) shouldBe List.empty
26 |
27 | it should "return list with all elements if the source is smaller than requested number" in:
28 | Flow.fromValues(1, 2).runTakeLast(3) shouldBe List(1, 2)
29 |
30 | it should "return the last n elements from the source" in:
31 | Flow.fromValues(1, 2, 3, 4, 5).runTakeLast(2) shouldBe List(4, 5)
32 | end FlowOpsTakeLastTest
33 |
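runTakeLast(n) behaves like takeRight on lists, as the expectations above show; for reference:

    List(1, 2, 3, 4, 5).takeRight(2) // List(4, 5)
    List(1, 2).takeRight(3)          // List(1, 2)
    List.empty[Int].takeRight(1)     // List()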
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsTakeTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsTakeTest extends AnyFlatSpec with Matchers:
8 | behavior of "take"
9 |
10 | it should "take from a simple flow" in:
11 | val f = Flow.fromValues(1 to 10*)
12 |
13 | f.take(5).runToList() shouldBe (1 to 5)
14 |
15 | it should "take from an async flow" in:
16 | val f = Flow.fromValues(1 to 10*).buffer()
17 |
18 | f.take(5).runToList() shouldBe (1 to 5)
19 |
20 | it should "take all if the flow ends sooner than the desired number of elements" in:
21 | val f = Flow.fromValues(1 to 10*)
22 |
23 | f.take(50).runToList() shouldBe (1 to 10)
24 | end FlowOpsTakeTest
25 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsTakeWhileTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsTakeWhileTest extends AnyFlatSpec with Matchers:
8 | behavior of "takeWhile"
9 |
10 | it should "not take from the empty flow" in:
11 | Flow.empty[Int].takeWhile(_ < 3).runToList() shouldBe List.empty
12 |
13 | it should "take as long as predicate is satisfied" in:
14 | Flow.fromValues(1, 2, 3).takeWhile(_ < 3).runToList() shouldBe List(1, 2)
15 |
16 | it should "take the failed element if includeFirstFailing = true" in:
17 | Flow.fromValues(1, 2, 3, 4).takeWhile(_ < 3, includeFirstFailing = true).runToList() shouldBe List(1, 2, 3)
18 |
19 | it should "work if all elements match the predicate" in:
20 | Flow.fromValues(1, 2, 3).takeWhile(_ < 5).runToList() shouldBe List(1, 2, 3)
21 |
22 | it should "fail the source with the same exception as the initial source" in:
23 | val f = Flow.usingEmit: emit =>
24 | emit(1)
25 | throw new IllegalArgumentException()
26 |
27 | an[IllegalArgumentException] should be thrownBy (f.runToList())
28 |
29 | it should "not take if predicate fails for first or more elements" in:
30 | Flow.fromValues(3, 2, 1).takeWhile(_ < 3).runToList() shouldBe Nil
31 | end FlowOpsTakeWhileTest
32 |
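A list-based model of takeWhile with includeFirstFailing (a sketch of the semantics pinned down above, not the implementation):

    def takeWhileModel[T](xs: List[T], p: T => Boolean, includeFirstFailing: Boolean): List[T] =
      val (prefix, rest) = xs.span(p)
      if includeFirstFailing then prefix ++ rest.take(1) else prefix

    takeWhileModel(List(1, 2, 3, 4), _ < 3, includeFirstFailing = true) // List(1, 2, 3)
    takeWhileModel(List(3, 2, 1), _ < 3, includeFirstFailing = false)   // List()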
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsTapTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.*
7 |
8 | import java.util.concurrent.atomic.AtomicInteger
9 |
10 | class FlowOpsTapTest extends AnyFlatSpec with Matchers with Eventually:
11 |
12 | it should "tap over a flow" in:
13 | val sum = new AtomicInteger()
14 | Flow.fromValues(1, 2, 3).tap(v => sum.addAndGet(v).discard).runToList() shouldBe List(1, 2, 3)
15 | sum.get() shouldBe 6
16 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsThrottleTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | import scala.concurrent.duration.*
8 | import ox.util.ElapsedTime
9 |
10 | class FlowOpsThrottleTest extends AnyFlatSpec with Matchers with ElapsedTime:
11 | behavior of "throttle"
12 |
13 | it should "not throttle the empty source" in:
14 | val s = Flow.empty[Int]
15 | val (result, executionTime) = measure:
16 | s.throttle(1, 1.second).runToList()
17 | result shouldBe List.empty
18 | executionTime.toMillis should be < 1.second.toMillis
19 |
20 | it should "throttle to specified elements per time units" in:
21 | val s = Flow.fromValues(1, 2)
22 | val (result, executionTime) = measure:
23 | s.throttle(1, 50.millis).runToList()
24 | result shouldBe List(1, 2)
25 | executionTime.toMillis should (be >= 100L and be <= 150L)
26 |
27 | it should "fail to throttle when elements <= 0" in:
28 | val s = Flow.empty[Int]
29 | the[IllegalArgumentException] thrownBy {
30 | s.throttle(-1, 50.millis)
31 | } should have message "requirement failed: elements must be > 0"
32 |
33 | it should "fail to throttle when 'per' is lower than 1ms" in:
34 | val s = Flow.empty[Int]
35 | the[IllegalArgumentException] thrownBy {
36 | s.throttle(1, 50.nanos)
37 | } should have message "requirement failed: per time must be >= 1 ms"
38 | end FlowOpsThrottleTest
39 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsTickTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.*
7 |
8 | import scala.concurrent.duration.DurationInt
9 |
10 | class FlowOpsTickTest extends AnyFlatSpec with Matchers with Eventually:
11 | it should "tick regularly" in supervised:
12 | val start = System.currentTimeMillis()
13 | val c = Flow.tick(100.millis).runToChannel()
14 | c.receive() shouldBe ()
15 | (System.currentTimeMillis() - start) shouldBe >=(0L)
16 | (System.currentTimeMillis() - start) shouldBe <=(50L)
17 |
18 | c.receive() shouldBe ()
19 | (System.currentTimeMillis() - start) shouldBe >=(100L)
20 | (System.currentTimeMillis() - start) shouldBe <=(150L)
21 |
22 | c.receive() shouldBe ()
23 | (System.currentTimeMillis() - start) shouldBe >=(200L)
24 | (System.currentTimeMillis() - start) shouldBe <=(250L)
25 |
26 | it should "tick immediately in case of a slow consumer, and then resume normally" in supervised:
27 | val start = System.currentTimeMillis()
28 | val c = Flow.tick(100.millis).runToChannel()
29 |
30 | // simulating a slow consumer
31 | sleep(200.millis)
32 | c.receive() shouldBe () // a tick should be waiting
33 | (System.currentTimeMillis() - start) shouldBe >=(200L)
34 | (System.currentTimeMillis() - start) shouldBe <=(250L)
35 |
36 | c.receive() shouldBe () // and immediately another, as the interval between send-s has passed
37 | (System.currentTimeMillis() - start) shouldBe >=(200L)
38 | (System.currentTimeMillis() - start) shouldBe <=(250L)
39 | end FlowOpsTickTest
40 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsTimeoutTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.*
7 |
8 | import scala.concurrent.duration.DurationInt
9 |
10 | class FlowOpsTimeoutTest extends AnyFlatSpec with Matchers with Eventually:
11 | it should "timeout" in:
12 | val start = System.currentTimeMillis()
13 | val c = Flow.timeout(100.millis).concat(Flow.fromValues(1))
14 | c.runToList() shouldBe List(1)
15 | (System.currentTimeMillis() - start) shouldBe >=(100L)
16 | (System.currentTimeMillis() - start) shouldBe <=(150L)
17 | end FlowOpsTimeoutTest
18 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsUsingSink.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsUsingSinkTest extends AnyFlatSpec with Matchers:
8 | behavior of "usingSink"
9 |
10 | it should "send the passed elements" in:
11 | Flow
12 | .usingEmit(emit =>
13 | emit(1)
14 | emit(2)
15 | emit(3)
16 | )
17 | .runToList() shouldBe List(1, 2, 3)
18 | end FlowOpsUsingSinkTest
19 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsZipAllTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.*
6 |
7 | class FlowOpsZipAllTest extends AnyFlatSpec with Matchers:
8 | behavior of "zipAll"
9 |
10 | it should "not emit any element when both flows are empty" in:
11 | val s = Flow.empty[Int]
12 | val other = Flow.empty[String]
13 |
14 | s.zipAll(other, -1, "foo").runToList() shouldBe List.empty
15 |
16 | it should "emit this element when other flow is empty" in:
17 | val s = Flow.fromValues(1)
18 | val other = Flow.empty[String]
19 |
20 | s.zipAll(other, -1, "foo").runToList() shouldBe List((1, "foo"))
21 |
22 | it should "emit other element when this flow is empty" in:
23 | val s = Flow.empty[Int]
24 | val other = Flow.fromValues("a")
25 |
26 | s.zipAll(other, -1, "foo").runToList() shouldBe List((-1, "a"))
27 |
28 | it should "emit matching elements when both flows are of the same size" in:
29 | val s = Flow.fromValues(1, 2)
30 | val other = Flow.fromValues("a", "b")
31 |
32 | s.zipAll(other, -1, "foo").runToList() shouldBe List((1, "a"), (2, "b"))
33 |
34 | it should "emit default for other flow if this flow is longer" in:
35 | val s = Flow.fromValues(1, 2, 3)
36 | val other = Flow.fromValues("a")
37 |
38 | s.zipAll(other, -1, "foo").runToList() shouldBe List((1, "a"), (2, "foo"), (3, "foo"))
39 |
40 | it should "emit default for this flow if other flow is longer" in:
41 | val s = Flow.fromValues(1)
42 | val other = Flow.fromValues("a", "b", "c")
43 |
44 | s.zipAll(other, -1, "foo").runToList() shouldBe List((1, "a"), (-1, "b"), (-1, "c"))
45 | end FlowOpsZipAllTest
46 |
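zipAll here agrees with the standard library's zipAll on collections, padding the shorter side with the given defaults:

    List(1).zipAll(List("a", "b", "c"), -1, "foo") // List((1,"a"), (-1,"b"), (-1,"c"))
    List(1, 2, 3).zipAll(List("a"), -1, "foo")     // List((1,"a"), (2,"foo"), (3,"foo"))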
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsZipTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 |
7 | class FlowOpsZipTest extends AnyFlatSpec with Matchers with Eventually:
8 |
9 | it should "zip two sources" in:
10 | val c1 = Flow.fromValues(1, 2, 3, 0)
11 | val c2 = Flow.fromValues(4, 5, 6)
12 |
13 | val s = c1.zip(c2).runToList()
14 |
15 | s shouldBe List((1, 4), (2, 5), (3, 6))
16 | end FlowOpsZipTest
17 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/FlowOpsZipWithIndexTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow
2 |
3 | import org.scalatest.concurrent.Eventually
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 |
7 | class FlowOpsZipWithIndexTest extends AnyFlatSpec with Matchers with Eventually:
8 | behavior of "zipWithIndex"
9 |
10 | it should "not zip anything from an empty flow" in:
11 | val c = Flow.empty[Int]
12 | val s = c.zipWithIndex
13 | s.runToList() shouldBe List.empty
14 |
15 | it should "zip flow with index" in:
16 | val c = Flow.fromValues(1 to 5: _*)
17 | val s = c.zipWithIndex
18 | s.runToList() shouldBe List((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))
19 |
20 | end FlowOpsZipWithIndexTest
21 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/flow/reactive/FlowPublisherTckTest.scala:
--------------------------------------------------------------------------------
1 | package ox.flow.reactive
2 |
3 | import org.reactivestreams.tck.TestEnvironment
4 | import org.reactivestreams.tck.flow.FlowPublisherVerification
5 | import org.scalatest.funsuite.AnyFunSuite
6 | import ox.Ox
7 | import ox.flow.Flow
8 | import ox.supervised
9 |
10 | import java.util.concurrent.Flow.Publisher
11 |
12 | class FlowPublisherTckTest extends AnyFunSuite:
13 | var ox: Ox = null
14 | val verification = new FlowPublisherVerification[Int](new TestEnvironment()):
15 | override def createFailedFlowPublisher(): Publisher[Int] = Flow.failed[Int](new Exception("failed")).toPublisher(using ox)
16 |
17 | override def createFlowPublisher(elements: Long): Publisher[Int] = Flow.fromIterable(1 to elements.toInt).toPublisher(using ox)
18 |
19 | verification.getClass().getMethods().foreach { m =>
20 | if m.getAnnotation(classOf[org.testng.annotations.Test]) != null then
21 | if m.getName().startsWith("untested_") then ignore(m.getName()) {}
22 | else
23 | test(m.getName()) {
24 | supervised {
25 | ox = summon[Ox]
26 | m.invoke(verification)
27 | }
28 | }
29 | }
30 | end FlowPublisherTckTest
31 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/resilience/AfterAttemptTest.scala:
--------------------------------------------------------------------------------
1 | package ox.resilience
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import org.scalatest.{EitherValues, TryValues}
6 | import ox.resilience.*
7 | import ox.scheduling.Schedule
8 |
9 | class AfterAttemptTest extends AnyFlatSpec with Matchers with EitherValues with TryValues:
10 | behavior of "RetryPolicy afterAttempt callback"
11 |
12 | it should "retry a succeeding function with afterAttempt callback" in:
13 | // given
14 | var afterAttemptInvocationCount = 0
15 |
16 | var counter = 0
17 | val successfulResult = 42
18 |
19 | def f =
20 | counter += 1
21 | successfulResult
22 |
23 | var returnedResult: Either[Throwable, Int] = null
24 | def afterAttempt(attempt: Int, result: Either[Throwable, Int]): Unit =
25 | afterAttemptInvocationCount += 1
26 | returnedResult = result
27 |
28 | // when
29 | val result = retry(RetryConfig(Schedule.immediate.maxRepeats(3), afterAttempt = afterAttempt))(f)
30 |
31 | // then
32 | result shouldBe successfulResult
33 | counter shouldBe 1
34 |
35 | afterAttemptInvocationCount shouldBe 1
36 | returnedResult shouldBe Right(successfulResult)
37 |
38 | it should "retry a failing function with afterAttempt callback" in:
39 | // given
40 | var afterAttemptInvocationCount = 0
41 |
42 | var counter = 0
43 | val failedResult = new RuntimeException("boom")
44 |
45 | def f =
46 | counter += 1
47 | if true then throw failedResult
48 |
49 | var returnedResult: Either[Throwable, Unit] = null
50 | def afterAttempt(attempt: Int, result: Either[Throwable, Unit]): Unit =
51 | afterAttemptInvocationCount += 1
52 | returnedResult = result
53 |
54 | // when
55 | val result = the[RuntimeException] thrownBy retry(RetryConfig(Schedule.immediate.maxRepeats(3), afterAttempt = afterAttempt))(f)
56 |
57 | // then
58 | result shouldBe failedResult
59 | counter shouldBe 4
60 |
61 | afterAttemptInvocationCount shouldBe 4
62 | returnedResult shouldBe Left(failedResult)
63 | end AfterAttemptTest
64 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/resilience/ScheduleFallingBackRetryTest.scala:
--------------------------------------------------------------------------------
1 | package ox.resilience
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import ox.util.ElapsedTime
6 | import ox.scheduling.Schedule
7 |
8 | import scala.concurrent.duration.*
9 |
10 | class ScheduleFallingBackRetryTest extends AnyFlatSpec with Matchers with ElapsedTime:
11 | behavior of "retry with combination of schedules"
12 |
13 | it should "retry 3 times immediately and then 2 times with delay" in:
14 | // given
15 | var counter = 0
16 | val sleep = 100.millis
17 | val immediateRetries = 3
18 | val delayedRetries = 2
19 |
20 | def f =
21 | counter += 1
22 | throw new RuntimeException("boom")
23 |
24 | val schedule = Schedule.immediate.maxRepeats(immediateRetries).andThen(Schedule.fixedInterval(sleep).maxRepeats(delayedRetries))
25 |
26 | // when
27 | val (result, elapsedTime) = measure(the[RuntimeException] thrownBy retry(RetryConfig(schedule))(f))
28 |
29 | // then
30 | result should have message "boom"
31 | counter shouldBe immediateRetries + delayedRetries + 1
32 | elapsedTime.toMillis should be >= 2 * sleep.toMillis
33 |
34 | it should "retry forever" in:
35 | // given
36 | var counter = 0
37 | val retriesUntilSuccess = 1_000
38 | val successfulResult = 42
39 |
40 | def f =
41 | counter += 1
42 | if counter <= retriesUntilSuccess then throw new RuntimeException("boom") else successfulResult
43 |
44 | val schedule = Schedule.immediate.maxRepeats(100).andThen(Schedule.fixedInterval(2.millis))
45 |
46 | // when
47 | val result = retry(RetryConfig(schedule))(f)
48 |
49 | // then
50 | result shouldBe successfulResult
51 | counter shouldBe retriesUntilSuccess + 1
52 | end ScheduleFallingBackRetryTest
53 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/scheduling/JitterTest.scala:
--------------------------------------------------------------------------------
1 | package ox.scheduling
2 |
3 | import org.scalatest.Inspectors
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 |
7 | import scala.concurrent.duration.*
8 |
9 | class JitterTest extends AnyFlatSpec with Matchers:
10 |
11 | behavior of "Jitter"
12 |
13 | private val baseSchedule = Schedule.exponentialBackoff(100.millis)
14 |
15 | it should "use no jitter" in:
16 | // given
17 | val schedule = baseSchedule
18 |
19 | // when
20 | val delays = schedule.intervals().take(6).toList
21 |
22 | // then
23 | delays should contain theSameElementsInOrderAs Seq(100, 200, 400, 800, 1600, 3200).map(_.millis)
24 |
25 | it should "use full jitter" in:
26 | // given
27 | val schedule = baseSchedule.jitter(Jitter.Full)
28 |
29 | // when
30 | val rawDelays = baseSchedule.intervals().take(5).toList
31 | val delays = schedule.intervals().take(5).toList
32 |
33 | // then
34 | delays
35 | .zip(rawDelays)
36 | .foreach: (delay, backoffDelay) =>
37 | delay should (be >= 0.millis and be <= backoffDelay)
38 |
39 | it should "use equal jitter" in:
40 | // given
41 | val schedule = baseSchedule.jitter(Jitter.Equal)
42 |
43 | // when
44 | val rawDelays = baseSchedule.intervals().take(5).toList
45 | val delays = schedule.intervals().take(5).toList
46 |
47 | // then
48 | delays
49 | .zip(rawDelays)
50 | .foreach: (delay, backoffDelay) =>
51 | delay should (be >= backoffDelay / 2 and be <= backoffDelay)
52 |
53 | it should "use decorrelated jitter" in:
54 | // given
55 | val min = 100.millis
56 | val schedule = Schedule.decorrelatedJitter(min)
57 |
58 | // when
59 | val delays = schedule.intervals().take(5).toList
60 |
61 | // then
62 | Inspectors.forEvery(delays.sliding(2).toList):
63 | case Seq(previousDelay, delay) => delay should (be >= min and be <= previousDelay * 3)
64 | case _ => fail("should never happen") // so that the match is exhaustive
65 | end JitterTest
66 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/util/ElapsedTime.scala:
--------------------------------------------------------------------------------
1 | package ox.util
2 |
3 | import scala.concurrent.duration.*
4 |
5 | trait ElapsedTime:
6 | def measure[T](f: => T): (T, Duration) =
7 | val before = System.nanoTime()
8 | val result = f
9 | val after = System.nanoTime()
10 | (result, (after - before).nanos)
11 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/util/MaxCounter.scala:
--------------------------------------------------------------------------------
1 | package ox.util
2 |
3 | import java.util.concurrent.atomic.AtomicInteger
4 |
5 | class MaxCounter():
6 | val counter = new AtomicInteger(0)
7 | @volatile var max = 0
8 |
9 | def increment() =
10 | counter.updateAndGet { c =>
11 | val inc = c + 1
12 | max = if inc > max then inc else max
13 | inc
14 | }
15 |
16 | def decrement() =
17 | counter.decrementAndGet()
18 | end MaxCounter
19 |
--------------------------------------------------------------------------------
/core/src/test/scala/ox/util/Trail.scala:
--------------------------------------------------------------------------------
1 | package ox.util
2 |
3 | import ox.discard
4 |
5 | import java.time.Clock
6 | import java.util.concurrent.atomic.AtomicReference
7 |
8 | class Trail(trail: AtomicReference[Vector[String]] = AtomicReference(Vector.empty)):
9 | def add(s: String): Unit =
10 | println(s"[${Clock.systemUTC().instant()}] [${Thread.currentThread().threadId()}] $s")
11 | trail.updateAndGet(_ :+ s).discard
12 |
13 | def get: Vector[String] = trail.get
14 |
--------------------------------------------------------------------------------
/cron/src/main/scala/ox/scheduling/cron/CronSchedule.scala:
--------------------------------------------------------------------------------
1 | package ox.scheduling.cron
2 |
3 | import cron4s.lib.javatime.*
4 | import cron4s.{Cron, CronExpr, toDateTimeCronOps}
5 | import ox.scheduling.Schedule
6 |
7 | import java.time.LocalDateTime
8 | import java.time.temporal.ChronoUnit
9 | import scala.concurrent.duration.*
10 |
11 | /** Methods in this object provide a [[Schedule]] based on a supplied cron expression.
12 | */
13 | object CronSchedule:
14 | /** @param expression
15 | * cron expression to parse
16 | * @return
17 | *    [[Schedule]] from the cron expression
18 | * @throws cron4s.Error
19 | * in case of invalid expression
20 | */
21 | def unsafeFromString(expression: String): Schedule =
22 | fromCronExpr(Cron.unsafeParse(expression))
23 |
24 | /** @param cron
25 | * [[CronExpr]] to base [[Schedule]] on.
26 | * @return
27 | * [[Schedule]] from cron expression
28 | */
29 | def fromCronExpr(cron: CronExpr): Schedule =
30 | def computeNext(previous: LocalDateTime): (LocalDateTime, Option[FiniteDuration]) =
31 | val next = cron.next(previous)
32 | val duration = next.map(n => ChronoUnit.MILLIS.between(previous, n).millis)
33 | (next.getOrElse(previous), duration)
34 |
35 | Schedule.computed(LocalDateTime.now(), computeNext)
36 | end fromCronExpr
37 | end CronSchedule
38 |
--------------------------------------------------------------------------------
/doc/.gitignore:
--------------------------------------------------------------------------------
1 | _build
2 | _build_html
--------------------------------------------------------------------------------
/doc/.python-version:
--------------------------------------------------------------------------------
1 | 3.12
2 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = python -msphinx
7 | SPHINXPROJ = ox
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/doc/adr/0002-retries.md:
--------------------------------------------------------------------------------
1 | # 2. Retries
2 |
3 | Date: 2023-11-30
4 |
5 | ## Context
6 |
7 | How should the [retries API](../scheduling/retries.md) be implemented in terms of:
8 | - developer friendliness,
9 | - supported ways of representing the operation under retry,
10 | - possibly infinite retries.
11 |
12 | ## Decision
13 |
14 | We're using a single, unified syntax to retry an operation:
15 | ```scala
16 | retry(operation)(policy)
17 | ```
18 | so that the developers don't need to wonder which variant to use.
19 |
20 | The operation can be a function returning a result directly, or wrapped in a `Try` or `Either`. Therefore, there are three overloaded variants of the `retry` function.
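
For illustration, a minimal sketch of the direct variant, using the configuration-first argument order exercised by the current test suite (`unstable` is a hypothetical operation):

```scala
import ox.resilience.*
import ox.scheduling.Schedule

def unstable(): Int = 42 // hypothetical operation that may throw

// retries on exceptions, up to 3 additional attempts, with no delay in between
val result: Int = retry(RetryConfig(Schedule.immediate.maxRepeats(3)))(unstable())
```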
21 |
22 | For possibly infinite retries, we're using tail recursion to be stack safe. This comes at the cost of some code duplication in the retry logic, but is still more readable and easier to follow than a `while` loop with `var`s for passing the state.
23 |
--------------------------------------------------------------------------------
/doc/adr/0003-why-source-operators-do-not-throw.md:
--------------------------------------------------------------------------------
1 | # 3. Why source operators do not throw
2 |
3 | Date: 2024-01-25
4 |
5 | ## Context
6 |
7 | Revisiting ADR #1, what should happen when an error is encountered when processing channel elements? Should it be propagated downstream or re-thrown?
8 |
9 | ## Decision
10 |
11 | In addition to what's mentioned in ADR #1, operators don't throw, but propagate, because we want to allow a throw-free coding style. When errors are propagated, every daemon operator thread shuts down on error, and we end the scope gracefully.
12 |
13 | Additionally, we assume that data only flows downstream - and this includes errors.
14 |
--------------------------------------------------------------------------------
/doc/adr/0004-channels-safe-unsafe-operations.md:
--------------------------------------------------------------------------------
1 | # 4. Channels: safe/unsafe Operations
2 |
3 | Date: 2024-02-28
4 |
5 | ## Context
6 |
7 | Channel operations such as `send`, `receive`, `select`, `close` etc. might fail because a channel is closed. How should
8 | this be signalled to the user?
9 |
10 | ## Decision
11 |
12 | We decided to have two variants of the methods:
13 |
14 | * default: `send`, `receive` etc., which throw an exception, when the channel is closed
15 | * safe: `sendSafe`, `receiveSafe` etc., which return a `ChannelClosed` value, when the channel is closed
16 |
17 | The "safe" variants are more performant: no stack trace is created, when the channel is closed. They are used by all
18 | channel combinators (such as `map`, `filter` etc.), to detect and propagate the errors downstream.
19 |
20 | ### Why not `Either` or `Try`?
21 |
22 | To avoid allocations on each operation (e.g. receive). Channels might be on the "hot path" and they might be important
23 | for performance. Union types provide a nice alternative here.
24 |
25 | Even with `Either`, though, if e.g. `send` had a signature `Either[ChannelClosed, Unit]`, discarding the result would
26 | at most be a warning (not in all cases), so potentially an error might go unnoticed.
27 |
28 | ### Why is the default to throw?
29 |
30 | Let's consider `send`. If the default were `send(t: T): ChannelClosed | Unit`, with an additional exception-throwing
31 | variant `sendUnsafe(t: T): Unit`, then the API would be quite surprising.
32 |
33 | A new user coming to the library could just call send / receive. The compiler might warn them in some cases
34 | that they discard the non-unit result of `send`, but (a) would they pay attention to those warnings, and (b) would they
35 | get them in the first place (this type of compiler warning isn't detected in 100% of cases).
36 |
37 | In other words - it would be quite easy to mistakenly discard the results of `send`, so a default which guards against
38 | that (by throwing exceptions) is better, and the "safe" version can always be used intentionally if that's what's
39 | needed.
40 |
41 | ### Update 17/04/2024
42 |
43 | The `...Safe` operations got renamed to `...OrClosed` or `...OrError`, as they can still throw `InterruptedException`s.
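
For illustration, a sketch using the post-rename names (assuming a rendezvous channel; details may differ):

```scala
import ox.channels.{Channel, ChannelClosed}

val c = Channel.rendezvous[Int]
c.done() // close the channel

// the default variant would throw here, as the channel is closed:
// c.receive()

// the "safe" variant returns a ChannelClosed value instead of throwing:
c.receiveOrClosed() match
  case ChannelClosed.Done     => println("channel is done")
  case ChannelClosed.Error(e) => println(s"channel failed: $e")
  case v: Int                 => println(s"received: $v")
```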
44 |
--------------------------------------------------------------------------------
/doc/adr/0005-application-errors.md:
--------------------------------------------------------------------------------
1 | # 5. Application errors
2 |
3 | Date: 2024-03-05
4 |
5 | ## Context
6 |
7 | In some cases, it's useful to treat some return values as errors, which should cause the enclosing scope to end.
8 |
9 | ## Decision
10 |
11 | For computation combinators, which include `par`, `race` and `supervised`, we decided to introduce the concept of
12 | application errors. These are values of a shape defined by an `ErrorMode`, which are specially treated by Ox - if
13 | such a value represents an error, the enclosing scope ends.
14 |
15 | Some design limitations include:
16 |
17 | * we want normal scopes to remain unchanged
18 | * methods requiring a concurrency scope (that is, `using Ox`) should be callable from the new scope
19 | * all forks that might report application errors, must be constrained to return the same type of application errors
20 | * computation combinators, such as `par`, should have a single implementation both when using application errors and
21 | exceptions only
22 |
23 | Taking this into account, we separate the `Ox` capability, which allows starting forks, and `OxError`, which
24 | additionally allows reporting application errors. An inheritance hierarchy, `OxError <: Ox`, ensures that we can call
25 | methods requiring the `Ox` capability if `OxError` is available, but not the other way round.
26 |
27 | Finally, introducing a special `forkError` method allows us to require that it is run within a `supervisedError` scope
28 | and that it must return a value of the correct shape.
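
A rough sketch of the resulting API, using an `Either`-based error mode (names as described above; details may differ):

```scala
import ox.{supervisedError, forkError, EitherMode}

// both the scope body and forkError-started forks must return an Either[String, _];
// a Left reported by any of them ends the scope with that application error
val result: Either[String, Int] = supervisedError(EitherMode[String]) {
  forkError { Right(42) }
  Right(1)
}
```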
29 |
--------------------------------------------------------------------------------
/doc/adr/0007-supervised-unsupervised-scopes.md:
--------------------------------------------------------------------------------
1 | # 7. Supervised & unsupervised scopes
2 |
3 | Date: 2024-04-17
4 |
5 | ## Context
6 |
7 | Originally, Ox had only `scoped` which created non-supervised scopes, that is errors were only discovered via explicit
8 | joining. This was later changed by introducing `supervised` and `unsupervised` scopes, where the former would end
9 | immediately when any fork failed, and the latter would not. However, `supervised` scopes have an overhead: they create
10 | an additional fork, in which the scope's main body is run. Is it possible to avoid this extra fork?
11 |
12 | ## Decision
13 |
14 | In short: no.
15 |
16 | An alternate design would be to store the thread that created the scope as part of the supervisor, and when any
17 | exception occurs, interrupt that thread so that it would discover the exception. However, this would be prone to
18 | interruption-races with external interruptions of that main thread. Even if we included an additional flag, specifying
19 | if the interruption happened because the scope ends, it would still be possible for an external interrupt to go
20 | unnoticed (if it happened at the same time as the internal one). Even though unlikely, such a design would be fragile,
21 | hence we are keeping the current implementation.
22 |
23 | ## Consequences
24 |
25 | To make our design more type-safe, we split the `Ox` capability into `OxUnsupervised` (allowing only unsupervised
26 | forks), and `Ox`.
27 |
--------------------------------------------------------------------------------
/doc/adr/0008-scheduled-repeat-retry.md:
--------------------------------------------------------------------------------
1 | # 8. Scheduled, repeat & retry
2 |
3 | Date: 2024-07-09
4 |
5 | ## Context
6 |
7 | How should the [retries](../scheduling/retries.md) and [repeat](../scheduling/repeat.md) APIs share a common implementation?
8 |
9 | ## Decision
10 |
11 | We're introducing [scheduled](../scheduling/scheduled.md) as a common API for both retries and repeats.
12 |
13 | In addition, the `Schedule` trait and its implementations are decoupled from the retry DSL, so that they can be used for repeating as well.
14 | The `retry` API remains unchanged, but it now uses `scheduled` underneath.
15 |
16 | Also, `repeat` functions have been added as sugar for `scheduled`, with a DSL focused on repeating.
17 |
18 | The main difference between `retry` and `repeat` lies in the interpretation of the duration provided by the `Schedule` (delay vs. interval).
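
For illustration, a sketch of the two entry points (`mightFail` and `poll` are hypothetical operations, and the `RepeatConfig` usage is assumed to mirror `RetryConfig`):

```scala
import ox.resilience.*
import ox.scheduling.*
import scala.concurrent.duration.*

def mightFail(): Int = 42 // hypothetical
def poll(): Int = 42      // hypothetical

// the schedule's duration is a *delay*: sleep 100ms after each failed attempt
retry(RetryConfig(Schedule.fixedInterval(100.millis).maxRepeats(3)))(mightFail())

// the schedule's duration is an *interval*: start an invocation every 100ms,
// accounting for how long the previous invocation took
repeat(RepeatConfig(Schedule.fixedInterval(100.millis).maxRepeats(3)))(poll())
```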
19 |
--------------------------------------------------------------------------------
/doc/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "flake-utils": {
4 | "inputs": {
5 | "systems": "systems"
6 | },
7 | "locked": {
8 | "lastModified": 1710146030,
9 | "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
10 | "owner": "numtide",
11 | "repo": "flake-utils",
12 | "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
13 | "type": "github"
14 | },
15 | "original": {
16 | "owner": "numtide",
17 | "repo": "flake-utils",
18 | "type": "github"
19 | }
20 | },
21 | "nixpkgs": {
22 | "locked": {
23 | "lastModified": 1715534503,
24 | "narHash": "sha256-5ZSVkFadZbFP1THataCaSf0JH2cAH3S29hU9rrxTEqk=",
25 | "owner": "nixos",
26 | "repo": "nixpkgs",
27 | "rev": "2057814051972fa1453ddfb0d98badbea9b83c06",
28 | "type": "github"
29 | },
30 | "original": {
31 | "owner": "nixos",
32 | "ref": "nixos-unstable",
33 | "repo": "nixpkgs",
34 | "type": "github"
35 | }
36 | },
37 | "root": {
38 | "inputs": {
39 | "flake-utils": "flake-utils",
40 | "nixpkgs": "nixpkgs"
41 | }
42 | },
43 | "systems": {
44 | "locked": {
45 | "lastModified": 1681028828,
46 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
47 | "owner": "nix-systems",
48 | "repo": "default",
49 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
50 | "type": "github"
51 | },
52 | "original": {
53 | "owner": "nix-systems",
54 | "repo": "default",
55 | "type": "github"
56 | }
57 | }
58 | },
59 | "root": "root",
60 | "version": 7
61 | }
62 |
--------------------------------------------------------------------------------
/doc/flake.nix:
--------------------------------------------------------------------------------
1 | {
2 | description = "Python shell flake";
3 |
4 | inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
5 | inputs.flake-utils.url = "github:numtide/flake-utils";
6 |
7 | outputs = { self, nixpkgs, flake-utils, ... }:
8 | flake-utils.lib.eachDefaultSystem (system:
9 | let
10 | pkgs = nixpkgs.legacyPackages.${system};
11 | python = pkgs.python3.withPackages (ps: with ps; [
12 | pip
13 | ]);
14 | in
15 | {
16 | devShell = pkgs.mkShell {
17 | buildInputs = [ python ];
18 |
19 | shellHook = ''
20 | # Create a Python virtual environment and activate it
21 | python -m venv .env
22 | source .env/bin/activate
23 | # Install the Python dependencies from requirements.txt
24 | if [ -f requirements.txt ]; then
25 | pip install -r requirements.txt
26 | fi
27 | '';
28 | };
29 | }
30 | );
31 | }
32 |
--------------------------------------------------------------------------------
/doc/high-level-concurrency/collections.md:
--------------------------------------------------------------------------------
1 | # Parallelize collection operations
2 |
3 | Ox contains a number of methods which allow parallelizing operations on collections.
4 |
5 | ## mapPar
6 |
7 | ```scala mdoc:silent
8 | import ox.mapPar
9 |
10 | val input: List[Int] = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
11 |
12 | val result: List[Int] = input.mapPar(4)(_ + 1)
13 | // (2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
14 | ```
15 |
16 | If any transformation fails, the others are interrupted and `mapPar` rethrows the exception that was thrown by the
17 | transformation. The parallelism argument limits how many concurrent forks are going to process the collection.
19 |
20 | ## foreachPar
21 |
22 | ```scala mdoc:silent:reset
23 | import ox.foreachPar
24 |
25 | val input: List[Int] = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
26 |
27 | input.foreachPar(4)(i => println(i))
28 | // prints each element of the list, might be in any order
29 | ```
30 |
31 | Similar to `mapPar` but doesn't return anything.
32 |
33 | ## filterPar
34 |
35 | ```scala mdoc:silent:reset
36 | import ox.filterPar
37 |
38 | val input: List[Int] = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
39 |
40 | val result: List[Int] = input.filterPar(4)(_ % 2 == 0)
41 | // (2, 4, 6, 8, 10)
42 | ```
43 |
44 | Filters the collection in parallel using the provided predicate. If the predicate fails for any element, the exception
45 | is rethrown and the other forks evaluating the predicate are interrupted.
46 |
47 | ## collectPar
48 |
49 | ```scala mdoc:silent:reset
50 | import ox.collectPar
51 |
52 | val input: List[Int] = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
53 |
54 | val result: List[Int] = input.collectPar(4) {
55 | case i if i % 2 == 0 => i + 1
56 | }
57 | // (3, 5, 7, 9, 11)
58 | ```
59 |
60 | Similar to `mapPar` but only applies transformation to elements for which the partial function is defined. Other
61 | elements are skipped.
62 |
--------------------------------------------------------------------------------
/doc/high-level-concurrency/par.md:
--------------------------------------------------------------------------------
1 | # Running computations in parallel
2 |
3 | A number of computations can be run in parallel using the `par` method, for example:
4 |
5 | ```scala mdoc:compile-only
6 | import ox.{par, sleep}
7 | import scala.concurrent.duration.*
8 |
9 | def computation1: Int =
10 | sleep(2.seconds)
11 | 1
12 |
13 | def computation2: String =
14 | sleep(1.second)
15 | "2"
16 |
17 | val result: (Int, String) = par(computation1, computation2)
18 | // (1, "2")
19 | ```
20 |
21 | If any of the computations fails, the other is interrupted. In such a case, `par` waits until both branches complete
22 | and then re-throws the exception.
23 |
24 | It's also possible to run a sequence of computations given as a `Seq[() => T]` in parallel, optionally limiting the
25 | parallelism using `parLimit`:
26 |
27 | ```scala mdoc:compile-only
28 | import ox.{parLimit, sleep}
29 | import scala.concurrent.duration.*
30 |
31 | def computation(n: Int): Int =
32 | sleep(1.second)
33 | println(s"Running $n")
34 | n * 2
35 |
36 | val computations = (1 to 20).map(n => () => computation(n))
37 | val result: Seq[Int] = parLimit(5)(computations)
38 | // (1, "2")
39 | ```
40 |
41 | ## Using application errors
42 |
43 | Some values might be considered as application errors. If a computation returns such an error, other computations are
44 | interrupted, same as when an exception is thrown. The error is then returned by the `par` method.
45 |
46 | It's possible to use an arbitrary [error mode](../basics/error-handling.md) by providing it as the initial argument to `par`.
47 | Alternatively, a built-in version using `Either` is available as `parEither`:
48 |
49 | ```scala mdoc:compile-only
50 | import ox.{parEither, sleep}
51 | import scala.concurrent.duration.*
52 |
53 | val result = parEither(
54 | {
55 | sleep(200.millis)
56 | Right("ok")
57 | }, {
58 | sleep(100.millis)
59 | Left(-1)
60 | }
61 | )
62 |
63 | // result is Left(-1), the other branch is interrupted
64 | ```
65 |
--------------------------------------------------------------------------------
/doc/high-level-concurrency/race.md:
--------------------------------------------------------------------------------
1 | # Race two computations
2 |
3 | A number of computations can be raced with each other using the `raceSuccess` method, for example:
4 |
5 | ```scala mdoc:compile-only
6 | import ox.{raceSuccess, sleep}
7 | import scala.concurrent.duration.*
8 |
9 | def computation1: Int =
10 | sleep(2.seconds)
11 | 1
12 |
13 | def computation2: Int =
14 | sleep(1.second)
15 | 2
16 |
17 | val result: Int = raceSuccess(computation1, computation2)
18 | // 2
19 | ```
20 |
21 | The losing computation is interrupted. `raceSuccess` waits until both branches finish; this also applies to the losing one,
22 | which might take a while to clean up after interruption.
23 |
24 | It is also possible to race a sequence of computations, given as `Seq[() => T]`.
25 |
26 | ## Race variants
27 |
28 | * `raceSuccess` returns the first success, or if all fail, re-throws the first exception
29 | * `raceResult` returns the result of the first computation to finish in any way (the "winner"); if that computation
30 | failed, its exception is re-thrown
31 |
32 | ## Using application errors
33 |
34 | Some values might be considered as application errors. If a computation returns such an error, `raceSuccess` continues waiting
35 | if there are other computations in progress, same as when an exception is thrown. Ultimately, if no result is successful,
36 | `raceSuccess` either throws the first exception, or the first application error that has been reported (whichever comes first).
37 |
38 | It's possible to use an arbitrary [error mode](../basics/error-handling.md) by providing it as the initial argument to `raceSuccess`.
39 | Alternatively, a built-in version using `Either` is available as `raceEither`:
40 |
41 | ```scala mdoc:compile-only
42 | import ox.{raceEither, sleep}
43 | import scala.concurrent.duration.*
44 |
45 | raceEither({
46 | sleep(200.millis)
47 | Left(-1)
48 | }, {
49 | sleep(500.millis)
50 | Right("ok")
51 | }, {
52 | sleep(1.second)
53 | Right("also ok")
54 | })
55 | ```
56 |
57 | Here, the example returns `Right("ok")`; the first result is considered an error (a `Left`), and the third computation
58 | is cancelled.
59 |
60 |
--------------------------------------------------------------------------------
/doc/high-level-concurrency/timeout.md:
--------------------------------------------------------------------------------
1 | # Timeout a computation
2 |
3 | ```scala
4 | import ox.{sleep, timeout}
5 | import scala.concurrent.duration.DurationInt
6 | import scala.util.Try
6 |
7 | def computation: Int =
8 | sleep(2.seconds)
9 | 1
10 |
11 | val result1: Try[Int] = Try(timeout(1.second)(computation)) // failure: TimeoutException
12 | val result2: Try[Int] = Try(timeout(3.seconds)(computation)) // success: 1
13 | ```
14 |
15 | A variant, `timeoutOption`, doesn't throw a `TimeoutException` on timeout, but returns `None` instead.
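
For example:

```scala
import ox.{sleep, timeoutOption}
import scala.concurrent.duration.DurationInt

val result: Option[Int] = timeoutOption(1.second) {
  sleep(2.seconds)
  1
}
// None: the computation did not complete within the timeout
```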
16 |
--------------------------------------------------------------------------------
/doc/info/community-support.md:
--------------------------------------------------------------------------------
1 | # Community & support
2 |
3 | ## Community
4 |
5 | If you have feedback, development ideas or critique, please head to our [community forum](https://softwaremill.community/c/ox/12)!
6 | Alternatively, you can create an issue or submit a pull request on [GitHub](https://github.com/softwaremill/ox).
7 |
8 | ## Sponsors
9 |
10 | Development and maintenance of Ox is sponsored by [SoftwareMill](https://softwaremill.com), a software development and consulting company.
11 | We help clients scale their business through software. Our areas of expertise include backends, distributed systems,
12 | machine learning and data analytics.
13 |
14 | [SoftwareMill](https://softwaremill.com)
15 |
16 | ## Commercial Support
17 |
18 | We offer commercial support for Ox and related technologies, as well as development services.
19 | [Contact us](https://softwaremill.com/contact/) to learn more about our offer!
--------------------------------------------------------------------------------
/doc/info/dependency.md:
--------------------------------------------------------------------------------
1 | # Dependency (sbt, scala-cli, etc.)
2 |
3 | To use ox core in your project, add:
4 |
5 | ```scala
6 | // sbt dependency
7 | "com.softwaremill.ox" %% "core" % "@VERSION@"
8 |
9 | // scala-cli dependency
10 | //> using dep com.softwaremill.ox::core:@VERSION@
11 | ```
12 |
13 | Ox core depends only on the Java [jox](https://github.com/softwaremill/jox) project, where channels are implemented. There are no other direct or transitive dependencies.
14 |
15 | Integration modules have separate dependencies.
--------------------------------------------------------------------------------
/doc/info/scope.md:
--------------------------------------------------------------------------------
1 | # Project scope
2 |
3 | The areas that we'd like to cover with Ox are:
4 |
5 | * concurrency: developer-friendly structured concurrency, high-level concurrency operators, safe low-level primitives,
6 | communication between concurrently running computations
7 | * error management: retries, timeouts, a safe approach to error propagation, safe resource management
8 | * scheduling & timers
9 | * resiliency: circuit breakers, bulkheads, rate limiters, backpressure
10 |
11 | All of the above should allow for observability of the orchestrated business logic. We aim to enable writing simple,
12 | expression-oriented code in functional style. We'd like to keep the syntax overhead to a minimum, preserving
13 | developer-friendly stack traces, and without compromising performance.
14 |
15 | Some of the above are already addressed in the API, some are coming up in the future. We'd love your help in shaping the
16 | project!
17 |
18 | ## Inspiration & building blocks
19 |
20 | * [Project Loom](https://openjdk.org/projects/loom/) (virtual threads)
21 | * structured concurrency Java APIs ([JEP 505](https://openjdk.org/jeps/505))
22 | * scoped values ([JEP 506](https://openjdk.org/jeps/506))
23 | * fast, scalable [Go](https://golang.org)-like channels using [jox](https://github.com/softwaremill/jox)
24 | * the [Scala 3](https://www.scala-lang.org) programming language
25 |
--------------------------------------------------------------------------------
/doc/integrations/mdc-logback.md:
--------------------------------------------------------------------------------
1 | # Inheritable MDC using Logback
2 |
3 | Dependency:
4 |
5 | ```scala
6 | "com.softwaremill.ox" %% "mdc-logback" % "@VERSION@"
7 | ```
8 |
9 | Ox provides support for setting inheritable MDC (mapped diagnostic context) values, when using the [Logback](https://logback.qos.ch)
10 | logging library. Normally, values set using `MDC.put` aren't inherited across (virtual) threads, which includes forks
11 | created in concurrency contexts.
12 |
13 | Inheritable values are especially useful e.g. when setting a correlation id in an HTTP request interceptor, or at any
14 | entrypoint to the application. Such correlation id values can then be added automatically to each log message, provided
15 | the appropriate log encoder pattern is used.
16 |
17 | To enable using inheritable MDC values, the application's code should call `InheritableMDC.init` as soon as possible.
18 | The best place would be the application's entrypoint (the `main` method).
19 |
20 | Once this is done, inheritable MDC values can be set in a scoped & structured manner using `InheritableMDC.supervisedWhere`
21 | and variants.
22 |
23 | As inheritable MDC values use a [`ForkLocal`](../structured-concurrency/fork-local.md) under the hood, its usage
24 | restrictions apply: forks should not be created using outer (wider) concurrency scopes from within the block where the
25 | values are set. Only newly created scopes, or the provided scope, can be used to create forks. That's why the
26 | `supervisedWhere`, `unsupervisedWhere` and `supervisedErrorWhere` methods are provided.
27 |
28 | "Normal" MDC usage is not affected. That is, values set using `MDC.put` are not inherited, and are only available in
29 | the thread where they are set.
30 |
31 | For example:
32 |
33 | ```scala mdoc:compile-only
34 | import org.slf4j.MDC
35 |
36 | import ox.fork
37 | import ox.logback.InheritableMDC
38 |
39 | InheritableMDC.supervisedWhere("a" -> "1", "b" -> "2") {
40 | MDC.put("c", "3") // not inherited
41 |
42 | fork {
43 | MDC.get("a") // "1"
44 | MDC.get("b") // "2"
45 | MDC.get("c") // null
46 | }.join()
47 |
48 | MDC.get("a") // "1"
49 | MDC.get("b") // "2"
50 | MDC.get("c") // "3"
51 | }
52 | ```
53 |
--------------------------------------------------------------------------------
/doc/integrations/otel-context.md:
--------------------------------------------------------------------------------
1 | # Propagating OpenTelemetry context
2 |
3 | Dependency:
4 |
5 | ```scala
6 | "com.softwaremill.ox" %% "otel-context" % "@VERSION@"
7 | ```
8 |
9 | When using the default OpenTelemetry context-propagation mechanisms, which rely on thread-local storage, the context
10 | will not be propagated across virtual thread boundaries, e.g. when creating new forks as part of
11 | [`supervised`](../structured-concurrency/fork-join.md) scopes. This might lead to spans not being properly correlated
12 | into traces, or metrics without the appropriate context.
13 |
14 | To fix this problem, the context must be propagated whenever a new virtual thread is created. One way to achieve this
15 | is by using a custom thread factory, provided by this module - `PropagatingVirtualThreadFactory`. It can be set
16 | for the whole app when using [`OxApp`](../utils/oxapp.md), or manually through `oxThreadFactory`:
17 |
18 | ```scala mdoc:compile-only
19 | import ox.*
20 | import ox.otel.context.PropagatingVirtualThreadFactory
21 |
22 | object MyApp extends OxApp:
23 | override def settings: OxApp.Settings = OxApp.Settings.Default.copy(
24 | threadFactory = Some(PropagatingVirtualThreadFactory())
25 | )
26 |
27 | def run(args: Vector[String])(using Ox): ExitCode = ExitCode.Success
28 | ```
--------------------------------------------------------------------------------
/doc/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=python -msphinx
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | set SPHINXPROJ=ox
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed,
20 | echo.then set the SPHINXBUILD environment variable to point to the full
21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the
22 | echo.Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/doc/other/dictionary.md:
--------------------------------------------------------------------------------
1 | # Dictionary
2 |
3 | How we use various terms throughout the codebase and the documentation (or at least try to):
4 |
5 | Scopes:
6 | * **concurrency scope**: either `supervised` (default), `supervisedError` (permitting application errors),
7 | or `unsupervised`
8 | * scope **body**: the code block passed to a concurrency scope (the `supervised`, `supervisedError` or `unsupervised`
9 | method)
10 |
11 | Types of forks:
12 | * supervised / unsupervised
13 | * daemon / user
14 | * optionally, recognizing application errors
15 |
16 | Fork lifecycle:
17 | * within scopes, asynchronously running **forks** can be **started**
18 | * after being started a fork is **running**
19 | * then, forks **complete**: either a fork **succeeds** with a value, or a fork **fails** with an exception
20 | * external **cancellation** (`Fork.cancel()`) interrupts the fork and waits until it completes; interruption uses
21 | JVM's mechanism of injecting an `InterruptedException`
22 | * forks are **supervised** if they are run in a `supervised` scope, and not explicitly unsupervised (that is, started
23 | using `forkUnsupervised` or `forkCancellable`)
24 |
25 | Scope lifecycle:
26 | * a scope **ends**: when unsupervised, the scope's body is entirely evaluated; when supervised, all user (non-daemon) &
27 | supervised forks complete successfully, or at least one user/daemon supervised fork fails, or an application error
28 | is reported. When the scope ends, all forks that are still running are cancelled
29 | * scope **completes**, once all forks complete and finalizers are run; then, the `supervised`, `supervisedError` or
30 | `unsupervised` method returns.
31 |
32 | Errors:
33 | * fork **failure**: when a fork fails with an exception
34 | * **application error**: forks might successfully complete with values which are considered application-level errors;
35 | such values are reported to the enclosing scope and cause the scope to end
36 |
37 | Other:
38 | * **computation combinator**: a method which takes user-provided functions and manages their execution, e.g. using
39 | concurrency, interruption, and appropriately handling errors; examples include `par`, `race`, `retry`, `timeout`
40 |
41 | Channels:
42 | * **values** can be **sent** to a channel, or **received** from a channel
43 |
44 | Flows:
45 | * when **run**, a flow **emits** **elements**
--------------------------------------------------------------------------------
/doc/other/links.md:
--------------------------------------------------------------------------------
1 | # Blogs, videos, ...
2 |
3 | ## Blogs
4 |
5 | * [Prototype Loom-based concurrency API for Scala](https://softwaremill.com/prototype-loom-based-concurrency-api-for-scala/)
6 | * [Go-like channels using project Loom and Scala](https://softwaremill.com/go-like-channels-using-project-loom-and-scala/)
7 | * [Two types of futures](https://softwaremill.com/two-types-of-futures/)
8 | * [Supervision, Kafka and Java 21: what’s new in Ox](https://softwaremill.com/supervision-kafka-and-java-21-whats-new-in-ox/)
9 | * [Designing a (yet another) retry API](https://softwaremill.com/designing-a-yet-another-retry-api/)
10 | * [Handling errors in direct-style Scala](https://softwaremill.com/handling-errors-in-direct-style-scala/)
11 | * [Direct-style concurrent streaming](https://softwaremill.com/direct-style-concurrent-streaming/)
12 |
13 | ## Videos
14 |
15 | Coming up!
--------------------------------------------------------------------------------
/doc/other/performance.md:
--------------------------------------------------------------------------------
1 | # Performance
2 |
3 | Some performance tests have been done around channels, see:
4 |
5 | * [Limits of Loom's performance](https://softwaremill.com/limits-of-looms-performance/)
6 | * [Go-like selects using jox channels in Java](https://softwaremill.com/go-like-selects-using-jox-channels-in-java/)
7 |
--------------------------------------------------------------------------------
/doc/other/stability.md:
--------------------------------------------------------------------------------
1 | # Stability of modules
2 |
3 | The modules are categorized using the following levels:
4 |
5 | * **stable**: binary compatibility is guaranteed within a major version; adheres to semantic versioning
6 | * **stabilizing**: the API is mostly stable, with rare binary-incompatible changes possible in minor releases (only if necessary)
7 | * **experimental**: API can change significantly even in patch releases
8 |
9 | The major version is increased when there are binary-incompatible changes in **stable** modules.
10 |
11 | The minor version is increased when there are significant new features in **stable** modules (keeping compatibility), or binary-incompatible changes in **stabilizing** modules.
12 |
13 | The patch version is increased when there are binary-compatible changes in **stable** / **stabilizing** modules, any changes in **experimental** modules, or when a new module is added (e.g. a new integration).
14 |
15 | ## Main modules
16 |
17 | | Module | Level |
18 | |-----------------------|--------------|
19 | | core | stabilizing |
20 | | flow-reactive-streams | stabilizing |
21 | | kafka | experimental |
22 | | mdc-logback | experimental |
23 | | cron | experimental |
24 | | otel-context | experimental |
25 |
--------------------------------------------------------------------------------
/doc/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx_rtd_theme==2.0.0
2 | sphinx==7.3.7
3 | sphinx-autobuild==2024.4.16
4 | myst-parser==2.0.0
5 |
--------------------------------------------------------------------------------
/doc/scheduling/scheduled.md:
--------------------------------------------------------------------------------
1 | # Scheduled
2 |
3 | The `scheduled` functions allow running an operation according to a given schedule. For most use cases it is preferable
4 | to use `repeat`, `retry`, or a combination of both, as they provide a more convenient DSL. In fact, `retry`
5 | and `repeat` use `scheduled` internally.
6 |
7 | ## Operation definition
8 |
9 | Similarly to the `retry` and `repeat` APIs, the `operation` can be provided:
10 |
11 | * directly using a by-name parameter, i.e. `f: => T`
12 | * using a by-name `Either[E, T]`
13 | * or using an arbitrary [error mode](../basics/error-handling.md), accepting the computation in an `F`
14 | context: `f: => F[T]`.
15 |
16 | ## Configuration
17 |
18 | The `scheduled` config consists of:
19 |
20 | - a `Schedule`, which indicates how many times the `operation` should be run, provides a duration based on which
21 | a sleep is calculated and provides an initial delay if configured.
22 | - a `SleepMode`, which determines how the sleep between subsequent operations should be calculated:
23 | - `Interval` - default for `repeat` operations, where the sleep is calculated as the duration provided by schedule
24 | minus the duration of the last operation (can be negative, in which case the next operation occurs immediately).
25 | - `Delay` - default for `retry` operations, where the sleep is just the duration provided by schedule.
26 | - `afterAttempt` - a callback function that is invoked after each operation and determines if the scheduler loop
27 | should continue. Used for `onRetry`, `shouldContinueOnError`, `shouldContinueOnResult` and adaptive retries in
28 | `retry` API. Defaults to always continuing.
29 |
30 | ## Schedule
31 |
32 | See the [retry](retries.md) documentation for an overview of the available ways to create and modify a `Schedule`.
33 |
34 | ### Testing schedules
35 |
36 | Schedules can be tested by forcing the evaluation of `Schedule.intervals` and inspecting the resulting lazy list
37 | of intervals.
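
For instance (mirroring how the library's own tests inspect schedules):

```scala
import ox.scheduling.Schedule
import scala.concurrent.duration.*

val delays = Schedule.exponentialBackoff(100.millis).intervals().take(4).toList
// List(100.millis, 200.millis, 400.millis, 800.millis)
```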
--------------------------------------------------------------------------------
/doc/streaming/backpressure.md:
--------------------------------------------------------------------------------
1 | # Backpressure
2 |
3 | Channels and running flows are back-pressured. The `Channel.send` operation blocks until there's a receiver thread available, or until there's enough space in the buffer. The amount of in-flight processing is hence bounded by the total size of the channel buffers.
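
A sketch of this behavior (assuming a buffered channel of capacity 2, created with `Channel.buffered`):

```scala
import ox.channels.Channel
import ox.{fork, supervised}

supervised {
  val c = Channel.buffered[Int](2)
  fork {
    c.send(1)
    c.send(2)
    c.send(3) // blocks until the receiver below makes space in the buffer
  }
  c.receive()
  c.receive()
  c.receive()
}
```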
4 |
--------------------------------------------------------------------------------
/doc/streaming/errors.md:
--------------------------------------------------------------------------------
1 | # Error propagation
2 |
3 | Errors are only propagated downstream, ultimately reaching the point where the flow is run / source is discharged. This leads to an exception being thrown there.
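
For example, a flow with a failing stage only surfaces the exception at the point where it is run:

```scala
import ox.flow.Flow

val flow = Flow.failed[Int](new RuntimeException("boom")).map(_ + 1) // nothing thrown yet
flow.runToList() // the RuntimeException is thrown here
```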
4 |
5 | When running flows, any [scopes](../structured-concurrency/fork-join.md) started as part of executing the flow's stages should have completed, before the exception is re-thrown by the `run...` method.
6 |
7 | For channel-transforming operations, once the exception reaches the enclosing scope, any forks should become interrupted, including any that are still running and are handling the upstream processing stages.
8 |
9 | The approach we decided to take (only propagating errors downstream) is one of the two possible designs - with the other being re-throwing an exception when it's encountered. Please see [the respective ADR](../adr/0001-error-propagation-in-channels.md) for a discussion.
10 |
--------------------------------------------------------------------------------
/doc/structured-concurrency/fork-local.md:
--------------------------------------------------------------------------------
1 | # Fork locals
2 |
3 | `ForkLocal`s replace usages of `ThreadLocal` when using Ox's forks and structural concurrency. They are useful to
4 | propagate auxiliary context, e.g. trace or correlation ids.
5 |
6 | A fork local needs to be first created with a default value. Then, its value can be set within a new [scope](fork-join.md).
7 | Usually, a new supervised scope is created, within which the `ForkLocal` is set to the given value - but only within that
8 | scope, as long as it's not completed. Hence, values are bound structurally:
9 |
10 | ```scala mdoc:compile-only
11 | import ox.{ForkLocal, fork, supervised}
12 |
13 | val v = ForkLocal("a")
14 | supervised {
15 | println(v.get()) // "a"
16 | fork {
17 | v.supervisedWhere("x") {
18 | println(v.get()) // "x"
19 | fork {
20 | println(v.get()) // "x"
21 | }.join()
22 | }
23 | }.join()
24 | println(v.get()) // "a"
25 | }
26 | ```
27 |
28 | Scoped values propagate across nested scopes.
29 |
30 | ```{note}
31 | Due to the "structured" nature of setting a fork local's value, forks using external (wider) scopes should not be
32 | created within a block where a fork local is set. An attempt to do so will throw a
33 | `java.util.concurrent.StructureViolationException`.
34 | ```
35 |
36 | ## Creating helper functions which set fork locals
37 |
38 | If you're writing a helper function which sets a value of a fork local within a passed code block, you have to make
39 | sure that the code block doesn't accidentally capture the outer concurrency scope (leading to an exception on the
40 | first `fork`).
41 |
42 | This can be done by capturing the code block as a context function `Ox ?=> T`, so that any nested invocations of `fork`
43 | will use the provided instance, not the outer one. E.g.:
44 |
45 | ```scala
46 | def withSpan[T](spanName: String)(f: Ox ?=> T): T =
47 | val span = spanBuilder.startSpan(spanName)
48 | currentSpan.supervisedWhere(Some(span)) {
49 | try f
50 | finally span.end()
51 | }
52 | ```
53 |
54 | ## Implementation notes
55 |
56 | `ForkLocal`s are based on an immutable map, passed via a `ThreadLocal` when a fork is started or a value is set. The
57 | implementation will instead rely on `ScopedValue`s, which are part of [JEP 506](https://openjdk.org/jeps/506), once
58 | both scoped values and structured concurrency are available as stable features in an LTS Java release.
--------------------------------------------------------------------------------
/doc/structured-concurrency/index.md:
--------------------------------------------------------------------------------
1 | # What is structured concurrency?
2 |
3 | Structured concurrency is an approach where the lifetime of a thread is determined by the syntactic structure of the
4 | code.
5 |
6 | First introduced by [Martin Sústrik](https://250bpm.com/blog:71/) and later popularized by
7 | [Nathaniel J. Smith](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/),
8 | structured concurrency made its way into Python, Kotlin, Java and now Scala.
9 |
10 | The basic concept in structured concurrency is the scope, within which concurrently running threads of execution can be
11 | started. The scope only finishes once all threads started within it finish (either successfully, or with an error). Thus,
12 | it isn't possible to "leak" threads outside of a method. Threads become more of a method's implementation detail, rather
13 | than an effect.
14 |
15 | These characteristics make structured concurrency an ideal candidate to make concurrency safer in direct-style
16 | programming, while keeping blocking-like method calls. Structured concurrency enables local reasoning on the threading
17 | effects, which is also one of the prime tenets of functional programming!
18 |
19 | Ox extends the structured concurrency concepts with various forms of error handling, described in the following sections.
20 |
--------------------------------------------------------------------------------
/doc/structured-concurrency/interruptions.md:
--------------------------------------------------------------------------------
1 | # Interruptions
2 |
3 | When catching exceptions, care must be taken not to catch & fail to propagate an `InterruptedException`. Doing so will
4 | prevent the scope cleanup mechanisms from making appropriate progress, as the scope won't finish until all started threads
5 | complete.
6 |
7 | A good solution is to catch only non-fatal exceptions using `NonFatal`, e.g.:
8 |
9 | ```scala mdoc:compile-only
10 | import ox.{forever, fork, supervised}
11 |
12 | import org.slf4j.LoggerFactory
13 | import scala.util.control.NonFatal
14 |
15 | val logger = LoggerFactory.getLogger(this.getClass)
16 | def processSingleItem(): Unit = ()
17 |
18 | supervised {
19 | fork {
20 | forever {
21 | try processSingleItem()
22 | catch case NonFatal(e) => logger.error("Processing error", e)
23 | }
24 | }
25 |
26 | // do something else
27 | }
28 | ```
29 |
--------------------------------------------------------------------------------
/doc/utils/control-flow.md:
--------------------------------------------------------------------------------
1 | # Control flow methods
2 |
3 | There are some helper methods which might be useful when writing code using ox's concurrency operators:
4 |
5 | * `forever { ... }` repeatedly evaluates the given code block forever
6 | * `repeatWhile { ... }` repeatedly evaluates the given code block, as long as it returns `true`
7 | * `repeatUntil { ... }` repeatedly evaluates the given code block, until it returns `true`
8 | * `never` blocks the current thread indefinitely, until it is interrupted
9 | * `checkInterrupt()` checks if the current thread is interrupted, and if so, throws an `InterruptedException`. Useful in
10 | compute-intensive code, which wants to cooperate in the cancellation protocol
11 |
12 | All of these are `inline` methods, imposing no runtime overhead.
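
For example, `repeatWhile` can drive a simple polling loop:

```scala
import ox.repeatWhile

var i = 0
repeatWhile {
  i += 1
  i < 3 // the block is re-evaluated as long as this condition holds
}
```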
13 |
--------------------------------------------------------------------------------
/doc/utils/resources.md:
--------------------------------------------------------------------------------
1 | # Resources
2 |
3 | ## Single scoped resource
4 |
5 | Ox provides convenience inline methods to allocate, use and (uninterruptibly) release resources with a try-finally
6 | block: `use` and `useCloseable`. For example:
7 |
8 | ```scala mdoc:compile-only
9 | import ox.useCloseable
10 |
11 | useCloseable(new java.io.PrintWriter("test.txt")) { writer =>
12 | writer.println("Hello, world!")
13 | }
14 | ```
15 |
16 | If a concurrency scope is available (e.g. `supervised`), or there are multiple resources to allocate, consider using the
17 | approach described below, to avoid creating an additional syntactical scope.
18 |
19 | Alternatively, you can use `useInterruptibly`, where the releasing might be interrupted, and which is equivalent to a
20 | `try`-`finally` block.
21 |
22 | ```{warning}
23 | To properly release resources when the entire application is interrupted, make sure to use [`OxApp`](oxapp.md) as the
24 | application's main entry point.
25 | ```
26 |
27 | ## Within a concurrency scope
28 |
29 | Resources can be allocated within a concurrency scope. They will be released in reverse acquisition order, after all
30 | forks started within the scope finish (but before the scope completes). E.g.:
31 |
32 | ```scala mdoc:compile-only
33 | import ox.{supervised, useInScope}
34 |
35 | case class MyResource(c: Int)
36 |
37 | def acquire(c: Int): MyResource =
38 | println(s"acquiring $c ...")
39 | MyResource(c)
40 |
41 | def release(resource: MyResource): Unit =
42 | println(s"releasing ${resource.c} ...")
43 |
44 | supervised {
45 | val resource1 = useInScope(acquire(10))(release)
46 | val resource2 = useInScope(acquire(20))(release)
47 | println(s"Using $resource1 ...")
48 | println(s"Using $resource2 ...")
49 | }
50 | ```
51 |
52 | ### Release-only
53 |
54 | You can also register resources to be released (without acquisition logic), before the scope completes:
55 |
56 | ```scala mdoc:compile-only
57 | import ox.{supervised, releaseAfterScope}
58 |
59 | case class MyResource(c: Int)
60 |
61 | def release(resource: MyResource): Unit =
62 | println(s"releasing ${resource.c} ...")
63 |
64 | supervised {
65 | val resource1 = MyResource(10)
66 | releaseAfterScope(release(resource1))
67 | println(s"Using $resource1 ...")
68 | }
69 | ```
70 |
--------------------------------------------------------------------------------
/doc/utils/utility.md:
--------------------------------------------------------------------------------
1 | # Utilities
2 |
3 | In addition to concurrency, error handling and resiliency features, Ox includes some utility methods, which make writing
4 | direct-style Scala code more convenient. When possible, these are `inline` methods taking `inline` parameters, hence
5 | incurring no runtime overhead.
6 |
7 | Top-level methods:
8 |
9 | * `uninterruptible { ... }` evaluates the given code block making sure it can't be interrupted
10 | * `sleep(scala.concurrent.Duration)` blocks the current thread/fork for the given duration; same as `Thread.sleep`, but
11 | using Scala's `Duration`
12 |
13 | Extension functions on arbitrary expressions:
14 |
15 | * `.discard` extension method evaluates the given code block and discards its result, avoiding "discarded non-unit
16 | value" warnings
17 | * `.pipe(f)` applies `f` to the value of the expression and returns the result; useful for chaining operations
18 | * `.tap(f)` applies `f` to the value of the expression and returns the original value; useful for side-effecting
19 | operations
20 | * `.tapException(Throwable => Unit)` and `.tapNonFatalException(Throwable => Unit)` allow running the provided
21 | side-effecting callback when the expression throws an exception
22 |
23 | Extension functions on `scala.concurrent.Future[T]`:
24 |
25 | * `.get(): T` blocks the current thread/fork until the future completes; returns the successful value of the future, or
26 | throws the exception with which it failed
27 |
28 |
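29 | For example, a minimal sketch exercising the utilities described above:
30 |
31 | ```scala mdoc:compile-only
32 | import ox.*
33 | import scala.concurrent.{ExecutionContext, Future}
34 | import scala.concurrent.duration.*
35 |
36 | given ExecutionContext = ExecutionContext.global
37 |
38 | sleep(10.millis) // block the current thread/fork for 10ms
39 | val n = 21.pipe(_ * 2).tap(v => println(s"computed: $v")) // n == 42
40 | Future(n + 1).get().discard // block until the future completes, then discard its result
41 | ```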
--------------------------------------------------------------------------------
/doc/watch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sphinx-autobuild . _build/html
3 |
--------------------------------------------------------------------------------
/examples/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |
4 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
5 |         <encoder>
6 |             <pattern>%d{HH:mm:ss.SSS}%boldYellow(%replace( [%X{cid}] ){' \[\] ', ' '})[%thread] %-5level %logger{5} - %msg%n%rEx</pattern>
7 |         </encoder>
8 |     </appender>
9 |
10 |     <root level="INFO">
11 |         <appender-ref ref="STDOUT"/>
12 |     </root>
13 |
14 | </configuration>
--------------------------------------------------------------------------------
/examples/src/test/scala/ox/crawler/Http.scala:
--------------------------------------------------------------------------------
1 | package ox.crawler
2 |
3 | type Host = String
4 | case class Url(host: Host, path: String)
5 |
6 | trait Http:
7 | def get(url: Url): String
8 |
--------------------------------------------------------------------------------
/examples/src/test/scala/ox/crawler/test/CrawlerTest.scala:
--------------------------------------------------------------------------------
1 | package ox.crawler.test
2 |
3 | import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import org.scalatest.time.{Millis, Seconds, Span}
7 | import ox.discard
8 | import ox.crawler.Crawler
9 |
10 | class CrawlerTest extends AnyFlatSpec with Matchers with CrawlerTestData with ScalaFutures with IntegrationPatience:
11 |
12 | override implicit val patienceConfig: PatienceConfig =
13 | PatienceConfig(
14 | timeout = scaled(Span(60, Seconds)),
15 | interval = scaled(Span(150, Millis))
16 | )
17 |
18 | for testData <- testDataSets do
19 | it should s"crawl a test data set ${testData.name}" in {
20 | import testData.*
21 |
22 | val t = timed {
23 | (Crawler.crawl(startingUrl, url => http(url), parseLinks) should be(expectedCounts)).discard
24 | }
25 |
26 | shouldTakeMillisMin.foreach(m => t should be >= (m))
27 | shouldTakeMillisMax.foreach(m => t should be <= (m))
28 | }
29 | end for
30 | end CrawlerTest
31 |
--------------------------------------------------------------------------------
/examples/src/test/scala/ox/main.scala:
--------------------------------------------------------------------------------
1 | package ox
2 |
3 | import org.slf4j.LoggerFactory
4 |
5 | import scala.concurrent.duration.*
6 |
7 | @main def test1 =
8 | val log = LoggerFactory.getLogger("test1")
9 | val r = supervised {
10 | val f1 = fork {
11 | sleep(1.second)
12 | log.info("f1 done")
13 | 5
14 | }
15 | val f2 = fork {
16 | sleep(2.seconds)
17 | log.info("f2 done")
18 | 6
19 | }
20 | f1.join() + f2.join()
21 | }
22 | log.info("result: " + r)
23 | end test1
24 |
--------------------------------------------------------------------------------
/examples/src/test/scala/ox/ratelimiter/test/RateLimiterTest.scala:
--------------------------------------------------------------------------------
1 | package ox.ratelimiter.test
2 |
3 | import org.scalatest.concurrent.{Eventually, IntegrationPatience}
4 | import org.scalatest.flatspec.AnyFlatSpec
5 | import org.scalatest.matchers.should.Matchers
6 | import ox.ratelimiter.RateLimiter
7 |
8 | import java.time.LocalTime
9 | import java.util.concurrent.atomic.AtomicReference
10 | import scala.concurrent.duration.*
11 |
12 | class RateLimiterTest extends AnyFlatSpec with Matchers with Eventually with IntegrationPatience:
13 | it should "rate limit futures scheduled upfront" in {
14 | RateLimiter.withRateLimiter(2, 1.second) { rateLimiter =>
15 | val complete = new AtomicReference(Vector.empty[Int])
16 | for i <- 1 to 7 do
17 | rateLimiter.runLimited {
18 | println(s"${LocalTime.now()} Running $i")
19 | complete.updateAndGet(_ :+ i)
20 | }
21 |
22 | eventually {
23 | complete.get() should have size (7)
24 | complete.get().slice(0, 2).toSet should be(Set(1, 2))
25 | complete.get().slice(2, 4).toSet should be(Set(3, 4))
26 | complete.get().slice(4, 6).toSet should be(Set(5, 6))
27 | complete.get().slice(6, 7).toSet should be(Set(7))
28 | }
29 | }
30 | }
31 | end RateLimiterTest
32 |
--------------------------------------------------------------------------------
/examples/src/test/scala/ox/sockets/socket.scala:
--------------------------------------------------------------------------------
1 | package ox.sockets
2 |
3 | trait Socket:
4 | def accept(timeout: Long): ConnectedSocket
5 |
6 | trait ConnectedSocket:
7 | def send(msg: String): Unit
8 | def receive(timeout: Long): String
9 |
10 | class SocketTerminatedException extends Exception
11 |
--------------------------------------------------------------------------------
/examples/src/test/scala/ox/supervise/model.scala:
--------------------------------------------------------------------------------
1 | package ox.supervise
2 |
3 | trait RemoteQueue:
4 | def read(): String
5 | def close(): Unit
6 |
7 | trait QueueConnector:
8 | def connect: RemoteQueue
9 |
--------------------------------------------------------------------------------
/flow-reactive-streams/src/main/scala/ox/flow/reactive/flowReactiveStreamsExtension.scala:
--------------------------------------------------------------------------------
1 | package ox.flow.reactive
2 |
3 | import ox.flow.Flow
4 | import org.reactivestreams.Publisher
5 | import ox.Ox
6 | import ox.channels.BufferCapacity
7 | import org.reactivestreams.FlowAdapters
8 |
9 | extension [A](flow: Flow[A])
10 | /** This variant returns an implementation of `org.reactivestreams.Publisher`, as opposed to `java.util.concurrent.Flow.Publisher` which
11 | * is supported in the core module.
12 | *
13 | * @see
14 | * [[Flow.toPublisher]]
15 | */
16 | def toReactiveStreamsPublisher(using Ox, BufferCapacity): Publisher[A] =
17 | FlowAdapters.toPublisher(flow.toPublisher)
18 | end extension
19 |
20 | object FlowReactiveStreams:
21 | /** This variant accepts an implementation of `org.reactivestreams.Publisher`, as opposed to `java.util.concurrent.Flow.Publisher`
22 | * which is supported in the core module.
23 | *
24 | * @see
25 | * [[Flow.fromPublisher]]
26 | */
27 | def fromPublisher[T](p: Publisher[T])(using BufferCapacity): Flow[T] = Flow.fromPublisher(FlowAdapters.toFlowPublisher(p))
28 | end FlowReactiveStreams
29 |
--------------------------------------------------------------------------------
/generated-doc/out/.gitignore:
--------------------------------------------------------------------------------
1 | _build
2 | _build_html
--------------------------------------------------------------------------------
/generated-doc/out/.python-version:
--------------------------------------------------------------------------------
1 | 3.12
2 |
--------------------------------------------------------------------------------
/generated-doc/out/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = python -msphinx
7 | SPHINXPROJ = ox
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/generated-doc/out/adr/0002-retries.md:
--------------------------------------------------------------------------------
1 | # 2. Retries
2 |
3 | Date: 2023-11-30
4 |
5 | ## Context
6 |
7 | How should the [retries API](../scheduling/retries.md) be implemented in terms of:
8 | - developer friendliness,
9 | - supported ways of representing the operation under retry,
10 | - possibly infinite retries.
11 |
12 | ## Decision
13 |
14 | We're using a single, unified syntax to retry an operation:
15 | ```scala
16 | retry(operation)(policy)
17 | ```
18 | so that the developers don't need to wonder which variant to use.
19 |
20 | The operation can be a function returning a result directly, or wrapped in a `Try` or `Either`. Therefore, there are three overloaded variants of the `retry` function.
21 |
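22 | For illustration, the three shapes side by side (`remoteCall`, `remoteCallEither` and `policy` are hypothetical):
23 |
24 | ```scala
25 | retry(remoteCall())(policy)       // a function returning a result directly; errors are exceptions
26 | retry(Try(remoteCall()))(policy)  // a result wrapped in a Try
27 | retry(remoteCallEither())(policy) // a result wrapped in an Either
28 | ```
29 |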
22 | For possibly infinite retries, we're using tail recursion to be stack safe. This comes at the cost of some code duplication in the retry logic, but is still more readable and easier to follow than a `while` loop with `var`s for passing the state.
23 |
--------------------------------------------------------------------------------
/generated-doc/out/adr/0003-why-source-operators-do-not-throw.md:
--------------------------------------------------------------------------------
1 | # 3. Why source operators do not throw
2 |
3 | Date: 2024-01-25
4 |
5 | ## Context
6 |
7 | Revisiting ADR #1, what should happen when an error is encountered when processing channel elements? Should it be propagated downstream or re-thrown?
8 |
9 | ## Decision
10 |
11 | In addition to what's mentioned in ADR #1, operators don't throw, but propagate errors, because we want to allow a throw-free coding style. When an error is propagated, every daemon operator thread shuts down, and we end the scope gracefully.
12 |
13 | Additionally, we assume that data only flows downstream - and this includes errors.
14 |
--------------------------------------------------------------------------------
/generated-doc/out/adr/0004-channels-safe-unsafe-operations.md:
--------------------------------------------------------------------------------
1 | # 4. Channels: safe/unsafe Operations
2 |
3 | Date: 2024-02-28
4 |
5 | ## Context
6 |
7 | Channel operations such as `send`, `receive`, `select`, `close` etc. might fail because a channel is closed. How should
8 | this be signalled to the user?
9 |
10 | ## Decision
11 |
12 | We decided to have two variants of the methods:
13 |
14 | * default: `send`, `receive` etc., which throw an exception, when the channel is closed
15 | * safe: `sendSafe`, `receiveSafe` etc., which return a `ChannelClosed` value, when the channel is closed
16 |
17 | The "safe" variants are more performant: no stack trace is created, when the channel is closed. They are used by all
18 | channel combinators (such as `map`, `filter` etc.), to detect and propagate the errors downstream.
19 |
20 | ### Why not `Either` or `Try`?
21 |
22 | To avoid allocations on each operation (e.g. receive). Channels might be on the "hot path" and they might be important
23 | for performance. Union types provide a nice alternative here.
24 |
25 | Even with `Either`, though, if e.g. `send` had a signature `Either[ChannelClosed, Unit]`, discarding the result would
26 | at most be a warning (not in all cases), so potentially an error might go unnoticed.
27 |
28 | ### Why is the default to throw?
29 |
30 | Let's consider `send`. If the default would be `send(t: T): ChannelClosed | Unit`, with an additional exception-throwing
31 | variant `sendUnsafe(t: T): Unit`, then the API would be quite surprising.
32 |
33 | A new user coming to the library could just call `send` / `receive`. The compiler might warn them in some cases
34 | that they discard the non-unit result of `send`, but (a) would they pay attention to those warnings, and (b) would they
35 | get them in the first place (this type of compiler warning isn't detected in 100% of cases)?
36 |
37 | In other words - it would be quite easy to mistakenly discard the results of `send`, so a default which guards against
38 | that (by throwing exceptions) is better; the "safe" version can always be used intentionally if that's what's
39 | needed.
40 |
41 | ### Update 17/04/2024
42 |
43 | The `...Safe` operations got renamed to `...OrClosed` or `...OrError`, as they can still throw `InterruptedException`s.
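44 |
45 | For illustration, a sketch of the two styles using the current names (`c` stands for some `Channel[Int]`):
46 |
47 | ```scala
48 | c.send(1)               // throws ChannelClosedException when the channel is closed
49 | c.sendOrClosed(2) match // returns a ChannelClosed value instead of throwing
50 |   case _: ChannelClosed => println("channel closed")
51 |   case _                => () // sent successfully
52 | ```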
44 |
--------------------------------------------------------------------------------
/generated-doc/out/adr/0005-application-errors.md:
--------------------------------------------------------------------------------
1 | # 5. Application errors
2 |
3 | Date: 2024-03-05
4 |
5 | ## Context
6 |
7 | In some cases, it's useful to treat some return values as errors, which should cause the enclosing scope to end.
8 |
9 | ## Decision
10 |
11 | For computation combinators, which include `par`, `race` and `supervised`, we decided to introduce the concept of
12 | application errors. These are values of a shape defined by an `ErrorMode`, which are specially treated by Ox - if
13 | such a value represents an error, the enclosing scope ends.
14 |
15 | Some design limitations include:
16 |
17 | * we want normal scopes to remain unchanged
18 | * methods requiring a concurrency scope (that is, `using Ox`) should be callable from the new scope
19 | * all forks that might report application errors, must be constrained to return the same type of application errors
20 | * computation combinators, such as `par`, should have a single implementation, both when using application errors and
21 | when using exceptions only
22 |
23 | Taking this into account, we separate the `Ox` capability, which allows starting forks, and `OxError`, which
24 | additionally allows reporting application errors. An inheritance hierarchy, `OxError <: Ox` ensures that we can call
25 | methods requiring the `Ox` capability if `OxError` is available, but not the other way round.
26 |
27 | Finally, introducing a special `forkError` method allows us to require that it is run within a `supervisedError` scope
28 | and that it must return a value of the correct shape.
29 |
--------------------------------------------------------------------------------
/generated-doc/out/adr/0007-supervised-unsupervised-scopes.md:
--------------------------------------------------------------------------------
1 | # 7. Supervised & unsupervised scopes
2 |
3 | Date: 2024-04-17
4 |
5 | ## Context
6 |
7 | Originally, Ox had only `scoped` which created non-supervised scopes, that is errors were only discovered via explicit
8 | joining. This was later changed by introducing `supervised` and `unsupervised` scopes, where the former would end
9 | immediately when any fork failed, and the latter would not. However, `supervised` scopes have an overhead: they create
10 | an additional fork, in which the scope's main body is run. Is it possible to avoid this extra fork?
11 |
12 | ## Decision
13 |
14 | In short: no.
15 |
16 | An alternate design would be to store the thread that created the scope as part of the supervisor and, when any
17 | exception occurs, interrupt that thread so that it would discover the exception. However, this would be prone to
18 | interruption-races with external interruptions of that main thread. Even if we included an additional flag, specifying
19 | if the interruption happened because the scope ends, it would still be possible for an external interrupt to go
20 | unnoticed (if it happened at the same time, as the internal one). Even though unlikely, such a design would be fragile,
21 | hence we are keeping the current implementation.
22 |
23 | ## Consequences
24 |
25 | To make our design more type-safe, we split the `Ox` capability into `OxUnsupervised` (allowing only unsupervised
26 | forks), and `Ox`.
27 |
--------------------------------------------------------------------------------
/generated-doc/out/adr/0008-scheduled-repeat-retry.md:
--------------------------------------------------------------------------------
1 | # 8. Scheduled, repeat, retry
2 |
3 | Date: 2024-07-09
4 |
5 | ## Context
6 |
7 | How should the [retries](../scheduling/retries.md) and [repeat](../scheduling/repeat.md) APIs share a common implementation?
8 |
9 | ## Decision
10 |
11 | We're introducing [scheduled](../scheduling/scheduled.md) as a common API for both retries and repeats.
12 |
13 | In addition, the `Schedule` trait and its implementations are decoupled from the retry DSL, so that they can be used for repeating as well.
14 | The `retry` API remains unchanged, but it now uses `scheduled` underneath.
15 |
16 | Also, the `repeat` functions have been added as sugar for `scheduled`, with a DSL focused on repeating.
17 |
18 | The main difference between `retry` and `repeat` is the interpretation of the duration provided by the `Schedule` (delay vs. interval).
19 |
--------------------------------------------------------------------------------
/generated-doc/out/flake.lock:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "flake-utils": {
4 | "inputs": {
5 | "systems": "systems"
6 | },
7 | "locked": {
8 | "lastModified": 1710146030,
9 | "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
10 | "owner": "numtide",
11 | "repo": "flake-utils",
12 | "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
13 | "type": "github"
14 | },
15 | "original": {
16 | "owner": "numtide",
17 | "repo": "flake-utils",
18 | "type": "github"
19 | }
20 | },
21 | "nixpkgs": {
22 | "locked": {
23 | "lastModified": 1715534503,
24 | "narHash": "sha256-5ZSVkFadZbFP1THataCaSf0JH2cAH3S29hU9rrxTEqk=",
25 | "owner": "nixos",
26 | "repo": "nixpkgs",
27 | "rev": "2057814051972fa1453ddfb0d98badbea9b83c06",
28 | "type": "github"
29 | },
30 | "original": {
31 | "owner": "nixos",
32 | "ref": "nixos-unstable",
33 | "repo": "nixpkgs",
34 | "type": "github"
35 | }
36 | },
37 | "root": {
38 | "inputs": {
39 | "flake-utils": "flake-utils",
40 | "nixpkgs": "nixpkgs"
41 | }
42 | },
43 | "systems": {
44 | "locked": {
45 | "lastModified": 1681028828,
46 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
47 | "owner": "nix-systems",
48 | "repo": "default",
49 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
50 | "type": "github"
51 | },
52 | "original": {
53 | "owner": "nix-systems",
54 | "repo": "default",
55 | "type": "github"
56 | }
57 | }
58 | },
59 | "root": "root",
60 | "version": 7
61 | }
62 |
--------------------------------------------------------------------------------
/generated-doc/out/flake.nix:
--------------------------------------------------------------------------------
1 | {
2 | description = "Python shell flake";
3 |
4 | inputs.nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
5 | inputs.flake-utils.url = "github:numtide/flake-utils";
6 |
7 | outputs = { self, nixpkgs, flake-utils, ... }:
8 | flake-utils.lib.eachDefaultSystem (system:
9 | let
10 | pkgs = nixpkgs.legacyPackages.${system};
11 | python = pkgs.python3.withPackages (ps: with ps; [
12 | pip
13 | ]);
14 | in
15 | {
16 | devShell = pkgs.mkShell {
17 | buildInputs = [ python ];
18 |
19 | shellHook = ''
20 | # Create a Python virtual environment and activate it
21 | python -m venv .env
22 | source .env/bin/activate
23 | # Install the Python dependencies from requirements.txt
24 | if [ -f requirements.txt ]; then
25 | pip install -r requirements.txt
26 | fi
27 | '';
28 | };
29 | }
30 | );
31 | }
32 |
--------------------------------------------------------------------------------
/generated-doc/out/high-level-concurrency/collections.md:
--------------------------------------------------------------------------------
1 | # Parallelize collection operations
2 |
3 | Ox contains a number of methods which allow parallelizing operations on collections.
4 |
5 | ## mapPar
6 |
7 | ```scala
8 | import ox.mapPar
9 |
10 | val input: List[Int] = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
11 |
12 | val result: List[Int] = input.mapPar(4)(_ + 1)
13 | // (2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
14 | ```
15 |
16 | If any transformation fails, the others are interrupted and `mapPar` re-throws the exception that was
17 | thrown by the transformation. The parallelism argument
18 | limits how many concurrent forks are going to process the collection.
19 |
20 | ## foreachPar
21 |
22 | ```scala
23 | import ox.foreachPar
24 |
25 | val input: List[Int] = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
26 |
27 | input.foreachPar(4)(i => println(i))
28 | // prints each element of the list, might be in any order
29 | ```
30 |
31 | Similar to `mapPar` but doesn't return anything.
32 |
33 | ## filterPar
34 |
35 | ```scala
36 | import ox.filterPar
37 |
38 | val input: List[Int] = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
39 |
40 | val result: List[Int] = input.filterPar(4)(_ % 2 == 0)
41 | // (2, 4, 6, 8, 10)
42 | ```
43 |
44 | Filters the collection in parallel, using the provided predicate. If evaluating any predicate fails, the exception is
45 | re-thrown, and the other forks evaluating predicates are interrupted.
46 |
47 | ## collectPar
48 |
49 | ```scala
50 | import ox.collectPar
51 |
52 | val input: List[Int] = List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
53 |
54 | val result: List[Int] = input.collectPar(4) {
55 | case i if i % 2 == 0 => i + 1
56 | }
57 | // (3, 5, 7, 9, 11)
58 | ```
59 |
60 | Similar to `mapPar` but only applies transformation to elements for which the partial function is defined. Other
61 | elements are skipped.
62 |
--------------------------------------------------------------------------------
/generated-doc/out/high-level-concurrency/par.md:
--------------------------------------------------------------------------------
1 | # Running computations in parallel
2 |
3 | A number of computations can be run in parallel using the `par` method, for example:
4 |
5 | ```scala
6 | import ox.{par, sleep}
7 | import scala.concurrent.duration.*
8 |
9 | def computation1: Int =
10 | sleep(2.seconds)
11 | 1
12 |
13 | def computation2: String =
14 | sleep(1.second)
15 | "2"
16 |
17 | val result: (Int, String) = par(computation1, computation2)
18 | // (1, "2")
19 | ```
20 |
21 | If any of the computations fails, the others are interrupted. In such a case, `par` waits until all branches complete
22 | and then re-throws the exception.
23 |
24 | It's also possible to run a sequence of computations given as a `Seq[() => T]` in parallel, optionally limiting the
25 | parallelism using `parLimit`:
26 |
27 | ```scala
28 | import ox.{parLimit, sleep}
29 | import scala.concurrent.duration.*
30 |
31 | def computation(n: Int): Int =
32 | sleep(1.second)
33 | println(s"Running $n")
34 | n * 2
35 |
36 | val computations = (1 to 20).map(n => () => computation(n))
37 | val result: Seq[Int] = parLimit(5)(computations)
38 | // (1, "2")
39 | ```
40 |
41 | ## Using application errors
42 |
43 | Some values might be considered as application errors. If a computation returns such an error, other computations are
44 | interrupted, same as when an exception is thrown. The error is then returned by the `par` method.
45 |
46 | It's possible to use an arbitrary [error mode](../basics/error-handling.md) by providing it as the initial argument to `par`.
47 | Alternatively, a built-in version using `Either` is available as `parEither`:
48 |
49 | ```scala
50 | import ox.{parEither, sleep}
51 | import scala.concurrent.duration.*
52 |
53 | val result = parEither(
54 | {
55 | sleep(200.millis)
56 | Right("ok")
57 | }, {
58 | sleep(100.millis)
59 | Left(-1)
60 | }
61 | )
62 |
63 | // result is Left(-1), the other branch is interrupted
64 | ```
65 |
--------------------------------------------------------------------------------
/generated-doc/out/high-level-concurrency/race.md:
--------------------------------------------------------------------------------
1 | # Race two computations
2 |
3 | A number of computations can be raced with each other using the `raceSuccess` method, for example:
4 |
5 | ```scala
6 | import ox.{raceSuccess, sleep}
7 | import scala.concurrent.duration.*
8 |
9 | def computation1: Int =
10 | sleep(2.seconds)
11 | 1
12 |
13 | def computation2: Int =
14 | sleep(1.second)
15 | 2
16 |
17 | val result: Int = raceSuccess(computation1, computation2)
18 | // 2
19 | ```
20 |
21 | Losing computations are interrupted. `raceSuccess` waits until all branches finish; this includes the losing ones,
22 | which might take a while to clean up after interruption.
23 |
24 | It is also possible to race a sequence of computations, given as `Seq[() => T]`.
25 |
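26 | For example, a minimal sketch, where `sleep` slows each branch down by a different amount:
27 |
28 | ```scala
29 | import ox.{raceSuccess, sleep}
30 | import scala.concurrent.duration.*
31 |
32 | val result: Int = raceSuccess((1 to 3).map(n => () => { sleep(n.seconds); n }))
33 | // 1
34 | ```
35 |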
26 | ## Race variants
27 |
28 | * `raceSuccess` returns the first success, or if all fail, re-throws the first exception
29 | * `raceResult` returns the result of the first computation to complete in any way: its value if it succeeded, or its
30 | re-thrown exception if it failed (the first computation which finishes in any way is the "winner")
31 |
32 | ## Using application errors
33 |
34 | Some values might be considered as application errors. If a computation returns such an error, `raceSuccess` continues waiting
35 | if there are other computations in progress, same as when an exception is thrown. Ultimately, if no result is successful,
36 | `raceSuccess` either throws the first exception, or the first application error that has been reported (whichever comes first).
37 |
38 | It's possible to use an arbitrary [error mode](../basics/error-handling.md) by providing it as the initial argument to `raceSuccess`.
39 | Alternatively, a built-in version using `Either` is available as `raceEither`:
40 |
41 | ```scala
42 | import ox.{raceEither, sleep}
43 | import scala.concurrent.duration.*
44 |
45 | raceEither({
46 | sleep(200.millis)
47 | Left(-1)
48 | }, {
49 | sleep(500.millis)
50 | Right("ok")
51 | }, {
52 | sleep(1.second)
53 | Right("also ok")
54 | })
55 | ```
56 |
57 | Here, the example returns `Right("ok")`; the first result is considered an error (a `Left`), and the third computation
58 | is cancelled.
59 |
60 |
--------------------------------------------------------------------------------
/generated-doc/out/high-level-concurrency/timeout.md:
--------------------------------------------------------------------------------
1 | # Timeout a computation
2 |
3 | ```scala
4 | import ox.{sleep, timeout}
5 | import scala.concurrent.duration.DurationInt, scala.util.Try
6 |
7 | def computation: Int =
8 | sleep(2.seconds)
9 | 1
10 |
11 | val result1: Try[Int] = Try(timeout(1.second)(computation)) // failure: TimeoutException
12 | val result2: Try[Int] = Try(timeout(3.seconds)(computation)) // success: 1
13 | ```
14 |
15 | A variant, `timeoutOption`, doesn't throw a `TimeoutException` on timeout, but returns `None` instead.
16 |
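17 | For example (reusing `computation` and the imports from above):
18 |
19 | ```scala
20 | import ox.timeoutOption
21 |
22 | val result3: Option[Int] = timeoutOption(1.second)(computation) // None
23 | ```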
--------------------------------------------------------------------------------
/generated-doc/out/info/community-support.md:
--------------------------------------------------------------------------------
1 | # Community & support
2 |
3 | ## Community
4 |
5 | If you have feedback, development ideas or critique, please head to our [community forum](https://softwaremill.community/c/ox/12)!
6 | Alternatively, you can create an issue or submit a pull request on [GitHub](https://github.com/softwaremill/ox).
7 |
8 | ## Sponsors
9 |
10 | Development and maintenance of Ox is sponsored by [SoftwareMill](https://softwaremill.com), a software development and consulting company.
11 | We help clients scale their business through software. Our areas of expertise include backends, distributed systems,
12 | machine learning and data analytics.
13 |
14 | [SoftwareMill](https://softwaremill.com)
15 |
16 | ## Commercial Support
17 |
18 | We offer commercial support for Ox and related technologies, as well as development services.
19 | [Contact us](https://softwaremill.com/contact/) to learn more about our offer!
--------------------------------------------------------------------------------
/generated-doc/out/info/dependency.md:
--------------------------------------------------------------------------------
1 | # Dependency (sbt, scala-cli, etc.)
2 |
3 | To use ox core in your project, add:
4 |
5 | ```scala
6 | // sbt dependency
7 | "com.softwaremill.ox" %% "core" % "0.6.0"
8 |
9 | // scala-cli dependency
10 | //> using dep com.softwaremill.ox::core:0.6.0
11 | ```
12 |
13 | Ox core depends only on the Java [jox](https://github.com/softwaremill/jox) project, where channels are implemented. There are no other direct or transitive dependencies.
14 |
15 | Integration modules have separate dependencies.
--------------------------------------------------------------------------------
/generated-doc/out/info/scope.md:
--------------------------------------------------------------------------------
1 | # Project scope
2 |
3 | The areas that we'd like to cover with Ox are:
4 |
5 | * concurrency: developer-friendly structured concurrency, high-level concurrency operators, safe low-level primitives,
6 | communication between concurrently running computations
7 | * error management: retries, timeouts, a safe approach to error propagation, safe resource management
8 | * scheduling & timers
9 | * resiliency: circuit breakers, bulkheads, rate limiters, backpressure
10 |
11 | All of the above should allow for observability of the orchestrated business logic. We aim to enable writing simple,
12 | expression-oriented code in functional style. We'd like to keep the syntax overhead to a minimum, preserving
13 | developer-friendly stack traces, and without compromising performance.
14 |
15 | Some of the above are already addressed in the API, some are coming up in the future. We'd love your help in shaping the
16 | project!
17 |
18 | ## Inspiration & building blocks
19 |
20 | * [Project Loom](https://openjdk.org/projects/loom/) (virtual threads)
21 | * structured concurrency Java APIs ([JEP 505](https://openjdk.org/jeps/505))
22 | * scoped values ([JEP 506](https://openjdk.org/jeps/506))
23 | * fast, scalable [Go](https://golang.org)-like channels using [jox](https://github.com/softwaremill/jox)
24 | * the [Scala 3](https://www.scala-lang.org) programming language
25 |
--------------------------------------------------------------------------------
/generated-doc/out/integrations/mdc-logback.md:
--------------------------------------------------------------------------------
1 | # Inheritable MDC using Logback
2 |
3 | Dependency:
4 |
5 | ```scala
6 | "com.softwaremill.ox" %% "mdc-logback" % "0.6.0"
7 | ```
8 |
9 | Ox provides support for setting inheritable MDC (mapped diagnostic context) values, when using the [Logback](https://logback.qos.ch)
10 | logging library. Normally, values set using `MDC.put` aren't inherited across (virtual) threads, which includes forks
11 | created in concurrency contexts.
12 |
13 | Inheritable values are especially useful e.g. when setting a correlation id in an HTTP request interceptor, or at any
14 | entrypoint to the application. Such correlation id values can then be added automatically to each log message, provided
15 | the appropriate log encoder pattern is used.
16 |
17 | To enable using inheritable MDC values, the application's code should call `InheritableMDC.init` as soon as possible.
18 | The best place would be the application's entrypoint (the `main` method).
19 |
20 | Once this is done, inheritable MDC values can be set in a scoped & structured manner using `InheritableMDC.supervisedWhere`
21 | and variants.
22 |
23 | As inheritable MDC values use a [`ForkLocal`](../structured-concurrency/fork-local.md) under the hood, their usage
24 | restrictions apply: outer (wider) concurrency scopes should not be used to create forks within the blocks where the
25 | values are set. Only newly created scopes, or the provided scope, can be used to create forks. That's why the
26 | `supervisedWhere`, `unsupervisedWhere` and `supervisedErrorWhere` methods are provided.
27 |
28 | "Normal" MDC usage is not affected. That is, values set using `MDC.put` are not inherited, and are only available in
29 | the thread where they are set.
30 |
31 | For example:
32 |
33 | ```scala
34 | import org.slf4j.MDC
35 |
36 | import ox.fork
37 | import ox.logback.InheritableMDC
38 |
39 | InheritableMDC.supervisedWhere("a" -> "1", "b" -> "2") {
40 | MDC.put("c", "3") // not inherited
41 |
42 | fork {
43 | MDC.get("a") // "1"
44 | MDC.get("b") // "2"
45 | MDC.get("c") // null
46 | }.join()
47 |
48 | MDC.get("a") // "1"
49 | MDC.get("b") // "2"
50 | MDC.get("c") // "3"
51 | }
52 | ```
53 |
--------------------------------------------------------------------------------
/generated-doc/out/integrations/otel-context.md:
--------------------------------------------------------------------------------
1 | # Propagating OpenTelemetry context
2 |
3 | Dependency:
4 |
5 | ```scala
6 | "com.softwaremill.ox" %% "otel-context" % "0.6.0"
7 | ```
8 |
9 | When using the default OpenTelemetry context-propagation mechanisms, which rely on thread-local storage, the context
10 | will not be propagated across virtual thread boundaries, e.g. when creating new forks as part of
11 | [`supervised`](../structured-concurrency/fork-join.md) scopes. This might lead to spans not being properly correlated
12 | into traces, or metrics without the appropriate context.
13 |
14 | To fix this problem, the context must be propagated whenever a new virtual thread is created. One way to achieve this
15 | is by using a custom thread factory, provided by this module - `PropagatingVirtualThreadFactory`. It can be set
16 | for the whole app when using [`OxApp`](../utils/oxapp.md), or manually through `oxThreadFactory`:
17 |
18 | ```scala
19 | import ox.*
20 | import ox.otel.context.PropagatingVirtualThreadFactory
21 |
22 | object MyApp extends OxApp:
23 | override def settings: OxApp.Settings = OxApp.Settings.Default.copy(
24 | threadFactory = Some(PropagatingVirtualThreadFactory())
25 | )
26 |
27 | def run(args: Vector[String])(using Ox): ExitCode = ExitCode.Success
28 | ```
--------------------------------------------------------------------------------
/generated-doc/out/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=python -msphinx
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | set SPHINXPROJ=ox
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed,
20 | echo.then set the SPHINXBUILD environment variable to point to the full
21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the
22 | echo.Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/generated-doc/out/other/dictionary.md:
--------------------------------------------------------------------------------
1 | # Dictionary
2 |
3 | How we use various terms throughout the codebase and the documentation (or at least try to):
4 |
5 | Scopes:
6 | * **concurrency scope**: either `supervised` (default), `supervisedError` (permitting application errors),
7 | or `unsupervised`
8 | * scope **body**: the code block passed to a concurrency scope (the `supervised`, `supervisedError` or `unsupervised`
9 | method)
10 |
11 | Types of forks:
12 | * supervised / unsupervised
13 | * daemon / user
14 | * optionally, recognizing application errors
15 |
16 | Fork lifecycle:
17 | * within scopes, asynchronously running **forks** can be **started**
18 | * after being started a fork is **running**
19 | * then, forks **complete**: either a fork **succeeds** with a value, or a fork **fails** with an exception
20 | * external **cancellation** (`Fork.cancel()`) interrupts the fork and waits until it completes; interruption uses
21 | JVM's mechanism of injecting an `InterruptedException`
22 | * forks are **supervised** if they are run in a `supervised` scope, and not explicitly unsupervised (that is, started
23 | using `forkUnsupervised` or `forkCancellable`)
24 |
25 | Scope lifecycle:
26 | * a scope **ends**: when unsupervised, the scope's body is entirely evaluated; when supervised, all user (non-daemon) &
27 | supervised forks complete successfully, or at least one user/daemon supervised fork fails, or an application error
28 | is reported. When the scope ends, all forks that are still running are cancelled
29 | * scope **completes**, once all forks complete and finalizers are run; then, the `supervised`, `supervisedError` or
30 | `unsupervised` method returns.
31 |
32 | Errors:
33 | * fork **failure**: when a fork fails with an exception
34 | * **application error**: forks might successfully complete with values which are considered application-level errors;
35 | such values are reported to the enclosing scope and cause the scope to end
36 |
37 | Other:
38 | * **computation combinator**: a method which takes user-provided functions and manages their execution, e.g. using
39 | concurrency, interruption, and appropriately handling errors; examples include `par`, `race`, `retry`, `timeout`
40 |
41 | Channels:
42 | * **values** can be **sent** to a channel, or **received** from a channel
43 |
44 | Flows:
45 | * when **run**, a flow **emits** **elements**
--------------------------------------------------------------------------------
/generated-doc/out/other/links.md:
--------------------------------------------------------------------------------
1 | # Blogs, videos, ...
2 |
3 | ## Blogs
4 |
5 | * [Prototype Loom-based concurrency API for Scala](https://softwaremill.com/prototype-loom-based-concurrency-api-for-scala/)
6 | * [Go-like channels using project Loom and Scala](https://softwaremill.com/go-like-channels-using-project-loom-and-scala/)
7 | * [Two types of futures](https://softwaremill.com/two-types-of-futures/)
8 | * [Supervision, Kafka and Java 21: what’s new in Ox](https://softwaremill.com/supervision-kafka-and-java-21-whats-new-in-ox/)
9 | * [Designing a (yet another) retry API](https://softwaremill.com/designing-a-yet-another-retry-api/)
10 | * [Handling errors in direct-style Scala](https://softwaremill.com/handling-errors-in-direct-style-scala/)
11 | * [Direct-style concurrent streaming](https://softwaremill.com/direct-style-concurrent-streaming/)
12 |
13 | ## Videos
14 |
15 | Coming up!
--------------------------------------------------------------------------------
/generated-doc/out/other/performance.md:
--------------------------------------------------------------------------------
1 | # Performance
2 |
3 | Some performance tests have been done around channels, see:
4 |
5 | * [Limits of Loom's performance](https://softwaremill.com/limits-of-looms-performance/)
6 | * [Go-like selects using jox channels in Java](https://softwaremill.com/go-like-selects-using-jox-channels-in-java/)
7 |
--------------------------------------------------------------------------------
/generated-doc/out/other/stability.md:
--------------------------------------------------------------------------------
1 | # Stability of modules
2 |
3 | The modules are categorized using the following levels:
4 |
5 | * **stable**: binary compatibility is guaranteed within a major version; adheres to semantic versioning
6 | * **stabilizing**: the API is mostly stable, with rare binary-incompatible changes possible in minor releases (only if necessary)
7 | * **experimental**: API can change significantly even in patch releases
8 |
9 | The major version is increased when there are binary-incompatible changes in **stable** modules.
10 |
11 | The minor version is increased when there are significant new features in **stable** modules (keeping compatibility), or binary-incompatible changes in **stabilizing** modules.
12 |
13 | The patch version is increased when there are binary-compatible changes in **stable** / **stabilizing** modules, any changes in **experimental** modules, or when a new module is added (e.g. a new integration).
14 |
15 | ## Main modules
16 |
17 | | Module | Level |
18 | |-----------------------|--------------|
19 | | core | stabilizing |
20 | | flow-reactive-streams | stabilizing |
21 | | kafka | experimental |
22 | | mdc-logback | experimental |
23 | | cron | experimental |
24 | | otel-context | experimental |
25 |
--------------------------------------------------------------------------------
/generated-doc/out/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx_rtd_theme==2.0.0
2 | sphinx==7.3.7
3 | sphinx-autobuild==2024.4.16
4 | myst-parser==2.0.0
5 |
--------------------------------------------------------------------------------
/generated-doc/out/scheduling/scheduled.md:
--------------------------------------------------------------------------------
1 | # Scheduled
2 |
3 | The `scheduled` functions allow running an operation according to a given schedule. For most use cases it is preferable
4 | to use `repeat`, `retry`, or a combination of both, as they provide a more convenient DSL. In fact, `retry`
5 | and `repeat` use `scheduled` internally.
6 |
7 | ## Operation definition
8 |
9 | Similarly to the `retry` and `repeat` APIs, the `operation` can be provided:
10 |
11 | * directly using a by-name parameter, i.e. `f: => T`
12 | * using a by-name `Either[E, T]`
13 | * or using an arbitrary [error mode](../basics/error-handling.md), accepting the computation in an `F`
14 | context: `f: => F[T]`.
15 |
16 | ## Configuration
17 |
18 | The `scheduled` config consists of:
19 |
20 | - a `Schedule`, which indicates how many times the `operation` should be run, provides a duration based on which
21 | a sleep is calculated and provides an initial delay if configured.
22 | - a `SleepMode`, which determines how the sleep between subsequent operations should be calculated:
23 | - `Interval` - default for `repeat` operations, where the sleep is calculated as the duration provided by schedule
24 | minus the duration of the last operation (can be negative, in which case the next operation occurs immediately).
25 | - `Delay` - default for `retry` operations, where the sleep is just the duration provided by schedule.
26 | - `afterAttempt` - a callback function that is invoked after each operation and determines if the scheduler loop
27 | should continue. Used for `onRetry`, `shouldContinueOnError`, `shouldContinueOnResult` and adaptive retries in
28 | `retry` API. Defaults to always continuing.
29 |
30 | ## Schedule
31 |
32 | See the [retry](retries.md) documentation for an overview of the available ways to create and modify a `Schedule`.
33 |
34 | ### Testing schedules
35 |
36 | Schedules can be tested by forcing the evaluation of `Schedule.intervals` and inspecting the resulting lazy list
37 | of intervals.
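38 |
39 | For example, a sketch (`fixedInterval` stands in for whichever `Schedule` factory is used):
40 |
41 | ```scala
42 | import ox.scheduling.Schedule
43 | import scala.concurrent.duration.*
44 |
45 | val schedule = Schedule.fixedInterval(100.millis)
46 | schedule.intervals.take(3).toList // List(100.millis, 100.millis, 100.millis)
47 | ```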
--------------------------------------------------------------------------------
/generated-doc/out/streaming/backpressure.md:
--------------------------------------------------------------------------------
1 | # Backpressure
2 |
3 | Channels and running flows are back-pressured. The `Channel.send` operation blocks until a receiving thread is available, or until there's enough space in the buffer. The in-flight processing capacity is hence bounded by the total size of the channel buffers.
4 |
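5 | For example, a minimal sketch demonstrating a blocked `send`:
6 |
7 | ```scala
8 | import ox.{fork, sleep, supervised}
9 | import ox.channels.Channel
10 | import scala.concurrent.duration.*
11 |
12 | supervised {
13 |   val c = Channel.buffered[Int](1)
14 |   fork {
15 |     c.send(1) // buffered immediately
16 |     c.send(2) // blocks until the receiver makes space in the buffer
17 |     c.done()
18 |   }
19 |   sleep(100.millis) // simulate a slow receiver
20 |   c.receive() // 1; unblocks the sender
21 |   c.receive() // 2
22 | }
23 | ```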
--------------------------------------------------------------------------------
/generated-doc/out/streaming/errors.md:
--------------------------------------------------------------------------------
1 | # Error propagation
2 |
3 | Errors are only propagated downstream, ultimately reaching the point where the flow is run / source is discharged. This leads to an exception being thrown there.
4 |
5 | When running flows, any [scopes](../structured-concurrency/fork-join.md) started as part of executing the flow's stages should have completed before the exception is re-thrown by the `run...` method.
6 |
7 | For channel-transforming operations, once the exception reaches the enclosing scope, any forks should become interrupted, including any that are still running and are handling the upstream processing stages.
8 |
9 | The approach we decided to take (only propagating errors downstream) is one of the two possible designs - with the other being re-throwing an exception when it's encountered. Please see [the respective ADR](../adr/0001-error-propagation-in-channels.md) for a discussion.
10 |
--------------------------------------------------------------------------------
/generated-doc/out/structured-concurrency/fork-local.md:
--------------------------------------------------------------------------------
1 | # Fork locals
2 |
3 | `ForkLocal`s replace usages of `ThreadLocal` when using Ox's forks and structural concurrency. They are useful to
4 | propagate auxiliary context, e.g. trace or correlation ids.
5 |
6 | A fork local needs to be first created with a default value. Then, its value can be set within a new [scope](fork-join.md).
7 | Usually, a new supervised scope is created, within which the `ForkLocal` is set to the given value - but only within that
8 | scope, as long as it's not completed. Hence, values are bound structurally:
9 |
10 | ```scala
11 | import ox.{ForkLocal, fork, supervised}
12 |
13 | val v = ForkLocal("a")
14 | supervised {
15 | println(v.get()) // "a"
16 | fork {
17 | v.supervisedWhere("x") {
18 | println(v.get()) // "x"
19 | fork {
20 | println(v.get()) // "x"
21 | }.join()
22 | }
23 | }.join()
24 | println(v.get()) // "a"
25 | }
26 | ```
27 |
28 | Scoped values propagate across nested scopes.
29 |
30 | ```{note}
31 | Due to the "structured" nature of setting a fork local's value, forks using external (wider) scopes should not be
32 | created within a block where a fork local is set. An attempt to do so will throw a
33 | `java.util.concurrent.StructureViolationException`.
34 | ```
35 |
36 | ## Creating helper functions which set fork locals
37 |
38 | If you're writing a helper function which sets a value of a fork local within a passed code block, you have to make
39 | sure that the code block doesn't accidentally capture the outer concurrency scope (leading to an exception on the
40 | first `fork`).
41 |
42 | This can be done by capturing the code block as a context function `Ox ?=> T`, so that any nested invocations of `fork`
43 | will use the provided instance, not the outer one. E.g.:
44 |
45 | ```scala
46 | def withSpan[T](spanName: String)(f: Ox ?=> T): T =
47 | val span = spanBuilder.startSpan(spanName)
48 | currentSpan.supervisedWhere(Some(span)) {
49 | try f
50 | finally span.end()
51 | }
52 | ```
53 |
54 | ## Implementation notes
55 |
56 | `ForkLocal`s are based on an immutable map passed via a `ThreadLocal` when a fork is started or a value is set. The
57 | implementation will instead rely on `ScopedValue`s, which are part of [JEP 506](https://openjdk.org/jeps/506), once
58 | both scoped values and structured concurrency are available as stable features in an LTS Java release.
--------------------------------------------------------------------------------
/generated-doc/out/structured-concurrency/index.md:
--------------------------------------------------------------------------------
1 | # What is structured concurrency?
2 |
3 | Structured concurrency is an approach where the lifetime of a thread is determined by the syntactic structure of the
4 | code.
5 |
6 | First introduced by [Martin Sústrik](https://250bpm.com/blog:71/) and later popularized by
7 | [Nathaniel J. Smith](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/),
8 | structured concurrency made its way into Python, Kotlin, Java and now Scala.
9 |
10 | The basic concept in structured concurrency is the scope, within which concurrently running threads of execution can be
11 | started. The scope only finishes once all threads started within it finish (either successfully, or with an error). Thus,
12 | it isn't possible to "leak" threads outside of a method. Threads become a method's implementation detail, rather
13 | than an effect.
14 |
15 | These characteristics make structured concurrency an ideal candidate to make concurrency safer in direct-style
16 | programming, while keeping blocking-like method calls. Structured concurrency enables local reasoning on the threading
17 | effects, which is also one of the prime tenets of functional programming!
18 |
19 | Ox extends the structured concurrency concepts with various forms of error handling, described in the following sections.
20 |
--------------------------------------------------------------------------------
/generated-doc/out/structured-concurrency/interruptions.md:
--------------------------------------------------------------------------------
1 | # Interruptions
2 |
3 | When catching exceptions, care must be taken not to catch & fail to propagate an `InterruptedException`. Doing so will
4 | prevent the scope's cleanup mechanisms from making appropriate progress, as the scope won't finish until all started
5 | threads complete.
6 |
7 | A good solution is to catch only non-fatal exceptions using `NonFatal`, e.g.:
8 |
9 | ```scala
10 | import ox.{forever, fork, supervised}
11 |
12 | import org.slf4j.LoggerFactory
13 | import scala.util.control.NonFatal
14 |
15 | val logger = LoggerFactory.getLogger(this.getClass)
16 | def processSingleItem(): Unit = ()
17 |
18 | supervised {
19 | fork {
20 | forever {
21 | try processSingleItem()
22 | catch case NonFatal(e) => logger.error("Processing error", e)
23 | }
24 | }
25 |
26 | // do something else
27 | }
28 | ```
29 |
--------------------------------------------------------------------------------
/generated-doc/out/utils/control-flow.md:
--------------------------------------------------------------------------------
1 | # Control flow methods
2 |
3 | There are some helper methods which might be useful when writing code using ox's concurrency operators:
4 |
5 | * `forever { ... }` repeatedly evaluates the given code block forever
6 | * `repeatWhile { ... }` repeatedly evaluates the given code block, as long as it returns `true`
7 | * `repeatUntil { ... }` repeatedly evaluates the given code block, until it returns `true`
8 | * `never` blocks the current thread indefinitely, until it is interrupted
9 | * `checkInterrupt()` checks if the current thread is interrupted, and if so, throws an `InterruptedException`. Useful in
10 | compute-intensive code, which wants to cooperate in the cancellation protocol
11 |
12 | All of these are `inline` methods, imposing no runtime overhead.
13 |
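14 | For example, a sketch using `repeatWhile` (`fetchBatch` is a hypothetical stand-in):
15 |
16 | ```scala
17 | import ox.{repeatWhile, sleep}
18 | import scala.concurrent.duration.*
19 |
20 | def fetchBatch(): List[Int] = List.empty // stand-in for a real data source
21 |
22 | repeatWhile {
23 |   val batch = fetchBatch()
24 |   batch.foreach(println)
25 |   sleep(100.millis)
26 |   batch.nonEmpty // repeat as long as there's more data
27 | }
28 | ```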
--------------------------------------------------------------------------------
/generated-doc/out/utils/resources.md:
--------------------------------------------------------------------------------
1 | # Resources
2 |
3 | ## Single scoped resource
4 |
5 | Ox provides convenience inline methods to allocate, use and (uninterruptibly) release resources with a try-finally
6 | block: `use` and `useCloseable`. For example:
7 |
8 | ```scala
9 | import ox.useCloseable
10 |
11 | useCloseable(new java.io.PrintWriter("test.txt")) { writer =>
12 | writer.println("Hello, world!")
13 | }
14 | ```
15 |
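16 | The more general `use` accepts a custom release function. A minimal sketch (assuming `use` takes the resource, the
17 | release function, and the body as separate parameter lists):
18 |
19 | ```scala
20 | import ox.use
21 |
22 | // the release function (closing the writer) runs uninterruptibly in a finally block
23 | use(new java.io.PrintWriter("test.txt"))(_.close()) { writer =>
24 |   writer.println("Hello, world!")
25 | }
26 | ```
27 |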
16 | If a concurrency scope is available (e.g. `supervised`), or if there are multiple resources to allocate, consider
17 | using the approach described below to avoid creating an additional syntactic scope.
18 |
19 | Alternatively, you can use `useInterruptibly`, where releasing the resource might be interrupted; this variant is
20 | equivalent to a plain `try`-`finally` block.
21 |
22 | ```{warning}
23 | To properly release resources when the entire application is interrupted, make sure to use [`OxApp`](oxapp.md) as the
24 | application's main entry point.
25 | ```
26 |
27 | ## Within a concurrency scope
28 |
29 | Resources can be allocated within a concurrency scope. They will be released in reverse acquisition order, after all
30 | forks started within the scope finish (but before the scope completes). E.g.:
31 |
32 | ```scala
33 | import ox.{supervised, useInScope}
34 |
35 | case class MyResource(c: Int)
36 |
37 | def acquire(c: Int): MyResource =
38 | println(s"acquiring $c ...")
39 | MyResource(c)
40 |
41 | def release(resource: MyResource): Unit =
42 | println(s"releasing ${resource.c} ...")
43 |
44 | supervised {
45 | val resource1 = useInScope(acquire(10))(release)
46 | val resource2 = useInScope(acquire(20))(release)
47 | println(s"Using $resource1 ...")
48 | println(s"Using $resource2 ...")
49 | }
50 | ```
51 |
52 | ### Release-only
53 |
54 | You can also register resources to be released (without acquisition logic), before the scope completes:
55 |
56 | ```scala
57 | import ox.{supervised, releaseAfterScope}
58 |
59 | case class MyResource(c: Int)
60 |
61 | def release(resource: MyResource): Unit =
62 | println(s"releasing ${resource.c} ...")
63 |
64 | supervised {
65 | val resource1 = MyResource(10)
66 | releaseAfterScope(release(resource1))
67 | println(s"Using $resource1 ...")
68 | }
69 | ```
70 |
--------------------------------------------------------------------------------
/generated-doc/out/utils/utility.md:
--------------------------------------------------------------------------------
1 | # Utilities
2 |
3 | In addition to concurrency, error handling and resiliency features, Ox includes some utility methods, which make writing
4 | direct-style Scala code more convenient. When possible, these are `inline` methods taking `inline` parameters, hence
5 | incurring no runtime overhead.
6 |
7 | Top-level methods:
8 |
9 | * `uninterruptible { ... }` evaluates the given code block making sure it can't be interrupted
10 | * `sleep(scala.concurrent.Duration)` blocks the current thread/fork for the given duration; same as `Thread.sleep`, but
11 | using Scala's `Duration`
12 |
13 | Extension functions on arbitrary expressions:
14 |
15 | * `.discard` evaluates the expression and discards its result, avoiding "discarded non-unit
16 | value" warnings
17 | * `.pipe(f)` applies `f` to the value of the expression and returns the result; useful for chaining operations
18 | * `.tap(f)` applies `f` to the value of the expression and returns the original value; useful for side-effecting
19 | operations
20 | * `.tapException(Throwable => Unit)` and `.tapNonFatalException(Throwable => Unit)` allow running the provided
21 | side-effecting callback when the expression throws an exception
22 |
23 | Extension functions on `scala.concurrent.Future[T]`:
24 |
25 | * `.get(): T` blocks the current thread/fork until the future completes; returns the successful value of the future, or
26 | throws the exception with which it failed
27 |
28 |
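29 | For example, a minimal sketch exercising the utilities described above:
30 |
31 | ```scala
32 | import ox.*
33 | import scala.concurrent.{ExecutionContext, Future}
34 | import scala.concurrent.duration.*
35 |
36 | given ExecutionContext = ExecutionContext.global
37 |
38 | sleep(10.millis) // block the current thread/fork for 10ms
39 | val n = 21.pipe(_ * 2).tap(v => println(s"computed: $v")) // n == 42
40 | Future(n + 1).get().discard // block until the future completes, then discard its result
41 | ```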
--------------------------------------------------------------------------------
/generated-doc/out/watch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sphinx-autobuild . _build/html
3 |
--------------------------------------------------------------------------------
/kafka/docker-tests/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | zookeeper:
4 | image: confluentinc/cp-zookeeper:latest
5 | environment:
6 | ZOOKEEPER_CLIENT_PORT: 2181
7 | ZOOKEEPER_TICK_TIME: 2000
8 | ports:
9 | - 22181:2181
10 |
11 | kafka:
12 | image: confluentinc/cp-kafka:latest
13 | depends_on:
14 | - zookeeper
15 | ports:
16 | - 29092:29092
17 | environment:
18 | KAFKA_BROKER_ID: 1
19 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
20 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092
21 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
22 | KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
23 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
24 |
--------------------------------------------------------------------------------
/kafka/src/main/scala/ox/kafka/KafkaConsumerWrapper.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka
2 |
3 | import org.apache.kafka.clients.consumer.{ConsumerRecords, KafkaConsumer, OffsetAndMetadata}
4 | import org.apache.kafka.common.TopicPartition
5 | import org.slf4j.LoggerFactory
6 | import ox.*
7 | import ox.channels.*
8 |
9 | import scala.jdk.CollectionConverters.*
10 |
11 | trait KafkaConsumerWrapper[K, V]:
12 | def subscribe(topics: Seq[String]): Unit
13 | def poll(): ConsumerRecords[K, V]
14 | def commit(offsets: Map[TopicPartition, Long]): Unit
15 |
16 | object KafkaConsumerWrapper:
17 | private val logger = LoggerFactory.getLogger(classOf[KafkaConsumerWrapper.type])
18 |
19 | def apply[K, V](consumer: KafkaConsumer[K, V], closeWhenComplete: Boolean)(using Ox): ActorRef[KafkaConsumerWrapper[K, V]] =
20 | val logic = new KafkaConsumerWrapper[K, V]:
21 | override def subscribe(topics: Seq[String]): Unit =
22 | try consumer.subscribe(topics.asJava)
23 | catch
24 | case t: Throwable =>
25 | logger.error(s"Exception when subscribing to $topics", t)
26 | throw t
27 |
28 | override def poll(): ConsumerRecords[K, V] =
29 | try consumer.poll(java.time.Duration.ofMillis(100))
30 | catch
31 | case t: Throwable =>
32 | logger.error("Exception when polling for records in Kafka", t)
33 | throw t
34 |
35 | override def commit(offsets: Map[TopicPartition, Long]): Unit =
36 | try consumer.commitSync(offsets.view.mapValues(o => new OffsetAndMetadata(o + 1)).toMap.asJava) // commit o+1: per Kafka's convention, the committed offset is the next one to read
37 | catch
38 | case t: Throwable =>
39 | logger.error("Exception when committing offsets", t)
40 | throw t
41 |
42 | def close(wrapper: KafkaConsumerWrapper[K, V]): Unit = if closeWhenComplete then
43 | logger.debug("Closing the Kafka consumer")
44 | uninterruptible(consumer.close())
45 |
46 | Actor.create(logic, Some(close))
47 | end apply
48 | end KafkaConsumerWrapper
49 |
--------------------------------------------------------------------------------
/kafka/src/main/scala/ox/kafka/KafkaFlow.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka
2 |
3 | import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer}
4 | import org.slf4j.LoggerFactory
5 | import ox.*
6 | import ox.flow.Flow
7 |
8 | object KafkaFlow:
9 | private val logger = LoggerFactory.getLogger(classOf[KafkaFlow.type])
10 |
11 | def subscribe[K, V](settings: ConsumerSettings[K, V], topic: String, otherTopics: String*): Flow[ReceivedMessage[K, V]] =
12 | subscribe(settings.toConsumer, closeWhenComplete = true, topic, otherTopics*)
13 |
14 | def subscribe[K, V](
15 | kafkaConsumer: KafkaConsumer[K, V],
16 | closeWhenComplete: Boolean,
17 | topic: String,
18 | otherTopics: String*
19 | ): Flow[ReceivedMessage[K, V]] =
20 | Flow.usingEmit: emit =>
21 | supervised:
22 | val kafkaConsumerActor = KafkaConsumerWrapper(kafkaConsumer, closeWhenComplete)
23 | kafkaConsumerActor.tell(_.subscribe(topic :: otherTopics.toList))
24 | forever { // poll through the actor: KafkaConsumer is not thread-safe, the actor serializes all access to it
25 | val records = kafkaConsumerActor.ask(_.poll())
26 | records.forEach(r => emit(ReceivedMessage(kafkaConsumerActor, r)))
27 | }.tapException(logger.error("Exception when polling for records", _))
28 |
29 | end KafkaFlow
30 |
--------------------------------------------------------------------------------
/kafka/src/main/scala/ox/kafka/ProducerSettings.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka
2 |
3 | import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
4 | import org.apache.kafka.common.serialization.{Serializer, StringSerializer}
5 |
6 | import java.util.Properties
7 |
8 | case class ProducerSettings[K, V](
9 | bootstrapServers: List[String],
10 | keySerializer: Serializer[K],
11 | valueSerializer: Serializer[V],
12 | otherProperties: Map[String, String]
13 | ):
14 | def bootstrapServers(servers: String*): ProducerSettings[K, V] = copy(bootstrapServers = servers.toList)
15 | def keySerializer[KK](serializer: Serializer[KK]): ProducerSettings[KK, V] = copy(keySerializer = serializer)
16 | def valueSerializer[VV](serializer: Serializer[VV]): ProducerSettings[K, VV] = copy(valueSerializer = serializer)
17 | def property(key: String, value: String): ProducerSettings[K, V] = copy(otherProperties = otherProperties + (key -> value))
18 |
19 | def toProperties: Properties =
20 | val props = new Properties()
21 | props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers.mkString(","))
22 | otherProperties.foreach { case (key, value) => props.put(key, value) }
23 | props
24 |
25 | def toProducer: KafkaProducer[K, V] = KafkaProducer(toProperties, keySerializer, valueSerializer)
26 | end ProducerSettings
27 |
28 | object ProducerSettings:
29 | private val StringSerializerInstance = new StringSerializer
30 | def default: ProducerSettings[String, String] =
31 | ProducerSettings(DefaultBootstrapServers, StringSerializerInstance, StringSerializerInstance, Map.empty)
32 |
--------------------------------------------------------------------------------
/kafka/src/main/scala/ox/kafka/ReceivedMessage.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerRecord
4 | import org.apache.kafka.common.header.Header
5 | import org.apache.kafka.common.record.TimestampType
6 | import ox.channels.ActorRef
7 |
8 | import scala.jdk.CollectionConverters.*
9 |
10 | case class ReceivedMessage[K, V](consumer: ActorRef[KafkaConsumerWrapper[K, V]], consumerRecord: ConsumerRecord[K, V]):
11 | def key: K = consumerRecord.key()
12 | def value: V = consumerRecord.value()
13 | def header: Iterable[Header] = consumerRecord.headers().asScala
14 | def offset: Long = consumerRecord.offset()
15 | def partition: Int = consumerRecord.partition()
16 | def topic: String = consumerRecord.topic()
17 | def timestamp: Long = consumerRecord.timestamp()
18 | def timestampType: TimestampType = consumerRecord.timestampType()
19 | end ReceivedMessage
20 |
--------------------------------------------------------------------------------
/kafka/src/main/scala/ox/kafka/kafkaOffsetCommit.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka
2 |
3 | import org.apache.kafka.clients.producer.ProducerRecord
4 | import org.apache.kafka.common.TopicPartition
5 | import ox.*
6 | import ox.channels.*
7 |
8 | import scala.collection.mutable
9 | import scala.concurrent.duration.*
10 | import ox.flow.Flow
11 |
12 | private[kafka] def doCommit(packets: Source[SendPacket[?, ?]]): Unit =
13 | val commitInterval = 1.second
14 | val toCommit = mutable.Map[TopicPartition, Long]()
15 | var consumer: ActorRef[KafkaConsumerWrapper[?, ?]] = null // assuming all packets come from the same consumer
16 |
17 | Flow.tick(commitInterval).merge(Flow.fromSource(packets)).runForeach { // a () tick triggers a commit, a packet accumulates offsets
18 | case () =>
19 | if consumer != null && toCommit.nonEmpty then
20 | // waiting for the commit to happen
21 | consumer.ask(_.commit(toCommit.toMap))
22 | toCommit.clear()
23 | case packet: SendPacket[?, ?] =>
24 | packet.commit.foreach { receivedMessage =>
25 | if consumer == null then consumer = receivedMessage.consumer.asInstanceOf[ActorRef[KafkaConsumerWrapper[?, ?]]]
26 | val tp = new TopicPartition(receivedMessage.topic, receivedMessage.partition)
27 | toCommit.updateWith(tp) {
28 | case Some(offset) => Some(math.max(offset, receivedMessage.offset))
29 | case None => Some(receivedMessage.offset)
30 | }
31 | }
32 | }
33 | end doCommit
34 |
35 | case class SendPacket[K, V](send: List[ProducerRecord[K, V]], commit: List[ReceivedMessage[?, ?]])
36 |
37 | object SendPacket:
38 | def apply[K, V](send: ProducerRecord[K, V], commit: ReceivedMessage[?, ?]): SendPacket[K, V] =
39 | SendPacket(List(send), List(commit))
40 |
41 | def apply[K, V](send: List[ProducerRecord[K, V]], commit: ReceivedMessage[?, ?]): SendPacket[K, V] =
42 | SendPacket(send, List(commit))
43 |
--------------------------------------------------------------------------------
/kafka/src/main/scala/ox/kafka/package.scala:
--------------------------------------------------------------------------------
1 | package ox
2 |
3 | package object kafka:
4 | private[kafka] val DefaultBootstrapServers = List("localhost:9092")
5 |
--------------------------------------------------------------------------------
/kafka/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
4 |         <encoder>
5 |             <pattern>%d{HH:mm:ss.SSS}%boldYellow(%replace( [%X{cid}] ){' \[\] ', ' '})[%thread] %-5level %logger{5} - %msg%n%rEx</pattern>
6 |         </encoder>
7 |     </appender>
8 |
9 |     <!-- root level: assumed default -->
10 |     <root level="INFO">
11 |         <appender-ref ref="STDOUT"/>
12 |     </root>
13 | </configuration>
--------------------------------------------------------------------------------
/kafka/src/test/scala/ox/kafka/manual/pekko/publishPekko.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka.manual.pekko
2 |
3 | import org.apache.kafka.clients.producer.ProducerRecord
4 | import org.apache.kafka.common.serialization.StringSerializer
5 | import org.apache.pekko.actor.ActorSystem
6 | import org.apache.pekko.kafka.ProducerSettings
7 | import org.apache.pekko.kafka.scaladsl.Producer
8 | import org.apache.pekko.stream.scaladsl.Source
9 | import ox.{discard, get}
10 | import ox.kafka.manual.{randomString, timedAndLogged}
14 |
15 | @main def publishPekko(): Unit =
16 | val topic = "t2"
17 | timedAndLogged("publish-pekko") {
18 | given system: ActorSystem = ActorSystem("publish")
19 |
20 | val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer).withBootstrapServers("localhost:29092")
21 |
22 | val source = Source(1 to 10000000).map(_ => randomString())
23 | val producerRecordSource = source.map { m => new ProducerRecord[String, String](topic, m) }
24 | producerRecordSource.runWith(Producer.plainSink(producerSettings)).get().discard
25 | system.terminate().get().discard
26 | }
27 | end publishPekko
28 |
--------------------------------------------------------------------------------
/kafka/src/test/scala/ox/kafka/manual/pekko/transferPekko.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka.manual.pekko
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerConfig
4 | import org.apache.kafka.clients.producer.ProducerRecord
5 | import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
6 | import org.apache.pekko.actor.ActorSystem
7 | import org.apache.pekko.kafka.scaladsl.Consumer.DrainingControl
8 | import org.apache.pekko.kafka.scaladsl.{Committer, Consumer, Producer}
9 | import org.apache.pekko.kafka.{CommitterSettings, ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions}
10 | import ox.{discard, get}
11 | import ox.kafka.manual.timedAndLogged
15 |
16 | @main def transferPekko(): Unit =
17 | val sourceTopic = "t2"
18 | val destTopic = "t2mapped"
19 | val group = "group2"
20 |
21 | timedAndLogged("transfer-pekko") {
22 | given system: ActorSystem = ActorSystem("transfer")
23 |
24 | val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer).withBootstrapServers("localhost:29092")
25 | val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
26 | .withBootstrapServers("localhost:29092")
27 | .withGroupId(group)
28 | .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
29 |
30 | val stream = Consumer
31 | .committableSource(consumerSettings, Subscriptions.topics(sourceTopic))
32 | .take(10_000_000)
33 | .map { msg =>
34 | ProducerMessage.single(
35 | new ProducerRecord[String, String](destTopic, msg.record.key(), msg.record.value().reverse),
36 | msg.committableOffset
37 | )
38 | }
39 | .via(Producer.flexiFlow(producerSettings))
40 | .map(_.passThrough)
41 | .toMat(Committer.sink(CommitterSettings(system)))(DrainingControl.apply)
42 | .run()
43 | .streamCompletion
44 |
45 | stream.get().discard
46 | system.terminate().get().discard
47 | }
48 | end transferPekko
49 |
--------------------------------------------------------------------------------
/kafka/src/test/scala/ox/kafka/manual/publish.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka.manual
2 |
3 | import org.apache.kafka.clients.producer.ProducerRecord
4 | import ox.*
5 |
6 | import ox.kafka.*
7 | import ox.flow.Flow
8 |
9 | @main def publish(): Unit =
10 | val topic = "t1"
11 |
12 | timedAndLogged("publish") {
13 | import KafkaStage.*
14 |
15 | val bootstrapServer = "localhost:29092"
16 | val settings = ProducerSettings.default.bootstrapServers(bootstrapServer)
17 | Flow
18 | .unfold(())(_ => Some((randomString(), ())))
19 | // 100 bytes * 10000000 = 1 GB
20 | .take(10_000_000)
21 | .map(msg => ProducerRecord[String, String](topic, msg))
22 | .mapPublish(settings)
23 | .runDrain()
24 | }
25 | end publish
26 |
--------------------------------------------------------------------------------
/kafka/src/test/scala/ox/kafka/manual/transfer.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka.manual
2 |
3 | import org.apache.kafka.clients.producer.ProducerRecord
4 | import ox.*
6 | import ox.kafka.*
7 | import ox.kafka.ConsumerSettings.AutoOffsetReset
8 |
9 | @main def transfer(): Unit =
10 | val sourceTopic = "t1"
11 | val destTopic = "t1mapped"
12 | val group = "group1"
13 |
14 | timedAndLogged("transfer") {
15 | import KafkaStage.*
16 |
17 | val bootstrapServer = "localhost:29092"
18 | val consumerSettings = ConsumerSettings.default(group).bootstrapServers(bootstrapServer).autoOffsetReset(AutoOffsetReset.Earliest)
19 | val producerSettings = ProducerSettings.default.bootstrapServers(bootstrapServer)
20 | KafkaFlow
21 | .subscribe(consumerSettings, sourceTopic)
22 | .take(10_000_000)
23 | .map(in => (in.value.reverse, in))
24 | .map((value, original) => SendPacket(ProducerRecord[String, String](destTopic, value), original))
25 | .mapPublishAndCommit(producerSettings)
26 | .runDrain()
27 | }
28 | end transfer
29 |
--------------------------------------------------------------------------------
/kafka/src/test/scala/ox/kafka/manual/util.scala:
--------------------------------------------------------------------------------
1 | package ox.kafka.manual
2 |
3 | import scala.util.Random
4 | import ox.timed
5 |
6 | def timedAndLogged[T](name: String)(f: => T): T =
7 | val (took, result) = timed(f)
8 | println(s"$name took ${took.toMillis}ms")
9 | result
10 |
11 | def randomString() = Random().alphanumeric.take(100).mkString
12 |
--------------------------------------------------------------------------------
/mdc-logback/src/test/scala/ox/logback/InheritableMDCTest.scala:
--------------------------------------------------------------------------------
1 | package ox.logback
2 |
3 | import org.scalatest.flatspec.AnyFlatSpec
4 | import org.scalatest.matchers.should.Matchers
5 | import org.slf4j.MDC
6 | import ox.fork
7 |
8 | class InheritableMDCTest extends AnyFlatSpec with Matchers:
9 | InheritableMDC.init
10 |
11 | it should "make MDC values available in forks" in {
12 | InheritableMDC.supervisedWhere("a" -> "1", "b" -> "2") {
13 | MDC.put("c", "3") // should not be inherited
14 |
15 | fork {
16 | MDC.get("a") shouldBe "1"
17 | MDC.get("b") shouldBe "2"
18 | MDC.get("c") shouldBe null
19 | }.join()
20 |
21 | MDC.get("a") shouldBe "1"
22 | MDC.get("b") shouldBe "2"
23 | MDC.get("c") shouldBe "3"
24 | }
25 | }
26 | end InheritableMDCTest
27 |
--------------------------------------------------------------------------------
/otel-context/src/main/scala/ox/otel/context/PropagatingVirtualThreadFactory.scala:
--------------------------------------------------------------------------------
1 | package ox.otel.context
2 |
3 | import java.util.concurrent.ThreadFactory
4 | import io.opentelemetry.context.Context
5 |
6 | /** A virtual thread factory which propagates the OpenTelemetry [[Context]] when creating new threads.
7 | *
8 | * Should be used in [[ox.OxApp]] settings (as `threadFactory`), or via the [[ox.oxThreadFactory]].
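 *
 * A sketch of such usage (assuming `OxApp.Settings` exposes a `threadFactory` parameter, as described above, and a
 * default instance that can be copied; the no-op `run` body is for illustration only):
 * {{{
 * object MyApp extends OxApp:
 *   override def settings: OxApp.Settings =
 *     OxApp.Settings.Default.copy(threadFactory = new PropagatingVirtualThreadFactory())
 *
 *   def run(args: Vector[String])(using Ox): ExitCode = ExitCode.Success
 * }}}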
9 | */
10 | class PropagatingVirtualThreadFactory extends ThreadFactory:
11 | private val delegate = Thread.ofVirtual().factory()
12 |
13 | override def newThread(r: Runnable): Thread =
14 | val parentContext = Context.current()
15 | delegate.newThread(() =>
16 | val scope = parentContext.makeCurrent()
17 | try r.run()
18 | finally scope.close()
19 | )
20 | end PropagatingVirtualThreadFactory
21 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.11.2
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | val sbtSoftwareMillVersion = "2.0.25"
2 | addSbtPlugin("com.softwaremill.sbt-softwaremill" % "sbt-softwaremill-common" % sbtSoftwareMillVersion)
3 | addSbtPlugin("com.softwaremill.sbt-softwaremill" % "sbt-softwaremill-publish" % sbtSoftwareMillVersion)
4 | addSbtPlugin("org.scalameta" % "sbt-mdoc" % "2.7.1")
5 |
--------------------------------------------------------------------------------