├── .bsp
│   └── sbt.json
├── .gitignore
├── .idea
│   ├── .gitignore
│   ├── codeStyles
│   │   ├── Project.xml
│   │   └── codeStyleConfig.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── modules
│   │   ├── cats-effect-build.iml
│   │   └── cats-effect.iml
│   ├── sbt.xml
│   ├── scala_compiler.xml
│   ├── scala_settings.xml
│   └── vcs.xml
├── README.md
├── build.sbt
├── project
│   └── build.properties
└── src
    └── main
        ├── resources
        │   └── connection.txt
        └── scala
            └── com
                └── rockthejvm
                    ├── part1recap
                    │   ├── CatsTypeClasses.scala
                    │   ├── ContextualAbstractionsScala2.scala
                    │   ├── ContextualAbstractionsScala3.scala
                    │   └── Essentials.scala
                    ├── part2effects
                    │   ├── Effects.scala
                    │   ├── IOApps.scala
                    │   ├── IOErrorHandling.scala
                    │   ├── IOIntroduction.scala
                    │   ├── IOParallelism.scala
                    │   └── IOTraversal.scala
                    ├── part3concurrency
                    │   ├── AsyncIOs.scala
                    │   ├── BlockingIOs.scala
                    │   ├── CancellingIOs.scala
                    │   ├── Fibers.scala
                    │   ├── RacingIOs.scala
                    │   └── Resources.scala
                    ├── part4coordination
                    │   ├── CountdownLatches.scala
                    │   ├── CyclicBarriers.scala
                    │   ├── Defers.scala
                    │   ├── Mutex.scala
                    │   ├── Refs.scala
                    │   └── Semaphores.scala
                    ├── part5polymorphic
                    │   ├── PolymorphicAsync.scala
                    │   ├── PolymorphicCancellation.scala
                    │   ├── PolymorphicCoordination.scala
                    │   ├── PolymorphicFibers.scala
                    │   ├── PolymorphicSync.scala
                    │   └── PolymorphicTemporalSuspension.scala
                    ├── playground
                    │   └── Playground.scala
                    ├── utils
                    │   ├── Utils.scala
                    │   └── general
                    │       └── Utils.scala
                    └── utilsScala2
                        ├── general
                        │   └── package.scala
                        └── package.scala
/.bsp/sbt.json:
--------------------------------------------------------------------------------
1 | {"name":"sbt","version":"1.5.2","bspVersion":"2.0.0-M5","languages":["scala"],"argv":["/Users/daniel/Library/Java/JavaVirtualMachines/adopt-openjdk-11.0.11/Contents/Home/bin/java","-Xms100m","-Xmx100m","-classpath","/Users/daniel/Library/Application Support/JetBrains/IdeaIC2021.2/plugins/Scala/launcher/sbt-launch.jar","xsbt.boot.Boot","-bsp","--sbt-launch-jar=/Users/daniel/Library/Application%20Support/JetBrains/IdeaIC2021.2/plugins/Scala/launcher/sbt-launch.jar"]}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Created by https://www.toptal.com/developers/gitignore/api/intellij,java,scala,sbt
3 | # Edit at https://www.toptal.com/developers/gitignore?templates=intellij,java,scala,sbt
4 |
5 | ### Intellij ###
6 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
7 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
8 |
9 | # User-specific stuff
10 | .idea/**/workspace.xml
11 | .idea/**/tasks.xml
12 | .idea/**/usage.statistics.xml
13 | .idea/**/dictionaries
14 | .idea/**/shelf
15 |
16 | # AWS User-specific
17 | .idea/**/aws.xml
18 |
19 | # Generated files
20 | .idea/**/contentModel.xml
21 |
22 | # Sensitive or high-churn files
23 | .idea/**/dataSources/
24 | .idea/**/dataSources.ids
25 | .idea/**/dataSources.local.xml
26 | .idea/**/sqlDataSources.xml
27 | .idea/**/dynamic.xml
28 | .idea/**/uiDesigner.xml
29 | .idea/**/dbnavigator.xml
30 |
31 | # Gradle
32 | .idea/**/gradle.xml
33 | .idea/**/libraries
34 |
35 | # Gradle and Maven with auto-import
36 | # When using Gradle or Maven with auto-import, you should exclude module files,
37 | # since they will be recreated, and may cause churn. Uncomment if using
38 | # auto-import.
39 | # .idea/artifacts
40 | # .idea/compiler.xml
41 | # .idea/jarRepositories.xml
42 | # .idea/modules.xml
43 | # .idea/*.iml
44 | # .idea/modules
45 | # *.iml
46 | # *.ipr
47 |
48 | # CMake
49 | cmake-build-*/
50 |
51 | # Mongo Explorer plugin
52 | .idea/**/mongoSettings.xml
53 |
54 | # File-based project format
55 | *.iws
56 |
57 | # IntelliJ
58 | out/
59 |
60 | # mpeltonen/sbt-idea plugin
61 | .idea_modules/
62 |
63 | # JIRA plugin
64 | atlassian-ide-plugin.xml
65 |
66 | # Cursive Clojure plugin
67 | .idea/replstate.xml
68 |
69 | # Crashlytics plugin (for Android Studio and IntelliJ)
70 | com_crashlytics_export_strings.xml
71 | crashlytics.properties
72 | crashlytics-build.properties
73 | fabric.properties
74 |
75 | # Editor-based Rest Client
76 | .idea/httpRequests
77 |
78 | # Android studio 3.1+ serialized cache file
79 | .idea/caches/build_file_checksums.ser
80 |
81 | ### Intellij Patch ###
82 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
83 |
84 | # *.iml
85 | # modules.xml
86 | # .idea/misc.xml
87 | # *.ipr
88 |
89 | # Sonarlint plugin
90 | # https://plugins.jetbrains.com/plugin/7973-sonarlint
91 | .idea/**/sonarlint/
92 |
93 | # SonarQube Plugin
94 | # https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
95 | .idea/**/sonarIssues.xml
96 |
97 | # Markdown Navigator plugin
98 | # https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
99 | .idea/**/markdown-navigator.xml
100 | .idea/**/markdown-navigator-enh.xml
101 | .idea/**/markdown-navigator/
102 |
103 | # Cache file creation bug
104 | # See https://youtrack.jetbrains.com/issue/JBR-2257
105 | .idea/$CACHE_FILE$
106 |
107 | # CodeStream plugin
108 | # https://plugins.jetbrains.com/plugin/12206-codestream
109 | .idea/codestream.xml
110 |
111 | ### Java ###
112 | # Compiled class file
113 | *.class
114 |
115 | # Log file
116 | *.log
117 |
118 | # BlueJ files
119 | *.ctxt
120 |
121 | # Mobile Tools for Java (J2ME)
122 | .mtj.tmp/
123 |
124 | # Package Files #
125 | *.jar
126 | *.war
127 | *.nar
128 | *.ear
129 | *.zip
130 | *.tar.gz
131 | *.rar
132 |
133 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
134 | hs_err_pid*
135 |
136 | ### SBT ###
137 | # Simple Build Tool
138 | # http://www.scala-sbt.org/release/docs/Getting-Started/Directories.html#configuring-version-control
139 |
140 | dist/*
141 | target/
142 | lib_managed/
143 | src_managed/
144 | project/boot/
145 | project/plugins/project/
146 | .history
147 | .cache
148 | .lib/
149 |
150 | ### Scala ###
151 |
152 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
153 |
154 | # End of https://www.toptal.com/developers/gitignore/api/intellij,java,scala,sbt
155 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/.idea/codeStyles/Project.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/codeStyles/codeStyleConfig.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules/cats-effect-build.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules/cats-effect.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/sbt.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/scala_compiler.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/scala_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## The Rock the JVM Cats Effect Course Repository
2 |
3 | Powered by [Rock the JVM!](https://rockthejvm.com)
4 |
5 | This repository contains the code we wrote during the Rock the JVM [Cats Effect course](https://rockthejvm.com/p/cats-effect). Unless explicitly mentioned, the code in this repository is exactly the code we wrote on camera.
6 |
7 | ### Install and setup
8 |
9 | - install your favorite IDE - in the course I work with [IntelliJ IDEA](https://jetbrains.com/idea)
10 | - either clone the repo or download as zip
11 | - open with the IDE as an SBT project
12 |
13 | ### Getting Started
14 |
15 | Start by cloning this repository and checking out the `start` tag:
16 |
17 | ```
18 | git checkout start
19 | ```
20 |
21 | The repository has tags for intermediate states of the code. These are useful for keeping track of course progress, particularly for the harder exercises where we modify the same code. To revert to a given state of the code, all you have to do is `git checkout (the respective tag)` - see the example after the list. The tags are as follows:
22 |
23 | * `start`
24 | * `1.1-scala-recap`
25 | * `1.2-contextual-abstractions-scala-2`
26 | * `1.3-contextual-abstractions-scala-3`
27 | * `1.4-cats-type-classes`
28 | * `2.1-effects`
29 | * `2.2-effects-exercises`
30 | * `2.3-io`
31 | * `2.4-io-exercises`
32 | * `2.5-io-error-handling`
33 | * `2.6-io-apps`
34 | * `2.7-io-parallelism`
35 | * `2.8-io-traversal`
36 | * `3.1-fibers`
37 | * `3.2-fibers-exercise`
38 | * `3.4-bracket`
39 | * `3.5-resource`
40 | * `3.6-io-racing`
41 | * `3.7-io-cancellation`
42 | * `3.8-io-cancellation-exercises`
43 | * `3.9-io-blocking`
44 | * `3.10-io-async`
45 | * `4.1-ref`
46 | * `4.3-deferred`
47 | * `4.4-deferred-exercises`
48 | * `4.5-mutex`
49 | * `4.7-semaphore`
50 | * `4.8-countdownlatch`
51 | * `4.9-countdownlatch-exercise`
52 | * `4.10-cyclicbarrier`
53 | * `5.1-monadcancel`
54 | * `5.3-spawn`
55 | * `5.4-concurrent`
56 | * `5.6-temporal`
57 | * `5.7-sync`
58 | * `5.8-async`
59 |
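For example, to bring the code to its state at the beginning of the IO lessons:

```
git checkout 2.3-io
```
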
60 | ### Seeing the complete code
61 |
62 | Either clone this repo as it is, or do
63 |
64 | ```
65 | git checkout master
66 | ```
67 |
68 | ### For questions or suggestions
69 |
70 | If you have changes to suggest to this repo, either
71 | - submit a GitHub issue
72 | - tell me in the course Q/A forum
73 | - submit a pull request!
74 |
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 | val scala3Version = "3.0.0"
2 |
3 | lazy val root = project
4 | .in(file("."))
5 | .settings(
6 | name := "cats-effect",
7 | version := "0.1.0",
8 |
9 | scalaVersion := scala3Version,
10 |
11 | libraryDependencies ++= Seq(
12 | "org.typelevel" %% "cats-effect" % "3.2.0",
13 | )
14 | )
15 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.5.2
2 |
--------------------------------------------------------------------------------
/src/main/resources/connection.txt:
--------------------------------------------------------------------------------
1 | rockthejvm.com
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part1recap/CatsTypeClasses.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part1recap
2 |
3 | object CatsTypeClasses {
4 |
5 | /*
6 | - applicative
7 | - functor
8 | - flatMap
9 | - monad
10 | - applicativeError/monadError
11 | - traverse
12 | */
13 |
14 | // functor - "mappable" data structures
15 | trait MyFunctor[F[_]] {
16 | def map[A, B](initialValue: F[A])(f: A => B): F[B]
17 | }
18 |
19 | import cats.Functor
20 | import cats.instances.list.*
21 | val listFunctor = Functor[List]
22 |
23 | // generalizable "mapping" APIs
24 | def increment[F[_]](container: F[Int])(using functor: Functor[F]): F[Int] =
25 | functor.map(container)(_ + 1)
26 |
27 | import cats.syntax.functor.*
28 | def increment_v2[F[_] : Functor](container: F[Int]): F[Int] =
29 | container.map(_ + 1)
30 |
31 | // applicative - the ability to "wrap" types
32 | trait MyApplicative[F[_]] extends MyFunctor[F] {
33 | def pure[A](value: A): F[A]
34 | }
35 |
36 | import cats.Applicative
37 | val applicativeList = Applicative[List]
38 | val aSimpleList: List[Int] = applicativeList.pure(43)
39 | import cats.syntax.applicative.* // import the pure extension method
40 | val aSimpleList_v2: List[Int] = 43.pure[List]
41 |
42 | // FlatMap - ability to chain multiple wrapper computations
43 | trait MyFlatMap[F[_]] extends MyFunctor[F] {
44 | def flatMap[A, B](container: F[A])(f: A => F[B]): F[B]
45 | }
46 |
47 | import cats.FlatMap
48 | val flatMapList = FlatMap[List]
49 | import cats.syntax.flatMap.* // flatMap extension method
50 | def crossProduct[F[_] : FlatMap, A, B](fa: F[A], fb: F[B]): F[(A, B)] =
51 | fa.flatMap(a => fb.map(b => (a, b)))
52 |
53 | // Monad - applicative + flatMap
54 | trait MyMonad[F[_]] extends MyApplicative[F] with MyFlatMap[F] {
55 | override def map[A, B](initialValue: F[A])(f: A => B): F[B] =
56 | flatMap(initialValue)(a => pure(f(a)))
57 | }
58 |
59 | import cats.Monad
60 | val monadList = Monad[List]
61 | def crossProduct_v2[F[_] : Monad, A, B](fa: F[A], fb: F[B]): F[(A, B)] =
62 | for {
63 | a <- fa
64 | b <- fb
65 | } yield (a, b)
66 |
67 | /*
68 |      Functor -> FlatMap -->
69 |         \                 \
70 |          Applicative ----> Monad
71 | */
72 |
73 | // error-like type classes
74 | trait MyApplicativeError[F[_], E] extends MyApplicative[F] {
75 | def raiseError[A](e: E): F[A]
76 | }
77 |
78 | import cats.ApplicativeError
79 | type ErrorOr[A] = Either[String, A]
80 | val appErrorEither = ApplicativeError[ErrorOr, String]
81 | val desirableValue: ErrorOr[Int] = appErrorEither.pure(42)
82 | val failedValue: ErrorOr[Int] = appErrorEither.raiseError("Something failed")
83 | import cats.syntax.applicativeError.* // raiseError extension method
84 | val failedValue_v2: ErrorOr[Int] = "Something failed".raiseError[ErrorOr, Int]
85 |
86 | trait MyMonadError[F[_], E] extends MyApplicativeError[F, E] with Monad[F]
87 | import cats.MonadError
88 | val monadErrorEither = MonadError[ErrorOr, String]
89 |
90 | // traverse
91 | trait MyTraverse[F[_]] extends MyFunctor[F] {
92 | def traverse[G[_], A, B](container: F[A])(f: A => G[B]): G[F[B]]
93 | }
94 |
95 | // turn nested wrappers inside out
96 | val listOfOptions: List[Option[Int]] = List(Some(1), Some(2), Some(43))
97 | import cats.Traverse
98 | val listTraverse = Traverse[List]
99 | val optionList: Option[List[Int]] = listTraverse.traverse(List(1,2,3))(x => Option(x))
100 | import cats.syntax.traverse.*
101 | val optionList_v2: Option[List[Int]] = List(1,2,3).traverse(x => Option(x))
102 |
103 | /*
104 | Big(ger) type class hierarchy in Cats:
105 |
106 | Semigroup -> Monoid
107 |
108 |    Semigroupal -> Apply -> FlatMap -->
109 |               /        \          \
110 |    Functor ---> Applicative -> Monad ---------> MonadError
111 |       |              \                         /
112 |       |               ApplicativeError -------/
113 |       |
114 |       ----> Traverse
115 | */
116 | def main(args: Array[String]): Unit = {
117 |
118 | }
119 | }
120 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part1recap/ContextualAbstractionsScala2.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part1recap
2 |
3 | object ContextualAbstractionsScala2 {
4 |
5 | // implicit classes
6 | case class Person(name: String) {
7 | def greet(): String = s"Hi, my name is $name"
8 | }
9 |
10 | implicit class ImpersonableString(name: String) {
11 | def greet(): String =
12 | Person(name).greet()
13 | }
14 |
15 | // extension method
16 | val greeting = "Peter".greet() // new ImpersonableString("Peter").greet()
17 |
18 | // example: scala.concurrent.duration
19 | import scala.concurrent.duration._
20 | val oneSecond = 1.second
21 |
22 | // implicit arguments and values
23 | def increment(x: Int)(implicit amount: Int) = x + amount
24 | implicit val defaultAmount: Int = 10
25 | val twelve = increment(2) // implicit argument 10 passed by the compiler
26 |
27 | def multiply(x: Int)(implicit factor: Int) = x * factor
28 | val aHundred = multiply(10) // same implicit argument passed by the compiler
29 |
30 | // more complex example
31 | trait JSONSerializer[T] {
32 | def toJson(value: T): String
33 | }
34 |
35 | def convert2Json[T](value: T)(implicit serializer: JSONSerializer[T]): String =
36 | serializer.toJson(value)
37 |
38 | implicit val personSerializer: JSONSerializer[Person] = new JSONSerializer[Person] {
39 | override def toJson(person: Person) = "{\"name\" : \"" + person.name + "\"}"
40 | }
41 |
42 | val davidsJson = convert2Json(Person("David")) // implicit serializer passed here
43 |
44 | // implicit defs
45 | implicit def createListSerializer[T](implicit serializer: JSONSerializer[T]): JSONSerializer[List[T]] =
46 | new JSONSerializer[List[T]] {
47 | override def toJson(list: List[T]) = s"[${list.map(serializer.toJson).mkString(",")}]"
48 | }
49 |
50 | val personsJson = convert2Json(List(Person("Alice"), Person("Bob")))
51 |
52 | // implicit conversions (not recommended)
53 | case class Cat(name: String) {
54 | def meow(): String = s"$name is meowing"
55 | }
56 |
57 | implicit def string2Cat(name: String): Cat = Cat(name)
58 | val aCat: Cat = "Garfield" // string2Cat("Garfield")
59 | val garfieldMeowing = "Garfield".meow() // string2Cat("Garfield").meow()
60 |
61 | def main(args: Array[String]): Unit = {
62 | println(davidsJson)
63 | println(personsJson)
64 | }
65 | }
66 |
67 | object TypeClassesScala2 {
68 | case class Person(name: String, age: Int)
69 |
70 | // part 1 - Type class definition
71 | trait JSONSerializer[T] {
72 | def toJson(value: T): String
73 | }
74 |
75 | // part 2 - type class instances
76 | implicit object StringSerializer extends JSONSerializer[String] {
77 | override def toJson(value: String) = "\"" + value + "\""
78 | }
79 |
80 | implicit object IntSerializer extends JSONSerializer[Int] {
81 | override def toJson(value: Int) = value.toString
82 | }
83 |
84 | implicit object PersonSerializer extends JSONSerializer[Person] {
85 | override def toJson(value: Person) =
86 | s"""
87 | |{"name" : "${value.name}", "age" : ${value.age}}
88 | |""".stripMargin.trim
89 | }
90 |
91 | // part 3 - offer some API
92 | def convertToJson[T](value: T)(implicit serializer: JSONSerializer[T]): String =
93 | serializer.toJson(value)
94 |
95 | def convertListToJson[T](list: List[T])(implicit serializer: JSONSerializer[T]): String =
96 | list.map(value => serializer.toJson(value)).mkString("[",",","]")
97 |
98 | // part 4 - add extension methods
99 | object JSONSyntax {
100 | implicit class JSONSerializable[T](value: T)(implicit serializer: JSONSerializer[T]) {
101 | def toJson: String = serializer.toJson(value)
102 | }
103 | }
104 |
105 | def main(args: Array[String]): Unit = {
106 | println(convertListToJson(List(Person("Alice", 23), Person("Xavier", 45))))
107 | val bob = Person("Bob", 68)
108 |
109 | import JSONSyntax._
110 | println(bob.toJson)
111 | }
112 | }
113 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part1recap/ContextualAbstractionsScala3.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part1recap
2 |
3 | object ContextualAbstractionsScala3 {
4 |
5 | // given/using combo
6 | def increment(x: Int)(using amount: Int): Int = x + amount
7 | given defaultAmount: Int = 10
8 | val twelve = increment(2) // (10) automatically by the compiler
9 |
10 | def multiply(x: Int)(using factor: Int): Int = x * factor
11 | val aHundred = multiply(10) // defaultAmount is passed automatically
12 |
13 | // more complex use case
14 | trait Combiner[A] {
15 | def combine(x: A, y: A): A
16 | def empty: A
17 | }
18 |
19 | def combineAll[A](values: List[A])(using combiner: Combiner[A]): A =
20 | values.foldLeft(combiner.empty)(combiner.combine)
21 |
22 | given intCombiner: Combiner[Int] with {
23 | override def combine(x: Int, y: Int) = x + y
24 | override def empty = 0
25 | }
26 |
27 | val numbers = (1 to 10).toList
28 | val sum10 = combineAll(numbers) // intCombiner passed automatically
29 |
30 | // synthesize given instances
31 | given optionCombiner[T](using combiner: Combiner[T]): Combiner[Option[T]] with {
32 | override def empty = Some(combiner.empty)
33 | override def combine(x: Option[T], y: Option[T]): Option[T] = for {
34 | vx <- x
35 | vy <- y
36 | } yield combiner.combine(vx, vy)
37 | }
38 |
39 | val sumOptions: Option[Int] = combineAll(List(Some(1), None, Some(2)))
40 |
41 | // extension methods
42 | case class Person(name: String) {
43 | def greet(): String = s"Hi, my name is $name"
44 | }
45 |
46 | extension (name: String)
47 | def greet(): String = Person(name).greet()
48 |
49 | val alicesGreeting = "Alice".greet()
50 |
51 | // generic extension
52 | extension [T](list: List[T])
53 | def reduceAll(using combiner: Combiner[T]): T =
54 | list.foldLeft(combiner.empty)(combiner.combine)
55 |
56 | val sum10_v2 = numbers.reduceAll
57 |
58 |
59 | def main(args: Array[String]): Unit = {
60 |
61 | }
62 | }
63 |
64 | object TypeClassesScala3 {
65 | case class Person(name: String, age: Int)
66 |
67 | // type classes
68 |
69 | // part 1 - Type class definition
70 | trait JSONSerializer[T] {
71 | def toJson(value: T): String
72 | }
73 |
74 | // part 2 - type class instances
75 | given stringSerializer: JSONSerializer[String] with {
76 | override def toJson(value: String) = "\"" + value + "\""
77 | }
78 |
79 | given intSerializer: JSONSerializer[Int] with {
80 | override def toJson(value: Int) = value.toString
81 | }
82 |
83 | given personSerializer: JSONSerializer[Person] with {
84 | override def toJson(person: Person) =
85 | s"""
86 | |{"name": "${person.name}", "age": ${person.age}}
87 | |""".stripMargin.trim
88 | }
89 |
90 | // part 3 - user-facing API
91 | def convert2Json[T](value: T)(using serializer: JSONSerializer[T]): String =
92 | serializer.toJson(value)
93 |
94 | def convertList2Json[T](list: List[T])(using serializer: JSONSerializer[T]): String =
95 | list.map(value => serializer.toJson(value)).mkString("[", ",", "]")
96 |
97 | // part 4 - extension methods just for the types we support
98 | extension [T](value: T)
99 | def toJson(using serializer: JSONSerializer[T]): String =
100 | serializer.toJson(value)
101 |
102 | def main(args: Array[String]): Unit = {
103 | println(convertList2Json(List(Person("Alice", 23), Person("Bob", 46))))
104 | val bob = Person("Bob", 46)
105 | println(bob.toJson)
106 | }
107 | }
108 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part1recap/Essentials.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part1recap
2 |
3 | import java.util.concurrent.Executors
4 | import scala.util.{Failure, Success, Try}
5 | import scala.concurrent.{ExecutionContext, Future}
6 |
7 | object Essentials {
8 |
9 | // values
10 | val aBoolean: Boolean = false
11 |
12 | // expressions are EVALUATED to a value
13 | val anIfExpression = if (2 > 3) "bigger" else "smaller"
14 |
15 | // instructions vs expressions
16 | val theUnit = println("Hello, Scala") // Unit = "void" in other languages
17 |
18 | // OOP
19 | class Animal
20 | class Cat extends Animal
21 | trait Carnivore {
22 | def eat(animal: Animal): Unit
23 | }
24 |
25 | // inheritance model: extend <= 1 class, but inherit from >= 0 traits
26 | class Crocodile extends Animal with Carnivore {
27 | override def eat(animal: Animal): Unit = println("Crunch!")
28 | }
29 |
30 | // singleton
31 | object MySingleton // singleton pattern in one line
32 |
33 | // companions
34 | object Carnivore // companion object of the class Carnivore
35 |
36 | // generics
37 | class MyList[A]
38 |
39 | // method notation
40 | val three = 1 + 2
41 | val anotherThree = 1.+(2)
42 |
43 | // functional programming
44 | val incrementer: Int => Int = x => x + 1
45 | val incremented = incrementer(45) // 46
46 |
47 | // map, flatMap, filter
48 | val processedList = List(1,2,3).map(incrementer) // List(2,3,4)
49 | val aLongerList = List(1,2,3).flatMap(x => List(x, x + 1)) // List(1,2, 2,3, 3,4)
50 |
51 | // for-comprehensions
52 | val checkerboard = List(1,2,3).flatMap(n => List('a', 'b', 'c').map(c => (n, c)))
53 | val anotherCheckerboard = for {
54 | n <- List(1, 2, 3)
55 | c <- List('a', 'b', 'c')
56 | } yield (n, c) // equivalent expression
57 |
58 | // options and try
59 | val anOption: Option[Int] = Option(/* something that might be null */ 3) // Some(3)
60 | val doubledOption: Option[Int] = anOption.map(_ * 2)
61 |
62 | val anAttempt = Try(/* something that might throw */ 42) // Success(42)
63 | val aModifiedAttempt: Try[Int] = anAttempt.map(_ + 10)
64 |
65 | // pattern matching
66 | val anUnknown: Any = 45
67 | val ordinal = anUnknown match {
68 | case 1 => "first"
69 | case 2 => "second"
70 | case _ => "unknown"
71 | }
72 |
73 | val optionDescription: String = anOption match {
74 | case Some(value) => s"the option is not empty: $value"
75 | case None => "the option is empty"
76 | }
77 |
78 | // Futures
79 | implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(8))
80 | val aFuture = Future {
81 | // a bit of code
82 | 42
83 | }
84 |
85 | // wait for completion (async)
86 | aFuture.onComplete {
87 | case Success(value) => println(s"The async meaning of life is $value")
88 | case Failure(exception) => println(s"Meaning of life failed: $exception")
89 | }
90 |
91 | // map a Future
92 | val anotherFuture = aFuture.map(_ + 1) // Future(43) when it completes
93 |
94 | // partial functions
95 | val aPartialFunction: PartialFunction[Int, Int] = {
96 | case 1 => 43
97 | case 8 => 56
98 | case 100 => 999
99 | }
100 |
101 | // some more advanced stuff
102 | trait HigherKindedType[F[_]]
103 | trait SequenceChecker[F[_]] {
104 | def isSequential: Boolean
105 | }
106 |
107 | val listChecker = new SequenceChecker[List] {
108 | override def isSequential = true
109 | }
110 |
111 | def main(args: Array[String]): Unit = {
112 |
113 | }
114 | }
115 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part2effects/Effects.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part2effects
2 |
3 | import scala.concurrent.Future
4 | import scala.io.StdIn
5 |
6 | object Effects {
7 |
8 | // pure functional programming
9 | // substitution
10 | def combine(a: Int, b: Int): Int = a + b
11 | val five = combine(2, 3)
12 | val five_v2 = 2 + 3
13 | val five_v3 = 5
14 |
15 | // referential transparency = can replace an expression with its value
16 | // as many times as we want without changing behavior
17 |
18 | // example: print to the console
19 | val printSomething: Unit = println("Cats Effect")
20 | val printSomething_v2: Unit = () // not the same
21 |
22 | // example: change a variable
23 | var anInt = 0
24 | val changingVar: Unit = (anInt += 1)
25 | val changingVar_v2: Unit = () // not the same
26 |
27 | // side effects are inevitable for useful programs
28 |
29 | /*
30 | Effect types
31 | Properties:
32 | - type signature describes the kind of calculation that will be performed
33 | - type signature describes the VALUE that will be calculated
34 | - when side effects are needed, effect construction is separate from effect execution
35 | */
36 |
37 | /*
38 | example: Option is an effect type
39 | - describes a possibly absent value
40 | - computes a value of type A, if it exists
41 | - side effects are not needed
42 | */
43 | val anOption: Option[Int] = Option(42)
44 |
45 | /*
46 | example: Future is NOT an effect type
47 | - describes an asynchronous computation
48 | - computes a value of type A, if it's successful
49 | - side effect is required (allocating/scheduling a thread), execution is NOT separate from construction
50 | */
51 | import scala.concurrent.ExecutionContext.Implicits.global
52 | val aFuture: Future[Int] = Future(42)
53 |
54 |
55 | /*
56 | example: MyIO data type from the Monads lesson - it IS an effect type
57 | - describes any computation that might produce side effects
58 | - calculates a value of type A, if it's successful
59 | - side effects are required for the evaluation of () => A
60 | - YES, the creation of MyIO does NOT produce the side effects on construction
61 | */
62 | case class MyIO[A](unsafeRun: () => A) {
63 | def map[B](f: A => B): MyIO[B] =
64 | MyIO(() => f(unsafeRun()))
65 |
66 | def flatMap[B](f: A => MyIO[B]): MyIO[B] =
67 | MyIO(() => f(unsafeRun()).unsafeRun())
68 | }
69 |
70 | val anIO: MyIO[Int] = MyIO(() => {
71 | println("I'm writing something...")
72 | 42
73 | })
74 |
75 | /**
76 | * Exercises
77 | * 1. An IO which returns the current time of the system
78 | * 2. An IO which measures the duration of a computation (hint: use ex 1)
79 | * 3. An IO which prints something to the console
80 | * 4. An IO which reads a line (a string) from the std input
81 | */
82 |
83 | // 1
84 | val clock: MyIO[Long] = MyIO(() => System.currentTimeMillis())
85 |
86 | // 2
87 | def measure[A](computation: MyIO[A]): MyIO[Long] = for {
88 | startTime <- clock
89 | _ <- computation
90 | finishTime <- clock
91 | } yield finishTime - startTime
92 |
93 | /*
94 | Deconstruction:
95 |
96 | clock.flatMap(startTime => computation.flatMap(_ => clock.map(finishTime => finishTime - startTime)))
97 |
98 | Part 3:
99 | clock.map(finishTime => finishTime - startTime) = MyIO(() => System.currentTimeMillis() - startTime)
100 | => clock.flatMap(startTime => computation.flatMap(_ => MyIO(() => System.currentTimeMillis() - startTime)))
101 |
102 | Part 2:
103 | computation.flatMap(lambda) = MyIO(() => lambda(___COMP___).unsafeRun())
104 | = MyIO(() => MyIO(() => System.currentTimeMillis() - startTime).unsafeRun())
105 | = MyIO(() => System.currentTimeMillis_after_computation() - startTime)
106 |
107 | Part 1:
108 | clock.flatMap(startTime => MyIO(() => System.currentTimeMillis_after_computation() - startTime))
109 | = MyIO(() => MyIO(() => System.currentTimeMillis_after_computation() - System.currentTimeMillis()).unsafeRun())
110 | = MyIO(() => System.currentTimeMillis_after_computation() - System.currentTimeMillis_at_start())
111 |
112 | Conclusion:
113 | Deconstructing effects manually is hard. Scala & pure FP free up mental space for us to write complex code quickly.
114 | Cats Effect will simply be a set of tools to do that easily.
115 | */
116 |
117 | def testTimeIO(): Unit = {
118 | val test = measure(MyIO(() => Thread.sleep(1000)))
119 | println(test.unsafeRun())
120 | }
121 |
122 | // 3
123 | def putStrLn(line: String): MyIO[Unit] = MyIO(() => println(line))
124 |
125 | // 4
126 | val read: MyIO[String] = MyIO(() => StdIn.readLine())
127 |
128 | def testConsole(): Unit = {
129 | val program: MyIO[Unit] = for {
130 | line1 <- read
131 | line2 <- read
132 | _ <- putStrLn(line1 + line2)
133 | } yield ()
134 |
135 | program.unsafeRun()
136 | }
137 |
138 | def main(args: Array[String]): Unit = {
139 | testConsole()
140 | }
141 | }
142 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part2effects/IOApps.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part2effects
2 |
3 | import cats.effect.{ExitCode, IO, IOApp}
4 |
5 | import scala.io.StdIn
6 |
7 | object IOApps {
8 | val program = for {
9 | line <- IO(StdIn.readLine())
10 | _ <- IO(println(s"You've just written: $line"))
11 | } yield ()
12 | }
13 |
14 | object TestApp {
15 | import IOApps._
16 |
17 | def main(args: Array[String]): Unit = {
18 | import cats.effect.unsafe.implicits.global
19 | program.unsafeRunSync()
20 | }
21 | }
22 |
23 | object FirstCEApp extends IOApp {
24 | import IOApps._
25 |
26 | override def run(args: List[String]) =
27 | program.as(ExitCode.Success)
28 | }
29 |
30 | object MySimpleApp extends IOApp.Simple {
31 | import IOApps._
32 |
33 | override def run = program
34 | }
35 |
36 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part2effects/IOErrorHandling.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part2effects
2 |
3 | import cats.effect.IO
4 |
5 | import scala.util.{Failure, Success, Try}
6 |
7 | object IOErrorHandling {
8 |
9 | // IO: pure, delay, defer
10 | // create failed effects
11 | val aFailedCompute: IO[Int] = IO.delay(throw new RuntimeException("A FAILURE"))
12 | val aFailure: IO[Int] = IO.raiseError(new RuntimeException("a proper fail"))
13 |
14 | // handle exceptions
15 | val dealWithIt = aFailure.handleErrorWith {
16 | case _: RuntimeException => IO.delay(println("I'm still here"))
17 | // add more cases
18 | }
19 |
20 | // turn into an Either
21 | val effectAsEither: IO[Either[Throwable, Int]] = aFailure.attempt
22 | // redeem: transform the failure and the success in one go
23 | val resultAsString: IO[String] = aFailure.redeem(ex => s"FAIL: $ex", value => s"SUCCESS: $value")
24 | // redeemWith
25 | val resultAsEffect: IO[Unit] = aFailure.redeemWith(ex => IO(println(s"FAIL: $ex")), value => IO(println(s"SUCCESS: $value")))
26 |
27 | /**
28 | * Exercises
29 | */
30 | // 1 - construct potentially failed IOs from standard data types (Option, Try, Either)
31 | def option2IO[A](option: Option[A])(ifEmpty: Throwable): IO[A] =
32 | option match {
33 | case Some(value) => IO.pure(value)
34 | case None => IO.raiseError(ifEmpty)
35 | }
36 |
37 | def try2IO[A](aTry: Try[A]): IO[A] =
38 | aTry match {
39 | case Success(value) => IO.pure(value)
40 | case Failure(ex) => IO.raiseError(ex)
41 | }
42 |
43 | def either2IO[A](anEither: Either[Throwable, A]): IO[A] =
44 | anEither match {
45 | case Left(ex) => IO.raiseError(ex)
46 | case Right(value) => IO.pure(value)
47 | }
48 |
49 | // 2 - handleError, handleErrorWith
50 | def handleIOError[A](io: IO[A])(handler: Throwable => A): IO[A] =
51 | io.redeem(handler, identity)
52 |
53 | def handleIOErrorWith[A](io: IO[A])(handler: Throwable => IO[A]): IO[A] =
54 | io.redeemWith(handler, IO.pure)
55 |
56 | def main(args: Array[String]): Unit = {
57 | import cats.effect.unsafe.implicits.global
58 | resultAsEffect.unsafeRunSync()
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part2effects/IOIntroduction.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part2effects
2 |
3 | import cats.effect.IO
4 |
5 | import scala.io.StdIn
6 |
7 | object IOIntroduction {
8 |
9 | // IO
10 | val ourFirstIO: IO[Int] = IO.pure(42) // arg that should not have side effects
11 | val aDelayedIO: IO[Int] = IO.delay {
12 | println("I'm producing an integer")
13 | 54
14 | }
15 |
16 | val aDelayedIO_v2: IO[Int] = IO { // apply == delay
17 | println("I'm producing an integer")
18 | 54
19 | }
20 |
21 | // map, flatMap
22 | val improvedMeaningOfLife = ourFirstIO.map(_ * 2)
23 | val printedMeaningOfLife = ourFirstIO.flatMap(mol => IO.delay(println(mol)))
24 |
25 | def smallProgram(): IO[Unit] = for {
26 | line1 <- IO(StdIn.readLine())
27 | line2 <- IO(StdIn.readLine())
28 | _ <- IO.delay(println(line1 + line2))
29 | } yield ()
30 |
31 | // mapN - combine IO effects as tuples
32 | import cats.syntax.apply._
33 | val combinedMeaningOfLife: IO[Int] = (ourFirstIO, improvedMeaningOfLife).mapN(_ + _)
34 | def smallProgram_v2(): IO[Unit] =
35 | (IO(StdIn.readLine()), IO(StdIn.readLine())).mapN(_ + _).map(println)
36 |
37 |
38 | /**
39 | * Exercises
40 | */
41 |
42 | // 1 - sequence two IOs and take the result of the LAST one
43 | // hint: use flatMap
44 | def sequenceTakeLast[A, B](ioa: IO[A], iob: IO[B]): IO[B] =
45 | ioa.flatMap(_ => iob)
46 |
47 | def sequenceTakeLast_v2[A, B](ioa: IO[A], iob: IO[B]): IO[B] =
48 | ioa *> iob // "andThen"
49 |
50 | def sequenceTakeLast_v3[A, B](ioa: IO[A], iob: IO[B]): IO[B] =
51 | ioa >> iob // "andThen" with by-name call
52 |
53 | // 2 - sequence two IOs and take the result of the FIRST one
54 | // hint: use flatMap
55 | def sequenceTakeFirst[A, B](ioa: IO[A], iob: IO[B]): IO[A] =
56 | ioa.flatMap(a => iob.map(_ => a))
57 |
58 | def sequenceTakeFirst_v2[A, B](ioa: IO[A], iob: IO[B]): IO[A] =
59 | ioa <* iob
60 |
61 | // 3 - repeat an IO effect forever
62 | // hint: use flatMap + recursion
63 | def forever[A](io: IO[A]): IO[A] =
64 | io.flatMap(_ => forever(io))
65 |
66 | def forever_v2[A](io: IO[A]): IO[A] =
67 | io >> forever_v2(io) // same
68 |
69 | def forever_v3[A](io: IO[A]): IO[A] =
70 | io *> forever_v3(io) // same
71 |
72 | def forever_v4[A](io: IO[A]): IO[A] =
73 | io.foreverM // with tail recursion
74 |
75 | // 4 - convert an IO to a different type
76 | // hint: use map
77 | def convert[A, B](ioa: IO[A], value: B): IO[B] =
78 | ioa.map(_ => value)
79 |
80 | def convert_v2[A, B](ioa: IO[A], value: B): IO[B] =
81 | ioa.as(value) // same
82 |
83 | // 5 - discard value inside an IO, just return Unit
84 | def asUnit[A](ioa: IO[A]): IO[Unit] =
85 | ioa.map(_ => ())
86 |
87 | def asUnit_v2[A](ioa: IO[A]): IO[Unit] =
88 | ioa.as(()) // discouraged - don't use this
89 |
90 | def asUnit_v3[A](ioa: IO[A]): IO[Unit] =
91 | ioa.void // same - encouraged
92 |
93 | // 6 - fix stack recursion
94 | def sum(n: Int): Int =
95 | if (n <= 0) 0
96 | else n + sum(n - 1)
97 |
98 | def sumIO(n: Int): IO[Int] =
99 | if (n <= 0) IO(0)
100 | else for {
101 | lastNumber <- IO(n)
102 | prevSum <- sumIO(n - 1)
103 | } yield prevSum + lastNumber
104 |
105 | // 7 (hard) - write a fibonacci IO that does NOT crash on recursion
106 | // hints: use recursion, ignore exponential complexity, use flatMap heavily
107 | def fibonacci(n: Int): IO[BigInt] =
108 | if (n < 2) IO(1)
109 | else for {
110 | last <- IO.defer(fibonacci(n - 1)) // same as .delay(...).flatten
111 | prev <- IO.defer(fibonacci(n - 2))
112 | } yield last + prev
113 |
114 | def main(args: Array[String]): Unit = {
115 | import cats.effect.unsafe.implicits.global // "platform"
116 | // "end of the world"
117 | (1 to 100).foreach(i => println(fibonacci(i).unsafeRunSync()))
118 | }
119 | }
120 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part2effects/IOParallelism.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part2effects
2 |
3 | import cats.Parallel
4 | import cats.effect.{IO, IOApp}
5 |
6 | object IOParallelism extends IOApp.Simple {
7 |
8 | // IOs are usually sequential
9 | val aniIO = IO(s"[${Thread.currentThread().getName}] Ani")
10 | val kamranIO = IO(s"[${Thread.currentThread().getName}] Kamran")
11 |
12 | val composedIO = for {
13 | ani <- aniIO
14 | kamran <- kamranIO
15 | } yield s"$ani and $kamran love Rock the JVM"
16 |
17 | // debug extension method
18 | import com.rockthejvm.utils._
19 | // mapN extension method
20 | import cats.syntax.apply._
21 | val meaningOfLife: IO[Int] = IO.delay(42)
22 | val favLang: IO[String] = IO.delay("Scala")
23 | val goalInLife = (meaningOfLife.debug, favLang.debug).mapN((num, string) => s"my goal in life is $num and $string")
24 |
25 | // parallelism on IOs
26 | // convert a sequential IO to parallel IO
27 | val parIO1: IO.Par[Int] = Parallel[IO].parallel(meaningOfLife.debug)
28 | val parIO2: IO.Par[String] = Parallel[IO].parallel(favLang.debug)
29 | import cats.effect.implicits._
30 | val goalInLifeParallel: IO.Par[String] = (parIO1, parIO2).mapN((num, string) => s"my goal in life is $num and $string")
31 | // turn back to sequential
32 | val goalInLife_v2: IO[String] = Parallel[IO].sequential(goalInLifeParallel)
33 |
34 | // shorthand:
35 | import cats.syntax.parallel._
36 | val goalInLife_v3: IO[String] = (meaningOfLife.debug, favLang.debug).parMapN((num, string) => s"my goal in life is $num and $string")
37 |
38 | // regarding failure:
39 | val aFailure: IO[String] = IO.raiseError(new RuntimeException("I can't do this!"))
40 | // compose success + failure
41 | val parallelWithFailure = (meaningOfLife.debug, aFailure.debug).parMapN((num, string) => s"$num $string")
42 | // compose failure + failure
43 | val anotherFailure: IO[String] = IO.raiseError(new RuntimeException("Second failure"))
44 | val twoFailures: IO[String] = (aFailure.debug, anotherFailure.debug).parMapN(_ + _)
45 | // the first effect to fail gives the failure of the result
46 | val twoFailuresDelayed: IO[String] = (IO(Thread.sleep(1000)) >> aFailure.debug, anotherFailure.debug).parMapN(_ + _)
47 |
48 |
49 | override def run: IO[Unit] =
50 | twoFailuresDelayed.debug.void
51 | }
52 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part2effects/IOTraversal.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part2effects
2 |
3 | import cats.effect.{IO, IOApp}
4 |
5 | import scala.concurrent.Future
6 | import scala.util.Random
7 |
8 | object IOTraversal extends IOApp.Simple {
9 |
10 | import scala.concurrent.ExecutionContext.Implicits.global
11 |
12 | def heavyComputation(string: String): Future[Int] = Future {
13 | Thread.sleep(Random.nextInt(1000))
14 | string.split(" ").length
15 | }
16 |
17 | val workLoad: List[String] = List("I quite like CE", "Scala is great", "looking forward to some awesome stuff")
18 |
19 | def clunkyFutures(): Unit = {
20 | val futures: List[Future[Int]] = workLoad.map(heavyComputation)
21 | // Future[List[Int]] would be hard to obtain
22 | futures.foreach(_.foreach(println))
23 | }
24 |
25 | import cats.Traverse
26 | import cats.instances.list._
27 | val listTraverse = Traverse[List]
28 |
29 | def traverseFutures(): Unit = {
30 | // traverse
31 | val singleFuture: Future[List[Int]] = listTraverse.traverse(workLoad)(heavyComputation)
32 | // ^^ this stores ALL the results
33 | singleFuture.foreach(println)
34 | }
35 |
36 | import com.rockthejvm.utils._
37 |
38 | // traverse for IO
39 | def computeAsIO(string: String): IO[Int] = IO {
40 | Thread.sleep(Random.nextInt(1000))
41 | string.split(" ").length
42 | }.debug
43 |
44 | val ios: List[IO[Int]] = workLoad.map(computeAsIO)
45 | val singleIO: IO[List[Int]] = listTraverse.traverse(workLoad)(computeAsIO)
46 |
47 | // parallel traversal
48 | import cats.syntax.parallel._ // parTraverse extension method
49 | val parallelSingleIO: IO[List[Int]] = workLoad.parTraverse(computeAsIO)
50 |
51 | /**
52 | * Exercises
53 | */
54 | // hint: use Traverse API
55 | def sequence[A](listOfIOs: List[IO[A]]): IO[List[A]] =
56 | listTraverse.traverse(listOfIOs)(x => x)
57 |
58 | // hard version
59 | def sequence_v2[F[_] : Traverse, A](wrapperOfIOs: F[IO[A]]): IO[F[A]] =
60 | Traverse[F].traverse(wrapperOfIOs)(x => x)
61 |
62 | // parallel version
63 | def parSequence[A](listOfIOs: List[IO[A]]): IO[List[A]] =
64 | listOfIOs.parTraverse(x => x)
65 |
66 | // hard version
67 | def parSequence_v2[F[_] : Traverse, A](wrapperOfIOs: F[IO[A]]): IO[F[A]] =
68 | wrapperOfIOs.parTraverse(x => x)
69 |
70 | // existing sequence API
71 | val singleIO_v2: IO[List[Int]] = listTraverse.sequence(ios)
72 |
73 | // parallel sequencing
74 | val parallelSingleIO_v2: IO[List[Int]] = parSequence(ios) // from the exercise
75 | val parallelSingleIO_v3: IO[List[Int]] = ios.parSequence // extension method from the Parallel syntax package
76 |
77 | override def run =
78 | parallelSingleIO_v3.map(_.sum).debug.void
79 | }
80 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part3concurrency/AsyncIOs.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part3concurrency
2 |
3 | import cats.effect.{IO, IOApp}
4 |
5 | import java.util.concurrent.Executors
6 | import scala.concurrent.{ExecutionContext, Future}
7 | import scala.util.Try
8 | import com.rockthejvm.utils._
9 |
10 | object AsyncIOs extends IOApp.Simple {
11 |
12 | // IOs can run asynchronously on fibers, without having to manually manage the fiber lifecycle
13 | val threadPool = Executors.newFixedThreadPool(8)
14 | implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(threadPool)
15 | type Callback[A] = Either[Throwable, A] => Unit
16 |
17 | def computeMeaningOfLife(): Int = {
18 | Thread.sleep(1000)
19 | println(s"[${Thread.currentThread().getName}] computing the meaning of life on some other thread...")
20 | 42
21 | }
22 |
23 | def computeMeaningOfLifeEither(): Either[Throwable, Int] = Try {
24 | computeMeaningOfLife()
25 | }.toEither
26 |
27 | def computeMolOnThreadPool(): Unit =
28 | threadPool.execute(() => computeMeaningOfLife())
29 |
30 | // lift computation to an IO
31 | // async is a FFI
32 | val asyncMolIO: IO[Int] = IO.async_ { (cb: Callback[Int]) => // CE thread blocks (semantically) until this cb is invoked (by some other thread)
33 | threadPool.execute { () => // computation not managed by CE
34 | val result = computeMeaningOfLifeEither()
35 | cb(result) // CE thread is notified with the result
36 | }
37 | }
38 |
39 | /**
40 | * Exercise: lift an async computation on ec to an IO.
41 | */
42 | def asyncToIO[A](computation: () => A)(ec: ExecutionContext): IO[A] =
43 | IO.async_[A] { (cb: Callback[A]) =>
44 | ec.execute { () =>
45 | val result = Try(computation()).toEither
46 | cb(result)
47 | }
48 | }
49 |
50 | val asyncMolIO_v2: IO[Int] = asyncToIO(computeMeaningOfLife)(ec)
51 |
52 | /**
53 | * Exercise: lift an async computation as a Future, to an IO.
54 | */
55 | def convertFutureToIO[A](future: => Future[A]): IO[A] =
56 | IO.async_ { (cb: Callback[A]) =>
57 | future.onComplete { tryResult =>
58 | val result = tryResult.toEither
59 | cb(result)
60 | }
61 | }
62 |
63 | lazy val molFuture: Future[Int] = Future { computeMeaningOfLife() }
64 | val asyncMolIO_v3: IO[Int] = convertFutureToIO(molFuture)
65 | val asyncMolIO_v4: IO[Int] = IO.fromFuture(IO(molFuture))
66 |
67 | /**
68 | * Exercise: a never-ending IO?
69 | */
70 | val neverEndingIO: IO[Int] = IO.async_[Int](_ => ()) // no callback, no finish
71 | val neverEndingIO_v2: IO[Int] = IO.never
72 |
73 | import scala.concurrent.duration._
74 |
75 | /*
76 | FULL ASYNC Call
77 | */
78 | def demoAsyncCancellation() = {
79 | val asyncMeaningOfLifeIO_v2: IO[Int] = IO.async { (cb: Callback[Int]) =>
80 | /*
81 | finalizer in case computation gets cancelled.
82 | finalizers are of type IO[Unit]
83 | not specifying finalizer => Option[IO[Unit]]
84 | creating option is an effect => IO[Option[IO[Unit]]]
85 | */
86 | // return IO[Option[IO[Unit]]]
87 | IO {
88 | threadPool.execute { () =>
89 | val result = computeMeaningOfLifeEither()
90 | cb(result)
91 | }
92 | }.as(Some(IO("Cancelled!").debug.void))
93 | }
94 |
95 | for {
96 | fib <- asyncMeaningOfLifeIO_v2.start
97 | _ <- IO.sleep(500.millis) >> IO("cancelling...").debug >> fib.cancel
98 | _ <- fib.join
99 | } yield ()
100 | }
101 |
102 |
103 | override def run = demoAsyncCancellation().debug >> IO(threadPool.shutdown())
104 | }
105 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part3concurrency/BlockingIOs.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part3concurrency
2 |
3 | import cats.effect.{IO, IOApp}
4 | import com.rockthejvm.utils.*
5 |
6 | import java.util.concurrent.Executors
7 | import scala.concurrent.ExecutionContext
8 | import scala.concurrent.duration.*
9 |
10 | object BlockingIOs extends IOApp.Simple {
11 |
12 | val someSleeps = for {
13 | _ <- IO.sleep(1.second).debug // SEMANTIC BLOCKING - no threads are actually blocked, CE assigns this thread to some other fiber
14 | _ <- IO.sleep(1.second).debug
15 | } yield ()
16 |
17 | // really blocking IOs
18 | val aBlockingIO = IO.blocking {
19 | Thread.sleep(1000)
20 | println(s"[${Thread.currentThread().getName}] computed a blocking code")
21 | 42
22 | } // will evaluate on a thread from ANOTHER thread pool specific for blocking calls
23 |
24 | // yielding
25 | val iosOnManyThreads = for {
26 | _ <- IO("first").debug
27 | _ <- IO.cede // a signal to yield control over the thread - equivalent to IO.shift from CE2
28 | _ <- IO("second").debug // the rest of this effect may run on another thread (not necessarily)
29 | _ <- IO.cede
30 | _ <- IO("third").debug
31 | } yield ()
32 |
33 | def testThousandEffectsSwitch() = {
34 | val ec: ExecutionContext = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(8))
35 | (1 to 1000).map(IO.pure).reduce(_.debug >> IO.cede >> _.debug).evalOn(ec)
36 | }
37 |
38 | /*
39 | Blocking calls & IO.sleep yield control over the calling thread automatically.
40 | */
41 |
42 | override def run = testThousandEffectsSwitch().void
43 | }
44 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part3concurrency/CancellingIOs.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part3concurrency
2 |
3 | import cats.effect.{IO, IOApp}
4 | import scala.concurrent.duration._
5 |
6 | object CancellingIOs extends IOApp.Simple {
7 |
8 | import com.rockthejvm.utils._
9 |
10 | /*
11 | Cancelling IOs
12 | - fib.cancel
13 | - IO.race & other APIs
14 | - manual cancellation
15 | */
16 | val chainOfIOs: IO[Int] = IO("waiting").debug >> IO.canceled >> IO(42).debug
17 |
18 | // uncancelable
19 | // example: online store, payment processor
20 | // payment process must NOT be canceled
21 | val specialPaymentSystem = (
22 | IO("Payment running, don't cancel me...").debug >>
23 | IO.sleep(1.second) >>
24 | IO("Payment completed.").debug
25 | ).onCancel(IO("MEGA CANCEL OF DOOM!").debug.void)
26 |
27 | val cancellationOfDoom = for {
28 | fib <- specialPaymentSystem.start
29 | _ <- IO.sleep(500.millis) >> fib.cancel
30 | _ <- fib.join
31 | } yield ()
32 |
33 | val atomicPayment = IO.uncancelable(_ => specialPaymentSystem) // "masking"
34 | val atomicPayment_v2 = specialPaymentSystem.uncancelable // same
35 |
36 | val noCancellationOfDoom = for {
37 | fib <- atomicPayment.start
38 | _ <- IO.sleep(500.millis) >> IO("attempting cancellation...").debug >> fib.cancel
39 | _ <- fib.join
40 | } yield ()
41 |
42 | /*
43 | The uncancelable API is more complex and more general.
44 | It takes a function from Poll[IO] to IO. In the example above, we aren't using that Poll instance.
45 | The Poll object can be used to mark sections within the returned effect which CAN BE CANCELED.
46 | */
47 |
48 | /*
49 | Example: authentication service. Has two parts:
50 | - input password, can be cancelled, because otherwise we might block indefinitely on user input
51 | - verify password, CANNOT be cancelled once it's started
52 | */
53 | val inputPassword = IO("Input password:").debug >> IO("(typing password)").debug >> IO.sleep(2.seconds) >> IO("RockTheJVM1!")
54 | val verifyPassword = (pw: String) => IO("verifying...").debug >> IO.sleep(2.seconds) >> IO(pw == "RockTheJVM1!")
55 |
56 | val authFlow: IO[Unit] = IO.uncancelable { poll =>
57 | for {
58 | pw <- poll(inputPassword).onCancel(IO("Authentication timed out. Try again later.").debug.void) // this is cancelable
59 | verified <- verifyPassword(pw) // this is NOT cancelable
60 | _ <- if (verified) IO("Authentication successful.").debug // this is NOT cancelable
61 | else IO("Authentication failed.").debug
62 | } yield ()
63 | }
64 |
65 | val authProgram = for {
66 | authFib <- authFlow.start
67 | _ <- IO.sleep(3.seconds) >> IO("Authentication timeout, attempting cancel...").debug >> authFib.cancel
68 | _ <- authFib.join
69 | } yield ()
70 |
71 | /*
72 | Uncancelable calls are MASKS which suppress cancellation.
73 | Poll calls are "gaps opened" in the uncancelable region.
74 | */
75 |
76 | /**
77 | * Exercises: what do you think the following effects will do?
78 | * 1. Anticipate
79 | * 2. Run to see if you're correct
80 | * 3. Prove your theory
81 | */
82 | // 1
83 | val cancelBeforeMol = IO.canceled >> IO(42).debug
84 | val uncancelableMol = IO.uncancelable(_ => IO.canceled >> IO(42).debug)
85 | // uncancelable will eliminate ALL cancel points
86 |
87 | // 2
88 | val invincibleAuthProgram = for {
89 | authFib <- IO.uncancelable(_ => authFlow).start
90 | _ <- IO.sleep(1.seconds) >> IO("Authentication timeout, attempting cancel...").debug >> authFib.cancel
91 | _ <- authFib.join
92 | } yield ()
93 | /*
94 | Lesson: Uncancelable calls are masks which suppress all existing cancelable gaps (including from a previous uncancelable).
95 | */
96 |
97 | // 3
98 | def threeStepProgram(): IO[Unit] = {
99 | val sequence = IO.uncancelable { poll =>
100 | poll(IO("cancelable").debug >> IO.sleep(1.second) >> IO("cancelable end").debug) >>
101 | IO("uncancelable").debug >> IO.sleep(1.second) >> IO("uncancelable end").debug >>
102 | poll(IO("second cancelable").debug >> IO.sleep(1.second) >> IO("second cancelable end").debug)
103 | }
104 |
105 | for {
106 | fib <- sequence.start
107 | _ <- IO.sleep(1500.millis) >> IO("CANCELING").debug >> fib.cancel
108 | _ <- fib.join
109 | } yield ()
110 | }
111 | /*
112 | Lesson: Uncancelable regions ignore cancellation signals, but that doesn't mean the next CANCELABLE region won't take them.
113 | */
114 |
115 |
116 | override def run = threeStepProgram()
117 | }
118 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part3concurrency/Fibers.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part3concurrency
2 |
3 | import cats.effect.kernel.Outcome.{Canceled, Errored, Succeeded}
4 | import cats.effect.{Fiber, IO, IOApp, Outcome}
5 | import scala.concurrent.duration._
6 |
7 | object Fibers extends IOApp.Simple {
8 |
9 | val meaningOfLife = IO.pure(42)
10 | val favLang = IO.pure("Scala")
11 |
12 | import com.rockthejvm.utils._
13 |
14 | def sameThreadIOs() = for {
15 | _ <- meaningOfLife.debug
16 | _ <- favLang.debug
17 | } yield ()
18 |
19 | // introducing Fiber: a data structure describing an effect running on some thread
20 | def createFiber: Fiber[IO, Throwable, String] = ??? // almost impossible to create fibers manually
21 |
22 | // the fiber is not actually started, but the fiber allocation is wrapped in another effect
23 | val aFiber: IO[Fiber[IO, Throwable, Int]] = meaningOfLife.debug.start
24 |
25 | def differentThreadIOs() = for {
26 | _ <- aFiber
27 | _ <- favLang.debug
28 | } yield ()
29 |
30 | // joining a fiber
31 | def runOnSomeOtherThread[A](io: IO[A]): IO[Outcome[IO, Throwable, A]] = for {
32 | fib <- io.start
33 | result <- fib.join // an effect which waits for the fiber to terminate
34 | } yield result
35 | /*
36 | possible outcomes:
37 | - success with an IO
38 | - failure with an exception
39 | - cancelled
40 | */
41 |
42 | val someIOOnAnotherThread = runOnSomeOtherThread(meaningOfLife)
43 | val someResultFromAnotherThread = someIOOnAnotherThread.flatMap {
44 | case Succeeded(effect) => effect
45 | case Errored(e) => IO(0)
46 | case Canceled() => IO(0)
47 | }
48 |
49 | def throwOnAnotherThread() = for {
50 | fib <- IO.raiseError[Int](new RuntimeException("no number for you")).start
51 | result <- fib.join
52 | } yield result
53 |
54 | def testCancel() = {
55 | val task = IO("starting").debug >> IO.sleep(1.second) >> IO("done").debug
56 | // onCancel is a "finalizer", allowing you to free up resources in case you get canceled
57 | val taskWithCancellationHandler = task.onCancel(IO("I'm being cancelled!").debug.void)
58 |
59 | for {
60 | fib <- taskWithCancellationHandler.start // on a separate thread
61 | _ <- IO.sleep(500.millis) >> IO("cancelling").debug // running on the calling thread
62 | _ <- fib.cancel
63 | result <- fib.join
64 | } yield result
65 | }
66 |
67 |
68 | /**
69 | * Exercises:
70 | * 1. Write a function that runs an IO on another thread, and, depending on the result of the fiber
71 | * - return the result in an IO
72 | * - if errored or cancelled, return a failed IO
73 | *
74 | * 2. Write a function that takes two IOs, runs them on different fibers and returns an IO with a tuple containing both results.
75 | * - if both IOs complete successfully, tuple their results
76 | * - if the first IO returns an error, raise that error (ignoring the second IO's result/error)
77 | * - if the first IO doesn't error but the second IO returns an error, raise that error
78 | * - if one (or both) canceled, raise a RuntimeException
79 | *
80 | * 3. Write a function that adds a timeout to an IO:
81 | * - IO runs on a fiber
82 | * - if the timeout duration passes, then the fiber is canceled
83 | * - the method returns an IO[A] which contains
84 | * - the original value if the computation is successful before the timeout signal
85 | * - the exception if the computation is failed before the timeout signal
86 | * - a RuntimeException if it times out (i.e. cancelled by the timeout)
87 | */
88 | // 1
89 | def processResultsFromFiber[A](io: IO[A]): IO[A] = {
90 | val ioResult = for {
91 | fib <- io.debug.start
92 | result <- fib.join
93 | } yield result
94 |
95 | ioResult.flatMap {
96 | case Succeeded(fa) => fa
97 | case Errored(e) => IO.raiseError(e)
98 | case Canceled() => IO.raiseError(new RuntimeException("Computation canceled."))
99 | }
100 | }
101 |
102 | def testEx1() = {
103 | val aComputation = IO("starting").debug >> IO.sleep(1.second) >> IO("done!").debug >> IO(42)
104 | processResultsFromFiber(aComputation).void
105 | }
106 |
107 | // 2
108 | def tupleIOs[A, B](ioa: IO[A], iob: IO[B]): IO[(A, B)] = {
109 | val result = for {
110 | fiba <- ioa.start
111 | fibb <- iob.start
112 | resulta <- fiba.join
113 | resultb <- fibb.join
114 | } yield (resulta, resultb)
115 |
116 | result.flatMap {
117 | case (Succeeded(fa), Succeeded(fb)) => for {
118 | a <- fa
119 | b <- fb
120 | } yield (a, b)
121 | case (Errored(e), _) => IO.raiseError(e)
122 | case (_, Errored(e)) => IO.raiseError(e)
123 | case _ => IO.raiseError(new RuntimeException("Some computation canceled."))
124 | }
125 | }
126 |
127 | def testEx2() = {
128 | val firstIO = IO.sleep(2.seconds) >> IO(1).debug
129 | val secondIO = IO.sleep(3.seconds) >> IO(2).debug
130 | tupleIOs(firstIO, secondIO).debug.void
131 | }
132 |
133 | // 3
134 | def timeout[A](io: IO[A], duration: FiniteDuration): IO[A] = {
135 | val computation = for {
136 | fib <- io.start
137 | _ <- (IO.sleep(duration) >> fib.cancel).start // careful - fibers can leak
138 | result <- fib.join
139 | } yield result
140 |
141 | computation.flatMap {
142 | case Succeeded(fa) => fa
143 | case Errored(e) => IO.raiseError(e)
144 | case Canceled() => IO.raiseError(new RuntimeException("Computation canceled."))
145 | }
146 | }
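  // Hedged follow-up sketch (not part of the lesson, name is hypothetical): one way to avoid
  // leaking the sleeper fiber flagged above is to keep a handle to it and cancel it once the
  // main fiber has finished.
  def timeoutNoLeak[A](io: IO[A], duration: FiniteDuration): IO[A] = {
    val computation = for {
      fib     <- io.start
      sleeper <- (IO.sleep(duration) >> fib.cancel).start
      result  <- fib.join
      _       <- sleeper.cancel // no-op if the timeout already fired, cleanup otherwise
    } yield result

    computation.flatMap {
      case Succeeded(fa) => fa
      case Errored(e)    => IO.raiseError(e)
      case Canceled()    => IO.raiseError(new RuntimeException("Computation canceled (possibly timed out)."))
    }
  }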
147 |
148 | def testEx3() = {
149 | val aComputation = IO("starting").debug >> IO.sleep(1.second) >> IO("done!").debug >> IO(42)
150 | timeout(aComputation, 500.millis).debug.void
151 | }
152 |
153 | override def run = testEx3()
154 | }
155 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part3concurrency/RacingIOs.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part3concurrency
2 |
3 | import cats.effect.kernel.Outcome
4 | import cats.effect.kernel.Outcome.{Canceled, Errored, Succeeded}
5 | import cats.effect.{Fiber, IO, IOApp}
6 |
7 | import scala.concurrent.duration.{FiniteDuration, *}
8 |
9 | object RacingIOs extends IOApp.Simple {
10 |
11 | import com.rockthejvm.utils.*
12 |
13 | def runWithSleep[A](value: A, duration: FiniteDuration): IO[A] =
14 | (
15 | IO(s"starting computation: $value").debug >>
16 | IO.sleep(duration) >>
17 | IO(s"computation for $value: done").debug >>
18 | IO(value)
19 | ).onCancel(IO(s"computation CANCELED for $value").debug.void)
20 |
21 | def testRace() = {
22 | val meaningOfLife = runWithSleep(42, 1.second)
23 | val favLang = runWithSleep("Scala", 2.seconds)
24 | val first: IO[Either[Int, String]] = IO.race(meaningOfLife, favLang)
25 | /*
26 | - both IOs run on separate fibers
27 | - the first one to finish will complete the result
28 | - the loser will be canceled
29 | */
30 |
31 | first.flatMap {
32 | case Left(mol) => IO(s"Meaning of life won: $mol")
33 | case Right(lang) => IO(s"Fav language won: $lang")
34 | }
35 | }
36 |
37 | def testRacePair() = {
38 | val meaningOfLife = runWithSleep(42, 1.second)
39 | val favLang = runWithSleep("Scala", 2.seconds)
40 | val raceResult: IO[Either[
41 | (Outcome[IO, Throwable, Int], Fiber[IO, Throwable, String]), // (winner result, loser fiber)
42 | (Fiber[IO, Throwable, Int], Outcome[IO, Throwable, String]) // (loser fiber, winner result)
43 | ]] = IO.racePair(meaningOfLife, favLang)
44 |
45 | raceResult.flatMap {
46 | case Left((outMol, fibLang)) => fibLang.cancel >> IO("MOL won").debug >> IO(outMol).debug
47 | case Right((fibMol, outLang)) => fibMol.cancel >> IO("Language won").debug >> IO(outLang).debug
48 | }
49 | }
50 |
51 | /**
52 | * Exercises:
53 | * 1 - implement a timeout pattern with race
54 | * 2 - a method to return a LOSING effect from a race (hint: use racePair)
55 | * 3 - implement race in terms of racePair
56 | */
57 | // 1
58 | def timeout[A](io: IO[A], duration: FiniteDuration): IO[A] = {
59 | val timeoutEffect = IO.sleep(duration)
60 | val result = IO.race(io, timeoutEffect)
61 |
62 | result.flatMap {
63 | case Left(v) => IO(v)
64 | case Right(_) => IO.raiseError(new RuntimeException("Computation timed out."))
65 | }
66 | }
67 |
68 | val importantTask = IO.sleep(2.seconds) >> IO(42).debug
69 | val testTimeout = timeout(importantTask, 1.seconds)
70 | val testTimeout_v2 = importantTask.timeout(1.seconds)
71 |
72 | // 2
73 | def unrace[A, B](ioa: IO[A], iob: IO[B]): IO[Either[A, B]] =
74 | IO.racePair(ioa, iob).flatMap {
75 | case Left((_, fibB)) => fibB.join.flatMap {
76 | case Succeeded(resultEffect) => resultEffect.map(result => Right(result))
77 | case Errored(e) => IO.raiseError(e)
78 | case Canceled() => IO.raiseError(new RuntimeException("Loser canceled."))
79 | }
80 | case Right((fibA, _)) => fibA.join.flatMap {
81 | case Succeeded(resultEffect) => resultEffect.map(result => Left(result))
82 | case Errored(e) => IO.raiseError(e)
83 | case Canceled() => IO.raiseError(new RuntimeException("Loser canceled."))
84 | }
85 | }
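  // Hedged usage sketch (not in the lesson): with these durations the first IO wins the race,
  // so unrace waits for and returns the LOSER's value, i.e. Right(2).
  val testUnrace = unrace(IO.sleep(1.second) >> IO(1).debug, IO.sleep(2.seconds) >> IO(2).debug).debug.void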
86 |
87 | // 3
88 | def simpleRace[A, B](ioa: IO[A], iob: IO[B]): IO[Either[A, B]] =
89 | IO.racePair(ioa, iob).flatMap {
90 | case Left((outA, fibB)) => outA match {
91 | case Succeeded(effectA) => fibB.cancel >> effectA.map(a => Left(a))
92 | case Errored(e) => fibB.cancel >> IO.raiseError(e)
93 | case Canceled() => fibB.join.flatMap {
94 | case Succeeded(effectB) => effectB.map(b => Right(b))
95 | case Errored(e) => IO.raiseError(e)
96 | case Canceled() => IO.raiseError(new RuntimeException("Both computations canceled."))
97 | }
98 | }
99 | case Right((fibA, outB)) => outB match {
100 | case Succeeded(effectB) => fibA.cancel >> effectB.map(b => Right(b))
101 | case Errored(e) => fibA.cancel >> IO.raiseError(e)
102 | case Canceled() => fibA.join.flatMap {
103 | case Succeeded(effectA) => effectA.map(a => Left(a))
104 | case Errored(e) => IO.raiseError(e)
105 | case Canceled() => IO.raiseError(new RuntimeException("Both computations canceled."))
106 | }
107 | }
108 | }
109 |
110 | override def run = testRace().debug.void
111 | }
112 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part3concurrency/Resources.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part3concurrency
2 |
3 | import cats.effect.kernel.Outcome.{Canceled, Errored, Succeeded}
4 | import cats.effect.{IO, IOApp, Resource}
5 |
6 | import java.io.{File, FileReader}
7 | import java.util.Scanner
8 | import scala.concurrent.duration._
9 |
10 |
11 | object Resources extends IOApp.Simple {
12 |
13 | import com.rockthejvm.utils._
14 |
15 | // use-case: manage a connection lifecycle
16 | class Connection(url: String) {
17 | def open(): IO[String] = IO(s"opening connection to $url").debug
18 | def close(): IO[String] = IO(s"closing connection to $url").debug
19 | }
20 |
21 | val asyncFetchUrl = for {
22 | fib <- (new Connection("rockthejvm.com").open() *> IO.sleep((Int.MaxValue).seconds)).start
23 | _ <- IO.sleep(1.second) *> fib.cancel
24 | } yield ()
25 | // problem: leaking resources
26 |
27 | val correctAsyncFetchUrl = for {
28 | conn <- IO(new Connection("rockthejvm.com"))
29 | fib <- (conn.open() *> IO.sleep((Int.MaxValue).seconds)).onCancel(conn.close().void).start
30 | _ <- IO.sleep(1.second) *> fib.cancel
31 | } yield ()
32 |
33 | /*
34 | bracket pattern: someIO.bracket(useResourceCb)(releaseResourceCb)
35 | bracket is equivalent to try-catches (pure FP)
36 | */
37 | val bracketFetchUrl = IO(new Connection("rockthejvm.com"))
38 | .bracket(conn => conn.open() *> IO.sleep(Int.MaxValue.seconds))(conn => conn.close().void)
39 |
40 | val bracketProgram = for {
41 | fib <- bracketFetchUrl.start
42 | _ <- IO.sleep(1.second) *> fib.cancel
43 | } yield ()
44 |
45 | /**
46 | * Exercise: read the file with the bracket pattern
47 | * - open a scanner
48 | * - read the file line by line, every 100 millis
49 | * - close the scanner
50 | * - if cancelled/throws error, close the scanner
51 | */
52 | def openFileScanner(path: String): IO[Scanner] =
53 | IO(new Scanner(new FileReader(new File(path))))
54 |
55 | def readLineByLine(scanner: Scanner): IO[Unit] =
56 | if (scanner.hasNextLine) IO(scanner.nextLine()).debug >> IO.sleep(100.millis) >> readLineByLine(scanner)
57 | else IO.unit
58 |
59 | def bracketReadFile(path: String): IO[Unit] =
60 | IO(s"opening file at $path").debug >>
61 | openFileScanner(path).bracket { scanner =>
62 | readLineByLine(scanner)
63 | } { scanner =>
64 | IO(s"closing file at $path").debug >> IO(scanner.close())
65 | }
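  // Hedged usage sketch (helper name and path are illustrative): even if the reading fiber is
  // cancelled mid-way, bracket still runs the release callback, so the scanner gets closed.
  def demoBracketRead(path: String): IO[Unit] = for {
    fib <- bracketReadFile(path).start
    _   <- IO.sleep(2.seconds) >> fib.cancel
  } yield ()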
66 |
67 | /**
68 | * Resources
69 | */
70 | def connFromConfig(path: String): IO[Unit] =
71 | openFileScanner(path)
72 | .bracket { scanner =>
73 | // acquire a connection based on the file
74 | IO(new Connection(scanner.nextLine())).bracket { conn =>
75 | conn.open() >> IO.never
76 | }(conn => conn.close().void)
77 | }(scanner => IO("closing file").debug >> IO(scanner.close()))
78 | // nesting resources is tedious
79 |
80 | val connectionResource = Resource.make(IO(new Connection("rockthejvm.com")))(conn => conn.close().void)
81 | // ... at a later part of your code
82 |
83 | val resourceFetchUrl = for {
84 | fib <- connectionResource.use(conn => conn.open() >> IO.never).start
85 | _ <- IO.sleep(1.second) >> fib.cancel
86 | } yield ()
87 |
88 | // resources are equivalent to brackets
89 | val simpleResource = IO("some resource")
90 | val usingResource: String => IO[String] = string => IO(s"using the string: $string").debug
91 | val releaseResource: String => IO[Unit] = string => IO(s"finalizing the string: $string").debug.void
92 |
93 | val usingResourceWithBracket = simpleResource.bracket(usingResource)(releaseResource)
94 | val usingResourceWithResource = Resource.make(simpleResource)(releaseResource).use(usingResource)
95 |
96 | /**
97 | * Exercise: read a text file with one line every 100 millis, using Resource
98 | * (refactor the bracket exercise to use Resource)
99 | */
100 | def getResourceFromFile(path: String) = Resource.make(openFileScanner(path)) { scanner =>
101 | IO(s"closing file at $path").debug >> IO(scanner.close())
102 | }
103 |
104 | def resourceReadFile(path: String) =
105 | IO(s"opening file at $path").debug >>
106 | getResourceFromFile(path).use { scanner =>
107 | readLineByLine(scanner)
108 | }
109 |
110 | def cancelReadFile(path: String) = for {
111 | fib <- resourceReadFile(path).start
112 | _ <- IO.sleep(2.seconds) >> fib.cancel
113 | } yield ()
114 |
115 | // nested resources
116 | def connFromConfResource(path: String) =
117 | Resource.make(IO("opening file").debug >> openFileScanner(path))(scanner => IO("closing file").debug >> IO(scanner.close()))
118 | .flatMap(scanner => Resource.make(IO(new Connection(scanner.nextLine())))(conn => conn.close().void))
119 |
120 | // equivalent
121 | def connFromConfResourceClean(path: String) = for {
122 | scanner <- Resource.make(IO("opening file").debug >> openFileScanner(path))(scanner => IO("closing file").debug >> IO(scanner.close()))
123 | conn <- Resource.make(IO(new Connection(scanner.nextLine())))(conn => conn.close().void)
124 | } yield conn
125 |
126 | val openConnection = connFromConfResourceClean("cats-effect/src/main/resources/connection.txt").use(conn => conn.open() >> IO.never)
127 | val canceledConnection = for {
128 | fib <- openConnection.start
129 | _ <- IO.sleep(1.second) >> IO("cancelling!").debug >> fib.cancel
130 | } yield ()
131 |
132 | // connection + file will close automatically
133 |
134 | // finalizers to regular IOs
135 | val ioWithFinalizer = IO("some resource").debug.guarantee(IO("freeing resource").debug.void)
136 | val ioWithFinalizer_v2 = IO("some resource").debug.guaranteeCase {
137 | case Succeeded(fa) => fa.flatMap(result => IO(s"releasing resource: $result").debug).void
138 | case Errored(e) => IO("nothing to release").debug.void
139 | case Canceled() => IO("resource got canceled, releasing what's left").debug.void
140 | }
141 |
142 |
143 | override def run = ioWithFinalizer.void
144 | }
145 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part4coordination/CountdownLatches.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part4coordination
2 |
3 | import cats.effect
4 | import cats.effect.kernel.Deferred
5 | import cats.effect.std.CountDownLatch
6 | import cats.effect.{IO, IOApp, Ref, Resource}
7 |
8 | import scala.concurrent.duration._
9 | import com.rockthejvm.utils._
10 | import cats.syntax.parallel._
11 | import cats.syntax.traverse._
12 |
13 | import java.io.{File, FileWriter}
14 | import scala.io.Source
15 | import scala.util.Random
16 |
17 | object CountdownLatches extends IOApp.Simple {
18 |
19 | /*
20 | CDLatches are a coordination primitive initialized with a count.
21 | All fibers calling await() on the CDLatch are (semantically) blocked.
22 | When the internal count of the latch reaches 0 (via release() calls from other fibers), all waiting fibers are unblocked.
23 | */
24 |
25 | def announcer(latch: CountDownLatch[IO]): IO[Unit] = for {
26 | _ <- IO("Starting race shortly...").debug >> IO.sleep(2.seconds)
27 | _ <- IO("5...").debug >> IO.sleep(1.second)
28 | _ <- latch.release
29 | _ <- IO("4...").debug >> IO.sleep(1.second)
30 | _ <- latch.release
31 | _ <- IO("3...").debug >> IO.sleep(1.second)
32 | _ <- latch.release
33 | _ <- IO("2...").debug >> IO.sleep(1.second)
34 | _ <- latch.release
35 | _ <- IO("1...").debug >> IO.sleep(1.second)
36 | _ <- latch.release // gun firing
37 | _ <- IO("GO GO GO!").debug
38 | } yield ()
39 |
40 | def createRunner(id: Int, latch: CountDownLatch[IO]): IO[Unit] = for {
41 | _ <- IO(s"[runner $id] waiting for signal...").debug
42 | _ <- latch.await // block this fiber until the count reaches 0
43 | _ <- IO(s"[runner $id] RUNNING!").debug
44 | } yield ()
45 |
46 | def sprint(): IO[Unit] = for {
47 | latch <- CountDownLatch[IO](5)
48 | announcerFib <- announcer(latch).start
49 | _ <- (1 to 10).toList.parTraverse(id => createRunner(id, latch))
50 | _ <- announcerFib.join
51 | } yield ()
52 |
53 |
54 | /**
55 | * Exercise: simulate a file downloader on multiple threads
56 | */
57 | object FileServer {
58 | val fileChunksList = Array(
59 | "I love Scala.",
60 | "Cats Effect seems quite fun.",
61 | "Never would I have thought I would do low-level concurrency WITH pure FP."
62 | )
63 |
64 | def getNumChunks: IO[Int] = IO(fileChunksList.length)
65 | def getFileChunk(n: Int): IO[String] = IO(fileChunksList(n))
66 | }
67 |
68 | def writeToFile(path: String, contents: String): IO[Unit] = {
69 | val fileResource = Resource.make(IO(new FileWriter(new File(path))))(writer => IO(writer.close()))
70 | fileResource.use { writer =>
71 | IO(writer.write(contents))
72 | }
73 | }
74 |
75 | def appendFileContents(fromPath: String, toPath: String): IO[Unit] = {
76 | val compositeResource = for {
77 | reader <- Resource.make(IO(Source.fromFile(fromPath)))(source => IO(source.close()))
78 | writer <- Resource.make(IO(new FileWriter(new File(toPath), true)))(writer => IO(writer.close()))
79 | } yield (reader, writer)
80 |
81 | compositeResource.use {
82 | case (reader, writer) => IO(reader.getLines().foreach(writer.write))
83 | }
84 | }
85 |
86 | def createFileDownloaderTask(id: Int, latch: CDLatch, filename: String, destFolder: String): IO[Unit] = for {
87 | _ <- IO(s"[task $id] downloading chunk...").debug
88 | _ <- IO.sleep((Random.nextDouble * 1000).toInt.millis)
89 | chunk <- FileServer.getFileChunk(id)
90 | _ <- writeToFile(s"$destFolder/$filename.part$id", chunk)
91 | _ <- IO(s"[task $id] chunk download complete").debug
92 | _ <- latch.release
93 | } yield ()
94 |
95 | /*
96 | - call file server API and get the number of chunks (n)
97 | - start a CDLatch
98 | - start n fibers which download a chunk of the file (use the file server's download chunk API)
99 | - block on the latch until each task has finished
100 | - after all chunks are done, stitch the files together under the same file on disk
101 | */
102 | def downloadFile(filename: String, destFolder: String): IO[Unit] = for {
103 | n <- FileServer.getNumChunks
104 | latch <- CDLatch(n)
105 | _ <- IO(s"Download started on $n fibers.").debug
106 | _ <- (0 until n).toList.parTraverse(id => createFileDownloaderTask(id, latch, filename, destFolder))
107 | _ <- latch.await
108 | _ <- (0 until n).toList.traverse(id => appendFileContents(s"$destFolder/$filename.part$id", s"$destFolder/$filename"))
109 | } yield ()
110 |
111 | override def run = downloadFile("myScalafile.txt", "cats-effect/src/main/resources")
112 | }
113 |
114 | /**
115 | * Exercise: implement your own CDLatch with Ref and Deferred.
116 | */
117 |
118 | abstract class CDLatch {
119 | def await: IO[Unit]
120 | def release: IO[Unit]
121 | }
122 |
123 | object CDLatch {
124 | sealed trait State
125 | case object Done extends State
126 | case class Live(remainingCount: Int, signal: Deferred[IO, Unit]) extends State
127 |
128 | def apply(count: Int): IO[CDLatch] = for {
129 | signal <- Deferred[IO, Unit]
130 | state <- Ref[IO].of[State](Live(count, signal))
131 | } yield new CDLatch {
132 |
133 | override def await = state.get.flatMap { s =>
134 | if (s == Done) IO.unit // continue, the latch is dead
135 | else signal.get // block here
136 | }
137 |
138 | override def release = state.modify {
139 | case Done => Done -> IO.unit
140 | case Live(1, signal) => Done -> signal.complete(()).void
141 | case Live(n, signal) => Live(n - 1, signal) -> IO.unit
142 | }.flatten.uncancelable
143 | }
144 | }
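// Hedged smoke test for the custom latch (hypothetical object, not part of the lesson):
// three fibers release the latch at different times; the awaiting fiber unblocks only after the third release.
object CDLatchDemo extends IOApp.Simple {
  override def run = for {
    latch <- CDLatch(3)
    _     <- (1 to 3).toList.parTraverse { i =>
               IO.sleep(i.seconds) >> IO(s"releasing #$i").debug >> latch.release
             }.start
    _     <- latch.await
    _     <- IO("all releases done, latch open").debug
  } yield ()
}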
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part4coordination/CyclicBarriers.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part4coordination
2 |
3 | import cats.effect.kernel.{Deferred, Ref}
4 | import cats.effect.{IO, IOApp}
5 | import cats.effect.std.CyclicBarrier
6 |
7 | import scala.util.Random
8 | import scala.concurrent.duration._
9 | import com.rockthejvm.utils._
10 | import cats.syntax.parallel._
11 |
12 | object CyclicBarriers extends IOApp.Simple {
13 |
14 | /*
15 | A cyclic barrier is a coordination primitive that
16 | - is initialized with a count
17 | - has a single API: await
18 |
19 | A cyclic barrier will (semantically) block all fibers calling its await() method until we have exactly N fibers waiting,
20 | at which point the barrier will unblock all fibers and reset to its original state.
21 | Any further fiber will again block until we have exactly N fibers waiting.
22 | ...
23 | And so on.
24 | */
25 |
26 | // example: signing up for a social network just about to be launched
27 | def createUser(id: Int, barrier: CBarrier): IO[Unit] = for {
28 | _ <- IO.sleep((Random.nextDouble * 500).toInt.millis)
29 | _ <- IO(s"[user $id] Just heard there's a new social network - signing up for the waitlist...").debug
30 | _ <- IO.sleep((Random.nextDouble * 1500).toInt.millis)
31 | _ <- IO(s"[user $id] On the waitlist now, can't wait!").debug
32 | _ <- barrier.await // block the fiber when there are exactly N users waiting
33 | _ <- IO(s"[user $id] OMG this is so cool!").debug
34 | } yield ()
35 |
36 | def openNetwork(): IO[Unit] = for {
37 | _ <- IO("[announcer] The Rock the JVM social network is up for registration! Launching when we have 10 users!").debug
38 | barrier <- CBarrier(10)
39 | _ <- (1 to 20).toList.parTraverse(id => createUser(id, barrier))
40 | } yield ()
41 |
42 | /**
43 | * Exercise: Implement your own CB with Ref + Deferred. Ignore cancellation effects.
44 | * Test: use your CBarrier instead of CyclicBarrier[IO].
45 | */
46 |
47 | override def run = openNetwork()
48 | }
49 |
50 | abstract class CBarrier {
51 | def await: IO[Unit]
52 | }
53 |
54 | object CBarrier {
55 | case class State(nWaiting: Int, signal: Deferred[IO, Unit])
56 |
57 | def apply(count: Int): IO[CBarrier] = for {
58 | signal <- Deferred[IO, Unit]
59 | state <- Ref[IO].of(State(count, signal))
60 | } yield new CBarrier {
61 | override def await = Deferred[IO, Unit].flatMap { newSignal =>
62 | state.modify {
63 | case State(1, signal) => State(count, newSignal) -> signal.complete(()).void
64 | case State(n, signal) => State(n - 1, signal) -> signal.get
65 | }.flatten
66 | }
67 | }
68 | }
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part4coordination/Defers.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part4coordination
2 |
3 | import cats.effect.kernel.Outcome
4 | import cats.effect.{Deferred, Fiber, IO, IOApp, Ref}
5 | import com.rockthejvm.utils._
6 |
7 | import scala.concurrent.duration._
8 | import cats.syntax.traverse._
9 |
10 | object Defers extends IOApp.Simple {
11 |
12 | // Deferred is a primitive for (semantically) blocking a fiber until some other fiber completes it with a value
13 |
14 | val aDeferred: IO[Deferred[IO, Int]] = Deferred[IO, Int]
15 | val aDeferred_v2: IO[Deferred[IO, Int]] = IO.deferred[Int] // same
16 |
17 | // get blocks the calling fiber (semantically) until some other fiber completes the Deferred with a value
18 | val reader: IO[Int] = aDeferred.flatMap { signal =>
19 | signal.get // blocks the fiber
20 | }
21 |
22 | val writer = aDeferred.flatMap { signal =>
23 | signal.complete(42)
24 | }
25 |
26 | def demoDeferred(): IO[Unit] = {
27 | def consumer(signal: Deferred[IO, Int]) = for {
28 | _ <- IO("[consumer] waiting for result...").debug
29 | meaningOfLife <- signal.get // blocker
30 | _ <- IO(s"[consumer] got the result: $meaningOfLife").debug
31 | } yield ()
32 |
33 | def producer(signal: Deferred[IO, Int]) = for {
34 | _ <- IO("[producer] crunching numbers...").debug
35 | _ <- IO.sleep(1.second)
36 | _ <- IO("[producer] complete: 42").debug
37 | meaningOfLife <- IO(42)
38 | _ <- signal.complete(meaningOfLife)
39 | } yield ()
40 |
41 | for {
42 | signal <- Deferred[IO, Int]
43 | fibConsumer <- consumer(signal).start
44 | fibProducer <- producer(signal).start
45 | _ <- fibProducer.join
46 | _ <- fibConsumer.join
47 | } yield ()
48 | }
49 |
50 | // simulate downloading some content
51 | val fileParts = List("I ", "love S", "cala", " with Cat", "s Effect!<EOF>")
52 |
53 | def fileNotifierWithRef(): IO[Unit] = {
54 | def downloadFile(contentRef: Ref[IO, String]): IO[Unit] =
55 | fileParts
56 | .map { part =>
57 | IO(s"[downloader] got '$part'").debug >> IO.sleep(1.second) >> contentRef.update(currentContent => currentContent + part)
58 | }
59 | .sequence
60 | .void
61 |
62 | def notifyFileComplete(contentRef: Ref[IO, String]): IO[Unit] = for {
63 | file <- contentRef.get
64 | _ <- if (file.endsWith("<EOF>")) IO("[notifier] File download complete").debug
65 | else IO("[notifier] downloading...").debug >> IO.sleep(500.millis) >> notifyFileComplete(contentRef) // busy wait!
66 | } yield ()
67 |
68 | for {
69 | contentRef <- Ref[IO].of("")
70 | fibDownloader <- downloadFile(contentRef).start
71 | notifier <- notifyFileComplete(contentRef).start
72 | _ <- fibDownloader.join
73 | _ <- notifier.join
74 | } yield ()
75 | }
76 |
77 | // deferred works miracles for waiting
78 | def fileNotifierWithDeferred(): IO[Unit] = {
79 | def notifyFileComplete(signal: Deferred[IO, String]): IO[Unit] = for {
80 | _ <- IO("[notifier] downloading...").debug
81 | _ <- signal.get // blocks until the signal is completed
82 | _ <- IO("[notifier] File download complete").debug
83 | } yield ()
84 |
85 | def downloadFilePart(part: String, contentRef: Ref[IO, String], signal: Deferred[IO, String]): IO[Unit] = for {
86 | _ <- IO(s"[downloader] got '$part'").debug
87 | _ <- IO.sleep(1.second)
88 | latestContent <- contentRef.updateAndGet(currentContent => currentContent + part)
89 | _ <- if (latestContent.contains("<EOF>")) signal.complete(latestContent) else IO.unit
90 | } yield ()
91 |
92 | for {
93 | contentRef <- Ref[IO].of("")
94 | signal <- Deferred[IO, String]
95 | notifierFib <- notifyFileComplete(signal).start
96 | fileTasksFib <- fileParts.map(part => downloadFilePart(part, contentRef, signal)).sequence.start
97 | _ <- notifierFib.join
98 | _ <- fileTasksFib.join
99 | } yield ()
100 | }
101 |
102 | /**
103 | * Exercises:
104 | * - (medium) write a small alarm notification with two simultaneous IOs
105 | * - one that increments a counter every second (a clock)
106 | * - one that waits for the counter to become 10, then prints a message "time's up!"
107 | *
108 | * - (mega hard) implement racePair with Deferred.
109 | * - use a Deferred which can hold an Either[outcome for ioa, outcome for iob]
110 | * - start two fibers, one for each IO
111 | * - on completion (with any status), each IO needs to complete that Deferred
112 | * (hint: use a finalizer from the Resources lesson)
113 | * (hint2: use a guarantee call to make sure the fibers complete the Deferred)
114 | * - what do you do in case of cancellation (the hardest part)?
115 | */
116 | // 1
117 | def eggBoiler(): IO[Unit] = {
118 | def eggReadyNotification(signal: Deferred[IO, Unit]) = for {
119 | _ <- IO("Egg boiling on some other fiber, waiting...").debug
120 | _ <- signal.get
121 | _ <- IO("EGG READY!").debug
122 | } yield ()
123 |
124 | def tickingClock(counter: Ref[IO, Int], signal: Deferred[IO, Unit]): IO[Unit] = for {
125 | _ <- IO.sleep(1.second)
126 | count <- counter.updateAndGet(_ + 1)
127 | _ <- IO(count).debug
128 | _ <- if (count >= 10) signal.complete(()) else tickingClock(counter, signal)
129 | } yield ()
130 |
131 | for {
132 | counter <- Ref[IO].of(0)
133 | signal <- Deferred[IO, Unit]
134 | notificationFib <- eggReadyNotification(signal).start
135 | clock <- tickingClock(counter, signal).start
136 | _ <- notificationFib.join
137 | _ <- clock.join
138 | } yield ()
139 | }
140 |
141 | type RaceResult[A, B] = Either[
142 | (Outcome[IO, Throwable, A], Fiber[IO, Throwable, B]), // (winner result, loser fiber)
143 | (Fiber[IO, Throwable, A], Outcome[IO, Throwable, B]) // (loser fiber, winner result)
144 | ]
145 |
146 | type EitherOutcome[A, B] = Either[Outcome[IO, Throwable, A], Outcome[IO, Throwable, B]]
147 |
148 | def ourRacePair[A, B](ioa: IO[A], iob: IO[B]): IO[RaceResult[A, B]] = IO.uncancelable { poll =>
149 | for {
150 | signal <- Deferred[IO, EitherOutcome[A, B]]
151 | fiba <- ioa.guaranteeCase(outcomeA => signal.complete(Left(outcomeA)).void).start
152 | fibb <- iob.guaranteeCase(outcomeB => signal.complete(Right(outcomeB)).void).start
153 | result <- poll(signal.get).onCancel { // blocking call - should be cancelable
154 | for {
155 | cancelFibA <- fiba.cancel.start
156 | cancelFibB <- fibb.cancel.start
157 | _ <- cancelFibA.join
158 | _ <- cancelFibB.join
159 | } yield ()
160 | }
161 | } yield result match {
162 | case Left(outcomeA) => Left((outcomeA, fibb))
163 | case Right(outcomeB) => Right((fiba, outcomeB))
164 | }
165 | }
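  // Hedged usage sketch (not in the lesson): with these durations the first effect wins,
  // so we expect a Left with its outcome plus the loser's fiber, which we then cancel.
  def testOurRacePair(): IO[Unit] = ourRacePair(
    IO.sleep(1.second) >> IO(42).debug,
    IO.sleep(2.seconds) >> IO("slower").debug
  ).flatMap {
    case Left((outcomeA, loserFib))  => loserFib.cancel >> IO(s"first won: $outcomeA").debug.void
    case Right((loserFib, outcomeB)) => loserFib.cancel >> IO(s"second won: $outcomeB").debug.void
  }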
166 |
167 |
168 | override def run = eggBoiler()
169 | }
170 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part4coordination/Mutex.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part4coordination
2 |
3 | import cats.effect
4 | import cats.effect.kernel.Deferred
5 | import cats.effect.kernel.Outcome.{Canceled, Errored, Succeeded}
6 | import cats.effect.{IO, IOApp, Ref, Concurrent}
7 |
8 | import scala.util.Random
9 | import scala.concurrent.duration._
10 | import com.rockthejvm.utils._
11 | import cats.syntax.parallel._
12 |
13 | import scala.collection.immutable.Queue
14 |
15 | import cats.syntax.flatMap._
16 | import cats.syntax.functor._
17 | import cats.effect.syntax.monadCancel._
18 |
19 | abstract class Mutex {
20 | def acquire: IO[Unit]
21 | def release: IO[Unit]
22 | }
23 |
24 | object Mutex {
25 | type Signal = Deferred[IO, Unit]
26 | case class State(locked: Boolean, waiting: Queue[Signal])
27 | val unlocked = State(locked = false, Queue())
28 |
29 | def createSignal(): IO[Signal] = Deferred[IO, Unit]
30 |
31 | def create: IO[Mutex] = Ref[IO].of(unlocked).map(createMutexWithCancellation)
32 |
33 | def createMutexWithCancellation(state: Ref[IO, State]): Mutex =
34 | new Mutex {
35 | override def acquire = IO.uncancelable { poll =>
36 | createSignal().flatMap { signal =>
37 |
38 | val cleanup = state.modify {
39 | case State(locked, queue) =>
40 | val newQueue = queue.filterNot(_ eq signal)
41 | State(locked, newQueue) -> release
42 | }.flatten
43 |
44 | state.modify {
45 | case State(false, _) => State(locked = true, Queue()) -> IO.unit
46 | case State(true, queue) => State(locked = true, queue.enqueue(signal)) -> poll(signal.get).onCancel(cleanup)
47 | }.flatten // modify returns IO[B], our B is IO[Unit], so modify returns IO[IO[Unit]], we need to flatten
48 | }
49 | }
50 |
51 | override def release = state.modify {
52 | case State(false, _) => unlocked -> IO.unit
53 | case State(true, queue) =>
54 | if (queue.isEmpty) unlocked -> IO.unit
55 | else {
56 | val (signal, rest) = queue.dequeue
57 | State(locked = true, rest) -> signal.complete(()).void
58 | }
59 | }.flatten
60 | }
61 |
62 | def createSimpleMutex(state: Ref[IO, State]): Mutex = new Mutex {
63 | /*
64 | Change the state of the Ref:
65 | - if the mutex is currently unlocked, state becomes (true, [])
66 | - if the mutex is locked, state becomes (true, queue + new signal) AND WAIT ON THAT SIGNAL.
67 | */
68 | override def acquire = createSignal().flatMap { signal =>
69 | state.modify {
70 | case State(false, _) => State(locked = true, Queue()) -> IO.unit
71 | case State(true, queue) => State(locked = true, queue.enqueue(signal)) -> signal.get
72 | }.flatten // modify returns IO[B], our B is IO[Unit], so modify returns IO[IO[Unit]], we need to flatten
73 | }
74 | /*
75 | Change the state of the Ref:
76 | - if the mutex is unlocked, leave the state unchanged
77 | - if the mutex is locked,
78 | - if the queue is empty, unlock the mutex, i.e. state becomes (false, [])
79 | - if the queue is not empty, take a signal out of the queue and complete it (thereby unblocking a fiber waiting on it)
80 | */
81 | override def release = state.modify {
82 | case State(false, _) => unlocked -> IO.unit
83 | case State(true, queue) =>
84 | if (queue.isEmpty) unlocked -> IO.unit
85 | else {
86 | val (signal, rest) = queue.dequeue
87 | State(locked = true, rest) -> signal.complete(()).void
88 | }
89 | }.flatten
90 | }
91 | }
92 |
93 |
94 | // generic mutex after the polymorphic concurrent exercise
95 | abstract class MutexV2[F[_]] {
96 | def acquire: F[Unit]
97 | def release: F[Unit]
98 | }
99 |
100 | object MutexV2 {
101 | type Signal[F[_]] = Deferred[F, Unit]
102 | case class State[F[_]](locked: Boolean, waiting: Queue[Signal[F]])
103 |
104 | def unlocked[F[_]] = State[F](locked = false, Queue())
105 | def createSignal[F[_]](using concurrent: Concurrent[F]): F[Signal[F]] = concurrent.deferred[Unit]
106 |
107 | def create[F[_]](using concurrent: Concurrent[F]): F[MutexV2[F]] =
108 | concurrent.ref(unlocked).map(initialState => createMutexWithCancellation(initialState))
109 |
110 | def createMutexWithCancellation[F[_]](state: Ref[F, State[F]])(using concurrent: Concurrent[F]): MutexV2[F] =
111 | new MutexV2[F] {
112 | override def acquire = concurrent.uncancelable { poll =>
113 | createSignal.flatMap { signal =>
114 |
115 | val cleanup = state.modify {
116 | case State(locked, queue) =>
117 | val newQueue = queue.filterNot(_ eq signal)
118 | State(locked, newQueue) -> release
119 | }.flatten
120 |
121 | state.modify {
122 | case State(false, _) => State[F](locked = true, Queue()) -> concurrent.unit
123 | case State(true, queue) => State[F](locked = true, queue.enqueue(signal)) -> poll(signal.get).onCancel(cleanup)
124 | }.flatten
125 | }
126 | }
127 |
128 | override def release = state.modify {
129 | case State(false, _) => unlocked[F] -> concurrent.unit
130 | case State(true, queue) =>
131 | if (queue.isEmpty) unlocked[F] -> concurrent.unit
132 | else {
133 | val (signal, rest) = queue.dequeue
134 | State[F](locked = true, rest) -> signal.complete(()).void
135 | }
136 | }.flatten
137 | }
138 |
139 | }
140 |
141 | object MutexPlayground extends IOApp.Simple {
142 |
143 | def criticalTask(): IO[Int] = IO.sleep(1.second) >> IO(Random.nextInt(100))
144 |
145 | def createNonLockingTask(id: Int): IO[Int] = for {
146 | _ <- IO(s"[task $id] working...").debug
147 | res <- criticalTask()
148 | _ <- IO(s"[task $id] got result: $res").debug
149 | } yield res
150 |
151 | def demoNonLockingTasks(): IO[List[Int]] = (1 to 10).toList.parTraverse(id => createNonLockingTask(id))
152 |
153 | def createLockingTask(id: Int, mutex: MutexV2[IO]): IO[Int] = for {
154 | _ <- IO(s"[task $id] waiting for permission...").debug
155 | _ <- mutex.acquire // blocks if the mutex has been acquired by some other fiber
156 | // critical section
157 | _ <- IO(s"[task $id] working...").debug
158 | res <- criticalTask()
159 | _ <- IO(s"[task $id] got result: $res").debug
160 | // critical section end
161 | _ <- mutex.release
162 | _ <- IO(s"[task $id] lock removed.").debug
163 | } yield res
164 |
165 | def demoLockingTasks() = for {
166 | mutex <- MutexV2.create[IO]
167 | results <- (1 to 10).toList.parTraverse(id => createLockingTask(id, mutex))
168 | } yield results
169 | // only one task will proceed at one time
170 |
171 | def createCancellingTask(id: Int, mutex: MutexV2[IO]): IO[Int] = {
172 | if (id % 2 == 0) createLockingTask(id, mutex)
173 | else for {
174 | fib <- createLockingTask(id, mutex).onCancel(IO(s"[task $id] received cancellation!").debug.void).start
175 | _ <- IO.sleep(2.seconds) >> fib.cancel
176 | out <- fib.join
177 | result <- out match {
178 | case Succeeded(effect) => effect
179 | case Errored(_) => IO(-1)
180 | case Canceled() => IO(-2)
181 | }
182 | } yield result
183 | }
184 |
185 | def demoCancellingTasks() = for {
186 | mutex <- MutexV2.create[IO]
187 | results <- (1 to 10).toList.parTraverse(id => createCancellingTask(id, mutex))
188 | } yield results
189 |
190 | override def run = demoCancellingTasks().debug.void
191 | }
192 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part4coordination/Refs.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part4coordination
2 |
3 | import cats.effect.{IO, IOApp, Ref}
4 | import com.rockthejvm.utils._
5 |
6 | import javax.sound.midi.SysexMessage
7 | import scala.concurrent.duration._
8 |
9 | object Refs extends IOApp.Simple {
10 |
11 | // ref = purely functional atomic reference
12 | val atomicMol: IO[Ref[IO, Int]] = Ref[IO].of(42)
13 | val atomicMol_v2: IO[Ref[IO, Int]] = IO.ref(42)
14 |
15 | // modifying is an effect
16 | val increasedMol: IO[Unit] = atomicMol.flatMap { ref =>
17 | ref.set(43) // thread-safe
18 | }
19 |
20 | // obtain a value
21 | val mol = atomicMol.flatMap { ref =>
22 | ref.get // thread-safe
23 | }
24 |
25 | val gsMol: IO[Int] = atomicMol.flatMap { ref =>
26 | ref.getAndSet(43)
27 | } // gets the old value, sets the new one
28 |
29 | // updating with a function
30 | val fMol: IO[Unit] = atomicMol.flatMap { ref =>
31 | ref.update(value => value * 10)
32 | }
33 |
34 | val updatedMol: IO[Int] = atomicMol.flatMap { ref =>
35 | ref.updateAndGet(value => value * 10) // get the new value
36 | // can also use getAndUpdate to get the OLD value
37 | }
38 |
39 | // modifying with a function returning a different type
40 | val modifiedMol: IO[String] = atomicMol.flatMap { ref =>
41 | ref.modify(value => (value * 10, s"my current value is $value"))
42 | }
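  // Hedged extra example (hypothetical helper, not part of the lesson): modify is handy for an
  // atomic "swap and return the old value" in a single step, e.g. draining a counter.
  def drainCounter(counter: Ref[IO, Int]): IO[Int] = counter.modify(old => (0, old))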
43 |
44 | // why: concurrent + thread-safe reads/writes over shared values, in a purely functional way
45 |
46 | import cats.syntax.parallel._
47 | def demoConcurrentWorkImpure(): IO[Unit] = {
48 | var count = 0
49 |
50 | def task(workload: String): IO[Unit] = {
51 | val wordCount = workload.split(" ").length
52 | for {
53 | _ <- IO(s"Counting words for '$workload': $wordCount").debug
54 | newCount <- IO(count + wordCount)
55 | _ <- IO(s"New total: $newCount").debug
56 | _ <- IO(count += wordCount)
57 | } yield ()
58 | }
59 |
60 | List("I love Cats Effect", "This ref thing is useless", "Daniel writes a lot of code")
61 | .map(task)
62 | .parSequence
63 | .void
64 | }
65 | /*
66 | Drawbacks:
67 | - hard to read/debug
68 | - mix pure/impure code
69 | - NOT THREAD SAFE
70 | */
71 |
72 | def demoConcurrentWorkPure(): IO[Unit] = {
73 | def task(workload: String, total: Ref[IO, Int]): IO[Unit] = {
74 | val wordCount = workload.split(" ").length
75 |
76 | for {
77 | _ <- IO(s"Counting words for '$workload': $wordCount").debug
78 | newCount <- total.updateAndGet(currentCount => currentCount + wordCount)
79 | _ <- IO(s"New total: $newCount").debug
80 | } yield ()
81 | }
82 |
83 | for {
84 | initialCount <- Ref[IO].of(0)
85 | _ <- List("I love Cats Effect", "This ref thing is useless", "Daniel writes a lot of code")
86 | .map(string => task(string, initialCount))
87 | .parSequence
88 | } yield ()
89 | }
90 |
91 | /**
92 | * Exercise
93 | */
94 | def tickingClockImpure(): IO[Unit] = {
95 | var ticks: Long = 0L
96 | def tickingClock: IO[Unit] = for {
97 | _ <- IO.sleep(1.second)
98 | _ <- IO(System.currentTimeMillis()).debug
99 | _ <- IO(ticks += 1) // not thread safe
100 | _ <- tickingClock
101 | } yield ()
102 |
103 | def printTicks: IO[Unit] = for {
104 | _ <- IO.sleep(5.seconds)
105 | _ <- IO(s"TICKS: $ticks").debug
106 | _ <- printTicks
107 | } yield ()
108 |
109 | for {
110 | _ <- (tickingClock, printTicks).parTupled
111 | } yield ()
112 | }
113 |
114 | def tickingClockPure(): IO[Unit] = {
115 | def tickingClock(ticks: Ref[IO, Int]): IO[Unit] = for {
116 | _ <- IO.sleep(1.second)
117 | _ <- IO(System.currentTimeMillis()).debug
118 | _ <- ticks.update(_ + 1) // thread safe effect
119 | _ <- tickingClock(ticks)
120 | } yield ()
121 |
122 | def printTicks(ticks: Ref[IO, Int]): IO[Unit] = for {
123 | _ <- IO.sleep(5.seconds)
124 | t <- ticks.get
125 | _ <- IO(s"TICKS: $t").debug
126 | _ <- printTicks(ticks)
127 | } yield ()
128 |
129 | for {
130 | tickRef <- Ref[IO].of(0)
131 | _ <- (tickingClock(tickRef), printTicks(tickRef)).parTupled
132 | } yield ()
133 | }
134 |
135 | def tickingClockWeird(): IO[Unit] = {
136 | val ticks = Ref[IO].of(0) // IO[ref]
137 |
138 | def tickingClock: IO[Unit] = for {
139 | t <- ticks // ticks will give you a NEW Ref
140 | _ <- IO.sleep(1.second)
141 | _ <- IO(System.currentTimeMillis()).debug
142 | _ <- t.update(_ + 1) // thread safe effect
143 | _ <- tickingClock
144 | } yield ()
145 |
146 | def printTicks: IO[Unit] = for {
147 | t <- ticks // ticks will give you a NEW Ref
148 | _ <- IO.sleep(5.seconds)
149 | currentTicks <- t.get
150 | _ <- IO(s"TICKS: $currentTicks").debug
151 | _ <- printTicks
152 | } yield ()
153 |
154 | for {
155 | _ <- (tickingClock, printTicks).parTupled
156 | } yield ()
157 | }
158 |
159 | override def run = tickingClockWeird()
160 | }
161 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part4coordination/Semaphores.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part4coordination
2 |
3 | import cats.effect.std.Semaphore
4 | import cats.effect.{IO, IOApp}
5 | import cats.syntax.parallel.*
6 | import com.rockthejvm.utils.*
7 |
8 | import scala.concurrent.duration.*
9 | import scala.util.Random
10 |
11 | object Semaphores extends IOApp.Simple {
12 |
13 | val semaphore: IO[Semaphore[IO]] = Semaphore[IO](2) // 2 total permits
14 |
15 | // example: limiting the number of concurrent sessions on a server
16 | def doWorkWhileLoggedIn(): IO[Int] = IO.sleep(1.second) >> IO(Random.nextInt(100))
17 |
18 | def login(id: Int, sem: Semaphore[IO]): IO[Int] = for {
19 | _ <- IO(s"[session $id] waiting to log in...").debug
20 | _ <- sem.acquire
21 | // critical section
22 | _ <- IO(s"[session $id] logged in, working...").debug
23 | res <- doWorkWhileLoggedIn()
24 | _ <- IO(s"[session $id] done: $res, logging out...").debug
25 | // end of critical section
26 | _ <- sem.release
27 | } yield res
28 |
29 | def demoSemaphore() = for {
30 | sem <- Semaphore[IO](2)
31 | user1Fib <- login(1, sem).start
32 | user2Fib <- login(2, sem).start
33 | user3Fib <- login(3, sem).start
34 | _ <- user1Fib.join
35 | _ <- user2Fib.join
36 | _ <- user3Fib.join
37 | } yield ()
38 |
39 | def weightedLogin(id: Int, requiredPermits: Int, sem: Semaphore[IO]): IO[Int] = for {
40 | _ <- IO(s"[session $id] waiting to log in...").debug
41 | _ <- sem.acquireN(requiredPermits)
42 | // critical section
43 | _ <- IO(s"[session $id] logged in, working...").debug
44 | res <- doWorkWhileLoggedIn()
45 | _ <- IO(s"[session $id] done: $res, logging out...").debug
46 | // end of critical section
47 | _ <- sem.releaseN(requiredPermits)
48 | } yield res
49 |
50 | def demoWeightedSemaphore() = for {
51 | sem <- Semaphore[IO](2)
52 | user1Fib <- weightedLogin(1, 1, sem).start
53 | user2Fib <- weightedLogin(2, 2, sem).start
54 | user3Fib <- weightedLogin(3, 3, sem).start
55 | _ <- user1Fib.join
56 | _ <- user2Fib.join
57 | _ <- user3Fib.join
58 | } yield ()
59 |
60 | /**
61 | * Exercise:
62 | * 1. find out if there's something wrong with this code
63 | * 2. why
64 | * 3. fix it
65 | */
66 | // Semaphore with 1 permit == mutex
67 | val mutex = Semaphore[IO](1)
68 | val users: IO[List[Int]] = (1 to 10).toList.parTraverse { id =>
69 | for {
70 | sem <- mutex
71 | _ <- IO(s"[session $id] waiting to log in...").debug
72 | _ <- sem.acquire
73 | // critical section
74 | _ <- IO(s"[session $id] logged in, working...").debug
75 | res <- doWorkWhileLoggedIn()
76 | _ <- IO(s"[session $id] done: $res, logging out...").debug
77 | // end of critical section
78 | _ <- sem.release
79 | } yield res
80 | }
81 |
82 | // 1
83 | // expected: all tasks start at the same time, only one can work at one time
84 | // reality: all tasks are parallel
85 |
86 | // 2
87 | // mistake: we flatMap Semaphore[IO](1) so we create a new semaphore every time
88 |
89 | // 3
90 | val usersFixed: IO[List[Int]] = mutex.flatMap { sem =>
91 | (1 to 10).toList.parTraverse { id =>
92 | for {
93 | _ <- IO(s"[session $id] waiting to log in...").debug
94 | _ <- sem.acquire
95 | // critical section
96 | _ <- IO(s"[session $id] logged in, working...").debug
97 | res <- doWorkWhileLoggedIn()
98 | _ <- IO(s"[session $id] done: $res, logging out...").debug
99 | // end of critical section
100 | _ <- sem.release
101 | } yield res
102 | }
103 | }
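  // Hedged alternative sketch (not in the lesson): Semaphore also exposes `permit`, a Resource
  // that acquires a permit on `use` and releases it automatically, even on error or cancellation.
  val usersWithPermit: IO[List[Int]] = mutex.flatMap { sem =>
    (1 to 10).toList.parTraverse { id =>
      IO(s"[session $id] waiting to log in...").debug >>
        sem.permit.use { _ =>
          for {
            _   <- IO(s"[session $id] logged in, working...").debug
            res <- doWorkWhileLoggedIn()
            _   <- IO(s"[session $id] done: $res, logging out...").debug
          } yield res
        }
    }
  }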
104 |
105 |
106 | override def run = usersFixed.debug.void
107 | }
108 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part5polymorphic/PolymorphicAsync.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part5polymorphic
2 |
3 | import cats.effect.*
4 | import com.rockthejvm.utils.general.*
5 |
6 | import java.util.concurrent.{ConcurrentMap, Executors}
7 | import scala.concurrent.ExecutionContext
8 |
9 | object PolymorphicAsync extends IOApp.Simple {
10 |
11 | // Async - asynchronous computations, "suspended" in F
12 | trait MyAsync[F[_]] extends Sync[F] with Temporal[F] {
13 | // fundamental description of async computations
14 | def executionContext: F[ExecutionContext]
15 | def async[A](cb: (Either[Throwable, A] => Unit) => F[Option[F[Unit]]]): F[A]
16 | def evalOn[A](fa: F[A], ec: ExecutionContext): F[A]
17 |
18 | def async_[A](cb: (Either[Throwable, A] => Unit) => Unit): F[A] =
19 | async(kb => map(pure(cb(kb)))(_ => None))
20 | def never[A]: F[A] = async_(_ => ())
21 | }
22 |
23 | val asyncIO = Async[IO] // given/implicit Async[IO]
24 |
25 | // pure, map/flatMap, raiseError, uncancelable, start, ref/deferred, sleep, delay/defer/blocking, +
26 | val ec = asyncIO.executionContext
27 |
28 | // power: async_ + async: FFI
29 | val threadPool = Executors.newFixedThreadPool(10)
30 | type Callback[A] = Either[Throwable, A] => Unit
31 | val asyncMeaningOfLife: IO[Int] = IO.async_ { (cb: Callback[Int]) =>
32 | // start computation on some other thread pool
33 | threadPool.execute { () =>
34 | println(s"[${Thread.currentThread().getName}] Computing an async MOL")
35 | cb(Right(42))
36 | }
37 | }
38 |
39 | val asyncMeaningOfLife_v2: IO[Int] = asyncIO.async_ { (cb: Callback[Int]) =>
40 | // start computation on some other thread pool
41 | threadPool.execute { () =>
42 | println(s"[${Thread.currentThread().getName}] Computing an async MOL")
43 | cb(Right(42))
44 | }
45 | } // same
46 |
47 | val asyncMeaningOfLifeComplex: IO[Int] = IO.async { (cb: Callback[Int]) =>
48 | IO {
49 | threadPool.execute{ () =>
50 | println(s"[${Thread.currentThread().getName}] Computing an async MOL")
51 | cb(Right(42))
52 | }
53 | }.as(Some(IO("Cancelled!").debug.void)) // <-- finalizer in case the computation gets cancelled
54 | }
55 |
56 | val asyncMeaningOfLifeComplex_v2: IO[Int] = asyncIO.async { (cb: Callback[Int]) =>
57 | IO {
58 | threadPool.execute{ () =>
59 | println(s"[${Thread.currentThread().getName}] Computing an async MOL")
60 | cb(Right(42))
61 | }
62 | }.as(Some(IO("Cancelled!").debug.void)) // <-- finalizer in case the computation gets cancelled
63 | } // same
64 |
65 | val myExecutionContext = ExecutionContext.fromExecutorService(threadPool)
66 | val asyncMeaningOfLife_v3 = asyncIO.evalOn(IO(42).debug, myExecutionContext).guarantee(IO(threadPool.shutdown()))
67 |
68 | // never
69 | val neverIO = asyncIO.never
70 |
71 | /**
72 | * Exercises
73 | * 1 - implement never and async_ in terms of the big async.
74 | * 2 - tuple two effects with different requirements.
75 | */
76 | def firstEffect[F[_]: Concurrent, A](a: A): F[A] = Concurrent[F].pure(a)
77 | def secondEffect[F[_]: Sync, A](a: A): F[A] = Sync[F].pure(a)
78 |
79 | import cats.syntax.flatMap.* // flatMap extension method
80 | import cats.syntax.functor.* // map extension method
81 |
82 | def tupledEffect[F[_]: Async, A](a: A): F[(A, A)] = for {
83 | first <- firstEffect(a)
84 | second <- secondEffect(a)
85 | } yield (first, second)
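  // Hedged usage note: Async[F] extends both Sync[F] and Temporal[F] (hence Concurrent[F]),
  // so a single Async constraint satisfies the requirements of firstEffect and secondEffect at once.
  val tupledIO: IO[(Int, Int)] = tupledEffect[IO, Int](42)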
86 |
87 |
88 | override def run = ???
89 | }
90 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part5polymorphic/PolymorphicCancellation.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part5polymorphic
2 |
3 | import cats.{Applicative, Monad}
4 | import cats.effect.{IO, IOApp, MonadCancel, Poll}
5 | import cats.effect.kernel.Outcome.{Canceled, Errored, Succeeded}
6 |
7 | import scala.concurrent.duration.FiniteDuration
8 |
9 | object PolymorphicCancellation extends IOApp.Simple {
10 |
11 | trait MyApplicativeError[F[_], E] extends Applicative[F] {
12 | def raiseError[A](error: E): F[A]
13 | def handleErrorWith[A](fa: F[A])(f: E => F[A]): F[A]
14 | }
15 |
16 | trait MyMonadError[F[_], E] extends MyApplicativeError[F, E] with Monad[F]
17 |
18 | // MonadCancel describes the capability to cancel & prevent cancellation
19 |
20 | trait MyPoll[F[_]] {
21 | def apply[A](fa: F[A]): F[A]
22 | }
23 |
24 | trait MyMonadCancel[F[_], E] extends MyMonadError[F, E] {
25 | def canceled: F[Unit]
26 | def uncancelable[A](poll: Poll[F] => F[A]): F[A]
27 | }
28 |
29 | // monadCancel for IO
30 | val monadCancelIO: MonadCancel[IO, Throwable] = MonadCancel[IO]
31 |
32 | // we can create values, because MonadCancel is a Monad
33 | val molIO: IO[Int] = monadCancelIO.pure(42)
34 | val ambitiousMolIO: IO[Int] = monadCancelIO.map(molIO)(_ * 10)
35 |
36 | val mustCompute = monadCancelIO.uncancelable { _ =>
37 | for {
38 | _ <- monadCancelIO.pure("once started, I can't go back...")
39 | res <- monadCancelIO.pure(56)
40 | } yield res
41 | }
42 |
43 | import cats.syntax.flatMap._ // flatMap
44 | import cats.syntax.functor._ // map
45 |
46 | // goal: can generalize code
47 | def mustComputeGeneral[F[_], E](using mc: MonadCancel[F, E]): F[Int] = mc.uncancelable { _ =>
48 | for {
49 | _ <- mc.pure("once started, I can't go back...")
50 | res <- mc.pure(56)
51 | } yield res
52 | }
53 |
54 | val mustCompute_v2 = mustComputeGeneral[IO, Throwable]
55 |
56 | // allow cancellation listeners
57 | val mustComputeWithListener = mustCompute.onCancel(IO("I'm being cancelled!").void)
58 | val mustComputeWithListener_v2 = monadCancelIO.onCancel(mustCompute, IO("I'm being cancelled!").void) // same
59 | // .onCancel as extension method
60 | import cats.effect.syntax.monadCancel._ // .onCancel
61 |
62 | // allow finalizers: guarantee, guaranteeCase
63 | val aComputationWithFinalizers = monadCancelIO.guaranteeCase(IO(42)) {
64 | case Succeeded(fa) => fa.flatMap(a => IO(s"successful: $a").void)
65 | case Errored(e) => IO(s"failed: $e").void
66 | case Canceled() => IO("canceled").void
67 | }
68 |
69 | // bracket pattern is specific to MonadCancel
70 | val aComputationWithUsage = monadCancelIO.bracket(IO(42)) { value =>
71 | IO(s"Using the meaning of life: $value")
72 | } { value =>
73 | IO("releasing the meaning of life...").void
74 | }
75 | // therefore Resources can only be built in the presence of a MonadCancel instance
76 |
77 | /**
78 | * Exercise - generalize a piece of code (the auth-flow example from the Cancellation lesson)
79 | */
80 | import com.rockthejvm.utils.general._
81 | import scala.concurrent.duration._
82 |
83 | // hint: use this instead of IO.sleep
84 | def unsafeSleep[F[_], E](duration: FiniteDuration)(using mc: MonadCancel[F, E]): F[Unit] =
85 | mc.pure(Thread.sleep(duration.toMillis))
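  // Added note (hedged): wrapping Thread.sleep in pure blocks the underlying thread instead of
  // suspending the fiber; MonadCancel alone has no timed suspension (that would require Temporal),
  // which is why this demo-only helper exists.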
86 |
87 | def inputPassword[F[_], E](using mc: MonadCancel[F,E]): F[String] = for {
88 | _ <- mc.pure("Input password:").debug
89 | _ <- mc.pure("(typing password)").debug
90 | _ <- unsafeSleep[F, E](5.seconds)
91 | pw <- mc.pure("RockTheJVM1!")
92 | } yield pw
93 |
94 | def verifyPassword[F[_], E](pw: String)(using mc: MonadCancel[F, E]): F[Boolean] = for {
95 | _ <- mc.pure("verifying...").debug
96 | _ <- unsafeSleep[F,E](2.seconds)
97 | check <- mc.pure(pw == "RockTheJVM1!")
98 | } yield check
99 |
100 | def authFlow[F[_], E](using mc: MonadCancel[F,E]): F[Unit] = mc.uncancelable { poll =>
101 | for {
102 | pw <- poll(inputPassword).onCancel(mc.pure("Authentication timed out. Try again later.").debug.void) // this is cancelable
103 | verified <- verifyPassword(pw) // this is NOT cancelable
104 | _ <- if (verified) mc.pure("Authentication successful.").debug // this is NOT cancelable
105 | else mc.pure("Authentication failed.").debug
106 | } yield ()
107 | }
108 |
109 | val authProgram: IO[Unit] = for {
110 | authFib <- authFlow[IO, Throwable].start
111 | _ <- IO.sleep(3.seconds) >> IO("Authentication timeout, attempting cancel...").debug >> authFib.cancel
112 | _ <- authFib.join
113 | } yield ()
114 |
115 |
116 | override def run = authProgram
117 | }
118 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part5polymorphic/PolymorphicCoordination.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part5polymorphic
2 |
3 | import cats.effect.{MonadCancel, Concurrent, IO, IOApp, Ref, Outcome, Fiber}
4 | import cats.effect.kernel.{Deferred, Spawn}
5 |
6 | object PolymorphicCoordination extends IOApp.Simple {
7 |
8 | // Concurrent - Ref + Deferred for ANY effect type
9 | trait MyConcurrent[F[_]] extends Spawn[F] {
10 | def ref[A](a: A): F[Ref[F, A]]
11 | def deferred[A]: F[Deferred[F, A]]
12 | }
13 |
14 | val concurrentIO = Concurrent[IO] // given instance of Concurrent[IO]
15 | val aDeferred = Deferred[IO, Int] // given/implicit Concurrent[IO] in scope
16 | val aDeferred_v2 = concurrentIO.deferred[Int]
17 | val aRef = concurrentIO.ref(42)
18 |
19 | // capabilities: pure, map/flatMap, raiseError, uncancelable, start (fibers), + ref/deferred
20 |
21 | import com.rockthejvm.utils.general._
22 | import scala.concurrent.duration._
23 |
24 | def eggBoiler(): IO[Unit] = {
25 | def eggReadyNotification(signal: Deferred[IO, Unit]) = for {
26 | _ <- IO("Egg boiling on some other fiber, waiting...").debug
27 | _ <- signal.get
28 | _ <- IO("EGG READY!").debug
29 | } yield ()
30 |
31 | def tickingClock(counter: Ref[IO, Int], signal: Deferred[IO, Unit]): IO[Unit] = for {
32 | _ <- IO.sleep(1.second)
33 | count <- counter.updateAndGet(_ + 1)
34 | _ <- IO(count).debug
35 | _ <- if (count >= 10) signal.complete(()) else tickingClock(counter, signal)
36 | } yield ()
37 |
38 | for {
39 | counter <- Ref[IO].of(0)
40 | signal <- Deferred[IO, Unit]
41 | notificationFib <- eggReadyNotification(signal).start
42 | clock <- tickingClock(counter, signal).start
43 | _ <- notificationFib.join
44 | _ <- clock.join
45 | } yield ()
46 | }
47 |
48 | import cats.syntax.flatMap._ // flatMap
49 | import cats.syntax.functor._ // map
50 | import cats.effect.syntax.spawn._ // start extension method
51 |
52 | // added here explicitly due to a Scala 3 bug that we discovered during lesson recording
53 | def unsafeSleepDupe[F[_], E](duration: FiniteDuration)(using mc: MonadCancel[F, E]): F[Unit] =
54 | mc.pure(Thread.sleep(duration.toMillis))
55 |
56 | def polymorphicEggBoiler[F[_]](using concurrent: Concurrent[F]): F[Unit] = {
57 | def eggReadyNotification(signal: Deferred[F, Unit]) = for {
58 | _ <- concurrent.pure("Egg boiling on some other fiber, waiting...").debug
59 | _ <- signal.get
60 | _ <- concurrent.pure("EGG READY!").debug
61 | } yield ()
62 |
63 | def tickingClock(counter: Ref[F, Int], signal: Deferred[F, Unit]): F[Unit] = for {
64 | _ <- unsafeSleepDupe[F, Throwable](1.second)
65 | count <- counter.updateAndGet(_ + 1)
66 | _ <- concurrent.pure(count).debug
67 | _ <- if (count >= 10) signal.complete(()).void else tickingClock(counter, signal)
68 | } yield ()
69 |
70 | for {
71 | counter <- concurrent.ref(0)
72 | signal <- concurrent.deferred[Unit]
73 | notificationFib <- eggReadyNotification(signal).start
74 | clock <- tickingClock(counter, signal).start
75 | _ <- notificationFib.join
76 | _ <- clock.join
77 | } yield ()
78 | }
79 |
80 | /**
81 | * Exercises:
82 | * 1. Generalize racePair
83 | * 2. Generalize the Mutex concurrency primitive for any F
84 | */
85 | type RaceResult[F[_], A, B] = Either[
86 | (Outcome[F, Throwable, A], Fiber[F, Throwable, B]), // (winner result, loser fiber)
87 | (Fiber[F, Throwable, A], Outcome[F, Throwable, B]) // (loser fiber, winner result)
88 | ]
89 |
90 | type EitherOutcome[F[_], A, B] = Either[Outcome[F, Throwable, A], Outcome[F, Throwable, B]]
91 |
92 | import cats.effect.syntax.monadCancel.* // guaranteeCase extension method
93 | import cats.effect.syntax.spawn.* // start extension method
94 |
95 | def ourRacePair[F[_], A, B](fa: F[A], fb: F[B])(using concurrent: Concurrent[F]): F[RaceResult[F, A, B]] =
96 | concurrent.uncancelable { poll =>
97 | for {
98 | signal <- concurrent.deferred[EitherOutcome[F, A, B]]
99 | fiba <- fa.guaranteeCase(outcomeA => signal.complete(Left(outcomeA)).void).start
100 | fibb <- fb.guaranteeCase(outcomeB => signal.complete(Right(outcomeB)).void).start
101 | result <- poll(signal.get).onCancel { // blocking call - should be cancelable
102 | for {
103 | cancelFibA <- fiba.cancel.start
104 | cancelFibB <- fibb.cancel.start
105 | _ <- cancelFibA.join
106 | _ <- cancelFibB.join
107 | } yield ()
108 | }
109 | } yield result match {
110 | case Left(outcomeA) => Left((outcomeA, fibb))
111 | case Right(outcomeB) => Right((fiba, outcomeB))
112 | }
113 | }
114 |
115 |
116 |
117 | override def run = polymorphicEggBoiler[IO]
118 | }
119 |
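120 | // A minimal usage sketch (illustrative only, not referenced by run above):
121 | // racing two IOs with ourRacePair - the faster effect wins, and the loser's fiber
122 | // is handed back so the caller can decide whether to cancel or join it.
123 | object OurRacePairDemo extends IOApp.Simple {
124 |   import scala.concurrent.duration.*
125 |   import com.rockthejvm.utils.general.*
126 |   import PolymorphicCoordination.ourRacePair
127 | 
128 |   val fast: IO[Int] = IO.sleep(1.second) >> IO(42)
129 |   val slow: IO[String] = IO.sleep(2.seconds) >> IO("Scala")
130 | 
131 |   override def run: IO[Unit] = ourRacePair(fast, slow).flatMap {
132 |     case Left((winnerOutcome, loserFiber)) => loserFiber.cancel >> IO(s"left won: $winnerOutcome").debug.void
133 |     case Right((loserFiber, winnerOutcome)) => loserFiber.cancel >> IO(s"right won: $winnerOutcome").debug.void
134 |   }
135 | }
136 | 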
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part5polymorphic/PolymorphicFibers.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part5polymorphic
2 |
3 | import cats.effect.{Outcome, Fiber, IO, IOApp, MonadCancel, Spawn}
4 | import cats.effect.kernel.Outcome.{Canceled, Errored, Succeeded}
5 |
6 | object PolymorphicFibers extends IOApp.Simple {
7 |
8 | // Spawn = create fibers for any effect
9 | trait MyGenSpawn[F[_], E] extends MonadCancel[F, E] {
10 |     def start[A](fa: F[A]): F[Fiber[F, E, A]] // creates a fiber
11 | def never[A]: F[A] // a forever-suspending effect
12 | def cede: F[Unit] // a "yield" effect
13 |
14 | def racePair[A, B](fa: F[A], fb: F[B]): F[Either[ // fundamental racing
15 | (Outcome[F, E, A], Fiber[F, E, B]),
16 | (Fiber[F, E, A], Outcome[F, E, B])
17 | ]]
18 | }
19 |
20 | trait MySpawn[F[_]] extends MyGenSpawn[F, Throwable]
21 |
22 | val mol = IO(42)
23 | val fiber: IO[Fiber[IO, Throwable, Int]] = mol.start
24 |
25 | // capabilities: pure, map/flatMap, raiseError, uncancelable, start
26 |
27 | val spawnIO = Spawn[IO] // fetch the given/implicit Spawn[IO]
28 |
29 | def ioOnSomeThread[A](io: IO[A]): IO[Outcome[IO, Throwable, A]] = for {
30 |     fib <- spawnIO.start(io) // same as io.start, which assumes a given Spawn[IO] in scope
31 | result <- fib.join
32 | } yield result
33 |
34 | import cats.syntax.functor.* // map
35 | import cats.syntax.flatMap.* // flatMap
36 |
37 | // generalize
38 | import cats.effect.syntax.spawn.* // start extension method
39 | def effectOnSomeThread[F[_], A](fa: F[A])(using spawn: Spawn[F]): F[Outcome[F, Throwable, A]] = for {
40 | fib <- fa.start
41 | result <- fib.join
42 | } yield result
43 |
44 | val molOnFiber = ioOnSomeThread(mol)
45 | val molOnFiber_v2 = effectOnSomeThread(mol)
46 |
47 | /**
48 | * Exercise - generalize the following code (race implementation from the Racing lesson)
49 | */
50 |
51 | def ioRace[A, B](ioa: IO[A], iob: IO[B]): IO[Either[A, B]] =
52 | IO.racePair(ioa, iob).flatMap {
53 | case Left((outA, fibB)) => outA match {
54 | case Succeeded(effectA) => fibB.cancel >> effectA.map(a => Left(a))
55 | case Errored(e) => fibB.cancel >> IO.raiseError(e)
56 | case Canceled() => fibB.join.flatMap {
57 | case Succeeded(effectB) => effectB.map(b => Right(b))
58 | case Errored(e) => IO.raiseError(e)
59 | case Canceled() => IO.raiseError(new RuntimeException("Both computations canceled."))
60 | }
61 | }
62 | case Right((fibA, outB)) => outB match {
63 | case Succeeded(effectB) => fibA.cancel >> effectB.map(b => Right(b))
64 | case Errored(e) => fibA.cancel >> IO.raiseError(e)
65 | case Canceled() => fibA.join.flatMap {
66 | case Succeeded(effectA) => effectA.map(a => Left(a))
67 | case Errored(e) => IO.raiseError(e)
68 | case Canceled() => IO.raiseError(new RuntimeException("Both computations canceled."))
69 | }
70 | }
71 | }
72 |
73 | def generalRace[F[_], A, B](fa: F[A], fb: F[B])(using spawn: Spawn[F]): F[Either[A, B]] =
74 | spawn.racePair(fa, fb).flatMap {
75 | case Left((outA, fibB)) => outA match {
76 | case Succeeded(effectA) => fibB.cancel.flatMap(_ => effectA.map(a => Left(a)))
77 | case Errored(e) => fibB.cancel.flatMap(_ => spawn.raiseError(e))
78 | case Canceled() => fibB.join.flatMap {
79 | case Succeeded(effectB) => effectB.map(b => Right(b))
80 | case Errored(e) => spawn.raiseError(e)
81 | case Canceled() => spawn.raiseError(new RuntimeException("Both computations canceled."))
82 | }
83 | }
84 | case Right((fibA, outB)) => outB match {
85 | case Succeeded(effectB) => fibA.cancel.flatMap(_ => effectB.map(b => Right(b)))
86 | case Errored(e) => fibA.cancel.flatMap(_ => spawn.raiseError(e))
87 | case Canceled() => fibA.join.flatMap {
88 | case Succeeded(effectA) => effectA.map(a => Left(a))
89 | case Errored(e) => spawn.raiseError(e)
90 | case Canceled() => spawn.raiseError(new RuntimeException("Both computations canceled."))
91 | }
92 | }
93 | }
94 |   // beware: this is a simplified implementation - certain corner cases are not handled
95 |   // (handling them would make the code more complicated)
96 |
97 | import scala.concurrent.duration._
98 | import com.rockthejvm.utils.general._
99 | val fast = IO.sleep(1.second) >> IO(42).debug
100 | val slow = IO.sleep(2.seconds) >> IO("Scala").debug
101 | val race = ioRace(fast, slow)
102 | val race_v2 = generalRace(fast, slow)
103 |
104 | override def run = race.void
105 | }
106 |
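107 | // A minimal sketch (illustrative) of the cede capability described in MyGenSpawn above:
108 | // cede is only a hint to re-schedule the current fiber so that other fibers get a chance
109 | // to run between CPU-heavy chunks - the runtime may or may not act on it.
110 | object CedeDemo extends IOApp.Simple {
111 |   import com.rockthejvm.utils.general.*
112 | 
113 |   val heavyChunk: IO[Long] = IO((1L to 1000000L).sum)
114 | 
115 |   override def run: IO[Unit] =
116 |     (heavyChunk.debug >> IO.cede >> heavyChunk.debug).void
117 | }
118 | 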
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part5polymorphic/PolymorphicSync.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part5polymorphic
2 |
3 | import cats.Defer
4 | import cats.effect.{IO, IOApp, MonadCancel, Sync}
5 |
6 | import java.io.{BufferedReader, InputStreamReader}
7 |
8 | object PolymorphicSync extends IOApp.Simple {
9 |
10 | val aDelayedIO = IO.delay { // "suspend" computations in IO
11 | println("I'm an effect!")
12 | 42
13 | }
14 |
15 | val aBlockingIO = IO.blocking { // on some specific thread pool for blocking computations
16 | println("loading...")
17 | Thread.sleep(1000)
18 | 42
19 | }
20 |
21 | // synchronous computation
22 |
23 | trait MySync[F[_]] extends MonadCancel[F, Throwable] with Defer[F] {
24 | def delay[A](thunk: => A): F[A] // "suspension" of a computation - will run on the CE thread pool
25 | def blocking[A](thunk: => A): F[A] // runs on the blocking thread pool
26 |
27 | // defer comes for free
28 | def defer[A](thunk: => F[A]): F[A] =
29 | flatMap(delay(thunk))(identity)
30 | }
31 |
32 | val syncIO = Sync[IO] // given Sync[IO] in scope
33 |
34 | // abilities: pure, map/flatMap, raiseError, uncancelable, + delay/blocking
35 | val aDelayedIO_v2 = syncIO.delay {
36 | println("I'm an effect!")
37 | 42
38 | } // same as IO.delay
39 |
40 | val aBlockingIO_v2 = syncIO.blocking {
41 | println("loading...")
42 | Thread.sleep(1000)
43 | 42
44 | } // same as IO.blocking
45 |
46 | val aDeferredIO = IO.defer(aDelayedIO)
47 |
48 | /**
49 | * Exercise - write a polymorphic console
50 | */
51 | trait Console[F[_]] {
52 | def println[A](a: A): F[Unit]
53 | def readLine(): F[String]
54 | }
55 |
56 | import cats.syntax.functor._ // map extension method
57 | object Console {
58 | def make[F[_]](using sync: Sync[F]): F[Console[F]] = sync.pure((System.in, System.out)).map {
59 | case (in, out) => new Console[F] {
60 | def println[A](a: A): F[Unit] =
61 | sync.blocking(out.println(a))
62 |
63 | def readLine(): F[String] = {
64 | val bufferedReader = new BufferedReader(new InputStreamReader(in))
65 | sync.blocking(bufferedReader.readLine())
66 |
67 |         /*
68 |           Potential problem: readLine can hang one of the threads from the blocking thread pool
69 |           indefinitely (or - oh my! - one of the CE compute threads).
70 | 
71 |           There's also sync.interruptible(true/false), which attempts to interrupt the underlying thread (via thread interrupts) in case of cancellation.
72 |           The flag tells whether the interrupt signal should be sent repeatedly (true) or only once (false).
73 |         */
74 | }
75 | }
76 | }
77 | }
78 |
79 | def consoleReader(): IO[Unit] = for {
80 | console <- Console.make[IO]
81 | _ <- console.println("Hi, what's your name?")
82 | name <- console.readLine()
83 | _ <- console.println(s"Hi $name, nice to meet you!")
84 | } yield ()
85 |
86 | override def run = consoleReader()
87 | }
88 |
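89 | // A minimal sketch (illustrative) of the interruptible variant mentioned in the comment above.
90 | // It assumes the Sync#interruptible(many: Boolean) signature from the CE 3 version used here;
91 | // later releases split it into interruptible / interruptibleMany.
92 | def interruptibleReadLine[F[_]](using sync: Sync[F]): F[String] =
93 |   sync.interruptible(false) { // send the interrupt signal once if this effect is canceled
94 |     new BufferedReader(new InputStreamReader(System.in)).readLine()
95 |   }
96 | 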
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/part5polymorphic/PolymorphicTemporalSuspension.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.part5polymorphic
2 |
3 | import cats.effect.{IO, IOApp, Temporal}
4 | import cats.effect.kernel.Concurrent
5 |
6 | import scala.concurrent.duration.FiniteDuration
7 | import com.rockthejvm.utils.general._
8 | import scala.concurrent.duration._
9 |
10 | object PolymorphicTemporalSuspension extends IOApp.Simple {
11 |
12 | // Temporal - time-blocking effects
13 | trait MyTemporal[F[_]] extends Concurrent[F] {
14 | def sleep(time: FiniteDuration): F[Unit] // semantically blocks this fiber for a specified time
15 | }
16 |
17 |   // abilities: pure, map/flatMap, raiseError, uncancelable, start, ref/deferred, + sleep
18 | val temporalIO = Temporal[IO] // given Temporal[IO] in scope
19 | val chainOfEffects = IO("Loading...").debug *> IO.sleep(1.second) *> IO("Game ready!").debug
20 | val chainOfEffects_v2 = temporalIO.pure("Loading...").debug *> temporalIO.sleep(1.second) *> temporalIO.pure("Game ready!").debug // same
21 |
22 | /**
23 | * Exercise: generalize the following piece
24 | */
25 | import cats.syntax.flatMap._
26 | def timeout[F[_], A](fa: F[A], duration: FiniteDuration)(using temporal: Temporal[F]): F[A] = {
27 | val timeoutEffect = temporal.sleep(duration)
28 | val result = temporal.race(fa, timeoutEffect)
29 |
30 | result.flatMap {
31 | case Left(v) => temporal.pure(v)
32 | case Right(_) => temporal.raiseError(new RuntimeException("Computation timed out."))
33 | }
34 | }
35 |
36 | override def run = ???
37 | }
38 |
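39 | // A minimal usage sketch (illustrative): the generalized timeout applied to IO.
40 | // The slow effect loses the race against the 1-second sleep, so this IO fails
41 | // with the RuntimeException raised inside timeout above.
42 | val timeoutDemo: IO[String] =
43 |   PolymorphicTemporalSuspension.timeout(IO.sleep(2.seconds) *> IO("done").debug, 1.second)
44 | 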
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/playground/Playground.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.playground
2 |
3 | import cats.effect.{IO, IOApp}
4 |
5 | object Playground extends IOApp.Simple {
6 |
7 | override def run: IO[Unit] =
8 | IO.println("Learning Cats Effect 3! Looking forward to it...")
9 | }
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/utils/Utils.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.utils
2 |
3 | import cats.effect.IO
4 |
5 | extension [A](io: IO[A])
6 | def debug: IO[A] = for {
7 | a <- io
8 | t = Thread.currentThread().getName
9 | _ = println(s"[$t] $a")
10 | } yield a
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/utils/general/Utils.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.utils.general
2 |
3 | import cats.Functor
4 | import cats.syntax.functor.*
5 |
6 | import cats.effect.MonadCancel
7 | import scala.concurrent.duration.FiniteDuration
8 |
9 | extension [F[_], A](fa: F[A]) {
10 | def debug(using functor: Functor[F]): F[A] = fa.map { a =>
11 | val t = Thread.currentThread().getName
12 | println(s"[$t] $a")
13 | a
14 | }
15 | }
16 |
17 | def unsafeSleep[F[_], E](duration: FiniteDuration)(using mc: MonadCancel[F, E]): F[Unit] =
18 |   mc.pure(Thread.sleep(duration.toMillis)) // actually blocks the calling thread (not a semantic sleep) - for demos only
19 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/utilsScala2/general/package.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm.utilsScala2
2 |
3 | import cats.Functor
4 | import cats.syntax.functor.*
5 |
6 | import cats.effect.MonadCancel
7 | import scala.concurrent.duration.FiniteDuration
8 |
9 | package object general {
10 |
11 | implicit class DebugWrapper[F[_], A](fa: F[A]) {
12 | def debug(implicit functor: Functor[F]): F[A] = fa.map { a =>
13 | val t = Thread.currentThread().getName
14 | println(s"[$t] $a")
15 | a
16 | }
17 | }
18 |
19 | def unsafeSleep[F[_], E](duration: FiniteDuration)(implicit mc: MonadCancel[F, E]): F[Unit] =
20 | mc.pure(Thread.sleep(duration.toMillis))
21 | }
22 |
--------------------------------------------------------------------------------
/src/main/scala/com/rockthejvm/utilsScala2/package.scala:
--------------------------------------------------------------------------------
1 | package com.rockthejvm
2 |
3 | import cats.effect.IO
4 |
5 | package object utilsScala2 {
6 | implicit class DebugWrapper[A](io: IO[A]) {
7 | def debug: IO[A] = for {
8 | a <- io
9 | t = Thread.currentThread().getName
10 | _ = println(s"[$t] $a")
11 | } yield a
12 | }
13 | }
14 |
--------------------------------------------------------------------------------