├── .gitignore ├── README.md └── notes ├── progfun └── src │ ├── week1 │ ├── factorial.sc │ └── worksheet.sc │ ├── week2 │ ├── excersize.sc │ ├── product.sc │ └── rationals.sc │ ├── week3 │ ├── List.scala │ ├── excersize.sc │ └── nth.sc │ ├── week4 │ ├── Expr.scala │ ├── Nat.scala │ └── show.sc │ ├── week5 │ └── listfun.sc │ └── week6 │ ├── mnemonics.sc │ ├── nqueens.sc │ ├── pairs.sc │ └── polynomials.sc ├── week 1 ├── 001-getting-started.md ├── 002-elements-of-programming.md ├── 003-conditionals-and-value-definitions.md ├── 004-example-newtons-square-roots.md ├── 005-blocks-and-lexical-scoping.md └── 006-tail-recursion.md ├── week 2 ├── 001-higher-order-functions.md ├── 002-currying.md ├── 003-example-finding-fixed-points.md ├── 004-scala-syntax-review.md ├── 005-functions-and-data.md ├── 006-more-fun-with-rationals.md └── 007-evaluation-and-operators.md ├── week 3 ├── 001-class-hierarchies.md ├── 002-how-classes-are-organized.md └── 003-polymorphism.md ├── week 4 ├── 001-functions-as-objects.md ├── 002-objects-everywhere.md ├── 003-subtyping-and-generics.md ├── 004-variance.md ├── 005-decomposition.md ├── 006-pattern-matching.md └── 007-lists.md ├── week 5 ├── 001-more-functions-on-lists.md ├── 002-pairs-and-tuples.md ├── 003-implicit-parameters.md ├── 004-higher-order-list-functions.md ├── 005-reduction-of-lists.md ├── 006-reasoning-about-concat.md └── 007-a-larger-equational-proof-on-lists.md ├── week 6 ├── 001-other-collections.md ├── 002-combinatorial-search-and-for-expressions.md ├── 003-combinatorial-search-example.md ├── 004-queries-with-for.md ├── 005-translation-of-for.md ├── 006-maps.md └── 007-putting-the-pieces-together.md └── week 7 ├── 001-structural-induction-on-trees.md └── 002-streams.md /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | *.log 3 | 4 | # sbt specific 5 | dist/* 6 | target/ 7 | lib_managed/ 8 | src_managed/ 9 | project/boot/ 10 | project/plugins/project/ 11 | 12 | # 
Scala-IDE specific 13 | .scala_dependencies 14 | .cache 15 | 16 | # Eclipse 17 | .metadata 18 | .classpath 19 | .project 20 | .worksheet 21 | bin/ 22 | 23 | # IntelliJ 24 | *.iml 25 | .idea/* 26 | 27 | hw/ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | functional-programming-in-scala 2 | =============================== 3 | 4 | https://class.coursera.org/progfun-003/class/index 5 | -------------------------------------------------------------------------------- /notes/progfun/src/week1/factorial.sc: -------------------------------------------------------------------------------- 1 | object excersie { 2 | def factorial(n: Int): Int = { 3 | def loop(acc: Int, n: Int): Int = 4 | if (n == 0) acc 5 | else loop(acc * n, n-1) 6 | loop(1, n) 7 | } //> factorial: (n: Int)Int 8 | factorial(3) //> res0: Int = 6 9 | 10 | 11 | class IntSet 12 | class NonEmpty extends IntSet 13 | } -------------------------------------------------------------------------------- /notes/progfun/src/week1/worksheet.sc: -------------------------------------------------------------------------------- 1 | object worksheet { 2 | def abs(x: Double) = if (x < 0) -x else x //> abs: (x: Double)Double 3 | 4 | def sqrt(x: Double) = { 5 | def sqrtIter(guess: Double): Double = 6 | if (isGoodEnough(guess)) guess 7 | else sqrtIter(improve(guess)) 8 | 9 | def isGoodEnough(guess: Double) = 10 | abs(guess * guess - x) / x < .001 11 | 12 | def improve(guess: Double) = 13 | (guess + x / guess) / 2 14 | 15 | sqrtIter(1.0) 16 | } //> sqrt: (x: Double)Double 17 | 18 | } -------------------------------------------------------------------------------- /notes/progfun/src/week2/excersize.sc: -------------------------------------------------------------------------------- 1 | object excersize { 2 | def sum(f: Int => Int, a: Int, b: Int): Int = { 3 | def loop(a: Int, acc: Int): Int = { 4 | if (a > 
b) acc 5 | else loop(a + 1, f(a) + acc) 6 | } 7 | loop(a, 0) 8 | } //> sum: (f: Int => Int, a: Int, b: Int)Int 9 | 10 | sum(x => x * x, 3, 5) //> res0: Int = 50 11 | } -------------------------------------------------------------------------------- /notes/progfun/src/week2/product.sc: -------------------------------------------------------------------------------- 1 | object product { 2 | def mapReduce(f: Int => Int, combine: (Int, Int) => Int, zero: Int)(a: Int, b: Int): Int = 3 | if (a > b) zero 4 | else combine(f(a), mapReduce(f, combine, zero)(a + 1, b)) 5 | //> mapReduce: (f: Int => Int, combine: (Int, Int) => Int, zero: Int)(a: Int, b: 6 | //| Int)Int 7 | 8 | def product(f: Int => Int)(a: Int, b : Int): Int = 9 | mapReduce(f, (x, y) => x * y, 1)(a,b) //> product: (f: Int => Int)(a: Int, b: Int)Int 10 | 11 | product(x => x * x)(3,4) //> res0: Int = 144 12 | 13 | def factorial(n: Int) = product(x => x)(1, n) //> factorial: (n: Int)Int 14 | 15 | factorial(5) //> res1: Int = 120 16 | 17 | 18 | 19 | } -------------------------------------------------------------------------------- /notes/progfun/src/week2/rationals.sc: -------------------------------------------------------------------------------- 1 | object rationals { 2 | val x = new Rational(3, 6) //> x : Rational = 1/2 3 | x.numer //> res0: Int = 3 4 | x.denom //> res1: Int = 6 5 | 6 | val y = new Rational(5, 7) //> y : Rational = 5/7 7 | x + y //> res2: Rational = 17/14 8 | 9 | val z = new Rational(3, 2) //> z : Rational = 3/2 10 | 11 | x - y - z //> res3: Rational = 12/-7 12 | 13 | x < z //> res4: Boolean = true 14 | x.max(z) //> res5: Rational = 3/2 15 | } 16 | 17 | class Rational(x: Int, y: Int) { 18 | require(y != 0, "denominator must be nonzero") 19 | private def gcd(a: Int, b: Int): Int = if (b == 0) a else gcd(b, a % b) 20 | private val g = gcd(x, y) 21 | def numer = x 22 | def denom = y 23 | 24 | def + (that: Rational) = 25 | new Rational( 26 | numer * that.denom + that.numer * denom, 27 | denom * 
that.denom) 28 | 29 | def < (that: Rational) = numer * that.denom < that.numer * denom 30 | 31 | def max(that: Rational) = if (this < that) that else this 32 | 33 | def - (that: Rational) = this + -that 34 | 35 | def unary_- : Rational = new Rational(-numer, denom) 36 | 37 | override def toString = numer / g + "/" + denom / g 38 | } -------------------------------------------------------------------------------- /notes/progfun/src/week3/List.scala: -------------------------------------------------------------------------------- 1 | package week3 2 | 3 | trait List[T] { 4 | def isEmpty: Boolean 5 | def head: T 6 | def tail: List[T] 7 | } 8 | 9 | class Cons[T](val head: T, val tail: List[T]) extends List[T] { 10 | def isEmpty = false 11 | } 12 | 13 | class Nil[T] extends List[T] { 14 | def isEmpty = true 15 | def head = throw new NoSuchElementException("Nil.head") 16 | def tail = throw new NoSuchElementException("Nil.tail") 17 | } 18 | 19 | object List { 20 | def apply[T](x1: T, x2: T): List[T] = new Cons(x1, new Cons(x2, new Nil[T])) 21 | def apply[T](): List[T] = new Nil[T] // was `new Nil`: with no expected type the type parameter is inferred as Nothing, so List[Int]() did not have type List[Int] 22 | } -------------------------------------------------------------------------------- /notes/progfun/src/week3/excersize.sc: -------------------------------------------------------------------------------- 1 | object excersize { 2 | val t1 = new NonEmpty(3, new Empty, new Empty) 3 | //> t1 : NonEmpty = {.3.} 4 | val t2 = t1 incl 4 //> t2 : IntSet = {.3{.4.}} 5 | t2 contains 5 //> res0: Boolean = false 6 | } 7 | 8 | abstract class IntSet { 9 | def incl(x: Int): IntSet 10 | def contains(x: Int): Boolean 11 | def union(other: IntSet): IntSet 12 | } 13 | 14 | class Empty extends IntSet { 15 | def contains(x: Int): Boolean = false 16 | def incl(x:Int): IntSet = new NonEmpty(x, new Empty, new Empty) 17 | def union(other: IntSet): IntSet = other 18 | override def toString = "." 
19 | } 20 | 21 | class NonEmpty(elem: Int, left: IntSet, right: IntSet) extends IntSet { 22 | def contains(x: Int): Boolean = 23 | if (x < elem) left contains x 24 | else if (x > elem) right contains x 25 | else true 26 | 27 | def incl(x: Int): IntSet = 28 | if (x < elem) new NonEmpty(elem, left incl x, right) 29 | else if (x > elem) new NonEmpty(elem, left, right incl x) 30 | else this 31 | 32 | def union(other: IntSet): IntSet = 33 | ((left union right) union other) incl elem 34 | 35 | override def toString = "{" + left + elem + right + "}" 36 | } -------------------------------------------------------------------------------- /notes/progfun/src/week3/nth.sc: -------------------------------------------------------------------------------- 1 | package Cons 2 | 3 | object nth { 4 | def nth[T](n: Int, xs: List[T]): T = 5 | if (xs.isEmpty) throw new IndexOutOfBoundsException 6 | else if (n == 0) xs.head 7 | else nth(n - 1, xs.tail) //> nth: [T](n: Int, xs: Cons.List[T])T 8 | 9 | val list = new Cons(1, new Cons(2, new Cons(3, new Nil))) 10 | //> list : Cons.Cons[Int] = Cons.Cons@418c56d 11 | 12 | nth(2, list) //> res0: Int = 3 13 | nth(-1, list) //> java.lang.IndexOutOfBoundsException 14 | //| at Cons.nth$$anonfun$main$1.nth$1(Cons.nth.scala:5) 15 | //| at Cons.nth$$anonfun$main$1.apply$mcV$sp(Cons.nth.scala:12) 16 | //| at org.scalaide.worksheet.runtime.library.WorksheetSupport$$anonfun$$exe 17 | //| cute$1.apply$mcV$sp(WorksheetSupport.scala:76) 18 | //| at org.scalaide.worksheet.runtime.library.WorksheetSupport$.redirected(W 19 | //| orksheetSupport.scala:65) 20 | //| at org.scalaide.worksheet.runtime.library.WorksheetSupport$.$execute(Wor 21 | //| ksheetSupport.scala:75) 22 | //| at Cons.nth$.main(Cons.nth.scala:3) 23 | //| at Cons.nth.main(Cons.nth.scala) 24 | } -------------------------------------------------------------------------------- /notes/progfun/src/week4/Expr.scala: -------------------------------------------------------------------------------- 1 | 
package week4 2 | 3 | trait Expr 4 | case class Number(n: Int) extends Expr 5 | case class Sum(e1: Expr, e2: Expr) extends Expr -------------------------------------------------------------------------------- /notes/progfun/src/week4/Nat.scala: -------------------------------------------------------------------------------- 1 | package week4 2 | 3 | abstract class Nat { 4 | def isZero: Boolean 5 | def predecessor: Nat 6 | def successor = new Succ(this) 7 | def +(that: Nat): Nat 8 | def -(that: Nat): Nat 9 | } 10 | 11 | object Zero extends Nat { 12 | def isZero = true 13 | def predecessor = throw new Error("0.predecessor") 14 | def +(that: Nat) = that 15 | def -(that: Nat) = if (that.isZero) this else throw new Error("negative number") 16 | } 17 | 18 | class Succ(n: Nat) extends Nat { 19 | def isZero = false 20 | def predecessor = n 21 | def +(that: Nat) = new Succ(n + that) 22 | def -(that: Nat) = if (that.isZero) this else n - that.predecessor // guard needed: without it even x - Zero evaluates Zero.predecessor and throws 23 | } -------------------------------------------------------------------------------- /notes/progfun/src/week4/show.sc: -------------------------------------------------------------------------------- 1 | package week4 2 | 3 | object show { 4 | def show(e: Expr): String = e match { 5 | case Number(x) => x.toString 6 | case Sum(l, r) => show(l) + " + " + show(r) 7 | } //> show: (e: week4.Expr)String 8 | 9 | show(Sum(Number(1), Number(44))) //> res0: String = 1 + 44 10 | } -------------------------------------------------------------------------------- /notes/progfun/src/week5/listfun.sc: -------------------------------------------------------------------------------- 1 | package week5 2 | 3 | object listfun { 4 | println("Welcome to the Scala worksheet") //> Welcome to the Scala worksheet 5 | 6 | val data = List("a", "a", "a", "b", "c", "c", "a") 7 | //> data : List[String] = List(a, a, a, b, c, c, a) 8 | 9 | def pack[T](xs: List[T]): List[List[T]] = xs match { 10 | case Nil => Nil 11 | case x :: xs1 => 12 | val (first, rest) = xs span (y => y == x) 
13 | first :: pack(rest) 14 | } //> pack: [T](xs: List[T])List[List[T]] 15 | 16 | val what = pack(data) //> what : List[List[String]] = List(List(a, a, a), List(b), List(c, c), List(a 17 | //| )) 18 | 19 | def encode[T](xs: List[T]): List[(T, Int)] = 20 | pack(xs) map (ys => (ys.head, ys.length)) //> encode: [T](xs: List[T])List[(T, Int)] 21 | 22 | encode(data) //> res0: List[(String, Int)] = List((a,3), (b,1), (c,2), (a,1)) 23 | } -------------------------------------------------------------------------------- /notes/progfun/src/week6/mnemonics.sc: -------------------------------------------------------------------------------- 1 | package week6 2 | 3 | import scala.io.Source 4 | 5 | object mnemonics { 6 | val in = Source.fromURL("https://raw.github.com/jpalmour/progfun/master/forcomp/src/main/resources/forcomp/linuxwords.txt") 7 | //> in : scala.io.BufferedSource = non-empty iterator 8 | 9 | val words = in.getLines.toList filter (word => word forall (chr => chr.isLetter)) 10 | //> words : List[String] = List(Aarhus, Aaron, Ababa, aback, abaft, abandon, ab 11 | //| andoned, abandoning, abandonment, abandons, abase, abased, abasement, abasem 12 | //| ents, abases, abash, abashed, abashes, abashing, abasing, abate, abated, aba 13 | //| tement, abatements, abater, abates, abating, Abba, abbe, abbey, abbeys, abbo 14 | //| t, abbots, Abbott, abbreviate, abbreviated, abbreviates, abbreviating, abbre 15 | //| viation, abbreviations, Abby, abdomen, abdomens, abdominal, abduct, abducted 16 | //| , abduction, abductions, abductor, abductors, abducts, Abe, abed, Abel, Abel 17 | //| ian, Abelson, Aberdeen, Abernathy, aberrant, aberration, aberrations, abet, 18 | //| abets, abetted, abetter, abetting, abeyance, abhor, abhorred, abhorrent, abh 19 | //| orrer, abhorring, abhors, abide, abided, abides, abiding, Abidjan, Abigail, 20 | //| Abilene, abilities, ability, abject, abjection, abjections, abjectly, abject 21 | //| ness, abjure, abjured, abjures, abjuring, ablate, ablated, 
ablates, ablating 22 | //| , ablation, ablative, ab 23 | //| Output exceeds cutoff limit. 24 | 25 | val mnem = Map( 26 | '2' -> "ABC", '3' -> "DEF", '4' -> "GHI", '5' -> "JKL", 27 | '6' -> "MNO", '7' -> "PQRS", '8' -> "TUV", '9' -> "WXYZ") 28 | //> mnem : scala.collection.immutable.Map[Char,String] = Map(8 -> TUV, 4 -> GHI 29 | //| , 9 -> WXYZ, 5 -> JKL, 6 -> MNO, 2 -> ABC, 7 -> PQRS, 3 -> DEF) 30 | 31 | val charCode: Map[Char, Char] = 32 | for ((digit, str) <- mnem; ltr <- str) yield ltr -> digit 33 | //> charCode : Map[Char,Char] = Map(E -> 3, X -> 9, N -> 6, T -> 8, Y -> 9, J - 34 | //| > 5, U -> 8, F -> 3, A -> 2, M -> 6, I -> 4, G -> 4, V -> 8, Q -> 7, L -> 5, 35 | //| B -> 2, P -> 7, C -> 2, H -> 4, W -> 9, K -> 5, R -> 7, O -> 6, D -> 3, Z - 36 | //| > 9, S -> 7) 37 | 38 | def wordCode(word: String): String = 39 | word.toUpperCase map charCode //> wordCode: (word: String)String 40 | 41 | val wordsForNum: Map[String, Seq[String]] = 42 | words groupBy wordCode withDefaultValue Seq() //> wordsForNum : Map[String,Seq[String]] = Map(63972278 -> List(newscast), 292 43 | //| 37638427 -> List(cybernetics), 782754448 -> List(starlight), 2559464 -> List 44 | //| (allying), 862532733 -> List(uncleared), 365692259 -> List(enjoyably), 86843 45 | //| 7 -> List(unties), 33767833 -> List(deportee), 742533 -> List(picked), 33646 46 | //| 46489 -> List(femininity), 3987267346279 -> List(extraordinary), 7855397 -> 47 | //| List(pulleys), 67846493 -> List(optimize), 4723837 -> List(grafter), 386583 48 | //| -> List(evolve), 78475464 -> List(Stirling), 746459 -> List(singly), 847827 49 | //| -> List(vistas), 546637737 -> List(lionesses), 28754283 -> List(curlicue), 8 50 | //| 4863372658 -> List(thunderbolt), 46767833 -> List(imported), 26437464 -> Lis 51 | //| t(angering, cohering), 8872267 -> List(turbans), 77665377 -> List(spoolers), 52 | //| 46636233 -> List(homemade), 7446768759 -> List(rigorously), 74644647 -> Lis 53 | //| t(ringings), 633738 -> List(offset), 847825 -> 
List(visual), 772832 -> List( 54 | //| Pravda), 4729378 -> List 55 | //| Output exceeds cutoff limit. 56 | 57 | def encode(number: String): Set[List[String]] = 58 | if (number.isEmpty) Set(List()) 59 | else { 60 | for { 61 | split <- 1 to number.length 62 | word <- wordsForNum(number take split) 63 | rest <- encode(number drop split) 64 | } yield word :: rest 65 | }.toSet //> encode: (number: String)Set[List[String]] 66 | 67 | encode("847825") //> res0: Set[List[String]] = Set(List(visual)) 68 | } -------------------------------------------------------------------------------- /notes/progfun/src/week6/nqueens.sc: -------------------------------------------------------------------------------- 1 | package week6 2 | 3 | object nqueens { 4 | def queens(n: Int): Set[List[Int]] = { 5 | def placeQueens(k: Int): Set[List[Int]] = 6 | if (k == 0) Set(List()) 7 | else 8 | for { 9 | queens <- placeQueens(k - 1) 10 | col <- 0 until n 11 | if isSafe(col, queens) 12 | } yield col :: queens 13 | placeQueens(n) 14 | } //> queens: (n: Int)Set[List[Int]] 15 | 16 | def isSafe(col: Int, queens: List[Int]): Boolean = { 17 | val row = queens.length 18 | val queensWithRow = (row - 1 to 0 by -1) zip queens 19 | queensWithRow forall { 20 | case (r, c) => col != c && math.abs(col - c) != row - r 21 | } 22 | } //> isSafe: (col: Int, queens: List[Int])Boolean 23 | 24 | def show(queens: List[Int]) = { 25 | val lines = 26 | for (col <- queens.reverse) 27 | yield Vector.fill(queens.length)("* ").updated(col, "X ").mkString 28 | "\n" + (lines mkString "\n") 29 | } //> show: (queens: List[Int])String 30 | 31 | (queens(4) map show) mkString "\n" //> res0: String = " 32 | //| * * X * 33 | //| X * * * 34 | //| * * * X 35 | //| * X * * 36 | //| 37 | //| * X * * 38 | //| * * * X 39 | //| X * * * 40 | //| * * X * " 41 | (queens(8) take 3 map show) mkString "\n" //> res1: String = " 42 | //| * * * * * X * * 43 | //| * * * X * * * * 44 | //| * X * * * * * * 45 | //| * * * * * * * X 46 | //| * * * * X * * 
* 47 | //| * * * * * * X * 48 | //| X * * * * * * * 49 | //| * * X * * * * * 50 | //| 51 | //| * * * * X * * * 52 | //| * * * * * * X * 53 | //| * X * * * * * * 54 | //| * * * X * * * * 55 | //| * * * * * * * X 56 | //| X * * * * * * * 57 | //| * * X * * * * * 58 | //| * * * * * X * * 59 | //| 60 | //| * * * * * X * * 61 | //| * * X * * * * * 62 | //| * * * * * * X * 63 | //| * * * X * * * * 64 | //| X * * * * * * * 65 | //| * * * * * * * X 66 | //| * X * * * * * * 67 | //| * * * * X * * * " 68 | } -------------------------------------------------------------------------------- /notes/progfun/src/week6/pairs.sc: -------------------------------------------------------------------------------- 1 | package week6 2 | 3 | object pairs { 4 | def isPrime(n: Int) = (2 until n) forall (n % _ != 0) 5 | //> isPrime: (n: Int)Boolean 6 | 7 | val n = 7 //> n : Int = 7 8 | (1 until n) flatMap (i => 9 | (1 until i) map (j => (i, j))) filter (pair => 10 | isPrime(pair._1 + pair._2)) 11 | //> res0: scala.collection.immutable.IndexedSeq[(Int, Int)] = Vector((2,1), (3,2 12 | //| ), (4,1), (4,3), (5,2), (6,1), (6,5)) 13 | } -------------------------------------------------------------------------------- /notes/progfun/src/week6/polynomials.sc: -------------------------------------------------------------------------------- 1 | package week6 2 | 3 | object polynomials { 4 | class Poly(terms0: Map[Int, Double]) { 5 | def this(bindings: (Int, Double)*) = this(bindings.toMap) //star here means it's a repeat parameter, we can pass an arbitrary number of concrete arguments 6 | val terms = terms0 withDefaultValue 0.0 7 | 8 | //how would we add two polynomials together? we'd need to merge coefficients that have the same exponent. 
9 | //here's one way: 10 | //def + (other: Poly) = new Poly(terms ++ (other.terms map adjust)) 11 | 12 | //def adjust(term: (Int, Double)): (Int, Double) = { 13 | // val (exp, coeff) = term 14 | // exp -> (coeff + terms(exp)) 15 | //} 16 | 17 | //here's a more efficient way with fold left: 18 | def + (other: Poly) = new Poly((other.terms foldLeft terms)(addTerm)) 19 | 20 | def addTerm(terms: Map[Int, Double], term: (Int, Double)): Map[Int, Double] = { 21 | val (exp, coeff) = term 22 | terms + (exp -> (coeff + terms(exp))) 23 | } 24 | 25 | override def toString = 26 | (for ((exp, coeff) <- terms.toList.sorted.reverse) yield coeff + "x^"+exp) mkString " + " 27 | } 28 | 29 | val p1 = new Poly(1-> 2.0, 3 -> 4.0, 5 -> 6.2) //> p1 : week6.polynomials.Poly = 6.2x^5 + 4.0x^3 + 2.0x^1 30 | val p2 = new Poly(0 -> 3.0, 3 -> 7.0) //> p2 : week6.polynomials.Poly = 7.0x^3 + 3.0x^0 31 | p1 + p2 //> res0: week6.polynomials.Poly = 6.2x^5 + 11.0x^3 + 2.0x^1 + 3.0x^0 32 | 33 | 34 | } -------------------------------------------------------------------------------- /notes/week 1/001-getting-started.md: -------------------------------------------------------------------------------- 1 | # Week One: Getting Started 2 | 3 | ## Programming Paradigms 4 | Three main programming paradigms: 5 | 6 | * imperative programming 7 | * functional programming 8 | * a lesser known one called logic programming 9 | 10 | Let's review what imperative programming is as a paradigm: 11 | 12 | * modifying mutable variables 13 | * using assignments 14 | * and control structures such as if-then-else, loops, break, continue, and return 15 | 16 | The most common informal way to understand imperative programs is as instruction sequences for a [Von Neumann computer](http://en.wikipedia.org/wiki/Von_Neumann_computer) 17 | 18 | ![http://upload.wikimedia.org/wikipedia/commons/e/e5/Von_Neumann_Architecture.svg](http://upload.wikimedia.org/wikipedia/commons/e/e5/Von_Neumann_Architecture.svg) 19 | 20 | Consists of 
essentially a processor and memory, and a bus that reads both instructions and data from the memory into the processor. 21 | 22 | What's important about this is that the width of that bus is about 1 [machine word](http://en.wikipedia.org/wiki/Word_(computer_architecture)), 32/64 bits. 23 | 24 | It turns out that this model of a computer has shaped programming to no small degree. A strong correspondence between: 25 | 26 | * mutable variables -> memory cells 27 | * variable dereferences -> load instructions 28 | * variable assignments -> store instructions 29 | * control instructions -> jumps 30 | 31 | That's all very well - but the problem is scaling up. We want to avoid thinking about programs just word by word. We want to reason in larger structures... 32 | 33 | ## Scaling Up 34 | In the end, the pure imperative programming paradigm is limited by the "Von Neumann" bottleneck: 35 | >*One tends to conceptualize data structures word-by word.* 36 | 37 | If want to scale up, we have to define higher level abstractions; collections, polynomials, geometric shapes, strings, documents... 38 | 39 | Ideally, to be thorough, we need to develop *theories* of these higher level abstractions so that we are able to reason about them. 40 | 41 | ## What is a theory precious 42 | In mathematics, a theory consists of: 43 | 44 | * one or more data types 45 | * operations on these types 46 | * laws that describe the relationships between values and operations 47 | 48 | Here's what's important: *a theory in mathematics does not describe mutations.* IE, changing something while keeping the identity the same. 49 | 50 | ## Theories without Mutation 51 | For instance, the theory of polynomials defines the sum of two polynomials by laws such as 52 | >_(a*x + b) + (c*x + d) = (a+c)*x + (b+d)_ 53 | 54 | IE,. to sum two polynomials of degree 1 we take their two coefficients of the same degree and we sum those coefficients. 
55 | 56 | There would be laws of all the other useful operators for polynomials. But what the theory does *not* do is define an operator to change a coefficient while keeping the polynomial the same. Whereas if we look at imperative programming, one can do precisely that... 57 | 58 | class Polynomial { double[] coefficient; } 59 | Polynomial p = ...; 60 | p.coefficient[0] = 42 61 | 62 | The polynomial p is still the same, but we've changed it's coefficient. This isn't available in mathematics - it would detract from the theory and in fact could damage it by breaking laws 63 | 64 | Another example - strings. Most programming languages have strings, and would define a concatenation operator. One of the laws of concatenation is that it is associative, such that 65 | >_(a ++ b) ++ c = a ++ (b ++ c)_ 66 | 67 | But it does not define an operator to change a sequence element while keeping the sequence the same 68 | 69 | Some languages do get this right; ie, Java's strings are immutable; Java does not give you an operator to change a character in a string while keeping the string the same 70 | 71 | ## Consequences for Programming 72 | If we want to implement high-level concepts following their mathematical theories, there's no place for mutation 73 | 74 | * the theories do not admit it 75 | * mutation can destroy useful laws in the theories 76 | 77 | Therefore, let's: 78 | 79 | * concentrate on defining theories for operators expressed as functions 80 | * avoid mutations 81 | * have powerful ways to abstract and compose functions 82 | 83 | ## Functional Programming 84 | In a *restricted* sense, functional programming means programming without mutable variables, assignments, loops, or other imperative control structures... It takes a lot of things away. 85 | 86 | In a *wider* sense, FP means focusing on the functions. 
In particular, function can be values that are produced, consumed, and composed 87 | 88 | Functional Programming languages can be viewed the same way - in a restricted sense, a functional programming language is one which does not have mutable variables, assignments, or imperative control structures. 89 | 90 | In a wider sense, a FPL enables the construction of elegant programs that focus on functions. In particular, functions are first-class citizens, meaning essentially that we can do with a function what we could do with any other piece of data: 91 | 92 | * they can be defined anywhere, including inside other functions; you can define a string anywhere, you should be able to define a function anywhere 93 | * like any other value, they can be passed as parameters to functions and returned as results 94 | * as for other values, there exists a set of operators to compose functions into richer functions 95 | 96 | -------------------------------------------------------------------------------- /notes/week 1/002-elements-of-programming.md: -------------------------------------------------------------------------------- 1 | # Elements of Programming 2 | Every non trivial programming language provides: 3 | 4 | * primitive expressions representing the simplest elements 5 | * ways to combine expressions 6 | * ways to *abstract* expressions, which introduce a name for an expression by which it can then be referred to 7 | 8 | 9 | ### The read-eval-print Loop 10 | Functional programming is a bit like using a calculator... most functional languages have an interactive shell, or REPL, that lets us write expressions and responds with their value. 
We can start the scala repl by typing scala, or by typing sbt console 11 | 12 | ### Evaluation 13 | A non-primitive expression is evaluated as follows: 14 | 15 | * take the leftmost operator 16 | * evaluate its operands (left before right) 17 | * apply the operator to the operands 18 | 19 | A name is evaluated by replacing it with the right hand side of its definition. 20 | 21 | We apply these steps one by one until an evaluation results in a value - for the moment, a value is just a number. 22 | 23 | #### Example 24 | >_(2 * pi) * radius_ 25 | 26 | First we look up the value of pi: 27 | >_(2 * 3.14159) * radius_ 28 | 29 | Then we perform the arithmetic operation on the left: 30 | >_6.28318 * radius_ 31 | 32 | Then we look up the value for radius and finally we perform our final multiplication, yielding the result 33 | >_6.28318 * 10_ 34 | 35 | > _62.8318_ 36 | 37 | ### Parameters 38 | Definitions can have parameters - for instance: 39 | 40 | def square(x: Double) = x * x 41 | def sumOfSquares(x: Double, y: Double) = square(x) + square(y) 42 | 43 | ### Parameter and Return Types 44 | Function parameters come with their type, given after a colon. If a return type is given, it follows the parameter list 45 | 46 | def power(x: Double, y: Int): Double = ... 
47 | 48 | ### Evaluation of Function Applications 49 | Applications of parameterized functions are evaluated in a similar way as operators: 50 | 51 | * evaluate all function arguments, left to right 52 | * replace the function application by the function's right hand side, and at the same time 53 | * replace the formal parameters of the function by the actual arguments 54 | 55 | > _sumOfSquares(3, 2+2)_ 56 | 57 | evaluates to 58 | 59 | > _sumOfSquares(3, 4)_ 60 | 61 | then, we take the definition of sum of squares and plop it in: 62 | 63 | >_square(3) + square(4)_ 64 | 65 | then we repeat the process with the square application 66 | 67 | >_3 * 3 + square(4)_ 68 | 69 | >_9 + square(4)_ 70 | 71 | >_9 + 4 * 4_ 72 | 73 | >_9 + 16_ 74 | 75 | >_25_ 76 | 77 | ### The substitution model 78 | This scheme of expression evaluation is called the substitution model. The idea is that all evaluation does is *reduce an expression to a value* 79 | 80 | Simple as it is, it's been shown that it can express every algorithm, and thus is equivalent to a Turing machine. ([Alonzo Church and the lambda calculus](http://en.wikipedia.org/wiki/Lambda_calculus)) 81 | 82 | The substitution can be applied to all expressions, as long as they have no side effects. What is a side effect? 83 | 84 | c++ 85 | 86 | Evaluating this expression means that we would: return the old value of c, and at the same time, we increment the value. Turns out there's no good way to represent this evaluation sequence by a simple rewriting of the term; we need something else, like a store where the current value of the variable is kept. 87 | 88 | In other words, the express c++ has a side effect on the current value of the value; that side effect cannot be expressed by the substitution model. 89 | 90 | One of the motivations for ruling out side effects in FP is that we can keep to a simple model of evaluation. 
91 | 92 | ### Termination 93 | Once we have the substitution model, another question comes up: does every expression reduce to a value (in a finite number of steps?) 94 | 95 | Nope. Eg: 96 | 97 | def loop: Int = loop 98 | loop 99 | 100 | So, what would happen here? According to our model, we have to evaluate that name by replacing it with what's on its right hand side. 101 | 102 | loop -> loop -> loop.... 103 | 104 | We have reduced the name to itself, so this expression will never terminate. 105 | 106 | ### Changing the evaluation strategy 107 | The scala interpreter will reduce function arguments to values before rewriting the function application - that's not the only evaluation strategy. 108 | 109 | One could alternatively apply the function to unreduced arguments: 110 | 111 | >_sumOfSquares(3, 2 + 2)_ 112 | 113 | In this strategy, we keep the right hand side, and don't reduce 2+2 to 4. We simply pass it as an expression to the square function 114 | 115 | >_square(3) + square(2+2)_ 116 | 117 | We keep it around until the last possible evaluation, even ending up with an evaluation of square(2+2) that yields (2 + 2) * (2 + 2) 118 | 119 | >_3*3 + square(2+2)_ 120 | 121 | >_9 + square(2+2)_ 122 | 123 | >_9 + (2 + 2) * (2 + 2)_ 124 | 125 | >_9 + 4 * (2+2)_ 126 | 127 | >_9 + 4 * 4_ 128 | 129 | >_25_ 130 | 131 | ### Call by name and Call by value 132 | We've seen two evaluation strategies - the first is call by value, and the last is call by name. 133 | 134 | An important theory of lamda calculus is that both strategies reduce to the same final values, as long as: 135 | 136 | * the reduced expression consists of pure functions and 137 | * both evaluations terminate 138 | 139 | Call by value has the advantage that every function argument is only evaluated once. 
140 | 141 | Call by name has the advantage that a function argument is not evaluated at all if the corresponding parameter is unused in the evaluation of the function body 142 | 143 | Call by value is basically, reduce all parameters first, then apply functions 144 | Call by name is basically, apply functions, then reduce parameters 145 | 146 | ### CBN, CBV, and termination 147 | We know that the two evaluation strategies reduce an expression to the same value, as long as both evaluations terminate. What if they don't terminate? 148 | 149 | There we have an important theorem, which says if the CBV evaluation of an expression e terminates, then CBN evaluation of e terminates too. 150 | 151 | The other direction is *not* true. 152 | 153 | For example: 154 | 155 | def first(x: Int, y: Int) = x 156 | 157 | consider the expression first(1, loop): 158 | 159 | Under CBN, we'll reduce the first expression without reducing the argument, and it would just yield 1 in the first step, since first() doesn't do anything with the second parameter. 160 | 161 | Under CBV, we have to reduce the arguments to this expression, so we have to reduce loop - loop reduces to itself, infinitely, and we'd make no progress 162 | 163 | ### Scala's evaluation strategy 164 | Scala normally uses call by value. Why? Well, in practice it turns out that CBV is exponentially more efficient than CBN, because it avoids this repeated recomputation of argument expressions that CBN entails. 
165 | 166 | Other argument for CBV is that it plays much nicer with imperative effects and side effects, because you tend to know much better when expressions will be evaluated 167 | 168 | Except that, sometimes you really want to force CBN - you can do that in Scala by adding a name in front of the type: 169 | 170 | def constOne(x: Int, y: => Int) = 1 171 | -------------------------------------------------------------------------------- /notes/week 1/003-conditionals-and-value-definitions.md: -------------------------------------------------------------------------------- 1 | ### Conditional Expressions 2 | 3 | To express choosing between two alternatives, Scala has a conditional expression if-else 4 | 5 | It looks like a if-else in Java, but is used for expressions, not statements 6 | 7 | ```scala 8 | def abs(x: Int) = if (x >= 0) x else -x 9 | ``` 10 | 11 | x >=0 is a *predicate*, of type Boolean 12 | 13 | #### Boolean expressions 14 | 15 | can be composed of 16 | 17 | > true false //Constants 18 | 19 | > !b //Negation 20 | 21 | > b && b //Conjunction 22 | 23 | > b || b //Disjunction 24 | 25 | #### Rewrite rules for Booleans 26 | How do we define meaning for boolean expressions? 
Well, simply by giving rewrite rules, which give some template for boolean expressions on the left, and how to rewrite it on the right 27 | 28 | > !true --> false 29 | 30 | > !false --> true 31 | 32 | True and some other expression *e* will always give you the same as *e*; false and some other expression *e* will always give you false 33 | > true && e --> e 34 | 35 | > false && e --> false 36 | 37 | Rules for or are analogous for rules of and; they're the duals of those 38 | > true || e --> true 39 | 40 | > false || e --> e 41 | 42 | Note that && and || do not always need their right operand to be evaluated; we say these expressions use short-circuit evaluation 43 | 44 | #### Value Definitions 45 | We have seen that function parameters can be passed by value or be passed by name - the same distinction applies to definition. 46 | 47 | The ```def``` form is in a sense, call by name; it's right and side is evaluated on each use. 48 | 49 | There is also a ```val``` form, which is "by-value"; eg: 50 | 51 | ```scala 52 | val x = 2 53 | ``` 54 | 55 | ```scala 56 | val y = square(x) 57 | ``` 58 | 59 | The right hand side of a val definition is evaluated at the point of the definition itself. Afterwards, the name refers to the value - for instance, y above refers to 4, not to square(2). 60 | 61 | #### Value Definitions and Termination 62 | The difference between the ```val``` and ```def``` forms becomes apparent when the right hand side doesn't terminate 63 | 64 | ```scala 65 | def loop: Boolean = loop 66 | ``` 67 | 68 | If we say ```def x = loop```, nothing happens - we just defined another name for loop. Whereas, if we define ```val x = loop```, we're caught in an infinite loop. 
69 | -------------------------------------------------------------------------------- /notes/week 1/004-example-newtons-square-roots.md: -------------------------------------------------------------------------------- 1 | ### Example: Square Roots with Newton's method 2 | 3 | ### Task 4 | We will define a function sqrt such that 5 | 6 | ```scala 7 | def sqrt(x: Double): Double = ... 8 | ``` 9 | 10 | The classical way to achieve this is by successive approximations using Newton's method. 11 | 12 | To compute sqrt(x): 13 | 14 | - start with an initial estimate y (let's pick y = 1) 15 | - repeatedly improve the estimate by taking the mean of y and x/y 16 | 17 | A typical way to code algorithms in functional languages is to go step by step - we take a small task and formulate it as a function; then probably, that task will need further tasks that will be defined in their own function 18 | 19 | First function we'd want to define in this case is the one that computes one iteration step; if we have a guess and the value we want to draw the root from, what do we do? 20 | 21 | Well, either we stop the iteration and return the result, or we go and do another iteration step. 22 | 23 | ```scala 24 | if (isGoodEnough(guess, x)) guess 25 | else sqrtIter(improve(guess, x), x) 26 | ``` 27 | 28 | The predicate that controls our iteration is called isGoodEnough - if our guess is good enough, we'll just return the guess. 29 | 30 | If it's *not* good enough, we have to improve our guess - we'll do that with another function improve. We'll call sqrtIter again with the improved guess. 31 | 32 | Note that sqrtIter is recursive - its right hand side calls itself. One pecularity in Scala - the return type of a recursive function *always needs to be defined* - for other functions it's optional. 
The reason being that to compute the return type of a recursive function, the Scala interpreter would have to look at the right hand side; because the sqrtIter function is recursive, it's stuck in a cycle. 33 | 34 | -------------------------------------------------------------------------------- /notes/week 1/005-blocks-and-lexical-scoping.md: -------------------------------------------------------------------------------- 1 | ## Blocks and Lexical Scoping 2 | 3 | It's good functional programming style to split up a task into many small functions; but the names of functions like sqrtIter, improve, and isGoodEnough matter only for the *implementation* of sqrt, not for its usage. 4 | 5 | Normally we would not like users to access these functions directly - we wan't to avoid namespace pollution by putting the auxciliary functions inside of sqrt 6 | 7 | The way we do that is using a block: 8 | 9 | ```scala 10 | { 11 | val x = f(3) 12 | x * x 13 | } 14 | ``` 15 | 16 | A block is delimited by braces; it contains a sequence of definitions or expressions. 17 | 18 | The last element of a block is an expression that defines its values. 19 | 20 | Blocks are expressions in scala, so they can be used everywhere an expression can - including the right hand side of a function definition. 21 | 22 | ```scala 23 | def sqrt(x: Double) = { } 24 | ``` 25 | 26 | ### Blocks and Visibility 27 | 28 | The definitions inside a block are only visible from within the block 29 | 30 | The definitions inside a block *shadow* definitions of the same names outside the block. 31 | 32 | ```scala 33 | val x = 0 34 | def f(y: Int) = y + 1 35 | val result = { 36 | val x = f(3) 37 | x * x 38 | } 39 | ``` 40 | 41 | f is visible here - it refers to the outer block. but the name x here refers to the inner x - it is shadowing the outer name. 
42 | 43 | ### Lexical Scoping 44 | Definitions of outer blocks are visible inside a block unless they are shadowed; 45 | 46 | ### Semicolons 47 | In scala, semicolons at the end of lines are in most cases optional. The only case where you would need them is when you want to put multiple statements on one line, eg: 48 | 49 | ```scala 50 | val y = x + 1; y * y 51 | ``` 52 | 53 | How to write expressions that span several lines? Two ways: 54 | 55 | ![img](http://i.imgur.com/vSsO9eP.png) 56 | 57 | or, 58 | 59 | ![img](http://i.imgur.com/vWDT0lq.png) 60 | -------------------------------------------------------------------------------- /notes/week 1/006-tail-recursion.md: -------------------------------------------------------------------------------- 1 | ## Tail Recursion 2 | 3 | ### Review: Evaluating a Function Application 4 | One simple rule: one evaluates a function application f(e1,....,en) 5 | 6 | - by evaluating the expressions e1,...,en resulting in the values v1,...,vn, then 7 | - by replacing the application with the body of the function f, in which 8 | - the actual parameters v1,...,vn replace the formal parameters of f 9 | 10 | This can be formalized as a *rewriting of the program itself*: 11 | ![img](http://i.imgur.com/QD0aaaF.png) 12 | 13 | Say you have a program with a function definition f, with parameters x1 to xn, and a body B - and then, somewhere else you have a function call: f applied to argument values v1 through vn. That program can be rewritten to a program that contains the same function definition, but the function application has now been replaced by the body of f, B, and at the same time the formal parameters x1 to xn have been replaced by the argument values; the rest of the program is assumed to be unchanged. 14 | 15 | Here, [v1/x1,...,vn/xn] means "the expression B, in which all occurrences of xi have been replaced by vi". 
This notation is called a *substitution* 16 | 17 | Consider gcd, the function that computes the greatest common divisor of two numbers, with Euclid's algorithm. 18 | 19 | ```scala 20 | def gcd(a: Int, b: Int): Int = 21 | if (b == 0) a else gcd(b, a % b) 22 | ``` 23 | 24 | The evaluation of this algorithm occurs like so: 25 | 26 | ![img](http://i.imgur.com/ciOGYQ7.png) 27 | 28 | Consider factorial: 29 | 30 | ```scala 31 | def factorial(n: Int): Int = 32 | if (n== 0) 1 else n * factorial(n-1) 33 | ``` 34 | 35 | ![img](http://i.imgur.com/Xehw44B.png) 36 | 37 | What's the difference between these two sequences, gcd and factorial? 38 | 39 | If we come back to gcd, we see that the reduction sequence essentially oscillates - it goes from one call to gcd to the next through each step, until it terminates. 40 | 41 | Factorial on the other hand, in each couple of steps we add one more element to our expression; our expression becomes bigger and bigger, until the end when we reduce it to the the final value. 42 | 43 | ### Tail Recursion 44 | That difference in the rewriting rules translates to an actual difference in execution on a computer; in fact it turns out if you have a recursive function that calls itself as its last action, then you can reuse the stak frame of that function. This is called *tail recursion*. 45 | 46 | By applying that trick, a tail recursive function can execute in constant stack space; it's really just another formulation of an iterative process. You could say that a tail recursive function is just the functional form of a loop - and it executes just as efficiently as a loop. 47 | 48 | If we go back to gcd, we see that it's calling itself as its last action - that translates to a rewriting sequence that's essentially constant size, which will in the actual execution on a computer translate into a tail recursive call that can execute in constant space. 
49 | 50 | On the other hand, in factorial after the call to factorial(n-1), there's still work to be done - we have to multiply the result of that call with the number n. That call here is not a tail recursive call. You can see that in the reduction sequence - there's a build up of intermediate results that we have to keep until we can compute the final value. 51 | 52 | Both factorial and gcd only call itself; of course, a function can also call other functions. The generalization of tail recursion is that if the last action of a function consists of calling another function, maybe the same, maybe another, the stack frame could be reused for both functions. Such calls are called *tail calls* 53 | 54 | ### Tail Recursion in Scala 55 | Should every function be tail recursive? Not really... in scala, only directly recursive calls to the current function are optimized. The interest of tail recursion is only to avoid very deep recursion chains.... most implementations of the JVM limit the amount of recursion to a couple thousand stack frames. If the input data is such that these deep recursive chains could happen, it makes sense to reformulate your function to be tail recursive to run in constant stack space, to avoid stack overflow exceptions. 56 | 57 | On the other hand, if your input data are not susceptible to deep recursive trains, clarity trumps efficiency - write your function the clearest way you can (which often is not tail recursive). 58 | 59 | Factorial grows very very quickly - even after a very low number of recursive steps you'll already exceed the range of integers. It's not worth making factorial a tail recursive function. 
60 | -------------------------------------------------------------------------------- /notes/week 2/001-higher-order-functions.md: -------------------------------------------------------------------------------- 1 | ##Higher Order Functions 2 | 3 | One thing particular about functional languages is that they treat functions as *first class values* - meaning, like any other value, a function can be passed as a parameter to another function or returned as a result. This provides a flexible way to compose programs. 4 | 5 | Functions that take other functions as parameters or that return functions as results are called higher order functions - that's the opposite of a first order function, which is a function that operates on simple data types such as integers or lists, but not other functions. 6 | 7 | Let's suppose we want to take the sum of all the integers between a and b: 8 | 9 | ```scala 10 | def sumInts(a: Int, b, Int): Int = 11 | if (a > b) 0 else a + sumInts(a + 1, b) 12 | ``` 13 | 14 | Let's vary the problem a bit - now we want to take the sum of the cubes of all numbers between a and b. 15 | 16 | ```scala 17 | def cube(x: Int): Int = x * x * x 18 | def sumCubes(a: Int, b: Int): Int = 19 | if (a > b) 0 else cube(a) + sumCubes(a + 1, b) 20 | ``` 21 | 22 | Next let's take the sum of all factorials between a and b... 23 | 24 | ```scala 25 | def sumFactorials(a: Int, b: Int): Int = 26 | if (a > b) 0 else fact(a) + sumFactorials(a + 1, b) 27 | ``` 28 | 29 | By now we can see the principle - the program is exactly the same as sumInts and sumCubes. Of course these are all special cases of the mathematical sum of values of f(n), where f is a given function and n is taken from the interval between a and b 30 | 31 | ![img](http://i.imgur.com/ber3LCH.png) 32 | 33 | The question is that if mathematics has a special notation for that, shouldn't programming? Can we factor out the common pattern? 
34 | 35 | ###Summing with Higher-Order Functions 36 | Let's define a function sum, which takes a parameter f of type Int to Int, and the two parameter bounds of a and b: 37 | 38 | ```scala 39 | def sum(f: Int => Int, a: Int, b: Int): Int = 40 | if (a > b) 0 41 | else f(a) + sum(f, a + 1, b) 42 | ``` 43 | 44 | The new thing here is that f is a parameter of the sum function - it's not a given function it's a parameter. 45 | 46 | Once we have that, we can then write sumInts/sumCubes/sumFactorials like 47 | 48 | ```scala 49 | sum(id, a, b) //the id function simply returns its parameter unchanged 50 | sum(cube, a, b) 51 | sum(fact, a, b) 52 | ``` 53 | 54 | What we've done effectively is reuse the pattern that defines the sum function so that we only had to write that once. 55 | 56 | ###Function Types 57 | The type A => B is the type of a *function* that takes an argument of type A and returns a result of type B. So, Int => Int is the type of functions that map integers to integers. 58 | 59 | So, we've shortened the definitions of sumInts et al, but there's an annoying detail - we had to name all the auxiliary functions (cube, factorial). We're adding code to our program that we don't need. 60 | 61 | Passing functions as parameters leads to the creation of many small functions - that's tedious. Compare to strings - we don't need to define a string using ```def``` - instead we just directly write ```println("abc")``` 62 | 63 | That's because strings exist as *literals* - since functions are important in our language, it makes sense to introduce function literals, which would let us write a function without giving it a name. These are called anonymous functions. 64 | 65 | Example - a function that raises its argument to a cube: 66 | 67 | ```scala 68 | (x: Int) => x * x * x 69 | ``` 70 | 71 | Here, (x: Int) is the parameter of the function, and x * x * x is it's body. The type of the parameter can be omitted if it can be inferred by the compiler from the context. 
If there are several parameters, they are separated by commas
23 | 24 | 25 | Now we can define our sum functions like this: 26 | 27 | ```scala 28 | def sumInts = sum(x => x) 29 | def sumCubes = sum(x => x * x * x) 30 | def sumFactorials = sum(fact) 31 | ``` 32 | 33 | These functions can in turn be applied like any other function: 34 | 35 | ```scala sumCubes(1, 10) + sumFactorials(10, 20)``` 36 | 37 | Can we avoid the middlemen, sumInts, sumCubes, etc? Yup: 38 | 39 | ```scala 40 | sum(cube)(1, 10) 41 | ``` 42 | 43 | sum(cube) applies sum to cube and returns the *sum of cubes* function - it is therefor equivalent to sumCubes. This function is then applied to the arguments (1, 10). 44 | 45 | 46 | ###Multiple Parameter Lists 47 | 48 | Another piece of syntactic sugar - the definition of functions that return functions is so useful in functional programming that there is a special syntax for it in Scala; for example, the following definition of sum is equivalent to the one with the nested sumF function, but shorter 49 | 50 | ```scala 51 | def sum(f: Int => Int)(a: Int, b: Int): Int = 52 | if (a > b) 0 else f(a) + sum(f)(a + 1, b) 53 | ``` 54 | 55 | We can just combine the two parameter lists of the outer function and the nested function and write them one after the other. 56 | 57 | Question: given ```scala def sum(f: Int => Int)(a: Int, b: Int): Int = ...```, what is it's type? 58 | 59 | Answer: 60 | (Int => Int) => (Int, Int) => Int 61 | 62 | It first is a function that takes a function as an argument, so that would be the argument type - that returns a function that takes two integers as arguments, and that finally returns an Int 63 | 64 | Note that functional types associate to the right. 
That is to say that 65 | 66 | Int => Int => Int 67 | 68 | is equivalent to 69 | 70 | Int => (Int => Int) 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /notes/week 2/003-example-finding-fixed-points.md: -------------------------------------------------------------------------------- 1 | ##Fixed Points 2 | 3 | Whazzat?? 4 | 5 | A number x is called a *fixed point* of function f if `f(x) = x` 6 | 7 | Let's see a pitcher! 8 | 9 | Let's suppose we have a function that maps x to 1 + x/2. The graph of that function would look like this: 10 | 11 | ![img](http://i.imgur.com/KmDoSEA.png) 12 | 13 | The fixed point would be where the diagonal hits the graph of the function, in this case 2. 14 | 15 | Turns out that for some functions f we can locate the fixed points by starting with an initial estimate and then applying f in a repetitive way. 16 | 17 | > x, f(x), f(f(x)), f(f(f(x))), ... 18 | 19 | until the value does not vary anymore (or the change is sufficiently small). 
20 | 21 | This leads to the following function for finding a fixed point: 22 | 23 | ```scala 24 | val tolerance = 0.0001 25 | def isCloseEnough(x: Double, y: Double) = 26 | abs((x - y) / x) / x < tolerance 27 | def fixedPoint(f: Double => Double)(firstGuess: Double) = { 28 | def iterate(guess: Double): Double = { 29 | val next = f(guess) 30 | if (isCloseEnough(guess, next)) next 31 | else iterate(next) 32 | } 33 | iterate(firstGuess) 34 | } 35 | ``` 36 | 37 | ###Return to Square Roots 38 | 39 | Here is a specification for the sqrt function: 40 | 41 | > sqrt(x) = the number y such that y * y =x 42 | 43 | Or, dividing both sides of the equation with y: 44 | 45 | > sqrt(x_ = the number y such that y = x / y) 46 | 47 | Consequently, sqrt(x) is a fixed point of the function (y => x / y) 48 | 49 | This suggests to calculate sqrt(x) by iteration towards a fixed point: 50 | 51 | ```scala 52 | def sqrt(x: Double) = 53 | fixedPoint(y => x/y)(1.0) 54 | ``` 55 | 56 | Unfortunately, this does not converge. We get an infinite computation. Doh. 57 | 58 | Maybe some println instructions to the fixedPoint function would help. For each iteration step we'll write what the current guess is. 59 | 60 | ![doh](http://i.imgur.com/npOzbsy.png) 61 | 62 | We can see that our guess oscillates between 1 and 2 all the time. If you do the execution, this is precisely what happens. 63 | 64 | ###Average Dampening 65 | How can we do better? 66 | 67 | One way to control such oscillations is to prevent the estimation from varying too much, by averaging successive values of the original sequence: 68 | 69 | ```scala 70 | def sqrt(x: Double) = fixedPoint(y => (y + x / y) / 2)(1.0) 71 | ``` 72 | 73 | ###Functions as Return Values 74 | 75 | It's pretty keen to be able to pass functions as arguments - but functions that return functions are pretty cool too. 76 | 77 | Consider iteration towards a fixed point. We begin by observing that sqrt(x) is a fixed point of the function `y => x / y`. 
78 | 79 | Then, the iteration converges by averaging successive values. This technique of *stabilizing by averaging* is general enough to merit being abstracted into its own function: 80 | 81 | ```scala 82 | def averageDamp(f: Double => Double)(x: Double) = (x + f(x) / 2) 83 | ``` 84 | 85 | This function takes an arbitrary function of Double => Double, and a value x of type Double; it then computes the average of x and f(x) 86 | 87 | What would sqrt(x) look like with fixedPoint and averageDamp? 88 | 89 | ```scala 90 | def sqrt(x: Double) = 91 | fixedPoint(averageDamp(y => x / y))(1) 92 | ``` 93 | 94 | So, averageDamp is a function that takes a function (in this case, a function that is at the root of the square root specification), and it returns another function, namely, the function that is essentially the same iteration but with average damping applied. 95 | 96 | -------------------------------------------------------------------------------- /notes/week 2/004-scala-syntax-review.md: -------------------------------------------------------------------------------- 1 | ##Scala Syntax 2 | 3 | ###Language elements seen so far: 4 | 5 | We have seen language elements to express types, expressions, and definitions. 6 | 7 | Here's the Extended Backus-Naur form (EBNF) for them. Wait what's Extended Backus-Naur form (EBNF)? It's a standard grammar to describe language elements. 8 | 9 | > | denotes an alternative 10 | > [...] an option (0 or 1) 11 | > {...} a repetition (0 or more) 12 | 13 | ###Types 14 | 15 | ![types](http://i.imgur.com/gpOW5jm.png) 16 | 17 | We've seen two forms of types (thus we've got a bar for Type = ...). A type in our language is either a SimpleType, or a FunctionType. 18 | 19 | A simple type so far is just an identifier. 20 | 21 | A function Type always contains an arrow; the left hand side of the arrow could be a SimpleType, or a set of Types in parentheses. 
22 | 23 | A *type* can be: 24 | 25 | * a numeric type - `Int`, `Double`, (and `Byte`, `Short`, `Char`, `Long`, `Float`) 26 | * The `Boolean` type with the values `true` and `false` 27 | * the `String` type 28 | * a *function type*, like `Int => Int`, `(Int, Int) => Int` 29 | 30 | Later we'll see more forms of types. 31 | 32 | ###Expressions 33 | 34 | ![expressions](http://i.imgur.com/rzLeGbP.png) 35 | 36 | An *expression* can be: 37 | 38 | * An identifier, such as `x`, `isGoodEnough` 39 | * a literal, like `0`, `1.0`, `"abc"` 40 | * a function application, like `sqrt(x)` 41 | * an operator application, like `-x`, `y + x` 42 | * a selection, like math.abs 43 | * a conditional expression like `if (x < 0) - x else x` 44 | * a block, like `{ val x = math.abs(y ; x * 2) }` 45 | * an anonymous function, like `x => x + 1` 46 | 47 | ###Definitions 48 | 49 | ![definitions](http://i.imgur.com/KuMIh5t.png) 50 | 51 | A *definition* can be: 52 | 53 | * a *function definition*, like `def square(x: Int) = x * x` 54 | * a *value definition*, like `val y = square(2)` 55 | 56 | A *parameter* can be 57 | 58 | * a call-by-value parameter, like `(x: Int)`, 59 | * a call-by-name parameter, like `(y: => Double)` 60 | 61 | That's all the syntax we've seen so far - we'll add more as we need them over the next few weeks. -------------------------------------------------------------------------------- /notes/week 2/005-functions-and-data.md: -------------------------------------------------------------------------------- 1 | ##Functions and Data 2 | 3 | In this section, we'll learn how functions create and encapsulate data structures. 4 | 5 | Here's our example: we want to design a package for doing rational arithmetic. 
6 | 7 | Quick reminder - a rational number `x/y` is represented by two integers: 8 | 9 | * its *numerator* x 10 | * its *denominator* y 11 | 12 | In a sense we can already do that with what we know from scala; we could define two functions: 13 | ![img](http://i.imgur.com/yHvSPcA.png) 14 | 15 | They each would get all the bits of both rational numbers as arguments; the numerators and denominators of both rational numbers, and then they'd both implement the usual algorithms for rational arithmetic. 16 | 17 | The problem with this approach is that it would be difficult to manage all these numbers; it's a much better approach to combine the numerator and denominator of a rational number into a datastructure. In scala, we do this by defining a class. 18 | 19 | ###Classes 20 | ```scala 21 | class Rational(x: Int, y: Int) { 22 | def numer = x 23 | def denom = y 24 | } 25 | ``` 26 | 27 | This definition introduces two entities: 28 | 29 | * a new *type*, named `Rational` 30 | * a *constructor* `Rational` to create elements of this type 31 | 32 | Scala keeps the names of types and values in *different namespaces*; it always knows from the context whether you mean a type or the value. that way there's no conflict between the two definitions, the constructor and the type, of `Rational` 33 | 34 | ###Objects 35 | A *type* in a programming language is essentially a set of values; the values that belong to a class type are called *objects*. 36 | 37 | We create an object by prefixing an application of the constructor of the class with the operator `new`: `new Rational(1, 2)` 38 | 39 | ###Members of an Object 40 | Objects of class `Rational` have two *members*. 
`numer` and `denom`; we select the members of an object with the infix operator `.`, like in Java 41 | 42 | ###Rational Arithmetic 43 | We can now define the usual arithmetic functions that implement the standard rules; addition, subtraction, multiplication 44 | 45 | ![img](http://i.imgur.com/CTuXv8C.png) 46 | 47 | One thing we could do is use the class `Rational` as a pure datatype; something that just gives us the data. We'd define the operations as functions outside of the class; addRational would take two `Rationals` and give you a `Rational`: 48 | 49 | ```scala 50 | def addRational(r: Rational, s: Rational): Rational = 51 | new Rational( 52 | r.numer * s.denom + s.numer * r.denom, 53 | r.denom * s.denom) 54 | ``` 55 | 56 | To make things print nicely, we could also define a `makeString` function that takes a rational and produces the numerator of the rational and the denominator, seperated by a slash. 57 | 58 | ```scala 59 | def makeString(r: Rational) = 60 | r.numer + "/" + r.denom 61 | ``` 62 | 63 | When all is said and done, we'd have an invocation that looked something like this: 64 | 65 | ```scala 66 | makeString(addRational(new Rational(1,2), new Rational(2, 3))) // 7/6 67 | ``` 68 | 69 | ###Methods 70 | We can go deeper...... we can package functions operation on a data abstraction in the data abstraction itself. 71 | 72 | Such functions are called *methods*. 73 | 74 | For example, `Rationals` now would have, in addition to the functions `numer` and `denom`, the functions `add`, `sub`, `mul`, `div`, `equal`, `toString` 75 | 76 | -------------------------------------------------------------------------------- /notes/week 2/006-more-fun-with-rationals.md: -------------------------------------------------------------------------------- 1 | ##Classes and Objects, Dawg 2 | 3 | Our `Rational` class does not simplify... we get things like 79/40. Why is that? 
4 | 5 | ###Data Abstraction 6 | 7 | One would expect rational numbers to be *simplified*, reduced to their smalled numerator and denominator by dividing both with a divisor. 8 | 9 | We could implement this in each rational operation, add a simplification step to add, multiply, etc, but it would be easy to forget this division in an operation. It'd also violate the principle of Don't Repeat Yourself 10 | 11 | A better alternative consists of simplifying the representation in the class when the objects are constructed. 12 | 13 | Let's implement this in our `Rational` class: 14 | 15 | ```scala 16 | class Rational(x: Int, y: Int) { 17 | private def gcd(a: Int, b: Int): Int = if (b == 0) a else gcd(b, a % b) 18 | private val g = gcd(x, y) 19 | def numer = x / g 20 | def denom = y / g 21 | ... 22 | } 23 | ``` 24 | 25 | gcd and g are *private* members; we can only access them from inside the `Rational` class. In this example, we calculate `gcd` immediately, so its value can be re-used in calculations of `numer` and `denom`. 26 | 27 | We could change that... we could call `gcd` in the code of `numer` and `denom`: 28 | 29 | ```scala 30 | class Rational(x: Int, y: Int) { 31 | private def gcd(a: Int, b: Int): Int = if (b == 0) a else gcd(b, a % b) 32 | def numer = x / gcd(x,y) 33 | def denom = y / gcd(x,y) 34 | } 35 | ``` 36 | 37 | That we we avoid the additional field `g`; it could be advantageous if it is expected that the functions `numer` and `denom` are called infrequently, we can amortize the cost of the `gcd` operations. 
38 | 39 | We could also turn `numer` and `denom` into `val`s, so that they're computed only once: 40 | 41 | ```scala 42 | class Rational(x: Int, y: Int) { 43 | private def gcd(a: Int, b: Int): Int = if (b == 0) a else gcd(b, a % b) 44 | private val g = gcd(x, y) 45 | val numer = x / g 46 | val denom = y / g 47 | } 48 | ``` 49 | 50 | That would be advantageous if the functions numer and denom are called often; we've already computed what they are and we don't repeat the operations. 51 | 52 | ###The Client's View 53 | What's important here is that no matter which of the three alternatives we choose, clienst observe exactly the same behavior in each case. This ability to choose different implementations of the data without affecting clients is called *data abstraction*. 54 | 55 | ###Self Reference 56 | On the inside of a class, the name `this` represents the object on which the current method is executed. 57 | 58 | ```scala 59 | class Rational(x: Int, y: Int) { 60 | ... 61 | 62 | def less(that: Rational) = numer * that.denom < that.numer * denom 63 | 64 | def max(that: Rational) = if (this.less(that)) that else this 65 | 66 | } 67 | ``` 68 | 69 | Not that a simple name `x`, which refers to another member of the class, is an abbreviation of `this.x`. The members of a class can always be referenced with `this` as the prefix. 70 | 71 | ###Preconditions 72 | There's no such thing as a rational number with a denominator of zero... how can we guard against users creating illegal rationals like that? 73 | 74 | We can enforce this by calling the `require` function: 75 | 76 | ```scala 77 | class Rational(x: Int, y: Int) { 78 | require(y != 0, "denominator must be nonzero") 79 | } 80 | ``` 81 | 82 | `require` is a predefined function that takes a condition and an optional message string. If the condition is false, an `IllegalArgumentException` is thrown with the given message string. 83 | 84 | ###Assertions 85 | Besides `require`, there's another test called `assert`. 
`assert` also takes a condition and an optional message string as parameters, eg 86 | 87 | ```scala 88 | val x = sqrt(y) 89 | assert(x >= 0) 90 | ``` 91 | Like `require`, a failing `assert` will also throw an exception, but it's a different one: `AssertionError`. 92 | 93 | This reflects a difference in intent 94 | 95 | * `require` is used to enforce a precondition on the caller of a function 96 | * `assert` is used to check the code of the function itself 97 | 98 | ###Constructors 99 | In scala, a class implicitly introduces a constructor, called the *primary constructor* of the class. 100 | 101 | The primary constructor: 102 | 103 | * takes the parameters of the class 104 | * executes all statements in the class body 105 | 106 | If you know Java, you're used to classes having several constructors; that's also possible in scala though the syntax is different. Let's say we want another constructor for `Rational`, that only takes one integer, the numerator (the denominator defaults to 1) 107 | 108 | ```scala 109 | class Rational(x: Int, y: Int) { 110 | def this(x: Int) = this(x, 1) 111 | } 112 | ``` -------------------------------------------------------------------------------- /notes/week 2/007-evaluation-and-operators.md: 1 | ##Classes and Substitutions 2 | 3 | We previously defined the meaning of a function application using a computation model based on substitution - now we extend this model to classes and objects 4 | 5 | How is an instantiation of the class `new C(e1, ..., em)` evaluated? 6 | 7 | Answer: the expression arguments `e1,...,em`, are evaluated just like the arguments of a normal function - that's it. The resulting expression, say, `new C(v1, ..., vm)` is already a value. We just take these new instance creation expressions as values. 8 | 9 | Now suppose we have a class definition like this: 10 | 11 | ```scala 12 | class C(x1,...,xm) { def f(y1,...,yn) = b ... 
} 13 | ``` 14 | 15 | where 16 | 17 | * the formal parameters of the class are `x1,...,xm` 18 | * the class defines a method `f` with formal parameters `y1,...,yn` 19 | 20 | How is the following expression evaluated? 21 | 22 | ```new C(v1,...,vm).f(w1,...,wn)``` 23 | 24 | Answer: the expression is rewritten to: 25 | ![img](http://i.imgur.com/E87F0Rp.png) 26 | 27 | There are three substitusions at work here: 28 | 29 | * the substitution of the formal parameters `y1,...,yn` of the function `f` by the arguments `w1,...,wn` 30 | * the substitution of the formal parameters `x1,...,xm` of the class `C` by the class arguments `v1,...,vm` 31 | * the substitution of the self reference *this* by the value of the object `new C(v1,...,vn)` 32 | 33 | ###Operators 34 | 35 | In principle, the rational numbers defined by `Rational` are as natural as integers. 36 | 37 | But for the user of these abstractions, there is a noticeable difference: we write x + y, if x and y are integers, but we write r.add(s) if r and s are rational numbers. 38 | 39 | In Scala, we can eliminate this difference. There are two steps. 40 | 41 | ###Step 1: Infix Notation 42 | 43 | Any method with a parameter can be used like an infix operator, so we can write 44 | 45 | ``` 46 | /* in place of */ 47 | r add s r.add(s) 48 | r less s r.less(s) 49 | r max s r.max(s) 50 | ``` 51 | 52 | ###Step 2: Relaxed Identifiers 53 | 54 | Operators can be used as identifiers. Normally in programming languages, operators must be alphanumeric. They start with a letter, followed by a sequence of letters or numbers. In scala we have an alternative, *symbolic identifiers*, that start with an operator symbol (like +, -), followed by other uperator symbols. 55 | 56 | In this definition, the underscore character counts as a letter. Alphanumeric identifiers can also end in an underscore, followed by some operator symbols. 57 | 58 | ###Precedence Rules 59 | 60 | Wait a second... 
if all operators are user-defined, how is their precedence established? There's actually one universal rule in scala - the precedence of an operator is determined by its first character. Here's a table listing the characters in increasing order of priority precedence: 61 | ![img](http://i.imgur.com/oL1GfGg.png) 62 | 63 | -------------------------------------------------------------------------------- /notes/week 3/001-class-hierarchies.md: -------------------------------------------------------------------------------- 1 | ##Class Hierarchies 2 | 3 | ###Abstract Classes 4 | Consider the task of writing a class for sets of integers with the following operations: 5 | 6 | ```scala 7 | abstract class IntSet { 8 | def incl(x: Int): IntSet 9 | def contains(x: Int): Boolean 10 | } 11 | ``` 12 | 13 | `IntSet` is an *abstract class* 14 | 15 | Abstract classes can contain members which are missing an implementation. Consequently, no instances of an abstract class can be created with the `new` operator 16 | 17 | ###Class Extensions 18 | Let's consider implementing sets as binary trees. 19 | 20 | The invariant that we want to maintain is that for each node, the nodes on its right hand side all have integer values that are higher than the node, and the nodes on its left all have values that are less 21 | 22 | There are two types of possible trees: a tree for the empty set, and a tree consisting of an integer and two sub-trees. 23 | 24 | ```scala 25 | class Empty extends IntSet { 26 | def contains(x: Int): Boolean = false 27 | def incl(x: Int): IntSet = new NonEmpty(x, new Empty, new Empty) 28 | } 29 | ``` 30 | 31 | So, what's a NonEmpty set? 32 | 33 | In this implementation, it'd be represented by a class that takes an element (the integer stored in the node), and a left and a right subtree (in this case, an IntSet). 
34 | 35 | ```scala 36 | class NonEmpty(elem: Int, left: IntSet, right: IntSet) extends IntSet { 37 | def contains(x: Int): Boolean = 38 | if (x < elem) left contains x 39 | else if (x > elem) right contains x 40 | else true 41 | 42 | def incl(x: Int): IntSet = 43 | if (x < elem) new NonEmpty(elem, left incl x, right) 44 | else if (x > elem) new NonEmpty(elem, left, right incl x) 45 | else this 46 | } 47 | ``` 48 | 49 | The implementation of contains and includes makes use of the sorted criterion of trees; for contains, we always only have to look in one of the possible subtrees. If the given number is less than the current element value, then we know we'll have to look in the left subtree. If it's greater, then we look in the right. If it's neither less or greater, it must be equal, in which case we've found the element. 50 | 51 | `incl` follows a similar algorithm - if the element we're adding is less than the element in the tree, we put it in the left subtree. If it's greater, we put it in the right. Otherwise, the element is already in the tree and we can return the tree as-is, there's nothing to be added. 52 | 53 | One important thing to note is that we're still purely functional - there's no mutation here. When we say we're "including" an element in a subtree, we mean we're creating a new tree that contains the previous element of the tree and a larger left subtree where x is included in the previous left subtree, and the current subtree on the right. 54 | 55 | Let's say we've got this tree here, and we want to include the number 3 in it: 56 | ![img](http://i.imgur.com/fEiJdZj.png) 57 | 58 | Here's how it works. We'd create the new node 3, with two empty subtrees, which would be the left subtree of a new node 5, with an empty right hand subtree, and finally, the tree would be a new tree, with the node 7, and the same right hand side tree as before. 
59 | ![img](http://i.imgur.com/ZMpps3u.png) 60 | 61 | So really, we end up with *two trees* - the old one, and the new one. The two trees share the subtree on the right hand side, but they differ on the left tree. 62 | 63 | These are called *persistent* data structures; even when we do "changes" to them, the old data structure is maintained, it doesn't go away. 64 | 65 | ###Terminology 66 | `Empty` and `NonEmpty` both extend the base class `IntSet` - this implies that the types `Empty` and `NonEmpty` conform to the type `IntSet`. In other words, an object of type `Empty` or `NonEmpty` can be used where an object of type `IntSet` is required. 67 | 68 | 69 | ###Object Definitions 70 | In the `IntSet` example, one could argue that there is really only a single empty `IntSet` - it seems overkill to have the user create many instances of it. We can epxress this case better with an *object definition*: 71 | 72 | ```scala 73 | object Empty extends IntSet { 74 | def contains(x: Int): Boolean = false 75 | def incl(x: Int): IntSet = new NonEmpty(x, new Empty, new Empty) 76 | } 77 | ``` 78 | 79 | This defines a *singleton object* named `Empty`. No other `Empty` instance can be (or need to be) created. Singleton objects are values, so `Empty` evaluates to itself. 80 | 81 | ###Programs 82 | So far we've just executed code from the REPL or the worksheet, but it's also possible to create standalone applications in Scala - each such application contains an object with a `main` method. 83 | 84 | For instance, here is the "Hello World!" program in Scala: 85 | 86 | ```scala 87 | object Hello { 88 | def main(args: Array[String]) = println("hello world!") 89 | } 90 | ``` 91 | 92 | ###Dynamic Binding 93 | Object oriented languages (including scala) implement *dynamic method dispatch* - this means that the code invoked by a method call depends on the runtime type of the object that contains the method. 
94 | 95 | ``` 96 | Empty contains 1 97 | -> [1/x][Empty/this] false 98 | = false 99 | ``` 100 | 101 | What do you do? Well, you look up the `contains` method in empty, and performing the necessary substitutions we get false. -------------------------------------------------------------------------------- /notes/week 3/002-how-classes-are-organized.md: 1 | ###Packages 2 | 3 | Generally in Scala, as in Java, classes are organized into packages; using a package clause at the top of your source file. 4 | 5 | ###Traits 6 | 7 | In Java, as well as in Scala, a class can only have one superclass - it's a single inheritance language. In practice this can be quite constraining; often times a type would naturally have several super types, or you want to inherit behavior from several super entities that all contribute to the final code of the class. 8 | 9 | To do that we can use a concept called *traits*; they're essentially declared like an abstract class, but with `trait` instead of `abstract` 10 | 11 | ```scala 12 | def height: Int 13 | def width: Int 14 | def surface = height * width 15 | ``` 16 | 17 | Classes, objects, and traits can inherit from at most one class but arbitrarily many traits: 18 | 19 | ```scala 20 | class Square extends Shape with Planar with Movable 21 | ``` 22 | 23 | Traits resemble interfaces in Java but are more powerful - they can contain fields and concrete methods. These implementations can be overridden in subclasses. On the other hand, traits cannot have (value) parameters - only classes can (like the numerator and denominator of class `Rational`). 24 | 25 | ###Top Types 26 | At the top of the scala type hierarchy we find: 27 | 28 | * `Any` the base type of all types. 
It defines methods like '==', '!=', 'hashCode', and 'toString' 29 | * `AnyRef` the base type of all reference types; an alias of `java.lang.Object` 30 | * `AnyVal` the base type of all primitive types; for now these are just the primitives that scala inherits from java 31 | 32 | ###The Nothing Type 33 | 34 | `Nothing` is at the bottom of scala's type hierarchy - it is a subtype of every other type. 35 | 36 | There is no value of type `Nothing` - why is that useful? 37 | 38 | * to signal abnormal termination; sometimes a function would not return, but instead throw an exception or terminate the program. What would the return type of that function be? The best possible type is nothing - it doesn't return anything. 39 | * as an element type of empty collections (like Set[Nothing]) 40 | 41 | ###Exceptions 42 | Scala's exception handling is similar to Java: 43 | 44 | ```scala 45 | throw Exc 46 | ``` 47 | 48 | aborts evaluation with the exception Exc - the type of this expression is `Nothing`. 49 | 50 | ###Null 51 | Every reference class type also has `null` as a value - when somebody expects a string, you could pass it null. The type of `null` is `Null`. 
52 | 53 | `Null` is a subtype of every class that inherits from `Object` - it is incompatible with subtypes of `AnyVal` 54 | 55 | ```scala 56 | val x = null 57 | val y: String = x 58 | val z: Int = null //Throws a type mismatch error 59 | ``` -------------------------------------------------------------------------------- /notes/week 3/003-polymorphism.md: -------------------------------------------------------------------------------- 1 | ###Polymorphism 2 | 3 | As a motivating example, let's look at a datastructure that's been fundamental in many functional languages from the beginnings - the Cons-List 4 | 5 | It is an immutable linked list, constructed from two building blocks: 6 | 7 | * the empty list, which we call `Nil` 8 | * a cell containg an element and a pointer to the remainder of the list, called `Cons` 9 | 10 | Here's what `List(1,2,3)` would look like: 11 | ![img](http://i.imgur.com/Cbla4yn.png) 12 | 13 | We'd have a reference to a `Cons` cell that contains the 1, and a reference to a `Cons` cell that contains 2, which has a reference to a `Cons` cell that contains 3, which has a reference to `Nil` 14 | 15 | A nested list like `List(List(true, false), List(3))` would look like: 16 | ![img](http://i.imgur.com/XYcKqde.png) 17 | 18 | ###Cons-Lists in scala 19 | How would we write this as a class hierarchy in scala? 20 | 21 | Here's an outline that would represent lists of integers in this fashion: 22 | 23 | ```scala 24 | trait IntList ... 25 | class Cons(val head: Int, val tail: IntList) extends IntList ... 26 | class Nil extends IntList ... 27 | ``` 28 | 29 | Notice there's a bit of new syntax in the `Cons` declaration - `val head: Int` defines a parameter and a field definition in the class itself. 
It's equivalent to: 30 | 31 | ```scala 32 | class Cons(_head: Int, _tail:IntList) extends IntList { 33 | val head = _head 34 | val tail = _tail 35 | } 36 | ``` 37 | 38 | A list is either 39 | 40 | * an empty list `new Nil`, or 41 | * a list `Cons(x, xs)` consisting of a `head` element x and a `tail` list xs 42 | 43 | ###Type Parameters 44 | There's one problem with our type hierarchy - it's way too narrow to only define lists with `Int` elements. If we did it that way, we'd need a type hierarchy for `Double`, `Boolean`, and so on... 45 | 46 | What we need to do is generalize the definition - we can do that using a type parameter 47 | 48 | ```scala 49 | trait List[T] 50 | class Cons[T](val head: T, val tail: List[T]) extends List[T] 51 | class Nil[T] extends List[T] 52 | ``` 53 | 54 | So, we're going to define a base trait, List, which takes a type parameter `T`. That base trait List[T] will have two subclasses, `Cons[T]` and `Nil[T]`. `Cons[T]` will now have a head element of type `T`, and a tail element of type `List[T]` 55 | 56 | ###Generic Functions 57 | Like classes, functions can have type parameters. For instance, here's a function that creates a list consisting of a single element: 58 | 59 | ```scala 60 | def singleton[T](elem: T) = new Cons[T](elem, new Nil[T]) 61 | ``` 62 | 63 | We can then write: 64 | 65 | ```scala 66 | singleton[Int](1) 67 | singleton[Boolean](true) 68 | ``` 69 | 70 | ###Type Inference 71 | The scala compiler can usually deduce the correct type parameters from the value arguments of a function call - so in most cases the type parameters can be left out. The above could just be written as: 72 | 73 | ```scala 74 | singleton(1) 75 | singleton(true) 76 | ``` 77 | 78 | ###Types and Evaluation 79 | 80 | Type parameters do not affect evaluation in scala at all - we can assume that all type parameters and type arguments are removed before evaluation of the program. This is called *type erasure*. 
Types are only important for the compiler to verify that the program satisfies certain correctness properties, but they're not relevant for the actual execution. 81 | 82 | ###Polymorphism 83 | Polymorphism means that a function type comes "in many forms" - basically, the function can be applied to arguments of many types, or the type can have instances of many types. 84 | 85 | We have seen two principle forms of polymorphism - subtyping, and generics. 86 | 87 | Subtyping means that instances of a subclass can be passed to a base class - ie, given our `List` hierarchy, anywhere we have a parameter that accepts type `List`, we can pass either a `Nil` or a `Cons`. 88 | 89 | Generics means that we can create many instances of a function or class by type parameterization. By using generics, we could create a `List[Int]`, or a `List[List[Boolean]]`, whatever dawg. 90 | -------------------------------------------------------------------------------- /notes/week 4/001-functions-as-objects.md: -------------------------------------------------------------------------------- 1 | ##Functions as Objects 2 | 3 | We have seen that Scala's numeric types and the `Boolean` type can be implemented like normal classes... what about functions? 4 | 5 | In fact, function values *are* treated as objects in Scala. 6 | 7 | The function type `A => B` is just an abbreviation for the class `scala.Function[A,B]`, roughly defined as: 8 | 9 | ```scala 10 | package scala 11 | trait Function1[A, B] { 12 | def apply(x: A): B 13 | } 14 | ``` 15 | 16 | So functions are objects with `apply` methods. 17 | 18 | There are also traits `Function2, Function3`... 
for functions which take more parameters (currently up to 22) 19 | 20 | ###Expansion of Function Values 21 | 22 | An anonymous function such as `(x: Int) => x * x` is expanded to 23 | 24 | ```scala 25 | { class AnonFun extends Function1[Int, Int] { 26 | def apply(x: Int) = x * x 27 | } 28 | new AnonFun 29 | } 30 | ``` 31 | 32 | Or, shorter, using *anonymous class syntax*, like in Java: 33 | 34 | ```scala 35 | new Function1[Int, Int] { 36 | def apply(x: Int) = x * x 37 | } 38 | ``` 39 | 40 | ###Expansion of Function Calls 41 | We've seen how we represent functions, but what about applications of these functions? 42 | 43 | A function call, such as `f(a,b)`, where `f` is a value of some class type, is expanded to `f.apply(a,b)` 44 | 45 | So the OO-translation of 46 | 47 | ```scala 48 | val f = (x: Int) => x * x 49 | f(7) 50 | ``` 51 | 52 | would be 53 | 54 | ```scala 55 | val f = new Function1[Int, Int] { 56 | def apply(x: Int) = x * x 57 | } 58 | f.apply(7) 59 | ``` 60 | 61 | ###Functions and Methods 62 | Note that anything defined with a `def`, ie, a method, like ```def f(x: Int): Boolean = ...` is not itself a function value; but if the name of a method is used in a place where a function type is expected, it's converted automatically to the function value. 63 | 64 | -------------------------------------------------------------------------------- /notes/week 4/002-objects-everywhere.md: -------------------------------------------------------------------------------- 1 | ##Pure Object Orientation 2 | Odersky's about to blow your mind, dawg - argues that scala is actually also object oriented, and in a very pure form, despite the fact that everything we've seen about it so far has been functional. 3 | 4 | A pure object oriented language is one in which every value is an object; also, every operation is essentially a method call on some object. 5 | 6 | Is the language is based on classes, this means that the type of each value is a class. 
7 | 8 | So, is Scala a pure object oriented language? 9 | 10 | At first glance, there seem to be some exceptions: primitive types, functions... but let's look closer. 11 | 12 | ###Standard Classes 13 | Conceptually, types such as `Int` or `Boolean` do not receive special treatment in scala; they are like the other classes, defined in the `scala` package. 14 | 15 | For reasons of efficiency, the scala compiler represents the values of type `scala.Int` by 32 bit integers, and values of type `scala.Boolean` by Java's Booleans etc; but this we can treat simply as an optimization and a measure to improve interoperability between Scala and Java code. Conceptually, these things can be treated just like normal classes 16 | 17 | One *could* define Boolean as a class from first principles, without any changes in user code, without resort to primitive booleans: 18 | 19 | ```scala 20 | package idealized.scala 21 | abstract class Boolean { 22 | def ifThenElse[T](t: => T, e: => T): T 23 | 24 | def && (x: => Boolean): Boolean = ifThenElse(x, False) 25 | def || (x: => Boolean): Boolean = ifThenElse(true, x) 26 | def unary_!: Boolean = ifThenElse(false, true) 27 | 28 | def == (x: Boolean): Boolean = ifThenElse(x, x.unary_!) 29 | def != (x: Boolean): Boolean = ifThenElse(x.unary_!, x) 30 | } 31 | ``` 32 | 33 | We would have one abstract method, called ifThenElse; it's a parameterized method, containing a type parameter T; it has a 'then' part, `t`, and an else part, `e`. Both the then and else part take an expression of type T; the result of ifThenElse would be T. 34 | 35 | The idea is that instead of `if (cond) thenExpression else elseExpression`, we'd translate that to the ifThenElse method call of our condition, and we'd pass tE and eE as the arguments to the ifThenElse method `cond.ifThenElse(te, ee)` 36 | 37 | Once we have ifThenElse, how would we define other operators on booleans, like the conjunction (&&) and disjunction (||)? 
Turns out that all the operations on booleans can be defined in terms of ifThenElse. 38 | 39 | Let's look at &&: it would take another expression of type Boolean, and it would then call ifThenElse(x, false); what this means is that if the Boolean itself is true, then we would return the argument x; on the other hand, if the Boolean is false, then the result is immediately false. 40 | 41 | We can apply these tricks to the rest of the operations on booleans. 42 | 43 | Once we have that outline, we still have to define the boolean constants, false and true; we can't be using the primitive booleans because we pass them to ifThenElse with one of our idealized booleans. False and true must themselves be constants of type idealized.scala.Boolean 44 | 45 | Here's how we define them: 46 | 47 | ```scala 48 | package idealized.scala 49 | 50 | object true extends Boolean { 51 | def ifThenElse[T](t: => T, e: => T) = t 52 | } 53 | 54 | object false extends Boolean { 55 | def ifThenElse[T](t: => T, e: => T) = e 56 | } 57 | ``` 58 | 59 | Each of them would be an object; for the true constant, all we need to do is define what the definition of ifThenElse should be for that constant. IE, what is the definition of if (true), thenExpression else elseExpression? well, we've seen from the rewrite rules that this will just become thenExpression 60 | 61 | So our true constant is just the implementation of that rewrite rule; we say ifThenElse of a then expression and an else expression gives us the then part. Conversely, ifThenElse of a then part and an else part gives us the else part. 
-------------------------------------------------------------------------------- /notes/week 4/003-subtyping-and-generics.md: -------------------------------------------------------------------------------- 1 | ##Polymorphism 2 | 3 | There are two principle forms: 4 | 5 | * subtyping, where we can pass instances of a subtype where a basetype was required 6 | * generics, where we can parameterize types with other types 7 | 8 | We're going to look at the interactions between the two concepts. There are two main areas: 9 | 10 | * bounds, where we can subject type parameters to some type constraints 11 | * variance, which defines how parameterized types behave under subtyping 12 | 13 | ###Type Bounds 14 | 15 | Let's say we wanted to write a method `assertAllPos`, which takes an `IntSet` and returns the `IntSet` itself if all its elements are positive, and throws an exception otherwise. 16 | 17 | What would be the best type we can give to `assertAllPos`? Maybe 18 | 19 | 20 | ```scala 21 | def assertAllPost(s: IntSet): IntSet 22 | ``` 23 | 24 | In most situations this is fine, but can one be more precise? 25 | 26 | If `assertAllPos` gets an empty set, it returns Empty - if it gets a non-empty argument, it will give you back a non-empty result. That knowledge is not reflected in the type above, we just say it takes an IntSet and returns an IntSet. 27 | 28 | ###Type Bounds 29 | 30 | One way to express this might be with type bounds, like so: 31 | 32 | ```scala 33 | def assertAllPos[S <: IntSet](r: S): S = ... 34 | ``` 35 | 36 | Here, we're saying that `assertAllPos` takes *some type* `S`, which has to be a subtype of `IntSet` (either `Empty` or `NonEmpty`), and a set of that type (r), and we return a result of the same type. 
37 | 38 | Here, the `<: IntSet` is an upper bound of the type parameter `S`; what it means is that we can instantiate `S` to any type argument as long as the type conforms to the bound 39 | 40 | Generally: 41 | 42 | * `S <: T` means *S is a subtype of T* 43 | * `S >: T` means *S is a supertype of T, or T is a subtype of S* 44 | 45 | ###Lower Bounds 46 | 47 | We can also use a lowery bound for a type variable 48 | 49 | ```[S >: NonEmpty]``` 50 | 51 | introduces a type parameter `S` that can range only over *supertypes* of `NonEmpty`; so `S` could only be one of `NonEmpty, IntSet, AnyRef,` or `Any`. 52 | 53 | It's not immediately apparent where this can be useful - we'll circle back to this. 54 | 55 | ###Mixed Bounds 56 | 57 | Finally, it's possible to mix a lower and upper bound: 58 | 59 | ```[S >: NonEmpty <: IntSet]``` 60 | 61 | That would restrict `S` to any type on the interval between `NonEmpty` and `IntSet` 62 | 63 | ###Covariance 64 | 65 | There's another interaction between subtyping and type parameters to consider - given `NonEmpty <: IntSet`, is `List[NonEmpty] <: List[IntSet]`? 66 | 67 | What if we've wrapped both types in a list? Should a list of `NonEmpty` also be a subtype of list of `IntSet` 68 | 69 | Intuitively, this makes sense - a list of non-empty sets is a special case of a list of arbitrary sets. From a domain modeling perspective, a list of non-empty should indeed be a subtype of a list of `IntSet`s. 70 | 71 | We call types for which this relationship holds *covariant* because their subtyping relationship varies exactly like the type parameter. So, `List` is a covariant type. Does covariance make sense for all types, not just for `List`? 
72 | 73 | ###Arrays 74 | 75 | For perspective, let's look at the concept of arrays in Java (also C#) 76 | 77 | Just a reminder, in those languages an array of elements of type `T` is written `T[]`; in scala we use parameterized type syntax to refer to the same type: `Array[T]` 78 | 79 | Arrays in Java are covariant, so one would have: `NonEmpty[] <: IntSet[]` 80 | 81 | But covariant array typing causes problems. Think about this: 82 | 83 | ```java 84 | NonEmpty[] a = new NonEmpty[]{ new NonEmpty(1, Empty, Empty) } 85 | IntSet[] b = a 86 | b[0] = Empty 87 | NonEmpty s = a[0] 88 | ``` 89 | 90 | OH GOD. It looks like we just assigend an `Empty` set to a variable of type `NonEmpty`. If types are supposed to prevent something it's clearly this. 91 | 92 | Our third line would give us a runtime exception, an `ArrayStoreException`; this protects the assignment of `Empty` into this array. To make up for the problems of covariance in arrays, Java needs to store in every array a type tag that reflects what type the array was created with. When we assign something into the array, the runtime type of the item is checked against the type tag. 93 | 94 | This doesn't seem like a great deal - we've traded a compile time error for a runtime error, and incurred the runtime cost of checking types everytime something is put into an array. One could argue that it was a mistake to make arrays covariant; it created a hole in the type system that had to be patched. 95 | 96 | So when does it make sense for a type to be a subtype of another, and when does that not make sense? 97 | 98 | ###Liskov Substitution Principle 99 | 100 | Good ole' Barbara Liskov has got your back on this one. 101 | 102 | >If `A` <: `B`, then everything one can do with a value of type `B` should also be able to do with a value of type `A` 103 | 104 | Essentially, we should be able to substitute and `A` for a `B` and do all the same things. 
-------------------------------------------------------------------------------- /notes/week 4/004-variance.md: 1 | Eh... I'll circle back to this one. -------------------------------------------------------------------------------- /notes/week 4/005-decomposition.md: 1 | ##Decomposition 2 | 3 | Suppose we want to write a small interpreter for arithmetic expressions - we'll keep it simple and restrict ourselves to numbers and additions. 4 | 5 | Expressions can be represented as a class hierarchy, with a base trait `Expr` and two subclasses, `Number` and `Sum`. If we want to explore a tree of nodes consisting of numbers and sums, we'd like to know what kind of tree is it, and what components it has. To be able to do that, we could use the following implementation 6 | 7 | ```scala 8 | trait Expr { 9 | def isNumber: Boolean 10 | def isSum: Boolean 11 | def numValue: Int 12 | def leftOp: Expr 13 | def rightOp: Expr 14 | } 15 | 16 | class Number(n : Int) extends Expr { 17 | def isNumber: Boolean = true 18 | def isSum: Boolean = false 19 | def numValue: Int = n 20 | def leftOp: Expr = throw new Error("Number.leftOp") 21 | def rightOp: Expr = throw new Error("Number.rightOp") 22 | } 23 | ``` 24 | 25 | Number is a subclass of expression. We have two classification methods, isNumber and isSum, as well as accessor methods numValue, leftOp, and rightOp. For a complete functionality of exploration, when we look at an expression we'd like to know if it's a number or a sum; thus, `isNumber` and `isSum`. If it's a number, we'd like to know the value of the number, hence, `numValue`. If it's a sum, we'd like to know its operands, so we have `leftOp` and `rightOp`. 
26 | 27 | Here's `Sum`: 28 | 29 | ```scala 30 | class Sum(e1: Expr, e2: Expr) extends Expr { 31 | def isNumber: Boolean = false 32 | def isSum: Boolean = true 33 | def numValue: Int = throw new Error("Sum.numValue") 34 | def leftOp: Expr = e1 35 | def rightOp: Expr = e2 36 | } 37 | ``` 38 | 39 | The idea here is that `new Sum(e1, e2) == e1 + e2`. 40 | 41 | ###Evaluation of Expressions 42 | 43 | We can now write an evaluation function as follows; the idea is that the evaluation function should take one of these expression trees and return the number that it represents. 44 | 45 | ```scala 46 | def eval(e: Expr): Int = { 47 | if (e.isNumber) e.numValue 48 | else if (e.isSum) eval(e.leftOp) + eval(e.rightOp) 49 | else throw new Error("Unknown expression " + e) 50 | } 51 | ``` 52 | 53 | So, `eval(new Sum(new Number(1), new Number(2)))` should give us 3. How do we write that? Well, one way is, given an expression, we ask what it is. Is it a number? If so, then we can return the numeric value of that expression. Otherwise, if that expression is a sum, we take both its operands and we evaluate both of them using eval. 54 | 55 | So far so good, but there's a problem - writing all these classification and accessor functions quickly becomes tedious - we've already written 15 methods just to model expressions consisting of sums and numbers. 56 | 57 | Let's say we wanted to add to our expression tree; we want two new expressions - `Prod`, which represents the product of two expressions, and `Var` which represents variables. Variables would take a string that represented their name. 58 | 59 | ```scala 60 | class Prod(e1: Expr, e2: Expr) extends Expr //e1 * e2 61 | class Var(x: String) extends Expr 62 | ``` 63 | 64 | If we wanted to continue with our scheme of classification and accessor methods, we'd need to add methods for those two new classes but also to all the classes we've already defined. Ugh. 
If we continue with this game we find that the number of methods we need to define tends to grow quadratically, which is just dumb. 65 | 66 | ###Non-Solution: Type Tests and Type Casts 67 | 68 | What do we doooo... a 'hacky' solution could use type tests and type casts. Most classes have some form of typechecking or type casting - Scala lets us do this using methods defined in class `Any`: 69 | 70 | ```scala 71 | def isInstanceOf[T]: Boolean // checks whether this object's type conforms to 'T' 72 | def asInstanceOf[T]: T // treats this object as an instance of type 'T'; throws 'ClassCastException' if it isn't 73 | 74 | Scala Java 75 | x.isInstanceOf[T] x instanceof T 76 | x.asInstanceOf[T] (T) x 77 | ``` 78 | 79 | These correspond to Java's type tests and casts. Don't use these in scala - they're for dumbsters. They're a very low level, unsafe operation. There's a better way. 80 | 81 | Here's what eval would look like with type tests and casts: 82 | 83 | ```scala 84 | def eval(e: Expr): Int = 85 | if (e.isInstanceOf[Number]) 86 | e.asInstanceOf[Number].numValue 87 | else if (e.isInstanceOf[Sum]) 88 | eval(e.asInstanceOf[Sum].leftOp) + 89 | eval(e.asInstanceOf[Sum].rightOp) 90 | else throw new Error("Unknown expression " + e) 91 | ``` 92 | 93 | Assessment of this solution: bleh. The good part is that we don't need any classification methods - these instanceOf tests fulfill that role now. And since we'll only call access methods after we've cast to an appropriate type, we only need access methods for classes where the value is defined, meaning our base trait `Expr` could be empty, and `Num` only needs `numVal` and `Sum` only needs `leftOp` and `rightOp`. 94 | 95 | But typetesting and casting is unsafe - we don't necessarily know at runtime if the cast will succeed. We've guarded every cast with a type test, so we can assure statically that all these casts will succeed, but in general that's not assured. 
we just write it as a method of `Expr` itself.
It might not be the best solution if you have many new methods you want to introduce, because you have to touch all subclasses to do it. In the next lecture we'll look at some techniques to address these problems... -------------------------------------------------------------------------------- /notes/week 4/006-pattern-matching.md: -------------------------------------------------------------------------------- 1 | ##Pattern-Matching 2 | 3 | ###Reminder: Decomposition 4 | 5 | Last time we were trying to find a general and convenient way to access objects in a extensible class hierarchy. We were using arithmetic expressions as our motivating example. 6 | 7 | ![Martin's handwriting is illegible](http://i.imgur.com/A7YUfhs.png) 8 | 9 | We tried lotsa stuff - classification and access methods, which led to a quadratic explosion, type tests and casts which were unsafe and low level, and finally object oriented decomposition which doesn't always work and led to us needing to touch all classes to add a new method. 10 | 11 | ###Solution 2: Functional Decomposition with Pattern Matching 12 | 13 | An important observation here is that the sole purpose of the test and accessor functions was to *reverse* the construction process; ie, when we construct a new node with `new Sum(e1, e2)`, we picked a particular class of node, `Sum`, and we picked the two arguments. 14 | 15 | The purpose of decomposition is to recover what kind of constructor we used for the node, where it was a `Sum` or a `Number` say, and what the arguments were. 16 | 17 | That kind of situation is very fundamental, and so common that many functional languages automate it. The technical term for this is *pattern matching*. 
The first pattern says if it's a number with a given value n, return that value.
the selector `e` with the patterns `p1 ... pn` in the order in which they're written. The whole match expression is rewritten
eval(Number(1)) + eval(Number(2))
case Sum(e1, e2) => e1.eval + e2.eval
The type of a list with elements of type `T` is written
it's interpreted as
the list consisting of
case z :: zs => z :: concat(zs, ys)
def msort(xs: List[Int]): List[Int] = {
case y :: ys1 =>
case class Tuple2[+T1, +T2](_1: T1, _2: T2) { 85 | override def toString = "(" + _1 + "," + _2 + ")"
def msort(xs: List[Int]): List[Int] = {
30 | } 31 | ``` 32 | 33 | Then we would call it like: 34 | 35 | ```scala 36 | val nums = List(2, -4, 5, 7, 1) 37 | msort(nums)((x, y) => x < y) 38 | 39 | val fruits = List("apple", "pineapple", "orange", "banana") 40 | msort(fruits)((x, y) => x.compareTo(y) < 0) 41 | ``` 42 | 43 | ###Parametrization with Ordered 44 | 45 | There's already a class in the standard library that represents ordering... `scala.math.Ordering[T]` provides ways to compare elements of type `T`. So instead of parameterizing with the `lt` operation directly, we could parameterize with `Ordering` instead: 46 | 47 | ```scala 48 | def msort[T](xs: List[T])(ord: Ordering) = 49 | def merge(xs: List[T], ys: List[T]) = 50 | ... if (ord.lt(x,y)) ... 51 | ... merge(msort(fst)(ord), msort(snd)(ord)) 52 | ``` 53 | 54 | now we can use the predefined orderings in our calls: 55 | 56 | ```scala 57 | msort(nums)(Ordering.Int) 58 | ``` 59 | 60 | There's just one problem - this sucks. Passing around these `lt` or `ord` values is rather cumbersome - it would be nice if we could just synthesize the right comparison operation directly just given the type `T`. We can make it at least appear that way by not passing `Ord` explicitly and making it an *implicit* parameter.... 61 | 62 | 63 | ```scala 64 | def msort[T](xs: List[T])(implicit ord: Ordering) = 65 | def merge(xs: List[T], ys: List[T]) = 66 | ... if (ord.lt(x,y)) ... 67 | ... merge(msort(fst), msort(snd)) 68 | 69 | msort(nums) 70 | msort(fruits) 71 | ``` 72 | 73 | HOW DID THAT HAPPEN 74 | 75 | When we write an implicit parameter, and we don't write an actual argument that matches that parameter, the compiler will figure out the right implicit to pass based on the demanded type. 
has a type compatible with `T`
def map[U](f: T => U): List[U] = this match {
op xn` 18 | 19 | Here's a pretty drawing martin did for us: 20 | 21 | 22 | Here's a change from jekyl, dawg. 23 | 24 | 25 | ![img](http://i.imgur.com/sh4mMtU.png) 26 | 27 | Using `reduceLeft`, we can simplify: 28 | 29 | ```scala 30 | def sum(xs: List[Int]) = (0 :: xs) reduceLeft ((x,y) => x + y) 31 | def product(xs: List[Int]) = (1 :: xs) reduceLeft ((x,y) => x * y) 32 | ``` 33 | 34 | ###A shorter way to write functions 35 | 36 | By the way - instead of writing `((x,y) => x * y)`, we can also just write `(_ * _)` 37 | 38 | Every _ represents a new parameter, going from left to right. The parameters are implicitly defined at the next outer pair of parentheses (or the whole expression if there are no enclosing parentheses) 39 | 40 | So, `sum` and `product` can just be written like this: 41 | 42 | ```scala 43 | def sum(xs: List[Int]) = (0 :: xs) reduceLeft (_ + _) 44 | def product(xs: List[Int]) = (1 :: xs) reduceLeft (_ * _) 45 | ``` 46 | 47 | ###FoldLeft 48 | The function `reduceLeft` is defined in terms of a more general function, `foldLeft`. It's like `reduceLeft`, but it takes an *accumulator*, or zero-element, `z`, which is returned when `foldLeft` is called on an empty list. 49 | 50 | `(List(x1, ..., xn) foldLeft z)(op) = (...(z op x1) op ...) op xn` 51 | 52 | Here's `sum` and `product` again! 53 | 54 | ```scala 55 | def sum(xs: List[Int]) = (xs foldLeft 0) (_ + _) 56 | def product(xs: List[Int]) = (xs foldLeft 1) (_ * _) 57 | ``` 58 | 59 | ###Implementations of ReduceLeft and FoldLeft 60 | 61 | ```scala 62 | abstract class List[T] { ... 
63 | def reduceLeft(op: (T, T) => T): T = this match { 64 | case Nil => throw new Error("Nil.reduceLeft") 65 | case x :: xs => (xs foldLeft x)(op) 66 | } 67 | def foldLeft[U](z: U)(op: (U,T) => U): U = this match { 68 | case Nil => z 69 | case x :: xs => (xs foldLeft op(z, x))(op) 70 | } 71 | } 72 | ``` 73 | 74 | ###FoldRight and ReduceRight 75 | 76 | Applications of `foldLeft` and `reduceLeft` unfold on trees that lean to the left, as evidenced by another lovely drawing: 77 | 78 | ![img](http://i.imgur.com/ANbCvsv.png) 79 | 80 | It would make sense to have a dual pair of operations that unfold trees which lean to the right, ie 81 | 82 | ![img](http://i.imgur.com/6ZQ8UNo.png) 83 | ![img](http://i.imgur.com/jpOn1Jz.png) 84 | 85 | Here's the implementation: 86 | ```scala 87 | abstract class List[T] { ... 88 | def reduceRight(op: (T, T) => T): T = this match { 89 | case Nil => throw new Error("Nil.reduceRight") 90 | case x :: Nil => x 91 | case x :: xs => op(x, xs.reduceRight(op)) 92 | } 93 | def foldRight[U](z: U)(op: (T,U) => U): U = this match { 94 | case Nil => z 95 | case x :: xs => op(x, (xs foldRight z)(op)) 96 | } 97 | } 98 | ``` 99 | 100 | ###Difference between FoldLeft and FoldRight 101 | 102 | For operators that are associative and commutative, `foldLeft` and `foldRight` are equivalent - but sometimes only one of the two operators is appropriate. 103 | 104 | As an example, here's another formulation of `concat`: 105 | 106 | ```scala 107 | def concat[T](xs: List[T], ys: List[T]): List[T] = (xs foldRight ys)(_ :: _) 108 | ``` 109 | 110 | Here, it isn't possible to replace `foldRight` by `foldLeft` - why? Well, the types don't match up. We end up trying to do a `foldLeft` over a list `xs`, meaning we apply an operation over each element of that list - the operation cons is not applicable to arbitrary elements, only lists. 
-------------------------------------------------------------------------------- /notes/week 5/006-reasoning-about-concat.md: -------------------------------------------------------------------------------- 1 | Gonna circle back to this one... -------------------------------------------------------------------------------- /notes/week 5/007-a-larger-equational-proof-on-lists.md: -------------------------------------------------------------------------------- 1 | Circlin' back.... -------------------------------------------------------------------------------- /notes/week 6/001-other-collections.md: -------------------------------------------------------------------------------- 1 | ##Other Sequences 2 | 3 | We have seen that lists are *linear* - access to the first element is much faster than access to the middel or end of a list. 4 | 5 | Scala also has an alternative sequence implementation, `Vector`, which has more evenly balanced access patterns than `List`. Vectors are essentially very very shallow trees. 6 | 7 | A vector of up to 32 elements is just an array, where the elements are stored in sequence. If a vector becomes larger than 32 elements, its representation changes - we would then have a vector of 32 pointers to arrays of 32 elements. Once that is exhausted (ie, we have 32 * 32 elements), the representation changes again, and becomes a vector of pointers to pointers to arrays of 32 elements, everything becomes one level deeper. You get the idea. 8 | 9 | ![img](http://i.imgur.com/cwjsc4w.png) 10 | 11 | How much time would it take to retreive an element at some index in a vector? For lists, it very much depends on what the index is; fast for 0, linearly slow for indices towards the end of the list. 12 | 13 | Vectors are much better behaved; to get an index of a vector of length 32 is a single index access. If the vector has size up to about 1000, it's about 2 accesses. Generally the number of accesses are equal to the depth of the vector. 
That depth grows very slowly - a depth of 6 gives you a billion elements. 14 | 15 | Another advantage of vectors is that they're fairly good on bulk operations that traverse a sequence; like a `map` that applies a function to every element of a sequence or a `fold` that reduces adjacent elements with an operator. For a vector we can do those types of things in chuncks of 32, which happens to coincide quite closely to the cache line in modern processors. Meaning that all the 32 adjacent elements will be in a single cache line and accesses will be fairly fast. 16 | 17 | For lists on the other hand, you have that recursive structure where each element is in a cons cell with a pointer to the next, and you have no guarantee that those cons cells are anywhere near each other - they might be in different cache lines, different pages, so the locality for list accesses could be much worse than the locality for vector accesses. 18 | 19 | If vectors are so much better, why keep lists at all? Well, it turns out that if your operations fit into the model that you're usually just after taking the head of a sequence (which for lists is a constant time operation and for vectors might mean going down several layers) and then taking the tail to process the rest (which again is constant time for lists and more complicated for vectors), then lists are much better. Basically, if your access patterns have this recursive structure, lists are what you want. 20 | 21 | If however your access patterns are typically bulk operations, `map`, `fold`, `filter`, then a vector would be preferable. 22 | 23 | ###Operations on Vectors 24 | 25 | Vectors are created analogously to lists 26 | 27 | ```scala 28 | val nums = Vector(1, 2, 3, -88) 29 | val people = Vector("Bob", "James", "Peter") 30 | ``` 31 | 32 | They support the same operations as lists, with the exception of ::. Because :: in a list is the primitive thing that builds the list and lets us pattern match against it. 
33 | 34 | Instead of :: Vectors have 35 | 36 | * `x +: xs` : create a new vector with leading element `x` followed by all elements of `xs` 37 | * `xs :+ x` : create a new vector with trailing element `x`, preceded by all elements of `xs` 38 | 39 | Note that `:` always points to where the sequence is. 40 | 41 | What would it take to append an element to a Vector - recall that all scala's collections are immutable, so we have to create a new Vector, we can't touch the existing one. Here's a picture that illustrates it perfectly: 42 | 43 | ![img](http://i.imgur.com/6YOtuzz.png) 44 | 45 | Nailed it. 46 | 47 | Basically, we'll take the last array of our vector, and create a new one that contains the element we're appending. That gives us a new array of 32 elements, which we then have to combine somehow with the original vector. We can't change the pointer from the original to the new array, because that of course would change the old vector. 48 | 49 | So, we create another copy of the root array, that points to our new element, and also points to the other elements our previous copy pointed to. Finally, we have to create another root, which points again to our new copy and the old immediate descendants. And we're done! The new vector is in red, whereas the blue one wasn't touched at all. 50 | 51 | Analyzing the complexity of this, we see we have to create a new 32 element array for every level we did the change - in our case, three of these arrays would've been created. 52 | 53 | ###Collection Hierarchy 54 | 55 | A common base class of `List` and `Vector` is `Seq`, the class of all *sequences*. `Seq` itself is a subclass of `Iterable`. 56 | 57 | ###Arrays and Strings 58 | 59 | Arrays and Strings both support the same operations as `Seq` and can be implicitly converted to sequences where needed. They're not really subclasses of `Seq` since they come from the Java universe. 
60 | 61 | ```scala 62 | val xs = Array(1, 2, 3, 4) 63 | xs map (x => x * 2) 64 | ``` 65 | 66 | ###Ranges 67 | 68 | a range simply represents a sequence of evenly spaced integers. There are three common operators: 69 | 70 | * `to` (inclusive) 71 | * `until` (exclusive) 72 | * `by` (to determine step value) 73 | 74 | ```scala 75 | val r: Range = 1 until 5 // 1, 2, 3, 4 76 | val s: Range = 1 to 5 // 1, 2, 3, 4, 5 77 | 78 | 1 to 10 by 3 // 1, 4, 7, 10 79 | 6 to 1 by -2 // 6, 4, 2 80 | ``` 81 | 82 | Ranges are represented as single objects with three fields: the lower bounds, the upper bounds, and the step value. 83 | 84 | ###More Sequence Operations 85 | 86 | SO MANY! 87 | 88 | ![img](http://i.imgur.com/9EdwTWq.png) 89 | 90 | ###Combinations 91 | 92 | To list all combinations of numbers `x` and `y` where `x` is drawn from `1..M` and `y` is drawn from `1..N` 93 | 94 | ```scala 95 | (1 to M) flatMap (x => (1 to N) map (y => (x, y))) 96 | ``` 97 | 98 | ###Scalar Product 99 | 100 | The scalar product of two vectors is the sum of the product of corresponding elements Xi and Yi of the two vectors. We can take the mathematical definition and map it directly to code 101 | 102 | ```scala 103 | def scalarProduct(xs: Vector[Double], ys: Vector[Double]): Double = 104 | (xs zip ys).map(xy => xy._1 * xy._2).sum 105 | ``` 106 | 107 | An alternative way to write this is with a *pattern matching function value* 108 | 109 | ```scala 110 | def scalarProduct(xs: Vector[Double], ys: Vector[Double]): Double = 111 | (xs zip ys).map { case (x,y) => x * y }.sum 112 | ``` 113 | 114 | Generally the function value `{case p1 => e1 ... case pn => en }` is just equivalent short hand to `x => x match { case p1 => e1 ... 
case pn => en }` -------------------------------------------------------------------------------- /notes/week 6/002-combinatorial-search-and-for-expressions.md: -------------------------------------------------------------------------------- 1 | ##Handling Nested Sequences 2 | 3 | Higher order functions on sequences often replace loops in imperative languages; programs that use many nested loops can often be expressed then using *combinations* of these higher order functions. 4 | 5 | Here's an example: we want to find all pairs of positive integers `i` and `j`, such that `1 <= j < i < n`, and `i + j` is prime. 6 | 7 | For example, if `n = 7`, the pairs we want to find are: 8 | 9 | ![img](http://i.imgur.com/KrSFNE3.png) 10 | 11 | In an imperative programming language, we'd probably use two nested loops, one for `i`, one for `j`, together with a test to see if `i + j` is a prime number, and some kind of buffer to collect the results. 12 | 13 | What's a purely functional way to do this? 14 | 15 | A natural way is to generate datastructures bit by bit until we have generated the structure we need for the final result. 16 | 17 | The first data structure we want to generate is the sequence of all pairs of integers `(i, j)` such that `1 <= j < i < n`. Then, we want to filter the pairs for which `i + j` is prime. 18 | 19 | How do we generate this sequence of pairs of integers? The natural way is probably to generate all the integers `i` between 1 and `n` (excluded), then, for each integer `i`, generate the list of pairs `(i, 1), ..., (i, i-1)` 20 | 21 | This can be achieved by combining `until` and `map`: 22 | 23 | ```scala 24 | (1 until n) map (i => 25 | (1 until i) map (j => (i, j))) 26 | ``` 27 | 28 | If we do this in the worksheet, we get back a Vector[Vector] - why? Well, recall our class hieararchy from last session; `Range` is a subtype of `Seq`. 
29 | 30 | The `Range` that we started with, `(1 until n)`, got transformed by a `map`, which produced a sequence of `Pair`s... sequences of pairs are not elements of `Range`s so we needed some other representation. What we got was something that sits between `Seq` and `Range`, an `IndexedSequence`, essentially a sequence that uses random access. 31 | 32 | And basically, the prototypical default implementation of an `IndexedSequence` is just a `Vector`. The type inferencer decided that it couldn't do what we're looking for with `Ranges`, they can't contain `Pair`s, so it went up the hiearchy and took the next best type. 33 | 34 | That's still not the right thing to do - we want to generate a single collection of pairs, not a collection of vectors. We need to concatenate all the element vectors into one sequence of pairs. 35 | 36 | We can combine all the sub-sequences using `foldRight` with `++`: 37 | 38 | ```scala 39 | (xss foldRight Seq[Int]())(_ ++ _) 40 | ``` 41 | 42 | Or, we could just use the built-in method `flatten`. 43 | 44 | (1 until n) map (i => 45 | (1 until i) map (j => (i, j))).flatten 46 | 47 | But wait - _there's more!!_ 48 | 49 | We can apply a useful law - remember the `flatMap` function from before? Check it: 50 | 51 | `xs flatMap f = (xs map f).flatten` 52 | 53 | Essentially, `flatMap` is exactly the same thing as `map`ping `f`, giving us a collection of collections, and then applying `flatten`. So we can contract the two to just use a `flatMap`: 54 | 55 | ```scala 56 | (1 until n) flatMap (i => 57 | (1 until i) map (j => (i, j))) 58 | ``` 59 | 60 | Now we need to filter our sequence according to the criterion, that the sum of the pair is prime. 61 | 62 | ```scala 63 | def isPrime(n: Int) = (2 until n) forall (n % _ != 0) 64 | (1 until n) flatMap (i => 65 | (1 until i) map (j => (i, j))) filter (pair => 66 | isPrime(pair._1 + pair._2)) 67 | ``` 68 | 69 | This works but.... *ugh*. 
70 | 71 | Is there a simpler way to organize this expression that makes it more understandable? One thing we could try to do is name the intermediate results, so split our large expression into several smaller ones. But it turns out there's a more fundmental way to express problems like this in a higher level notation that's easier to understand! 72 | 73 | ##For-Expressions 74 | 75 | Higher order functions such as `map`, `flatMap`, and `filter` provide us powerful contructs for manipulating lists - but sometimes the level of abstraction required by these functions make our programs difficult to understand. This is where scala's `for` expression comes to the rescue. 76 | 77 | Here's an example: let `persons` be a list of elements of class `Person`: 78 | 79 | ```scala 80 | case class Person(name: String, age: Int) 81 | ``` 82 | 83 | To obtain the names of persons over 20 years old, you can write: 84 | 85 | ```scala 86 | for (p <- persons if p.age > 20) yield p.name 87 | ``` 88 | 89 | This is equivalent to 90 | 91 | ```scala 92 | persons filter (p => p.age > 20) map (p => p.name) 93 | ``` 94 | 95 | For-expressions are similar to for loops in immperative languages, but there's an important difference. A for loop operates with a side-effect, it changes something - a for expression doesn't. A for expression produces a new result - each element of the result is produced by a `yield` expression. 96 | 97 | ###Syntax 98 | 99 | A for-expression is of the form ```for (s) yield e ```, where `s` is a sequence of *generators* and *filters*, and `e` is an expression whose value is returned by an iteration. 100 | 101 | * a generator is of the form `p <- e`, where `p` is a pattern and `e` an expression whose value is a collection. 
The idea is that we would let `p` range over all elements of the colletion `e` 102 | * a filter is of the form `if f` where `f` is a boolean expression; the idea here is that the filter will remove from consideration all the elements of the collection where `f == false` 103 | * the sequence must always start with a generator 104 | * if there are several generators in the sequence, the last generators vary faster than the first. The first one steps through more slowly, and for each element of the first, the second generator will be traversed, and so on. 105 | 106 | We can also write braces instead of parens, so that our sequences of generators and filters can be written on multiple lines without requiring semicolons. 107 | 108 | Here's an example of the original problem: 109 | 110 | ```scala 111 | for { 112 | i <- 1 until n 113 | j <- 1 until i 114 | if isPrime(i + j) 115 | } yield (i, j) 116 | ``` 117 | 118 | And here's a version of `scalarProduct` from last time: 119 | 120 | ```scala 121 | (for((x,y) <- xs zip ys) yield x * y).sum 122 | ``` -------------------------------------------------------------------------------- /notes/week 6/003-combinatorial-search-example.md: -------------------------------------------------------------------------------- 1 | ###The N-Queens Problem 2 | 3 | So far, all the collections we've seen were sequences of some sort or another - there are two other fundamental classes of collection, sets and maps. This week we're going to look at sets. 
4 | 5 | ###Sets 6 | 7 | A set is written like a sequence: 8 | 9 | ```scala 10 | val fruit = Set("apple", "banana", "pear") 11 | val s = (1 to 6).toSet 12 | ``` 13 | 14 | Most operations on sequences are also available on sets 15 | 16 | ```scala 17 | s map (_ + 2) 18 | fruit filter (_ startsWith "app") 19 | ``` 20 | 21 | ###Sets vs Sequences 22 | 23 | The principal differences between sets and sequences: 24 | 25 | * sets are unordered; the elements of a set do not have a predefined order in which they appear in the set 26 | * sets do not have duplicate elements 27 | * the fundamental operation on sets is `contains`; the principal operation you can do with a set is asking if a given element is present within it: `s contains 5 // true` 28 | 29 | ###N-Queens 30 | 31 | The eight queens problem is to place eight queens on a chessboard so that no queen is threatened by another. In other words, there can't be two queens in the same row, column, or diagonal. 32 | 33 | We want to develop a solution for a chessboard of any size, not just 8. 34 | 35 | One way to solve the problem is to place a queen on each row - and once we have placed k - 1 queens, we must place the kth queen in a column where it's not "in check" with any other queen on the board. 36 | 37 | ![img](http://i.imgur.com/Hqn1Iiv.png) 38 | 39 | ###Algorithm 40 | 41 | We can solve this with a recursive algorithm! (It's a unix system... I know this!) 42 | 43 | Suppose we have already generated all the solutions consisting of placing `k - 1` queens on a board of size `n`. Each solution is represented by a list (of length `k-1`) containing the numbers of columns (between 0 and `n-1`) 44 | 45 | ![img](http://i.imgur.com/qfZFo4j.png) 46 | 47 | So, we number rows and columns from 0, and then the solution of our first three queens would be a list, starting with the last queen we placed `List(0,3,1)`; our complete solution would include our last queen. 
`List(2, 0, 3, 1)` 48 | 49 | Of course, in general, there can be more solutions (or none at all). We're dealing here with not single solutions but *sets* of solutions. 50 | 51 | Let's put this in an actual program! 52 | 53 | ```scala 54 | def queens(n: Int): Set[List[Int]] = { 55 | def placeQueens(k: Int): Set[List[Int]] = 56 | if (k == 0) Set(List()) 57 | else 58 | for { 59 | queens <- placeQueens(k - 1) 60 | col <- 0 until n 61 | if isSafe(col, queens) 62 | } yield col :: queens 63 | placeQueens(n) 64 | } 65 | ``` 66 | 67 | Here's a function to place all queens on a chessboard of `n` rows. The input of `queens` would be the number of rows, and the output would be a `Set` of solutions; each solution would be a `List[Int]`. 68 | 69 | We use a recursive algorithm with an auxiliary method, `placeQueens`, which places a number `k` of queens on a board and produces the set of solutions. The initial call is `placeQueens(n)`, which means we want to place all `n` queens. 70 | 71 | Now we've reduced the problem to how to implement `placeQueens`. We deal with the boundary case first - if `k` equals 0, we don't need to place any queens - what do we return? The empty set of solutions? That's actually not quite right... if we're not asked to do anything, then the solution is to not do anything. So we should return an empty `List` as our solution. 72 | 73 | Now, in the case where `k` is greater than 0 we have to do some real work. In general, to place `k` queens, we have to solve the problem of placing `k - 1` queens. We'll let `queens` range over the set of our partial solutions returned by `placeQueens(k - 1)`. 74 | 75 | Next, we have to put our `k` queen into a certain column. We can simply try all the possible columns - `col <- 0 until n`. We can't place the queen in any column we please, we need to make sure it doesn't threaten any other queen. So, we'll put a filter in there, that says that the column for the queen is safe with respect to the previous queens. 
(`isSafe(col, queens))` 76 | 77 | If it is, then we can yield a new solution, which will be our partial solution `col`, augmented by the queen in the new column. So it would be `col :: queens` 78 | 79 | OKAY. There's still one thing left to do - define that method `isSafe`. 80 | 81 | ```scala 82 | def isSafe(col: Int, queens: List[Int]): Boolean = { 83 | val row = queens.length 84 | val queensWithRow = (row - 1 to 0 by -1) zip queens 85 | queensWithRow forall { 86 | case (r, c) => col != c && math.abs(col - c) != row - r 87 | } 88 | } 89 | ``` 90 | 91 | The first thing we want to do is add rows to all the queens we look at here; the row of the queen to be placed will be just `queens.length`, since the other queens are in rows 0 to `n - 1`. 92 | 93 | Next, we want to add a row to each of our previous queens, transforming our list of `Int`s into a list of pairs, of row and column. We've got a set of solutions, which is something like `List(0, 3, 1)`, with the columns of the previous queens. We want to transform that into a solution that adds the rows. The first element was actually the last row to be placed, so we'd get `List((2,0), (1,3), (0,1))`. 94 | 95 | The idea here is to use a `zip` with a range - the range that we want to apply here is the range that goes from row - 1 to 0, by -1 steps. We zip that sequence with the list of our queens, and we call that `withRow`. So now we've got the partial solution of queens, represented with rows. 96 | 97 | Now what we can do is simply check whether the ween at `row` and `column` is in check with any of our `queensWithRow`. `forall` of these `queensWithRow`, it must be that the new queen is not in check.... 98 | 99 | So we take the row and the column out of the pair with a `case` statement, and now comes our check. What do we need to check? First, we need to make sure that the current column is not the same as any of the previous queen's columns. 
`col != c` 100 | 101 | We also need to make sure that the queen is not in check over any of the diagonals. Meaning, the absolute difference between the two columns (`math.abs(col - c)`) must not be the same as the absolute difference between the two rows (`row - r`). 102 | 103 | If that predicate is true, then we know that the queen is not in check over any of the diagonals with the queen in `(r, c)`! 104 | -------------------------------------------------------------------------------- /notes/week 6/004-queries-with-for.md: -------------------------------------------------------------------------------- 1 | ##Queries With For 2 | 3 | The `for` notation is essentially equivalent to the common operation of query languages for databases.... 4 | 5 | ```scala 6 | case class Book(title: String, authors: List[String]) 7 | ``` 8 | 9 | Suppose we've got a database of books, represented as a list of books. 10 | 11 | ![img](http://i.imgur.com/HJL1yNJ.png) 12 | 13 | ###Some Queries 14 | 15 | To find the titles of bboks whose author's name is "Bird": 16 | 17 | 18 | ```scala 19 | for (b <- books; a <- b.authors if a startsWith "Bird") 20 | yield b.title 21 | ``` 22 | 23 | To find all the books which have the word "Program" in the tiel 24 | 25 | ```scala 26 | for (b <- books if b.title indexOf "Program" >= 0) 27 | yield b.title 28 | ``` 29 | 30 | ###Another query 31 | 32 | To find the names of all authors who have written at least two books: 33 | 34 | ```scala 35 | for { 36 | b1 <- books 37 | b2 <- books 38 | if b1 != b2 39 | a1 <- b1.authors 40 | a2 <- b2.authors 41 | if a1 == a2 42 | } yield a1 43 | ``` 44 | 45 | The way to do this is to have two iterators ranging over the database, `b1` and `b2`; we demand that `b1` and `b2` are different. Now we have pairs of different books; we let `a1` and `a2` range over the authors of these pairs. 
If we find a match, ie, an author that appears in the authors lists of both `b1` and `b2`, then we've found an author that's published at least two books. 46 | 47 | If we run this, we end up getting the right results, but twice. Whaaaa? 48 | 49 | The reason is that we have two generators that both range over `books`, so each pair of book will show up twice, once with the arguments swapped. 50 | 51 | IE, we'll get this: 52 | 53 | ![img](http://i.imgur.com/w6WvEmE.png) 54 | 55 | How can we avoid this? Well, an easy way would be to instead of just demanding that the two books are different, we can demand that the title of the first book must be lexicographically smaller than the title of the second book. 56 | 57 | ```scala 58 | for { 59 | b1 <- books 60 | b2 <- books 61 | if b1.title < b2.title 62 | a1 <- b1.authors 63 | a2 <- b2.authors 64 | if a1 == a2 65 | } yield a1 66 | ``` 67 | 68 | But what happens if an author has published three books? The author is printed three times! oh man..... 69 | 70 | Even with this added condition, we have three possible pairs of books 71 | 72 | ![img](http://i.imgur.com/JeI8BAt.png) 73 | 74 | We have three possible pairs out of two possible books out of these three; for each of the three possibilities, the same author will be printed. What can we do???? 75 | 76 | One solution would be to remove duplicate authors from the result list. There's a function for this that works on all sequences, called `distinct`: 77 | 78 | ```scala 79 | { for { 80 | b1 <- books 81 | b2 <- books 82 | if b1.title < b2.title 83 | a1 <- b1.authors 84 | a2 <- b2.authors 85 | if a1 == a2 86 | } yield a1 87 | }.distinct 88 | ``` 89 | 90 | That'd do the trick, but on the other hand, maybe these problems are a sign that we've started off with the wrong data structure. We have a database that's a list of books - in actual databases, the order in which the rows appear shouldn't matter. Databases are much more sets of rows instead of lists rows. 
Sets have the advantage that duplicates are eliminated by design. 91 | 92 | So let's make `books` a set of rows! That gets the job done. -------------------------------------------------------------------------------- /notes/week 6/005-translation-of-for.md: -------------------------------------------------------------------------------- 1 | ##For Expressions and Higher Order Functions 2 | 3 | The syntax of `for` is closely related to the higher-order function `map,`, `flatMap`, and `filter` 4 | 5 | First of all, these functions can all be defined in terms of `for`: 6 | 7 | ```scala 8 | def mapFun[T, U](xs: List[T], f: T => U): List[U] = 9 | for (x <- xs) yield f(x) 10 | 11 | def flatMap[T, U](xs: List[T], f: T => Iterable[U]): List[U] = 12 | for (x <- xs; y <- f(x)) yield y 13 | 14 | def filter[T](xs: List[T, p: T => Boolean]): List[T] = 15 | for (x <- xs if p(x)) yield x 16 | ``` 17 | 18 | In Reality, it goes the other way - the scala compiler expresses for-expressions in terms of `map`, `flatMap`, and a lazy variant of `filter`. 19 | 20 | Here's a simple example. A simple for expression that consists of just one generator, that consists of arbitrary expressions `e1` and `e2`, will be translated to an application of `map`. 21 | 22 | ```scala 23 | for (x <- e1) yield e2 24 | 25 | // Translates to: 26 | 27 | e1.map(x => e2) 28 | ``` 29 | 30 | A for-expression that has a generator followed by a filter, which is in turn followed by further generators or filters, here subsumed by `s` 31 | 32 | ```scala 33 | for (x <- e1 if f; s) yield e2 34 | ``` 35 | 36 | can be rewritten to another for expression, that contains a generator, and the filter has been absorbed into the generator: 37 | 38 | ```scala 39 | for (x <- e1.withFilter(x => f); s) yield e2 40 | ``` 41 | 42 | At first approximation, we can read `withFilter` like `filter`; the generator will be reduced to all those elements that pass the condition `f`. 
43 | 44 | `withFilter` is actually a _lazy_ variant of `filter`, meaning, it doesn't immediately produce a new datastructure of all the filtered elements. That would be _wasteful_. 45 | 46 | Instead, it remembers that any following call to `map` or `flatMap` has to be filtered by the function `f`. 47 | 48 | The third and last form of for-expressions is the one where a leading generator is followed not by a filter but another generator: 49 | 50 | ```scala 51 | for (x <- e1; y <- e2; s) yield e3 52 | ``` 53 | 54 | Again that can be followed by an arbitrary sequence of filters and generators `s`. That for expression will be translated into a call of `flatMap`. 55 | 56 | The idea here is that we take the for expression that takes all the remaining computations (so, we generate a `y` from `e2`, and do some more stuff, and then yield `e3`), that would be a collection-valued operation, because `y <- e2` is a generator. 57 | 58 | So what we need to do is take everything that comes out of this for-expression and `flatMap` it, concatenating it all into the result list. Which is precisely what happens: 59 | 60 | ```scala 61 | e1.flatMap(x => for (y <- e2; s) yield e3) 62 | ``` 63 | 64 | So what happened in the first case is that we translated directly into an application of `map`; in the second and third case, we translated into another for-expression that has one less element, either one fewer filter, or one fewer generator. 65 | 66 | Each of these translation steps can be repeated, yielding simpler and simpler for-expressions until finally we must hit the simplest case that must translate to a map. 
67 | 68 | Take the for expression that computed pairs whose sum is prime: 69 | 70 | ```scala 71 | for { 72 | i <- 1 until n 73 | j <- 1 until i 74 | if isPrime(i + j) 75 | } yield (i, j) 76 | ``` 77 | 78 | Applying the translation scheme to this expression gives us: 79 | 80 | ```scala 81 | (1 until n).flatMap(i => 82 | (1 until i).withFilter(j => isPrime(i+j)) 83 | .map(j => (i, j))) 84 | ``` 85 | 86 | This is almost exactly the expression we came up with first! _WOW_. 87 | 88 | Let's translate a query on our books database, like 89 | 90 | ```scala 91 | for (b <- books; a <- b.authors if a startsWith "Bird") 92 | yield b.title 93 | ``` 94 | 95 | ```scala 96 | books.flatMap(b => 97 | // translated to: 98 | // for (a <- b.authors if a startsWith "Bird") yield b.title) 99 | // translated to: 100 | // for (a <- b.authors withFilter(a => a.startsWith "Bird")) yield b.title 101 | //translated to: 102 | // b.authors withFilter(a => a.startsWith "Bird") map (y => y.title) 103 | ``` 104 | 105 | Interestingly, the translation of for is not limited to just lists or sequences, or even collections; it is based solely on the presence of the methods `map`, `flatMap`, and `withFilter`. 106 | 107 | This lets us use the syntax for our own types as well - we must only define those three functions for these types. 108 | 109 | There are many types for which this is useful; arrays, iterators, databases, XML data, optional values, parsers. 110 | 111 | ###For and Databases 112 | For example, `books` might not be a list, but a database stored on some server. 113 | 114 | As long as the client interface to the database defines the methods `map`, `flatMap`, and `withFilter`, we can use the `for` syntax for querying the database. 115 | 116 | This is the basis for the scala data base connection frameworks, like ScalaQuery and Slick. Similar ideas underly Microsoft's LINQ framework. 
-------------------------------------------------------------------------------- /notes/week 6/006-maps.md: -------------------------------------------------------------------------------- 1 | ##Maps 2 | 3 | Another fundamental collection type is the _map_. 4 | 5 | A map of type `Map[Key, Value]` is a data structure that associates keys of type `Key` with values of type `Value` 6 | 7 | ```scala 8 | val romanNumerals = Map("I" -> 1, "V" -> 5, "X" -> 10) 9 | 10 | val capitalOfCountry = Map("US" -> "Washington", "Switzerland" -> "Bern") 11 | ``` 12 | 13 | ###Maps are Iterables, Maps are Functions 14 | 15 | Class `Map[Key, Value]` extends the collection type `Iterable[(Key, Value)]` 16 | 17 | Therefore, maps support the same collection operations as other iterables. I.e., 18 | 19 | ```scala 20 | val countryOfCapital = capitalOfCountry map { 21 | case (x, y) => (y, x) 22 | } 23 | ``` 24 | 25 | Note that maps extend iterables of key/value _pairs_. 26 | 27 | In fact, the syntax `key -> value` is just an alternative way to write the pair `(key, value)` 28 | 29 | Class `Map[Key, Value]` extends the function type `Key => Value`, so maps can be used everywhere functions can. In particular, maps can be applied to key arguments. `capitalOfCountry("US")` is a well-formed application; looks like a function call which gives us back "Washington." 30 | 31 | ###Querying Map 32 | Applying a map to a non-existing key gives an error. What can we do to query a map without knowing if it contains a given key or not? 33 | 34 | Instead of having a simple function application, we can call a `get` method on the map 35 | 36 | ```scala 37 | capitalOfCountry get "andorra" 38 | ``` 39 | 40 | What we get here is an `Option` value, and it reads `None`, meaning "andorra" is not in the map. 41 | 42 | If we were to go `capitalOfCountry get "US"`, we'd get an option value that reads `Some(Washington)`. What are these things??!!! 
43 | 44 | ###The Option Type 45 | ```scala 46 | trait Option[+A] 47 | case class Some[+A](value: A) extends Option[A] 48 | object None extends Option[Nothing] 49 | ``` 50 | 51 | An `Option` value can be one of two things, so the expression `map get key` either returns: 52 | 53 | * `None` if `map` does not contain the given key 54 | * `Some(x)` if `map` associates the given `key` with the value `x`. 55 | 56 | ###Decomposing Option 57 | Since options are defined as case classes, they can be decomposed using pattern matching: 58 | 59 | ```scala 60 | def showCapital(country: String) = capitalOfCountry.get(country) match { 61 | case Some(capital) => capital 62 | case None => "missing data" 63 | } 64 | ``` 65 | 66 | Options also support quite a few operations of the other collections. In particular, they support `map`, `flatMap`, and `filter`, so we can use them with for-expressions 67 | 68 | ###Sorted and GroupBy 69 | 70 | Two useful operations of SQL queries in addition to for-expressions are `groupBy` and `orderBy` 71 | 72 | `orderBy` on a collection can be expressed with `sortWith` and `sorted`, which is just a "natural" ordering: 73 | 74 | ```scala 75 | val fruit = List("apple", "pear", "orange", "pineapple") 76 | fruit sortWith(_.length < _.length) //List("pear", "apple", "orange", "pineapple") 77 | fruit.sorted //List("apple", "orange", "pear", "pineapple") 78 | ``` 79 | 80 | `groupBy` partitions a collection into a `map` of collections according to a _discriminator function_. 81 | 82 | ```scala 83 | fruit groupBy (_.head) //|> Map(p -> List(pear, pineapple), 84 | // a -> List(apple), o -> List(orange)) 85 | ``` 86 | 87 | `head` here is the first character that appears in each string - so what that gives us is a `map` that associates each head character with a list of all the fruit that have that character as the head. 88 | 89 | ###Map Example 90 | A polynomial can be seen as a map from exponents to coefficients. 
For instance, `x^3 - 2x + 5` can be represented as `Map(0 -> 5, 1 -> -2, 3 -> 1)` 91 | 92 | Based on this observation, let's design a class `Polynom` that represents polynomials as maps! 93 | 94 | ###Default Values 95 | So far, maps were _partial functions_: applying a map to a key value in `map(key)` could lead to an exception, if the key was not stored in the map. What if we could make maps total functions, that would never fail but that would give back a default value if some key wasn't found? 96 | 97 | There's an operation for that! `withDefaultValue` turns a map into a total function: 98 | 99 | ```scala 100 | val cap1 = capitalOfCountry withDefaultValue "<unknown>" 101 | cap1("andorra") 102 | ``` -------------------------------------------------------------------------------- /notes/week 6/007-putting-the-pieces-together.md: -------------------------------------------------------------------------------- 1 | ###Task 2 | 3 | Phone keys have mnemonics assigned to them: 4 | 5 | ![img](http://i.imgur.com/RmBkZjM.png) 6 | 7 | Assume we're given a dictionary `words` as a list of words - we want to design a method `translate` such that `translate(phoneNumber)` produces all phrases of words that can serve as mnemonics for the phone number. -------------------------------------------------------------------------------- /notes/week 7/001-structural-induction-on-trees.md: -------------------------------------------------------------------------------- 1 | Optional - circlin' back... 
-------------------------------------------------------------------------------- /notes/week 7/002-streams.md: -------------------------------------------------------------------------------- 1 | #Streams 2 | 3 | We've seen a number of immutable collections that provide powerful operations for combinatorial search; for instance, if we wanted to find the second prime number between 1000 and 10000: `((1000 to 10000) filter isPrime)(1)` 4 | 5 | This is much shorter than the recursive alternative 6 | 7 | ![img](http://i.imgur.com/FWtrunV.png) 8 | 9 | However, the shorter version has a serious problem - its evaluation is very, very inefficient. 10 | 11 | It constructs *all* prime numbers between 1000 and 10000 in a list, but only ever looks at the first two elements of that list. 12 | 13 | Reducing the upper bound would speed things up - maybe our upper bound 10000 is too high and we should reduce it. But not knowing *a priori* where the prime numbers are, we'd always risk that we miss the second prime number altogether. We're in the uncomfortable position of either having really bad performance because the upper bound is too high, or not finding the prime number at all because the bound is too low. 14 | 15 | ###Delayed Evaluation 16 | However, we can make the short code efficient with a trick - we'll *avoid computing the tail of a sequence until it is needed for the evaluation result (which might be never)*. 17 | 18 | That idea is implemented in a new class, the `Stream`. They're similar to lists, but their tail is evaluated only on demand. 19 | 20 | ###Defining Streams 21 | Streams are defined from a constant `Stream.empty` and a constructor `Stream.cons`. 
22 | 23 | For instance: 24 | 25 | ```scala 26 | val xs = Stream.cons(1, Stream.cons(2, Stream.empty)) 27 | ``` 28 | 29 | They can also be defined by using the `Stream` object as a factory (`Stream(1, 2, 3)`) 30 | 31 | The `toStream` method on a collection will turn the collection into a stream: `(1 to 1000).toStream` 32 | 33 | The result of that call is a `Stream[Int] = Stream(1, ?)` - what does that mean? Well, a stream is essentially a recursive structure like a list, so we have a one in the first node, but the tail is not yet evaluated. 34 | 35 | ![img](http://i.imgur.com/59zFeJS.png) 36 | 37 | The tail will be evaluated when somebody asks for it explicitly. 38 | 39 | ###Stream Ranges 40 | Let's try to write a function that returns `(lo until hi).toStream` directly: 41 | 42 | ```scala 43 | def streamRange(lo: Int, hi: Int): Stream[Int] = 44 | if (lo >= hi) Stream.empty 45 | else Stream.cons(lo, streamRange(lo + 1, hi)) 46 | ``` 47 | 48 | Let's compare that to a function that does the same thing for lists: 49 | 50 | ```scala 51 | def listRange(lo: Int, hi: Int): List[Int] = 52 | if (lo >= hi) Nil 53 | else lo :: listRange(lo + 1, hi) 54 | ``` 55 | 56 | It turns out these two functions are completely isomorphic - they have exactly the same structure, only one returns a stream and one returns a list. But their operational behavior is completely different! 57 | 58 | `listRange(1, 10)` would generate the complete list in one go: 59 | 60 | ![img](http://i.imgur.com/TfJk6ec.png) 61 | 62 | `streamRange(1,10)` would generate one cons cell and then stop - the rest would be a question mark. Instead there's an object that knows how to reconstitute the stream if somebody demands it. 
63 | 64 | ![img](http://i.imgur.com/lvHz3oL.png) 65 | 66 | ###Methods on Streams 67 | `Stream` supports almost all methods on `List` - for instance to find the second prime number between 1000 and 10000: `((1000 to 10000).toStream filter isPrime)(1)` 68 | 69 | ###Stream Cons Operator 70 | The one major exception is `::`. 71 | 72 | `x :: xs` always produces a list, never a stream. 73 | 74 | There is an alternative operator, `#::` which produces a stream. 75 | 76 | ###Implementation of Streams 77 | 78 | ```scala 79 | trait Stream[+A] extends Seq[A] { 80 | def isEmpty: Boolean 81 | def head: A 82 | def tail: Stream[A] 83 | } 84 | ``` 85 | 86 | Here's a concrete implementation: 87 | 88 | ```scala 89 | object Stream { 90 | def cons[T](hd: T, tl: => Stream[T]) = new Stream[T] { 91 | def isEmpty = false 92 | def head = hd 93 | def tail = tl 94 | } 95 | val empty = new Stream[Nothing] { 96 | def isEmpty = true 97 | def head = throw new NoSuchElementException("empty.head") 98 | def tail = throw new NoSuchElementException("empty.tail") 99 | } 100 | } 101 | ``` 102 | 103 | Notice that the `tl` parameter for the `cons` method here is a by-name parameter; this differs from the `List` `cons` class, where it is a normal parameter. 104 | 105 | Because `tl` is a call-by-name parameter, when we first construct the `cons` cell for a stream the tail is not evaluated. It'll be evaluated the first time somebody dereferences the `tl` parameter - in this case, that'll happen when somebody calls the `tail` method. 106 | 107 | ###Other Stream Methods 108 | Other stream methods are implemented analogously to their list counterparts. Here's `filter`: 109 | 110 | ```scala 111 | class Stream[+T] { 112 | ... 
113 | def filter(p: T => Boolean): Stream[T] = 114 | if (isEmpty) this 115 | else if (p(head)) cons(head, tail.filter(p)) 116 | else tail.filter(p) 117 | } 118 | ``` 119 | 120 | Notice that in the case of a `head` element that passes the predicate function `p`, we do a computation of `tail.filter(p)`, but that computation is the second, `tl` parameter of a `cons` construction. That means the evaluation of `filter` down the spine of the stream will be delayed again until somebody wants to find out what the result of taking the tail of the result stream is. --------------------------------------------------------------------------------