├── .gitignore
├── README.md
├── lean
│   ├── love01_definitions_and_lemma_statements_demo.lean
│   ├── love01_definitions_and_lemma_statements_exercise_sheet.lean
│   ├── love01_definitions_and_lemma_statements_exercise_solution.lean
│   ├── love01_definitions_and_lemma_statements_homework_sheet.lean
│   ├── love01_definitions_and_lemma_statements_homework_solution.lean
│   ├── love02_tactical_proofs_demo.lean
│   ├── love02_tactical_proofs_exercise_sheet.lean
│   ├── love02_tactical_proofs_exercise_solution.lean
│   ├── love02_tactical_proofs_homework_sheet.lean
│   ├── love02_tactical_proofs_homework_solution.lean
│   ├── love03_structured_proofs_and_proof_terms_demo.lean
│   ├── love03_structured_proofs_and_proof_terms_exercise_sheet.lean
│   ├── love03_structured_proofs_and_proof_terms_exercise_solution.lean
│   ├── love03_structured_proofs_and_proof_terms_homework_sheet.lean
│   ├── love03_structured_proofs_and_proof_terms_homework_solution.lean
│   ├── love04_functional_programming_demo.lean
│   ├── love04_functional_programming_exercise_sheet.lean
│   ├── love04_functional_programming_exercise_solution.lean
│   ├── love04_functional_programming_homework_sheet.lean
│   ├── love05_inductive_predicates_demo.lean
│   ├── love05_inductive_predicates_exercise_sheet.lean
│   ├── love05_inductive_predicates_exercise_solution.lean
│   ├── love05_inductive_predicates_homework_sheet.lean
│   ├── love05_inductive_predicates_homework_solution.lean
│   ├── love06_monads_demo.lean
│   ├── love06_monads_exercise_sheet.lean
│   ├── love06_monads_exercise_solution.lean
│   ├── love06_monads_homework_sheet.lean
│   ├── love06_monads_homework_solution.lean
│   ├── love07_metaprogramming_demo.lean
│   ├── love07_metaprogramming_exercise_sheet.lean
│   ├── love07_metaprogramming_exercise_solution.lean
│   ├── love07_metaprogramming_homework_sheet.lean
│   ├── love08_operational_semantics_demo.lean
│   ├── love08_operational_semantics_exercise_sheet.lean
│   ├── love08_operational_semantics_exercise_solution.lean
│   ├── love08_operational_semantics_homework_sheet.lean
│   ├── love09_hoare_logic_demo.lean
│   ├── love09_hoare_logic_exercise_sheet.lean
│   ├── love09_hoare_logic_exercise_solution.lean
│   ├── love09_hoare_logic_homework_sheet.lean
│   ├── love09_hoare_logic_homework_solution.lean
│   ├── love10_denotational_semantics_demo.lean
│   ├── love10_denotational_semantics_exercise_sheet.lean
│   ├── love10_denotational_semantics_exercise_solution.lean
│   ├── love10_denotational_semantics_homework_sheet.lean
│   ├── love10_denotational_semantics_homework_solution.lean
│   ├── love11_logical_foundations_of_mathematics_demo.lean
│   ├── love11_logical_foundations_of_mathematics_exercise_sheet.lean
│   ├── love11_logical_foundations_of_mathematics_exercise_solution.lean
│   ├── love12_basic_mathematical_structures_demo.lean
│   ├── love12_basic_mathematical_structures_exercise_sheet.lean
│   ├── love12_basic_mathematical_structures_exercise_solution.lean
│   ├── love13_rational_and_real_numbers_demo.lean
│   ├── love13_rational_and_real_numbers_exercise_sheet.lean
│   ├── love13_rational_and_real_numbers_exercise_solution.lean
│   └── lovelib.lean
├── leanpkg.toml
├── logical_verification_in_lean.pdf
└── md
    ├── love01_definitions_and_lemma_statements.md
    ├── love02_tactical_proofs.md
    ├── love03_structured_proofs_and_proof_terms.md
    ├── love04_functional_programming.md
    ├── love05_inductive_predicates.md
    ├── love06_monads.md
    ├── love07_metaprogramming.md
    ├── love08_operational_semantics.md
    ├── love09_hoare_logic.md
    ├── love10_denotational_semantics.md
    ├── love11_logical_foundations_of_mathematics.md
    ├── love12_basic_mathematical_structures.md
    ├── love12_basic_mathematical_structures_hierarchy.png
    └── 
love13_rational_and_real_numbers.md /.gitignore: -------------------------------------------------------------------------------- 1 | *.olean 2 | /_target 3 | /leanpkg.path 4 | -------------------------------------------------------------------------------- /lean/love01_definitions_and_lemma_statements_demo.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Demo 1: Definitions and Lemma Statements -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Types and Terms -/ 9 | 10 | #check ℕ 11 | #check ℤ 12 | 13 | #check empty 14 | #check unit 15 | #check bool 16 | 17 | #check ℕ → ℤ 18 | #check ℤ → ℕ 19 | #check bool → ℕ → ℤ 20 | #check (bool → ℕ) → ℤ 21 | #check ℕ → (bool → ℕ) → ℤ 22 | 23 | #check λx : ℕ, x 24 | #check λf : ℕ → ℕ, λg : ℕ → ℕ, λh : ℕ → ℕ, λx : ℕ, h (g (f x)) 25 | #check λ(f g h : ℕ → ℕ) (x : ℕ), h (g (f x)) 26 | 27 | constants a b : ℤ 28 | constant f : ℤ → ℤ 29 | constant g : ℤ → ℤ → ℤ 30 | 31 | #check λx : ℤ, g (f (g a x)) (g x b) 32 | #check λx, g (f (g a x)) (g x b) 33 | 34 | #check λx, x 35 | 36 | constant trool : Type 37 | constants ttrue tfalse tmaybe : trool 38 | 39 | 40 | /- Type Definitions -/ 41 | 42 | namespace my_nat 43 | 44 | inductive nat : Type 45 | | zero : nat 46 | | succ : nat → nat 47 | 48 | #check nat 49 | #check nat.zero 50 | #check nat.succ 51 | 52 | end my_nat 53 | 54 | #print nat 55 | #print ℕ 56 | 57 | namespace my_list 58 | 59 | inductive list (α : Type) : Type 60 | | nil : list 61 | | cons : α → list → list 62 | 63 | #check list.nil 64 | #check list.cons 65 | 66 | end my_list 67 | 68 | #print list 69 | 70 | inductive aexp : Type 71 | | num : ℤ → aexp 72 | | var : string → aexp 73 | | add : aexp → aexp → aexp 74 | | sub : aexp → aexp → aexp 75 | | mul : aexp → aexp → aexp 76 | | div : aexp → aexp → aexp 77 | 78 | 79 | /- Function Definitions -/ 80 | 81 | def add : ℕ → ℕ → ℕ 82 | | m nat.zero := m 83 | | m (nat.succ n) := nat.succ (add m n) 84 | 85 | #reduce add 2 7 86 | #eval add 2 7 87 | 88 | def mul : ℕ → ℕ → ℕ 89 | | _ nat.zero := nat.zero 90 | | m (nat.succ n) := add m (mul m n) 91 | 92 | #reduce mul 2 7 93 | 94 | #print mul 95 | #print mul._main 96 | 97 | def power : ℕ → ℕ → ℕ 98 | | _ 0 := 1 99 | | m (nat.succ n) := m * power m n 100 | 101 | #reduce power 2 5 102 | 103 | def power₂ (m : ℕ) : ℕ → ℕ 104 | | 0 := 1 105 | | (nat.succ n) := m * power₂ n 106 | 107 | #reduce power₂ 2 5 108 | 109 | def iter (α : Type) (z : α) (f : α → α) : ℕ → α 110 | | 0 := z 111 | | (nat.succ n) := f (iter n) 112 | 113 | #check iter 114 | 115 | def power₃ (m n : ℕ) : ℕ := 116 | iter ℕ 1 (λl, m * l) n 117 | 118 | #reduce power₃ 2 5 119 | 120 | /- 121 | -- illegal 122 | def evil : ℕ → ℕ 123 | | n := nat.succ (evil n) 124 | -/ 125 | 126 | def append (α : Type) : list α → list α → list α 127 | | list.nil ys := ys 128 | | (list.cons x xs) ys := list.cons x (append xs ys) 129 | 130 | #check append 131 | #reduce append _ [3, 1] [4, 1, 5] 132 | 133 | def append₂ {α : Type} : list α → list α → list α 134 | | list.nil ys := ys 135 | | (list.cons x xs) ys := list.cons x (append₂ xs ys) 136 | 137 | #check append₂ 138 | #reduce append₂ [3, 1] [4, 1, 5] 139 | 140 | #check @append₂ 141 | #reduce @append₂ _ [3, 1] [4, 1, 5] 142 | 143 | def append₃ {α : Type} : list α → list α → list α 144 | | [] ys := ys 145 | | (x :: xs) ys := x :: append₃ xs ys 146 | 147 | def reverse {α : Type} : list α → list α 148 | | [] := [] 149 | | (x :: xs) := reverse xs ++ [x] 150 | 151 | def eval (env : string → ℤ) : aexp → ℤ 152 | | (aexp.num i) 
:= i 153 | | (aexp.var x) := env x 154 | | (aexp.add e₁ e₂) := eval e₁ + eval e₂ 155 | | (aexp.sub e₁ e₂) := eval e₁ - eval e₂ 156 | | (aexp.mul e₁ e₂) := eval e₁ * eval e₂ 157 | | (aexp.div e₁ e₂) := eval e₁ / eval e₂ 158 | 159 | 160 | /- Lemma Statements -/ 161 | 162 | namespace sorry_lemmas 163 | 164 | lemma add_comm (m n : ℕ) : 165 | add m n = add n m := 166 | sorry 167 | 168 | lemma add_assoc (l m n : ℕ) : 169 | add (add l m) n = add l (add m n) := 170 | sorry 171 | 172 | lemma mul_comm (m n : ℕ) : 173 | mul m n = mul n m := 174 | sorry 175 | 176 | lemma mul_assoc (l m n : ℕ) : 177 | mul (mul l m) n = mul l (mul m n) := 178 | sorry 179 | 180 | lemma mul_add (l m n : ℕ) : 181 | mul l (add m n) = add (mul l m) (mul l n) := 182 | sorry 183 | 184 | lemma reverse_reverse {α : Type} (xs : list α) : 185 | reverse (reverse xs) = xs := 186 | sorry 187 | 188 | end sorry_lemmas 189 | 190 | end LoVe 191 | -------------------------------------------------------------------------------- /lean/love01_definitions_and_lemma_statements_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 1: Definitions and Lemma Statements -/ 2 | 3 | /- Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 4 | 5 | import .love01_definitions_and_lemma_statements_demo 6 | 7 | namespace LoVe 8 | 9 | 10 | /- Question 1: Fibonacci Numbers -/ 11 | 12 | /- 1.1. Define the function `fib` that computes the Fibonacci numbers. -/ 13 | 14 | def fib : ℕ → ℕ 15 | := sorry 16 | 17 | /- 1.2. Check that your function works as expected. -/ 18 | 19 | #reduce fib 0 -- expected: 0 20 | #reduce fib 1 -- expected: 1 21 | #reduce fib 2 -- expected: 1 22 | #reduce fib 3 -- expected: 2 23 | #reduce fib 4 -- expected: 3 24 | #reduce fib 5 -- expected: 5 25 | #reduce fib 6 -- expected: 8 26 | #reduce fib 7 -- expected: 13 27 | #reduce fib 8 -- expected: 21 28 | 29 | 30 | /- Question 2: Arithmetic Expressions -/ 31 | 32 | /- Consider the type `aexp` from the lecture. -/ 33 | 34 | #print aexp 35 | #check eval 36 | 37 | /- 2.1. Test that `eval` behaves as expected. Making sure to exercise each 38 | constructor at least once. You can use the following environment in your 39 | tests. What happens if you divide by zero? -/ 40 | 41 | def some_env : string → ℤ 42 | | "x" := 3 43 | | "y" := 17 44 | | _ := 201 45 | 46 | -- invoke `#eval` here 47 | 48 | /- 2.2. The following function simplifies arithmetic expressions involving 49 | addition. It simplifies `0 + e` and `e + 0` to `e`. Complete the definition so 50 | that it also simplifies expressions involving the other three binary 51 | operators. -/ 52 | 53 | def simplify : aexp → aexp 54 | | (aexp.add (aexp.num 0) e₂) := simplify e₂ 55 | | (aexp.add e₁ (aexp.num 0)) := simplify e₁ 56 | -- insert the missing cases here 57 | -- catch-all cases below 58 | | (aexp.num i) := aexp.num i 59 | | (aexp.var x) := aexp.var x 60 | | (aexp.add e₁ e₂) := aexp.add (simplify e₁) (simplify e₂) 61 | | (aexp.sub e₁ e₂) := aexp.sub (simplify e₁) (simplify e₂) 62 | | (aexp.mul e₁ e₂) := aexp.mul (simplify e₁) (simplify e₂) 63 | | (aexp.div e₁ e₂) := aexp.div (simplify e₁) (simplify e₂) 64 | 65 | /- 2.3. State the correctness lemma for `simplify`, namely that the simplified 66 | expression should have the same semantics, with respect to `eval`, as the 67 | original expression. -/ 68 | 69 | -- enter your lemma statement here 70 | 71 | 72 | /- Question 3: λ-Terms -/ 73 | 74 | /- We start by declaring three new opaque types. 
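Opaque means that nothing is assumed about `α`, `β`, and `γ` beyond the fact
that they are types: there are no constructors to pattern-match on and no
lemmas about them, so the definitions below can only be built from
λ-abstraction, application, and the hypotheses at hand. For instance, once the
constants are declared, the following (purely illustrative) check succeeds:

    #check λ(f : α → β) (a : α), f a   -- type: (α → β) → α → β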
-/ 75 | 76 | constants α β γ : Type 77 | 78 | /- 3.1. Complete the following definitions, by replacing the `sorry` markers by 79 | terms of the expected type. 80 | 81 | Hint: You can use `_` as a placeholder while constructing a term. By hovering 82 | over `_`, you will see the current logical context. -/ 83 | 84 | def I : α → α := 85 | λa, a 86 | 87 | def K : α → β → α := 88 | λa b, a 89 | 90 | def C : (α → β → γ) → β → α → γ := 91 | sorry 92 | 93 | def proj_1st : α → α → α := 94 | sorry 95 | 96 | -- please give a different answer than for `proj_1st` 97 | def proj_2nd : α → α → α := 98 | sorry 99 | 100 | def some_nonsense : (α → β → γ) → α → (α → γ) → β → γ := 101 | sorry 102 | 103 | /- 3.2. Show the typing derivation for your definition of `C` above. -/ 104 | 105 | -- write your solution here in a comment or on paper 106 | 107 | end LoVe 108 | -------------------------------------------------------------------------------- /lean/love01_definitions_and_lemma_statements_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 1: Definitions and Lemma Statements -/ 2 | 3 | /- Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 4 | 5 | import .love01_definitions_and_lemma_statements_demo 6 | 7 | namespace LoVe 8 | 9 | 10 | /- Question 1: Fibonacci Numbers -/ 11 | 12 | /- 1.1. Define the function `fib` that computes the Fibonacci numbers. -/ 13 | 14 | def fib : ℕ → ℕ 15 | | 0 := 0 16 | | 1 := 1 17 | | (nat.succ (nat.succ n)) := fib n + fib (nat.succ n) 18 | -- (n + 2) and (n + 1) would also work 19 | 20 | /- 1.2. Check that your function works as expected. -/ 21 | 22 | #reduce fib 0 -- expected: 0 23 | #reduce fib 1 -- expected: 1 24 | #reduce fib 2 -- expected: 1 25 | #reduce fib 3 -- expected: 2 26 | #reduce fib 4 -- expected: 3 27 | #reduce fib 5 -- expected: 5 28 | #reduce fib 6 -- expected: 8 29 | #reduce fib 7 -- expected: 13 30 | #reduce fib 8 -- expected: 21 31 | 32 | 33 | /- Question 2: Arithmetic Expressions -/ 34 | 35 | /- Consider the type `aexp` from the lecture. -/ 36 | 37 | #print aexp 38 | #check eval 39 | 40 | /- 2.1. Test that `eval` behaves as expected. Making sure to exercise each 41 | constructor at least once. You can use the following environment in your 42 | tests. What happens if you divide by zero? -/ 43 | 44 | def some_env : string → ℤ 45 | | "x" := 3 46 | | "y" := 17 47 | | _ := 201 48 | 49 | #eval eval some_env (aexp.add (aexp.var "x") (aexp.var "y")) 50 | #eval eval some_env (aexp.sub (aexp.num 5) (aexp.var "y")) 51 | #eval eval some_env (aexp.mul (aexp.num 11) (aexp.var "z")) 52 | #eval eval some_env (aexp.div (aexp.num 2) (aexp.num 0)) 53 | 54 | /- 2.2. The following function simplifies arithmetic expressions involving 55 | addition. It simplifies `0 + e` and `e + 0` to `e`. Complete the definition so 56 | that it also simplifies expressions involving the other three binary 57 | operators. 
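The solution below also simplifies `e - 0`, `0 * e`, `e * 0`, `1 * e`, `e * 1`,
`0 / e`, `e / 0`, and `e / 1`. Mapping `e / 0` to `0` is consistent with
`eval`, because Lean's integer division returns `0` on division by zero.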
-/ 58 | 59 | def simplify : aexp → aexp 60 | | (aexp.add (aexp.num 0) e₂) := simplify e₂ 61 | | (aexp.add e₁ (aexp.num 0)) := simplify e₁ 62 | | (aexp.sub e₁ (aexp.num 0)) := simplify e₁ 63 | | (aexp.mul (aexp.num 0) e₂) := aexp.num 0 64 | | (aexp.mul e₁ (aexp.num 0)) := aexp.num 0 65 | | (aexp.mul (aexp.num 1) e₂) := simplify e₂ 66 | | (aexp.mul e₁ (aexp.num 1)) := simplify e₁ 67 | | (aexp.div (aexp.num 0) e₂) := aexp.num 0 68 | | (aexp.div e₁ (aexp.num 0)) := aexp.num 0 69 | | (aexp.div e₁ (aexp.num 1)) := simplify e₁ 70 | -- catch-all cases below 71 | | (aexp.num i) := aexp.num i 72 | | (aexp.var x) := aexp.var x 73 | | (aexp.add e₁ e₂) := aexp.add (simplify e₁) (simplify e₂) 74 | | (aexp.sub e₁ e₂) := aexp.sub (simplify e₁) (simplify e₂) 75 | | (aexp.mul e₁ e₂) := aexp.mul (simplify e₁) (simplify e₂) 76 | | (aexp.div e₁ e₂) := aexp.div (simplify e₁) (simplify e₂) 77 | 78 | /- 2.3. State the correctness lemma for `simplify`, namely that the simplified 79 | expression should have the same semantics, with respect to `eval`, as the 80 | original expression. -/ 81 | 82 | lemma simplify_correct (env : string → ℤ) (e : aexp) : 83 | eval env (simplify e) = eval env e := 84 | sorry 85 | 86 | 87 | /- Question 3: λ-Terms -/ 88 | 89 | /- We start by declaring three new opaque types. -/ 90 | 91 | constants α β γ : Type 92 | 93 | /- 3.1. Complete the following definitions, by replacing the `sorry` markers by 94 | terms of the expected type. 95 | 96 | Hint: You can use `_` as a placeholder while constructing a term. By hovering 97 | over `_`, you will see the current logical context. -/ 98 | 99 | def I : α → α := 100 | λa, a 101 | 102 | def K : α → β → α := 103 | λa b, a 104 | 105 | def C : (α → β → γ) → β → α → γ := 106 | λg b a, g a b 107 | 108 | def proj_1st : α → α → α := 109 | λx y, x 110 | 111 | -- please give a different answer than for `proj_1st` 112 | def proj_2nd : α → α → α := 113 | λx y, y 114 | 115 | def some_nonsense : (α → β → γ) → α → (α → γ) → β → γ := 116 | λg a f b, g a b 117 | 118 | /- 3.2. Show the typing derivation for your definition of `C` above. -/ 119 | 120 | /- Let Γ := g : α → β → γ, b : β, a : α. We have 121 | 122 | –––––––––––––––––– Var –––––––––– Var 123 | Γ ⊢ g : α → β → γ Γ ⊢ a : α 124 | –––––––––––––––––––––––––––––––––––– App –––––––––– Var 125 | Γ ⊢ g a : β → γ Γ ⊢ b : β 126 | –––––––––––––––––––––––––––––––––––––––––––––––––––––– App 127 | Γ ⊢ g a b : γ 128 | ––––––––––––––––––––––––––––––––––––––––––– Lam 129 | g : α → β → γ, b : β ⊢ (λa : α, g a b) : γ 130 | –––––––––––––––––––––––––––––––––––––––––––––– Lam 131 | g : α → β → γ ⊢ (λ(b : β) (a : α), g a b) : γ 132 | ––––––––––––––––––––––––––––––––––––––––––––––– Lam 133 | ⊢ (λ(g : α → β → γ) (b : β) (a : α), g a b) : γ 134 | -/ 135 | 136 | end LoVe 137 | -------------------------------------------------------------------------------- /lean/love01_definitions_and_lemma_statements_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 1: Definitions and Lemma Statements -/ 2 | 3 | /- Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 4 | 5 | import .lovelib 6 | 7 | namespace LoVe 8 | 9 | 10 | /- Question 1: Snoc -/ 11 | 12 | /- 1.1. Define the function `snoc` that appends a single element to the end of 13 | a list. -/ 14 | 15 | def snoc {α : Type} : list α → α → list α 16 | := sorry 17 | 18 | /- 1.2. Convince yourself that your definition of `snoc` works by testing it on 19 | a few examples. 
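For instance, once `snoc` is defined, a test could look as follows (the
expected output reflects the intended behaviour of `snoc`, namely appending
the element at the end of the list):

    #eval snoc [1, 2, 3] 4   -- expected: [1, 2, 3, 4]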
-/ 20 | 21 | -- invoke `#reduce` or `#eval` here 22 | 23 | 24 | /- Question 2: Map -/ 25 | 26 | /- 2.1. Define a generic `map` function that applies a function to every element 27 | in a list. -/ 28 | 29 | def map {α : Type} {β : Type} (f : α → β) : list α → list β 30 | := sorry 31 | 32 | /- 2.2. State the so-called functiorial properties of `map` as lemmas. 33 | Schematically: 34 | 35 | map (λx, x) xs = xs 36 | map (λx, g (f x)) xs = map g (map f xs) 37 | 38 | Make sure to state the second law as generally as possible, for arbitrary 39 | types. -/ 40 | 41 | -- enter your lemma statements here 42 | 43 | 44 | /- Question 3: λ-Terms -/ 45 | 46 | /- We start by declaring four new opaque types. -/ 47 | 48 | constants α β γ δ : Type 49 | 50 | /- 3.1. Complete the following definitions, by providing terms with the expected 51 | type. -/ 52 | 53 | def B : (α → β) → (γ → α) → γ → β := 54 | sorry 55 | 56 | def S : (α → β → γ) → (α → β) → α → γ := 57 | sorry 58 | 59 | def more_nonsense : ((α → β) → γ → δ) → γ → β → δ := 60 | sorry 61 | 62 | def even_more_nonsense : (α → β) → (α → γ) → α → β → γ := 63 | sorry 64 | 65 | /- 3.2 (**optional**). Complete the following definition. 66 | 67 | Note: Peirce is pronounced like the English word "purse". -/ 68 | 69 | def weak_peirce : ((((α → β) → α) → α) → β) → β := 70 | sorry 71 | 72 | end LoVe 73 | -------------------------------------------------------------------------------- /lean/love01_definitions_and_lemma_statements_homework_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 1: Definitions and Lemma Statements -/ 2 | 3 | /- Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 4 | 5 | import .lovelib 6 | 7 | namespace LoVe 8 | 9 | 10 | /- Question 3: λ-Terms -/ 11 | 12 | /- We start by declaring four new opaque types. -/ 13 | 14 | constants α β γ δ : Type 15 | 16 | /- 3.2 (**optional**). Complete the following definition. 17 | 18 | Note: Peirce is pronounced like the English word "purse". 
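One way to read the solution below: the only way to obtain a `β` is to apply
`f`, whose argument must turn an arbitrary `g : (α → β) → α` into an `α`;
`g` produces an `α` when applied to a function of type `α → β`; and such a
function can be built from its argument `a : α` by applying `f` a second time,
to the function that ignores its own argument and returns `a`.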
-/ 19 | 20 | def weak_peirce : ((((α → β) → α) → α) → β) → β := 21 | λf, f (λg, g (λa, f (λx, a))) 22 | 23 | end LoVe 24 | -------------------------------------------------------------------------------- /lean/love02_tactical_proofs_demo.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Demo 2: Tactical Proofs -/ 2 | 3 | import .love01_definitions_and_lemma_statements_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Tactic Mode -/ 9 | 10 | lemma fst_of_two_props : 11 | ∀a b : Prop, a → b → a := 12 | begin 13 | intros a b, 14 | intros ha hb, 15 | apply ha 16 | end 17 | 18 | lemma fst_of_two_props₂ (a b : Prop) (ha : a) (hb : b) : 19 | a := 20 | begin 21 | apply ha 22 | end 23 | 24 | 25 | /- Basic Tactics -/ 26 | 27 | lemma prop_comp (a b c : Prop) (hab : a → b) (hbc : b → c) : 28 | a → c := 29 | begin 30 | intro ha, 31 | apply hbc, 32 | apply hab, 33 | exact ha 34 | end 35 | 36 | lemma prop_comp₂ (a b c : Prop) (hab : a → b) (hbc : b → c) : 37 | a → c := 38 | begin 39 | intro, 40 | apply hbc, 41 | apply hab, 42 | assumption 43 | end 44 | 45 | lemma α_example {α β : Type} (f : α → β) : 46 | (λx, f x) = (λy, f y) := 47 | by refl 48 | 49 | lemma β_example {α β : Type} (f : α → β) (a : α) : 50 | (λx, f x) a = f a := 51 | by refl 52 | 53 | def double (n : ℕ) : ℕ := 54 | n + n 55 | 56 | lemma δ_example (m : ℕ) : 57 | double m = m + m := 58 | by refl 59 | 60 | lemma ζ_example : 61 | (let n : ℕ := 2 in n + n) = 4 := 62 | by refl 63 | 64 | lemma η_example {α β : Type} (f : α → β) : 65 | (λx, f x) = f := 66 | by refl 67 | 68 | lemma ι_example {α β : Type} (a : α) (b : β) : 69 | prod.fst (a, b) = a := 70 | by refl 71 | 72 | lemma nat_exists_double_iden : 73 | ∃n : ℕ, double n = n := 74 | begin 75 | use 0, 76 | refl 77 | end 78 | 79 | 80 | /- Proofs about Logical Connectives and Quantifiers -/ 81 | 82 | -- introduction rules 83 | #check true.intro 84 | #check not.intro 85 | #check and.intro 86 | #check or.intro_left 87 | #check or.intro_right 88 | #check iff.intro 89 | #check exists.intro 90 | 91 | -- elimination rules 92 | #check false.elim 93 | #check and.elim_left 94 | #check and.elim_right 95 | #check or.elim 96 | #check iff.elim_left 97 | #check iff.elim_right 98 | #check exists.elim 99 | 100 | -- definition of `¬` and related lemmas 101 | #print not 102 | #check classical.em 103 | #check classical.by_contradiction 104 | 105 | lemma and_swap (a b : Prop) : 106 | a ∧ b → b ∧ a := 107 | begin 108 | intro hab, 109 | apply and.intro, 110 | apply and.elim_right, 111 | exact hab, 112 | apply and.elim_left, 113 | exact hab 114 | end 115 | 116 | lemma and_swap₂ : 117 | ∀a b : Prop, a ∧ b → b ∧ a := 118 | begin 119 | intros a b hab, 120 | apply and.intro, 121 | { exact and.elim_right hab }, 122 | { exact and.elim_left hab } 123 | end 124 | 125 | lemma or_swap (a b : Prop) : 126 | a ∨ b → b ∨ a := 127 | begin 128 | intros hab, 129 | apply or.elim hab, 130 | { intros ha, 131 | exact or.intro_right _ ha }, 132 | { intros hb, 133 | exact or.intro_left _ hb } 134 | end 135 | 136 | lemma modus_ponens (a b : Prop) : 137 | (a → b) → a → b := 138 | begin 139 | intros hab ha, 140 | apply hab, 141 | exact ha 142 | end 143 | 144 | lemma modus_ponens₂ (a b : Prop) (hab : a → b) (hp : a) : 145 | b := 146 | begin 147 | apply hab, 148 | assumption 149 | end 150 | 151 | lemma proof_of_negation (a : Prop) : 152 | a → ¬¬ a := 153 | begin 154 | intro ha, 155 | apply not.intro, 156 | intro hna, 157 | apply hna, 158 | exact ha 159 | end 160 | 161 | lemma proof_of_negation₂ (a : Prop) : 
162 | a → ¬¬ a := 163 | begin 164 | intros ha hna, 165 | apply hna, 166 | exact ha 167 | end 168 | 169 | lemma proof_by_contradiction (a : Prop) : 170 | ¬¬ a → a := 171 | begin 172 | intro hnna, 173 | apply classical.by_contradiction, 174 | exact hnna 175 | end 176 | 177 | lemma nat_exists_double_iden₂ : 178 | ∃n : ℕ, double n = n := 179 | begin 180 | apply exists.intro 0, 181 | refl 182 | end 183 | 184 | 185 | /- Rewriting Tactics -/ 186 | 187 | lemma proof_of_negation₃ (a : Prop) : 188 | a → ¬¬ a := 189 | begin 190 | dunfold not, 191 | intro ha, 192 | apply not.intro, 193 | intro hna, 194 | apply hna, 195 | exact ha 196 | end 197 | 198 | 199 | /- Proofs about Natural Numbers -/ 200 | 201 | lemma add_zero (n : ℕ) : 202 | add 0 n = n := 203 | begin 204 | induction n, 205 | { refl }, 206 | { simp [add, n_ih] } 207 | end 208 | 209 | lemma add_zero₂ (n : ℕ) : 210 | add 0 n = n := 211 | begin 212 | induction n, 213 | case nat.zero { 214 | refl }, 215 | case nat.succ : m ih { 216 | simp [add, ih] } 217 | end 218 | 219 | lemma add_zero₃ (n : ℕ) : 220 | add 0 n = n := 221 | by induction n; simp [add, *] 222 | 223 | lemma add_succ (m n : ℕ) : 224 | add (nat.succ m) n = nat.succ (add m n) := 225 | begin 226 | induction n, 227 | case nat.zero { 228 | refl }, 229 | case nat.succ : m ih { 230 | simp [add, ih] } 231 | end 232 | 233 | lemma add_comm (m n : ℕ) : 234 | add m n = add n m := 235 | begin 236 | induction n, 237 | case nat.zero { 238 | simp [add, add_zero] }, 239 | case nat.succ : m ih { 240 | simp [add, add_succ, ih] } 241 | end 242 | 243 | lemma add_assoc (l m n : ℕ) : 244 | add (add l m) n = add l (add m n) := 245 | begin 246 | induction n, 247 | case nat.zero { 248 | refl }, 249 | case nat.succ : m ih { 250 | simp [add, ih] } 251 | end 252 | 253 | -- type classes (useful for `ac_refl` below) 254 | instance : is_commutative ℕ add := ⟨add_comm⟩ 255 | instance : is_associative ℕ add := ⟨add_assoc⟩ 256 | 257 | lemma mul_add (l m n : ℕ) : 258 | mul l (add m n) = add (mul l m) (mul l n) := 259 | begin 260 | induction n, 261 | case nat.zero { 262 | refl }, 263 | case nat.succ : m ih { 264 | simp [add, mul, ih], 265 | ac_refl } 266 | end 267 | 268 | 269 | /- Management Tactics -/ 270 | 271 | lemma cleanup_example (a b c : Prop) (ha : a) (hb : b) 272 | (hab : a → b) (hbc : b → c) : 273 | c := 274 | begin 275 | revert a b c ha hb hab hbc, 276 | intros x y z hx hy hxy hyz, 277 | clear hx hxy x, 278 | apply hyz, 279 | clear hyz z, 280 | rename hy h, 281 | exact h 282 | end 283 | 284 | end LoVe 285 | -------------------------------------------------------------------------------- /lean/love02_tactical_proofs_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 2: Tactical Proofs -/ 2 | 3 | import .love02_tactical_proofs_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Connectives and Quantifiers -/ 9 | 10 | /- 1.1. Carry out the following proofs using basic tactics. -/ 11 | 12 | lemma I (a : Prop) : 13 | a → a := 14 | sorry 15 | 16 | lemma K (a b : Prop) : 17 | a → b → b := 18 | sorry 19 | 20 | lemma C (a b c : Prop) : 21 | (a → b → c) → b → a → c := 22 | sorry 23 | 24 | lemma proj_1st (a : Prop) : 25 | a → a → a := 26 | sorry 27 | 28 | -- please give a different answer than for `proj_1st` 29 | lemma proj_2nd (a : Prop) : 30 | a → a → a := 31 | sorry 32 | 33 | lemma some_nonsense (a b c : Prop) : 34 | (a → b → c) → a → (a → c) → b → c := 35 | sorry 36 | 37 | /- 1.2. Prove the contraposition rule using basic tactics. 
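Hint: `¬ a` is definitionally `a → false`, so `intro` works on a goal of the
form `¬ a`, and a hypothesis `hnb : ¬ b` can be `apply`ed when the goal is
`false`.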
-/ 38 | 39 | lemma contrapositive (a b : Prop) : 40 | (a → b) → ¬ b → ¬ a := 41 | sorry 42 | 43 | /- 1.3. Prove the distributivity of `∀` over `∧` using basic tactics. -/ 44 | 45 | lemma forall_and {α : Type} (p q : α → Prop) : 46 | (∀x, p x ∧ q x) ↔ (∀x, p x) ∧ (∀x, q x) := 47 | sorry 48 | 49 | 50 | /- Question 2: Natural Numbers -/ 51 | 52 | /- 2.1. Prove the following recursive equations on the first argument of the 53 | `mul` operator defined in lecture 1. -/ 54 | 55 | #check mul 56 | 57 | lemma mul_zero (n : ℕ) : 58 | mul 0 n = 0 := 59 | sorry 60 | 61 | lemma mul_succ (m n : ℕ) : 62 | mul (nat.succ m) n = add (mul m n) n := 63 | sorry 64 | 65 | /- 2.2. Prove commutativity and associativity of multiplication using the 66 | `induction` tactic. Choose the induction variable carefully. -/ 67 | 68 | lemma mul_comm (m n : ℕ) : 69 | mul m n = mul n m := 70 | sorry 71 | 72 | lemma mul_assoc (l m n : ℕ) : 73 | mul (mul l m) n = mul l (mul m n) := 74 | := sorry 75 | 76 | /- 2.3. Prove the symmetric variant of `mul_add` using `rw`. To apply 77 | commutativity at a specific position, instantiate the rule by passing some 78 | arguments (e.g., `mul_comm _ l`). -/ 79 | 80 | lemma add_mul (l m n : ℕ) : mul (add l m) n = add (mul n l) (mul n m) := 81 | sorry 82 | 83 | 84 | /- Question 3 (**optional**): Intuitionistic Logic -/ 85 | 86 | /- Intuitionistic logic is extended to classical logic by assuming a classical 87 | axiom. There are several possibilities for the choice of axiom. In this 88 | question, we are concerned with the logical equivalence of three different 89 | axioms: -/ 90 | 91 | def excluded_middle := ∀a : Prop, a ∨ ¬ a 92 | def peirce := ∀a b : Prop, ((a → b) → a) → a 93 | def double_negation := ∀a : Prop, ¬¬ a → a 94 | 95 | /- For the proofs below, please avoid using lemmas from Lean's `classical` 96 | namespace, because this would defeat the purpose of the exercise. -/ 97 | 98 | /- 3.1 (**optional**). Prove the following implication using tactics. 99 | 100 | Hint: You will need `or.elim` and `false.elim`. -/ 101 | 102 | lemma peirce_of_em : excluded_middle → peirce := 103 | sorry 104 | 105 | /- 3.2 (**optional**). Prove the following implication using tactics. 106 | 107 | Hint: Try instantiating `b` with `false` in Peirce's law. -/ 108 | 109 | lemma dn_of_peirce : peirce → double_negation := 110 | sorry 111 | 112 | /- We leave the missing implication for the homework: -/ 113 | 114 | namespace sorry_lemmas 115 | 116 | lemma em_of_dn : 117 | double_negation → excluded_middle := 118 | sorry 119 | 120 | end sorry_lemmas 121 | 122 | end LoVe 123 | -------------------------------------------------------------------------------- /lean/love02_tactical_proofs_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 2: Tactical Proofs -/ 2 | 3 | import .love02_tactical_proofs_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Connectives and Quantifiers -/ 9 | 10 | /- 1.1. Carry out the following proofs using basic tactics. 
-/ 11 | 12 | lemma I (a : Prop) : 13 | a → a := 14 | begin 15 | intro ha, 16 | exact ha 17 | end 18 | 19 | lemma K (a b : Prop) : 20 | a → b → b := 21 | begin 22 | intros ha hb, 23 | exact hb 24 | end 25 | 26 | lemma C (a b c : Prop) : 27 | (a → b → c) → b → a → c := 28 | begin 29 | intros hg hb ha, 30 | apply hg, 31 | exact ha, 32 | exact hb 33 | end 34 | 35 | lemma proj_1st (a : Prop) : 36 | a → a → a := 37 | begin 38 | intros ha ha', 39 | exact ha 40 | end 41 | 42 | -- please give a different answer than for `proj_1st` 43 | lemma proj_2nd (a : Prop) : 44 | a → a → a := 45 | begin 46 | intros ha ha', 47 | exact ha' 48 | end 49 | 50 | lemma some_nonsense (a b c : Prop) : 51 | (a → b → c) → a → (a → c) → b → c := 52 | begin 53 | intros hg ha hf hb, 54 | apply hg, 55 | exact ha, 56 | exact hb 57 | end 58 | 59 | /- 1.2. Prove the contraposition rule using basic tactics. -/ 60 | 61 | lemma contrapositive (a b : Prop) : 62 | (a → b) → ¬ b → ¬ a := 63 | begin 64 | intros hab hnb ha, 65 | apply hnb, 66 | apply hab, 67 | apply ha 68 | end 69 | 70 | /- 1.3. Prove the distributivity of `∀` over `∧` using basic tactics. -/ 71 | 72 | lemma forall_and {α : Type} (p q : α → Prop) : 73 | (∀x, p x ∧ q x) ↔ (∀x, p x) ∧ (∀x, q x) := 74 | begin 75 | apply iff.intro, 76 | { intro h, 77 | apply and.intro, 78 | { intro x, 79 | apply and.elim_left, 80 | apply h }, 81 | { intro x, 82 | apply and.elim_right, 83 | apply h } }, 84 | { intros h x, 85 | apply and.intro, 86 | { apply and.elim_left h }, 87 | { apply and.elim_right h } } 88 | end 89 | 90 | 91 | /- Question 2: Natural Numbers -/ 92 | 93 | /- 2.1. Prove the following recursive equations on the first argument of the 94 | `mul` operator defined in lecture 1. -/ 95 | 96 | #check mul 97 | 98 | lemma mul_zero (n : ℕ) : 99 | mul 0 n = 0 := 100 | begin 101 | induction n, 102 | { refl }, 103 | { simp [add, mul, n_ih] } 104 | end 105 | 106 | lemma mul_succ (m n : ℕ) : 107 | mul (nat.succ m) n = add (mul m n) n := 108 | begin 109 | induction n, 110 | { refl }, 111 | { simp [add, add_succ, add_assoc, mul, n_ih] } 112 | end 113 | 114 | /- 2.2. Prove commutativity and associativity of multiplication using the 115 | `induction` tactic. Choose the induction variable carefully. -/ 116 | 117 | lemma mul_comm (m n : ℕ) : 118 | mul m n = mul n m := 119 | begin 120 | induction m, 121 | { simp [mul, mul_zero] }, 122 | { simp [mul, mul_succ, m_ih], ac_refl } 123 | end 124 | 125 | lemma mul_assoc (l m n : ℕ) : 126 | mul (mul l m) n = mul l (mul m n) := 127 | begin 128 | induction n, 129 | { refl }, 130 | { simp [mul, mul_add, n_ih] } 131 | end 132 | 133 | /- 2.3. Prove the symmetric variant of `mul_add` using `rw`. To apply 134 | commutativity at a specific position, instantiate the rule by passing some 135 | arguments (e.g., `mul_comm _ l`). -/ 136 | 137 | lemma add_mul (l m n : ℕ) : 138 | mul (add l m) n = add (mul n l) (mul n m) := 139 | begin 140 | rw mul_comm _ n, 141 | rw mul_add 142 | end 143 | 144 | 145 | /- Question 3 (**optional**): Intuitionistic Logic -/ 146 | 147 | /- Intuitionistic logic is extended to classical logic by assuming a classical 148 | axiom. There are several possibilities for the choice of axiom. 
In this 149 | question, we are concerned with the logical equivalence of three different 150 | axioms: -/ 151 | 152 | def excluded_middle := ∀a : Prop, a ∨ ¬ a 153 | def peirce := ∀a b : Prop, ((a → b) → a) → a 154 | def double_negation := ∀a : Prop, ¬¬ a → a 155 | 156 | /- For the proofs below, please avoid using lemmas from Lean's `classical` 157 | namespace, because this would defeat the purpose of the exercise. -/ 158 | 159 | /- 3.1 (**optional**). Prove the following implication using tactics. 160 | 161 | Hint: You will need `or.elim` and `false.elim`. -/ 162 | 163 | lemma peirce_of_em : 164 | excluded_middle → peirce := 165 | begin 166 | simp [excluded_middle, peirce], 167 | dunfold not, 168 | intro hem, 169 | intros a b haba, 170 | apply or.elim (hem a), 171 | { intro, 172 | assumption }, 173 | { intro hna, 174 | apply haba, 175 | intro ha, 176 | apply false.elim, 177 | apply hna, 178 | assumption } 179 | end 180 | 181 | /- 3.2 (**optional**). Prove the following implication using tactics. 182 | 183 | Hint: Try instantiating `b` with `false` in Peirce's law. -/ 184 | 185 | lemma dn_of_peirce : 186 | peirce → double_negation := 187 | begin 188 | simp [peirce, double_negation], 189 | intros hpeirce a hnna, 190 | apply hpeirce a false, 191 | intro hna, 192 | apply false.elim, 193 | apply hnna, 194 | exact hna 195 | end 196 | 197 | /- We leave the missing implication for the homework: -/ 198 | 199 | namespace sorry_lemmas 200 | 201 | lemma em_of_dn : 202 | double_negation → excluded_middle := 203 | sorry 204 | 205 | end sorry_lemmas 206 | 207 | end LoVe 208 | -------------------------------------------------------------------------------- /lean/love02_tactical_proofs_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 2: Tactical Proofs -/ 2 | 3 | import .love02_tactical_proofs_exercise 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Connectives and Quantifiers -/ 9 | 10 | /- 1.1. Complete the following proofs using basic tactics. -/ 11 | 12 | lemma B (a b c : Prop) : 13 | (a → b) → (c → a) → c → b := 14 | sorry 15 | 16 | lemma S (a b c : Prop) : 17 | (a → b → c) → (a → b) → a → c := 18 | sorry 19 | 20 | lemma more_nonsense (a b c d : Prop) : 21 | ((a → b) → c → d) → c → b → d := 22 | sorry 23 | 24 | lemma even_more_nonsense (a b c : Prop) : 25 | (a → b) → (a → c) → a → b → c := 26 | sorry 27 | 28 | /- 1.2. Prove the following lemma. -/ 29 | 30 | lemma weak_peirce (a b : Prop) : 31 | ((((a → b) → a) → a) → b) → b := 32 | sorry 33 | 34 | 35 | /- Question 2 (**optional**): Logical Connectives -/ 36 | 37 | /- 2.1 (**optional**). Prove the following property about double negation. 38 | 39 | Hint: You will need to apply the elimination rule for `false` at a key point in 40 | the proof. -/ 41 | 42 | lemma herman (p : Prop) : ¬¬ (¬¬ p → p) := 43 | sorry 44 | 45 | /- 2.2 (**optional**). Prove the missing link in our chain of classical axiom 46 | implications. 47 | 48 | Hint: You will need to apply the double negation hypothesis for `p ∨ ¬ p`. You 49 | will also need the left and right introduction rules for `or` at some point. -/ 50 | 51 | #check excluded_middle 52 | #check peirce 53 | #check double_negation 54 | 55 | lemma em_of_dn : double_negation → excluded_middle := 56 | sorry 57 | 58 | /- 2.3 (**optional**). We have proved three of the six possible implications 59 | between `excluded_middle`, `peirce`, and `double_negation`. 
State and prove the 60 | three missing implications, exploiting the three theorems we already have. -/ 61 | 62 | #check peirce_of_em 63 | #check dn_of_peirce 64 | #check em_of_dn 65 | 66 | -- enter your solution here 67 | 68 | end LoVe 69 | -------------------------------------------------------------------------------- /lean/love02_tactical_proofs_homework_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 2: Tactical Proofs -/ 2 | 3 | import .love02_tactical_proofs_exercise 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 2 (**optional**): Logical Connectives -/ 9 | 10 | /- 2.1 (**optional**). Prove the following property about double negation. 11 | 12 | Hint: You will need to apply the elimination rule for `false` at a key point in 13 | the proof. -/ 14 | 15 | lemma herman (p : Prop) : ¬¬ (¬¬ p → p) := 16 | begin 17 | intro hnnnpp, 18 | apply hnnnpp, 19 | intro hnnp, 20 | apply false.elim, 21 | apply hnnp, 22 | intro hp, 23 | apply hnnnpp, 24 | intro hnnp, 25 | exact hp 26 | end 27 | 28 | /- 2.2 (**optional**). Prove the missing link in our chain of classical axiom 29 | implications. 30 | 31 | Hint: You will need to apply the double negation hypothesis for `p ∨ ¬ p`. You 32 | will also need the left and right introduction rules for `or` at some point. -/ 33 | 34 | #check excluded_middle 35 | #check peirce 36 | #check double_negation 37 | 38 | lemma em_of_dn : double_negation → excluded_middle := 39 | begin 40 | simp [double_negation, excluded_middle], 41 | intros hdoubleneg p, 42 | apply hdoubleneg, 43 | intro hnponp, 44 | apply hnponp, 45 | apply or.intro_right, 46 | intro hnp, 47 | apply hnponp, 48 | apply or.intro_left, 49 | assumption 50 | end 51 | 52 | /- 2.3 (**optional**). We have proved three of the six possible implications 53 | between `excluded_middle`, `peirce`, and `double_negation`. State and prove the 54 | three missing implications, exploiting the three theorems we already have. 
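Each missing implication below is obtained by chaining two of the implications
already proved; for example, `double_negation → peirce` goes via
`excluded_middle`, by applying `peirce_of_em` and then `em_of_dn`.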
-/ 55 | 56 | #check peirce_of_em 57 | #check dn_of_peirce 58 | #check em_of_dn 59 | 60 | lemma dn_imp_peirce : double_negation → peirce := 61 | begin 62 | intro h, 63 | apply peirce_of_em, 64 | apply em_of_dn, 65 | exact h 66 | end 67 | 68 | lemma peirce_imp_em : peirce → excluded_middle := 69 | begin 70 | intro h, 71 | apply em_of_dn, 72 | apply dn_of_peirce, 73 | exact h 74 | end 75 | 76 | lemma em_imp_dn : excluded_middle → double_negation := 77 | begin 78 | intro h, 79 | apply dn_of_peirce, 80 | apply peirce_of_em, 81 | exact h 82 | end 83 | 84 | end LoVe 85 | -------------------------------------------------------------------------------- /lean/love03_structured_proofs_and_proof_terms_demo.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Demo 3: Structured Proofs and Proof Terms -/ 2 | 3 | import .love01_definitions_and_lemma_statements_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Structured Proofs -/ 9 | 10 | lemma add_comm_zero_left (n : ℕ): 11 | add 0 n = add n 0 := 12 | add_comm 0 n 13 | 14 | lemma add_comm_zero_left₂ (n : ℕ): 15 | add 0 n = add n 0 := 16 | by exact add_comm 0 n 17 | 18 | lemma fst_of_two_props : 19 | ∀a b : Prop, a → b → a := 20 | assume a b ha hb, 21 | show a, from ha 22 | 23 | lemma fst_of_two_props₂ (a b : Prop) (ha : a) (hb : b) : 24 | a := 25 | show a, 26 | begin 27 | exact ha 28 | end 29 | 30 | lemma fst_of_two_props₃ (a b : Prop) (ha : a) (hb : b) : 31 | a := 32 | ha 33 | 34 | lemma prop_comp (a b c : Prop) (hab : a → b) (hbc : b → c) : 35 | a → c := 36 | assume ha, 37 | have hb : b := hab ha, 38 | have hc : c := hbc hb, 39 | show c, from hc 40 | 41 | lemma β_example {α β : Type} (f : α → β) (a : α) : 42 | (λx, f x) a = f a := 43 | rfl 44 | 45 | def double (n : ℕ) : ℕ := 46 | n + n 47 | 48 | lemma nat_exists_double_iden : 49 | ∃n : ℕ, double n = n := 50 | exists.intro 0 51 | (show double 0 = 0, from rfl) 52 | 53 | lemma nat_exists_double_iden₂ : 54 | ∃n : ℕ, double n = n := 55 | exists.intro 0 rfl 56 | 57 | lemma nat_exists_double_iden₃ : 58 | ∃n : ℕ, double n = n := 59 | exists.intro 0 (by refl) 60 | 61 | lemma and_swap (a b : Prop) : 62 | a ∧ b → b ∧ a := 63 | assume hab : a ∧ b, 64 | have ha : a := and.elim_left hab, 65 | have hb : b := and.elim_right hab, 66 | show b ∧ a, from and.intro hb ha 67 | 68 | lemma and_swap₂ (a b : Prop) : 69 | a ∧ b → b ∧ a := 70 | assume hab : a ∧ b, 71 | have ha : a := and.elim_left hab, 72 | have hb : b := and.elim_right hab, 73 | begin 74 | apply and.intro, 75 | { exact hb }, 76 | { exact ha } 77 | end 78 | 79 | lemma or_swap (a b : Prop) : 80 | a ∨ b → b ∨ a := 81 | assume hab : a ∨ b, 82 | show b ∨ a, from or.elim hab 83 | (assume ha, 84 | show b ∨ a, from or.intro_right b ha) 85 | (assume hb, 86 | show b ∨ a, from or.intro_left a hb) 87 | 88 | lemma modus_ponens (a b : Prop) : 89 | (a → b) → a → b := 90 | assume (hab : a → b) (ha : a), 91 | show b, from hab ha 92 | 93 | lemma proof_of_negation (a : Prop) : 94 | a → ¬¬ a := 95 | assume ha hna, 96 | show false, from hna ha 97 | 98 | #check classical.by_contradiction 99 | 100 | lemma proof_by_contradiction (a : Prop) : 101 | ¬¬ a → a := 102 | assume hnna, 103 | show a, from classical.by_contradiction hnna 104 | 105 | lemma exists_or {α : Type} (p q : α → Prop) : 106 | (∃x, p x ∨ q x) ↔ (∃x, p x) ∨ (∃x, q x) := 107 | iff.intro 108 | (assume hxpq, 109 | match hxpq with 110 | | Exists.intro x hpq := 111 | match hpq with 112 | | or.inl hp := or.intro_left _ (exists.intro x hp) 113 | | or.inr hq := or.intro_right _ (exists.intro 
x hq) 114 | end 115 | end) 116 | (assume hxpq, 117 | match hxpq with 118 | | or.inl hxp := 119 | match hxp with 120 | | Exists.intro x hp := exists.intro x (or.intro_left _ hp) 121 | end 122 | | or.inr hxq := 123 | match hxq with 124 | | Exists.intro x hq := exists.intro x (or.intro_right _ hq) 125 | end 126 | end) 127 | 128 | 129 | /- Calculational Proofs -/ 130 | 131 | lemma two_mul_example (m n : ℕ) : 132 | 2 * m + n = m + n + m := 133 | calc 2 * m + n = (m + m) + n : by rw two_mul 134 | ... = m + n + m : by ac_refl 135 | 136 | lemma two_mul_example₂ (m n : ℕ) : 137 | 2 * m + n = m + n + m := 138 | have h₁ : 2 * m + n = (m + m) + n := by rw two_mul, 139 | have h₂ : (m + m) + n = m + n + m := by ac_refl, 140 | show _, from eq.trans h₁ h₂ 141 | 142 | 143 | /- Induction by Pattern Matching -/ 144 | 145 | lemma add_zero : 146 | ∀n : ℕ, add 0 n = n 147 | | 0 := by refl 148 | | (nat.succ m) := by simp [add, add_zero m] 149 | 150 | lemma add_succ : 151 | ∀m n : ℕ, add (nat.succ m) n = nat.succ (add m n) 152 | | m 0 := by refl 153 | | m (nat.succ n) := by simp [add, add_succ m n] 154 | 155 | lemma add_comm : 156 | ∀m n : ℕ, add m n = add n m 157 | | m 0 := by simp [add, add_zero] 158 | | m (nat.succ n) := by simp [add, add_succ, add_comm m n] 159 | 160 | lemma add_comm₂ : 161 | ∀m n : ℕ, add m n = add n m 162 | | m 0 := by simp [add, add_zero] 163 | | m (nat.succ n) := 164 | have ih : _ := add_comm₂ m n, 165 | by simp [add, add_succ, ih] 166 | 167 | lemma add_assoc : 168 | ∀l m n : ℕ, add (add l m) n = add l (add m n) 169 | | l m 0 := by refl 170 | | l m (nat.succ n) := by simp [add, add_assoc l m n] 171 | 172 | -- type classes (useful for `ac_refl` below) 173 | instance : is_commutative ℕ add := ⟨add_comm⟩ 174 | instance : is_associative ℕ add := ⟨add_assoc⟩ 175 | 176 | lemma mul_add (l m : ℕ) : 177 | ∀n : ℕ, mul l (add m n) = add (mul l m) (mul l n) 178 | | 0 := by refl 179 | | (nat.succ l) := by simp [add, mul, mul_add l]; ac_refl 180 | 181 | 182 | /- The Curry–Howard Correspondence -/ 183 | 184 | lemma and_swap₃ (a b : Prop) : 185 | a ∧ b → b ∧ a := 186 | λhab : a ∧ b, and.intro (and.elim_right hab) (and.elim_left hab) 187 | 188 | lemma and_swap₄ (a b : Prop) : 189 | a ∧ b → b ∧ a := 190 | begin 191 | intro hab, 192 | apply and.intro, 193 | apply and.elim_right, 194 | exact hab, 195 | apply and.elim_left, 196 | exact hab 197 | end 198 | 199 | #print and_swap₃ 200 | #print and_swap₄ 201 | 202 | 203 | /- Forward Tactics -/ 204 | 205 | lemma prop_comp₂ (a b c : Prop) (hab : a → b) (hbc : b → c) : 206 | a → c := 207 | begin 208 | intro ha, 209 | have hb : b := hab ha, 210 | have hc : c := hbc hb, 211 | exact hc 212 | end 213 | 214 | end LoVe 215 | -------------------------------------------------------------------------------- /lean/love03_structured_proofs_and_proof_terms_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 3: Structured Proofs and Proof Terms -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Chain of Equalities -/ 9 | 10 | /- 1.1. Write the following proof using `calc`. 11 | 12 | (a + b) * (a + b) 13 | = a * (a + b) + b * (a + b) 14 | = a * a + a * b + b * a + b * b 15 | = a * a + a * b + a * b + b * b 16 | = a * a + 2 * a * b + b * b 17 | 18 | Hint: You might need `rw`, `simp`, `ac_refl`, and the lemmas `mul_add`, 19 | `add_mul`, and `two_mul`. -/ 20 | 21 | lemma binomial_square (a b : ℕ) : 22 | (a + b) * (a + b) = a * a + 2 * a * b + b * b := 23 | sorry 24 | 25 | /- 1.2. 
Prove the same argument again, this time as a structured proof. Try to 26 | reuse as much of the above proof idea as possible. -/ 27 | 28 | lemma binomial_square₂ (a b : ℕ) : 29 | (a + b) * (a + b) = a * a + 2 * a * b + b * b := 30 | sorry 31 | 32 | /- 1.3 (**optional**). Prove the same lemma again, this time using tactics. -/ 33 | 34 | lemma binomial_square₃ (a b : ℕ) : 35 | (a + b) * (a + b) = a * a + 2 * a * b + b * b := 36 | begin 37 | sorry 38 | end 39 | 40 | 41 | /- Question 2: Connectives and Quantifiers -/ 42 | 43 | /- 2.1. Supply structured proofs of the following lemmas. -/ 44 | 45 | lemma I (a : Prop) : 46 | a → a := 47 | sorry 48 | 49 | lemma K (a b : Prop) : 50 | a → b → b := 51 | sorry 52 | 53 | lemma C (a b c : Prop) : 54 | (a → b → c) → b → a → c := 55 | sorry 56 | 57 | lemma proj_1st (a : Prop) : 58 | a → a → a := 59 | sorry 60 | 61 | -- please give a different answer than for `proj_1st` 62 | lemma proj_2nd (a : Prop) : 63 | a → a → a := 64 | sorry 65 | 66 | lemma some_nonsense (a b c : Prop) : 67 | (a → b → c) → a → (a → c) → b → c := 68 | sorry 69 | 70 | /- 2.2. Supply a structured proof of the contraposition rule. -/ 71 | 72 | lemma contrapositive (a b : Prop) : 73 | (a → b) → ¬ b → ¬ a := 74 | sorry 75 | 76 | end LoVe 77 | -------------------------------------------------------------------------------- /lean/love03_structured_proofs_and_proof_terms_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 3: Structured Proofs and Proof Terms -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Chain of Equalities -/ 9 | 10 | /- 1.1. Write the following proof using `calc`. 11 | 12 | (a + b) * (a + b) 13 | = a * (a + b) + b * (a + b) 14 | = a * a + a * b + b * a + b * b 15 | = a * a + a * b + a * b + b * b 16 | = a * a + 2 * a * b + b * b 17 | 18 | Hint: You might need `rw`, `simp`, `ac_refl`, and the lemmas `mul_add`, 19 | `add_mul`, and `two_mul`. -/ 20 | 21 | lemma binomial_square (a b : ℕ) : 22 | (a + b) * (a + b) = a * a + 2 * a * b + b * b := 23 | calc (a + b) * (a + b) = a * (a + b) + b * (a + b) : 24 | by simp [add_mul] 25 | ... = a * a + a * b + b * a + b * b : 26 | by simp [mul_add] 27 | ... = a * a + a * b + a * b + b * b : 28 | by ac_refl 29 | ... = a * a + 2 * a * b + b * b : 30 | by simp [two_mul, add_mul]; ac_refl 31 | 32 | /- 1.2. Prove the same argument again, this time as a structured proof. Try to 33 | reuse as much of the above proof idea as possible. -/ 34 | 35 | lemma binomial_square₂ (a b : ℕ) : 36 | (a + b) * (a + b) = a * a + 2 * a * b + b * b := 37 | have h1 : (a + b) * (a + b) = a * (a + b) + b * (a + b) := 38 | by simp [add_mul], 39 | have h2 : a * (a + b) + b * (a + b) = a * a + a * b + b * a + b * b := 40 | by simp [mul_add], 41 | have h3 : a * a + a * b + b * a + b * b = a * a + a * b + a * b + b * b := 42 | by ac_refl, 43 | have h4 : a * a + a * b + a * b + b * b = a * a + 2 * a * b + b * b := 44 | by simp [two_mul, add_mul]; ac_refl, 45 | show _, 46 | by rw [h1, h2, h3, h4] 47 | 48 | /- 1.3 (**optional**). Prove the same lemma again, this time using tactics. -/ 49 | 50 | lemma binomial_square₃ (a b : ℕ) : 51 | (a + b) * (a + b) = a * a + 2 * a * b + b * b := 52 | begin 53 | simp [add_mul, mul_add, two_mul], 54 | ac_refl 55 | end 56 | 57 | 58 | /- Question 2: Connectives and Quantifiers -/ 59 | 60 | /- 2.1. Supply structured proofs of the following lemmas. 
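In the structured proofs below, `assume` plays the role of the `intro` tactic,
`show …, from …` plays the role of `exact`, and `have` introduces an
intermediate fact, as in the lecture demo.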
-/ 61 | 62 | lemma I (a : Prop) : 63 | a → a := 64 | assume ha, 65 | show a, from ha 66 | 67 | lemma K (a b : Prop) : 68 | a → b → b := 69 | assume ha hb, 70 | show b, from hb 71 | 72 | lemma C (a b c : Prop) : 73 | (a → b → c) → b → a → c := 74 | assume hg hb ha, 75 | show c, from hg ha hb 76 | 77 | lemma proj_1st (a : Prop) : 78 | a → a → a := 79 | assume ha ha', 80 | show a, from ha 81 | 82 | -- please give a different answer than for `proj_1st` 83 | lemma proj_2nd (a : Prop) : 84 | a → a → a := 85 | assume ha ha', 86 | show a, from ha' 87 | 88 | lemma some_nonsense (a b c : Prop) : 89 | (a → b → c) → a → (a → c) → b → c := 90 | assume hg ha hf hb, 91 | have hc : c := hf ha, 92 | show c, from hc 93 | 94 | /- 2.2. Supply a structured proof of the contraposition rule. -/ 95 | 96 | lemma contrapositive (a b : Prop) : 97 | (a → b) → ¬ b → ¬ a := 98 | assume hab hnb ha, 99 | have hb : b := hab ha, 100 | show false, from hnb hb 101 | 102 | end LoVe 103 | -------------------------------------------------------------------------------- /lean/love03_structured_proofs_and_proof_terms_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 3: Structured Proofs and Proof Terms -/ 2 | 3 | import .love02_tactical_proofs_exercise_sheet 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Logical Connectives and Quantifiers -/ 9 | 10 | /- 1.1. Supply a structured proof of the distributivity of `∀` over `∧`. -/ 11 | 12 | #check forall_and 13 | 14 | lemma forall_and₂ {α : Type} (p q : α → Prop) : 15 | (∀x, p x ∧ q x) ↔ (∀x, p x) ∧ (∀x, q x) := 16 | sorry 17 | 18 | /- 1.2. We have proved or stated three of the six possible implications between 19 | `excluded_middle`, `peirce`, and `double_negation`. Prove the three missing 20 | implications using proof terms, exploiting the three theorems we already 21 | have. -/ 22 | 23 | #check peirce_of_em 24 | #check dn_of_peirce 25 | #check sorry_lemmas.em_of_dn 26 | 27 | lemma peirce_of_dn : 28 | double_negation → peirce := 29 | sorry 30 | 31 | lemma em_of_peirce : 32 | peirce → excluded_middle := 33 | sorry 34 | 35 | lemma dn_of_em : 36 | excluded_middle → double_negation := 37 | sorry 38 | 39 | 40 | /- Question 2: Logic Puzzles -/ 41 | 42 | /- 2.1. Prove the following lemma using tactics. -/ 43 | 44 | lemma weak_peirce : 45 | ∀a b : Prop, ((((a → b) → a) → a) → b) → b := 46 | sorry 47 | 48 | /- 2.2 (**optional**). Prove the same lemma again, this time by providing a 49 | proof term. 50 | 51 | Hint: There is an easy way. -/ 52 | 53 | lemma weak_peirce₂ : 54 | ∀a b : Prop, ((((a → b) → a) → a) → b) → b := 55 | sorry 56 | 57 | /- 2.3 (**optional**). Prove the same lemma again, this time by providing a 58 | structured proof. -/ 59 | 60 | lemma weak_peirce₃ : 61 | ∀a b : Prop, ((((a → b) → a) → a) → b) → b := 62 | sorry 63 | 64 | end LoVe 65 | -------------------------------------------------------------------------------- /lean/love03_structured_proofs_and_proof_terms_homework_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 3: Structured Proofs and Proof Terms -/ 2 | 3 | import .love02_tactical_proofs_exercise_sheet 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 2: Logic Puzzles -/ 9 | 10 | /- 2.2 (**optional**). Prove the same lemma again, this time by providing a 11 | proof term. 12 | 13 | Hint: There is an easy way. 
-/ 14 | 15 | lemma weak_peirce₂ : 16 | ∀a b : Prop, ((((a → b) → a) → a) → b) → b := 17 | λ(a b : Prop) (habaab : (((a → b) → a) → a) → b), 18 | habaab (λ(habaa : (a → b) → a), 19 | habaa (λ(ha : a), habaab (λ(haba : (a → b) → a), ha))) 20 | 21 | /- The easy way is `#print weak_peirce`. There is an even easier way: to use 22 | `weak_peirce` as the proof of `weak_peirce₂`. -/ 23 | 24 | /- 2.3 (**optional**). Prove the same lemma again, this time by providing a 25 | structured proof. -/ 26 | 27 | lemma weak_peirce₃ : 28 | ∀a b : Prop, ((((a → b) → a) → a) → b) → b := 29 | assume (a b : Prop) (habaab : (((a → b) → a) → a) → b), 30 | show b, from habaab 31 | (assume (habaa : (a → b) → a), 32 | show a, from habaa 33 | (assume (ha : a), 34 | show b, from habaab 35 | (assume (haba : (a → b) → a), 36 | show a, from ha))) 37 | 38 | end LoVe 39 | -------------------------------------------------------------------------------- /lean/love04_functional_programming_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 4: Functional Programming -/ 2 | 3 | import .love04_functional_programming_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Reverse of a List -/ 9 | 10 | /- We define a new accumulator-based version of `reverse`. The first argument 11 | serves as the accumulator. This definition is _tail-recursive_, meaning that 12 | compilers and interpreters can easily optimize the recursion away, resulting in 13 | more efficient code. -/ 14 | 15 | def areverse {α : Type} : list α → list α → list α 16 | | ys [] := ys 17 | | ys (x :: xs) := areverse (x :: ys) xs 18 | 19 | /- 1.1. Our intention is that `areverse [] xs` should be equal to `reverse xs`. 20 | But if we start an induction, we quickly see that the induction hypothesis is 21 | not strong enough. Start by proving the following generalization (using pattern 22 | matching or the `induction` tactic): -/ 23 | 24 | lemma areverse_eq_reverse_append {α : Type} : 25 | ∀ys xs : list α, areverse ys xs = reverse xs ++ ys 26 | := sorry 27 | 28 | /- 1.2. Derive the desired equation. -/ 29 | 30 | lemma areverse_eq_reverse {α : Type} (xs : list α) : 31 | areverse [] xs = reverse xs := 32 | sorry 33 | 34 | /- 1.3. Prove the following property. Hint: A one-line inductionless proof is 35 | possible. -/ 36 | 37 | lemma areverse_areverse {α : Type} (xs : list α) : 38 | areverse [] (areverse [] xs) = xs := 39 | sorry 40 | 41 | 42 | /- Question 2: Drop and Take -/ 43 | 44 | /- The `drop` function removes the first `n` elements from the front of a 45 | list. -/ 46 | 47 | def drop {α : Type} : ℕ → list α → list α 48 | | 0 xs := xs 49 | | (_ + 1) [] := [] 50 | | (m + 1) (x :: xs) := drop m xs 51 | 52 | /- Its relative `take` returns a list consisting of the the first `n` elements 53 | at the front of a list. -/ 54 | 55 | /- 2.1. Define `take`. -/ 56 | 57 | /- To avoid unpleasant surprises in the proofs, we recommend that you follow the 58 | same recursion pattern as for `drop` above. -/ 59 | 60 | def take {α : Type} : ℕ → list α → list α 61 | := sorry 62 | 63 | #reduce take 0 [3, 7, 11] -- expected: [] 64 | #reduce take 1 [3, 7, 11] -- expected: [3] 65 | #reduce take 2 [3, 7, 11] -- expected: [3, 7] 66 | #reduce take 3 [3, 7, 11] -- expected: [3, 7, 11] 67 | #reduce take 4 [3, 7, 11] -- expected: [3, 7, 11] 68 | 69 | -- when `#reduce` fails for some obscure reason, try `#eval`: 70 | #eval take 2 ["a", "b", "c"] -- expected: ["a", "b"] 71 | 72 | /- 2.2. Prove the following lemmas. 
Notice that they are registered as 73 | simplification rules thanks to the `@[simp]` attribute. -/ 74 | 75 | @[simp] lemma drop_nil {α : Type} : 76 | ∀n : ℕ, drop n ([] : list α) = [] 77 | := sorry 78 | 79 | @[simp] lemma take_nil {α : Type} : 80 | ∀n : ℕ, take n ([] : list α) = [] 81 | := sorry 82 | 83 | /- 2.3. Follow the recursion pattern of `drop` and `take` to prove the following 84 | lemmas. In other words, for each lemma, there should be three cases, and the 85 | third case will need to invoke the induction hypothesis. 86 | 87 | The first case is shown for `drop_drop`. Beware of the fact that there are three 88 | variables in the `drop_drop` lemma (but only two arguments to `drop`). 89 | 90 | Hint: The `refl` tactic might be useful in the third case of `drop_drop`. -/ 91 | 92 | lemma drop_drop {α : Type} : 93 | ∀(m n : ℕ) (xs : list α), drop n (drop m xs) = drop (n + m) xs 94 | | 0 n xs := by refl 95 | -- supply the two missing cases here 96 | 97 | lemma take_take {α : Type} : 98 | ∀(m : ℕ) (xs : list α), take m (take m xs) = take m xs 99 | := sorry 100 | 101 | lemma take_drop {α : Type} : 102 | ∀(n : ℕ) (xs : list α), take n xs ++ drop n xs = xs 103 | := sorry 104 | 105 | 106 | /- Question 3: λ-Terms -/ 107 | 108 | /- 3.1. Define an inductive type corresponding to the untyled λ-terms, as given 109 | by the following context-free grammar: 110 | 111 | ::= 'var' 112 | | 'abs' 113 | | 'app' -/ 114 | 115 | -- enter your definition here 116 | 117 | /- 3.2. Register a textual representation of the type `lam`. Make sure to supply 118 | enough parentheses to guarantee that the output is unambiguous. -/ 119 | 120 | def lam.repr : lam → string 121 | -- enter your answer here 122 | 123 | instance : has_repr lam := 124 | ⟨lam.repr⟩ 125 | 126 | 127 | /- Question 4 (**optional**): Concatenation -/ 128 | 129 | /- Consider the following Lean definition of 2–3 trees as an inductive type: -/ 130 | 131 | inductive tttree (α : Type) : Type 132 | | empty {} : tttree 133 | | bin : α → tttree → tttree → tttree 134 | | ter : α → tttree → tttree → tttree → tttree 135 | 136 | export tttree (empty bin ter) 137 | 138 | /- 4.1 (**optional**). Complete the following Lean definition. The `map_tree` 139 | function should apply its argument `f` to all values of type α stored in the 140 | tree and otherwise preserve the tree's structure. -/ 141 | 142 | -- enter your definition here 143 | 144 | /- 4.2 (**optional**). Prove the following lemma about your definition of 145 | `map_tree`. -/ 146 | 147 | lemma map_tttree_id {α : Type} : 148 | ∀t : tttree α, map_tttree (λx : α, x) t = t 149 | := sorry 150 | 151 | /- 4.3 (**optional**). Complete the following Lean definition. The `set_tree` 152 | function should return the set of all values of type α stored in the tree. In 153 | your answer, you may use traditional set notations regardless of whether they 154 | are actually supported by Lean. -/ 155 | 156 | def set_tttree {α : Type} : tttree α → set α 157 | := sorry 158 | 159 | /- A _congruence rule_ is a lemma that can be used to lift an equivalence 160 | relation between terms to the same terms occurring under a common context. 161 | Congruence rules for equality are built into Lean's logic. In the following 162 | example, the equivalence relation is `=`, the terms are `f` and `g`, and the 163 | context is `map_tree … t`: -/ 164 | 165 | lemma map_tttree_congr_weak {α β : Type} (f g : α → β) (f = g) (t : tttree α) : 166 | map_tttree f t = map_tttree g t := 167 | by simp * 168 | 169 | /- 4.4 (**optional**). 
The above rule is not as flexible as it could be, 170 | because it requires `f = g`. As long as `f` and `g` are equal for all values 171 | `x : α` stored in `t`, we have `map_tree f t = map_tree g t`, even if `f` and 172 | `g` disagree on other `α` values. Inspired by this observation, prove the 173 | following stronger congruence rule. -/ 174 | 175 | lemma map_tttree_congr_strong {α β : Type} (f g : α → β) : 176 | ∀t : tttree α, (∀x, x ∈ set_tttree t → f x = g x) → 177 | map_tttree f t = map_tttree g t 178 | := sorry 179 | 180 | end LoVe 181 | -------------------------------------------------------------------------------- /lean/love04_functional_programming_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 4: Functional Programming -/ 2 | 3 | import .love04_functional_programming_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Reverse of a List -/ 9 | 10 | /- Recall the `reverse` operation and the `reverse_append` lemma from the 11 | lecture. -/ 12 | 13 | #check reverse 14 | #check reverse_append 15 | 16 | /- 1.1. Prove the following distributive property, using a calculational proof 17 | for the inductive step. -/ 18 | 19 | lemma reverse_append₂ {α : Type} : 20 | ∀xs ys : list α, reverse (xs ++ ys) = reverse ys ++ reverse xs 21 | := sorry 22 | 23 | /- 1.2. Prove the induction step in the proof below using the calculational 24 | style, following this proof sketch: 25 | 26 | reverse (reverse (x :: xs)) 27 | = { by definition of `reverse` } 28 | reverse (reverse xs ++ [x]) 29 | = { using the lemma `reverse_append` } 30 | reverse [x] ++ reverse (reverse xs) 31 | = { by the induction hypothesis } 32 | reverse [x] ++ xs 33 | = { by definition of `++` and `reverse` } 34 | [x] ++ xs 35 | = { by computation } 36 | x :: xs -/ 37 | 38 | lemma reverse_reverse₂ {α : Type} : 39 | ∀xs : list α, reverse (reverse xs) = xs 40 | | [] := by refl 41 | | (x :: xs) := 42 | sorry 43 | 44 | 45 | /- Question 2: Gauss's Summation Formula -/ 46 | 47 | -- `sum_upto f n = f 0 + f 1 + ⋯ + f n` 48 | def sum_upto (f : ℕ → ℕ) : ℕ → ℕ 49 | | 0 := f 0 50 | | (m + 1) := sum_upto m + f (m + 1) 51 | 52 | /- 2.1. Prove the following lemma, discovered by Carl Friedrich Gauss as a 53 | pupil. 54 | 55 | Hints: The `mul_add` and `add_mul` lemmas and the `ac_refl` tactics might be 56 | useful to reason about multiplication. -/ 57 | 58 | #check mul_add 59 | #check add_mul 60 | 61 | lemma sum_upto_eq : 62 | ∀m : ℕ, 2 * sum_upto (λi, i) m = m * (m + 1) 63 | := sorry 64 | 65 | /- 2.2. Prove the following property of `sum_upto`. 
-/ 66 | 67 | lemma sum_upto_mul (a : ℕ) (f : ℕ → ℕ) : 68 | ∀(n : ℕ), sum_upto (λi, a * f i) n = a * sum_upto f n 69 | := sorry 70 | 71 | end LoVe 72 | -------------------------------------------------------------------------------- /lean/love05_inductive_predicates_demo.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Demo 5: Inductive Predicates -/ 2 | 3 | import .love04_functional_programming_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Introductory Example -/ 9 | 10 | inductive even : ℕ → Prop 11 | | zero : even 0 12 | | add_two : ∀n, even n → even (n + 2) 13 | 14 | 15 | /- Logical Symbols -/ 16 | 17 | #print false 18 | #print true 19 | #print and 20 | #print or 21 | #print Exists 22 | #print eq 23 | 24 | #check nat.le.dest 25 | 26 | lemma nat.le.dest2 : 27 | ∀n m : ℕ, n ≤ m → ∃k, k + n = m := 28 | begin 29 | intros n m h_gt, 30 | cases @nat.le.dest n m h_gt with k nk_eq_m, 31 | use k, 32 | linarith 33 | end 34 | 35 | 36 | /- Example: Full Binary Trees -/ 37 | 38 | #check btree 39 | 40 | inductive is_full {α : Type} : btree α → Prop 41 | | empty : is_full empty 42 | | node (a : α) (l r : btree α) (hl : is_full l) (hr : is_full r) 43 | (empty_iff : l = empty ↔ r = empty) : 44 | is_full (node a l r) 45 | 46 | inductive is_full₂ {α : Type} : btree α → Prop 47 | | empty : is_full₂ empty 48 | | node : ∀(a : α) (l r : btree α), is_full₂ l → is_full₂ r → 49 | (l = empty ↔ r = empty) → 50 | is_full₂ (node a l r) 51 | 52 | lemma is_full_singleton {α : Type} (a : α) : 53 | is_full (node a empty empty) := 54 | begin 55 | apply is_full.node, 56 | repeat { apply is_full.empty }, 57 | refl 58 | end 59 | 60 | lemma is_full_t0 : 61 | is_full t0 := 62 | is_full_singleton _ 63 | 64 | lemma is_full_t1 : 65 | is_full t1 := 66 | is_full_singleton _ 67 | 68 | lemma is_full_t2 : 69 | is_full t2 := 70 | begin 71 | rw t2, 72 | apply is_full.node, 73 | { exact is_full_t0 }, 74 | { exact is_full_t1 }, 75 | { simp [t0, t1] } 76 | end 77 | 78 | lemma is_full_mirror {α : Type} : 79 | ∀t : btree α, is_full t → is_full (mirror t) 80 | | empty := by intro; assumption 81 | | (node a l r) := 82 | begin 83 | intro full_t, 84 | cases full_t, 85 | rw mirror, 86 | apply is_full.node, 87 | repeat { apply is_full_mirror, assumption }, 88 | simp [mirror_eq_empty_iff, *] 89 | end 90 | 91 | lemma is_full_mirror₂ {α : Type} : 92 | ∀t : btree α, is_full t → is_full (mirror t) 93 | | _ is_full.empty := 94 | begin 95 | rw mirror, 96 | exact is_full.empty 97 | end 98 | | _ (is_full.node a l r hl hr empty_iff) := 99 | begin 100 | rw mirror, 101 | apply is_full.node, 102 | repeat { apply is_full_mirror₂, assumption }, 103 | simp [mirror_eq_empty_iff, *] 104 | end 105 | 106 | lemma is_full_node_iff {α : Type} (a : α) (l r : btree α) : 107 | is_full (node a l r) ↔ 108 | is_full l ∧ is_full r ∧ (l = empty ↔ r = empty) := 109 | begin 110 | apply iff.intro, 111 | { intro h, 112 | cases h, 113 | cc }, 114 | { intro h, 115 | apply is_full.node, 116 | repeat { cc } } 117 | end 118 | 119 | lemma is_full_mirror₃ {α : Type} : 120 | ∀t : btree α, is_full t → is_full (mirror t) 121 | | _ is_full.empty := 122 | by rw mirror; exact is_full.empty 123 | | _ (is_full.node a l r hl hr empty_iff) := 124 | by simp [mirror, is_full_node_iff, is_full_mirror₃ l hl, 125 | is_full_mirror₃ r hr, mirror_eq_empty_iff, empty_iff] 126 | 127 | 128 | /- Example: Sorted Lists -/ 129 | 130 | inductive sorted : list ℕ → Prop 131 | | nil : sorted [] 132 | | single {x : ℕ} : sorted [x] 133 | | two_or_more {x y : ℕ} {xs : list 
ℕ} (xy : x ≤ y) 134 | (yxs : sorted (y :: xs)) : 135 | sorted (x :: y :: xs) 136 | 137 | example : 138 | sorted [] := 139 | sorted.nil 140 | 141 | example : 142 | sorted [2] := 143 | sorted.single 144 | 145 | example : 146 | sorted [3, 5] := 147 | begin 148 | apply sorted.two_or_more, 149 | { linarith }, 150 | { exact sorted.single } 151 | end 152 | 153 | example : 154 | sorted [3, 5] := 155 | sorted.two_or_more (by linarith) sorted.single 156 | 157 | example : 158 | sorted [7, 9, 9, 11] := 159 | sorted.two_or_more (by linarith) 160 | (sorted.two_or_more (by linarith) 161 | (sorted.two_or_more (by linarith) 162 | sorted.single)) 163 | 164 | example : 165 | ¬ sorted [17, 13] := 166 | assume h : sorted [17, 13], 167 | have 17 ≤ 13 := 168 | match h with 169 | | sorted.two_or_more xy yxs := xy 170 | end, 171 | have ¬ (17 ≤ 13) := by linarith, 172 | show false, from by cc 173 | 174 | 175 | /- Example: Well-formed and Ground First-Order Terms -/ 176 | 177 | inductive term (α β : Type) : Type 178 | | var {} : β → term 179 | | fn : α → list term → term 180 | 181 | export term (var fn) 182 | 183 | inductive well_formed {α β : Type} (arity : α → ℕ) : 184 | term α β → Prop 185 | | var (x : β) : well_formed (var x) 186 | | fn (f : α) (ts : list (term α β)) 187 | (hargs : ∀t ∈ ts, well_formed t) 188 | (hlen : list.length ts = arity f) : 189 | well_formed (fn f ts) 190 | 191 | inductive variable_free {α β : Type} : term α β → Prop 192 | | fn (f : α) (ts : list (term α β)) 193 | (hargs : ∀t ∈ ts, variable_free t) : 194 | variable_free (fn f ts) 195 | 196 | 197 | /- Example: Reflexive Transitive Closure -/ 198 | 199 | inductive rtc {α : Type} (r : α → α → Prop) : α → α → Prop 200 | | base (a b : α) : r a b → rtc a b 201 | | refl (a : α) : rtc a a 202 | | trans (a b c : α) : rtc a b → rtc b c → rtc a c 203 | 204 | lemma rtc_rtc_iff_rtc {α : Type} (r : α → α → Prop) (a b : α) : 205 | rtc (rtc r) a b ↔ rtc r a b := 206 | begin 207 | apply iff.intro, 208 | { intro h, 209 | induction h, 210 | case rtc.base : x y { 211 | assumption }, 212 | case rtc.refl : x { 213 | apply rtc.refl }, 214 | case rtc.trans : x y z { 215 | apply rtc.trans, 216 | assumption, 217 | assumption } }, 218 | { intro h, 219 | apply rtc.base, 220 | assumption } 221 | end 222 | 223 | lemma rtc_rtc_eq_rtc {α : Type} (r : α → α → Prop) : 224 | rtc (rtc r) = rtc r := 225 | begin 226 | apply funext, 227 | intro a, 228 | apply funext, 229 | intro b, 230 | apply propext, 231 | apply rtc_rtc_iff_rtc 232 | end 233 | 234 | 235 | /- New Tactics -/ 236 | 237 | example {α : Type} (a b c d : α) (f : α → α → α) 238 | (hab : a = c) (hcd : b = d) : 239 | f a b = f c d := 240 | by cc 241 | 242 | example (i : ℤ) (hagt : i > 5) : 243 | 2 * i > 8 := 244 | by linarith 245 | 246 | example (i : ℤ) : 247 | 1 + (i + -1) = i := 248 | by norm_num 249 | 250 | end LoVe 251 | -------------------------------------------------------------------------------- /lean/love05_inductive_predicates_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 5: Inductive Predicates -/ 2 | 3 | import .love05_inductive_predicates_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Even and Odd -/ 9 | 10 | /- The `even` predicate is true for even numbers and false for odd numbers. -/ 11 | 12 | #check even 13 | 14 | /- 1.1. Prove that 0, 2, 4, and 6 are even. 
-/ 15 | 16 | -- enter your answer here 17 | 18 | /- We define `odd` as the negation of `even`: -/ 19 | 20 | def odd (n : ℕ) : Prop := 21 | ¬ even n 22 | 23 | /- 1.2. Prove that 1 is odd and register this fact as a `simp` rule. 24 | 25 | Hint: `cases` is useful to reason about hypotheses of the form `even …`. -/ 26 | 27 | @[simp] lemma odd_1 : 28 | odd 1 := 29 | sorry 30 | 31 | /- 1.3. Prove that 3, 5, and 7 are odd. -/ 32 | 33 | -- enter your answer here 34 | 35 | /- 1.4. Complete the following proof by structural induction. 36 | 37 | Hint: You can rely implicitly on computation for the induction step. -/ 38 | 39 | lemma even_two_times : 40 | ∀m : ℕ, even (2 * m) 41 | := sorry 42 | 43 | /- 1.5. Complete the following proof by rule induction. 44 | 45 | Hint: You can use the `cases` tactic (or `match … with`) to destruct an 46 | existential quantifier and extract the witness. -/ 47 | 48 | lemma even_imp_exists_two_times : 49 | ∀n : ℕ, even n → ∃m, n = 2 * m 50 | | _ even.zero := exists.intro 0 (by simp) 51 | | _ (even.add_two n hen) := 52 | sorry 53 | 54 | /- 1.6. Using `even_two_times` and `even_imp_exists_two_times`, prove the 55 | following equivalence. -/ 56 | 57 | lemma even_iff_exists_two_times (n : ℕ) : 58 | even n ↔ ∃m, n = 2 * m := 59 | sorry 60 | 61 | 62 | /- Question 2: Binary Trees -/ 63 | 64 | /- 2.1. Prove the converse of `is_full_mirror`. You may exploit already proved 65 | lemmas (e.g., `is_full_mirror`, `mirror_mirror`). -/ 66 | 67 | lemma mirror_is_full {α : Type} : 68 | ∀t : btree α, is_full (mirror t) → is_full t := 69 | sorry 70 | 71 | /- 2.2. Define a function that counts the number of constructors (`empty` or 72 | `node`) in a tree. -/ 73 | 74 | def count {α : Type} : btree α → ℕ 75 | := sorry 76 | 77 | end LoVe 78 | -------------------------------------------------------------------------------- /lean/love05_inductive_predicates_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 5: Inductive Predicates -/ 2 | 3 | import .love05_inductive_predicates_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Even and Odd -/ 9 | 10 | /- The `even` predicate is true for even numbers and false for odd numbers. -/ 11 | 12 | #check even 13 | 14 | /- 1.1. Prove that 0, 2, 4, and 6 are even. -/ 15 | 16 | lemma even_0 : even 0 := even.zero 17 | lemma even_2 : even 2 := even.add_two _ even_0 18 | lemma even_4 : even 4 := even.add_two _ even_2 19 | lemma even_6 : even 6 := even.add_two _ even_4 20 | 21 | /- We define `odd` as the negation of `even`: -/ 22 | 23 | def odd (n : ℕ) : Prop := 24 | ¬ even n 25 | 26 | /- 1.2. Prove that 1 is odd and register this fact as a `simp` rule. 27 | 28 | Hint: `cases` is useful to reason about hypotheses of the form `even …`. -/ 29 | 30 | @[simp] lemma odd_1 : 31 | odd 1 := 32 | by intro h; cases h 33 | 34 | /- 1.3. Prove that 3, 5, and 7 are odd. -/ 35 | 36 | example : odd 3 := by intro h; cases h; cases h_a 37 | example : odd 5 := by intro h; cases h; cases h_a; cases h_a_a 38 | example : odd 7 := by intro h; cases h; cases h_a; cases h_a_a; cases h_a_a_a 39 | 40 | /- 1.4. Complete the following proof by structural induction. 41 | 42 | Hint: You can rely implicitly on computation for the induction step. -/ 43 | 44 | lemma even_two_times : 45 | ∀m : ℕ, even (2 * m) 46 | | 0 := even.zero 47 | | (m + 1) := 48 | begin 49 | apply even.add_two, 50 | apply even_two_times 51 | end 52 | 53 | /- 1.5. Complete the following proof by rule induction. 
54 | 55 | Hint: You can use the `cases` tactic (or `match … with`) to destruct an 56 | existential quantifier and extract the witness. -/ 57 | 58 | lemma even_imp_exists_two_times : 59 | ∀n : ℕ, even n → ∃m, n = 2 * m 60 | | _ even.zero := exists.intro 0 (by simp) 61 | | _ (even.add_two n hen) := 62 | begin 63 | cases even_imp_exists_two_times n hen, 64 | use w + 1, 65 | rw h, 66 | linarith 67 | end 68 | 69 | /- 1.6. Using `even_two_times` and `even_imp_exists_two_times`, prove the 70 | following equivalence. -/ 71 | 72 | lemma even_iff_exists_two_times (n : ℕ) : 73 | even n ↔ ∃m, n = 2 * m := 74 | begin 75 | apply iff.intro, 76 | { apply even_imp_exists_two_times }, 77 | { intro h, 78 | cases h, 79 | simp *, 80 | apply even_two_times } 81 | end 82 | 83 | 84 | /- Question 2: Binary Trees -/ 85 | 86 | /- 2.1. Prove the converse of `is_full_mirror`. You may exploit already proved 87 | lemmas (e.g., `is_full_mirror`, `mirror_mirror`). -/ 88 | 89 | lemma mirror_is_full {α : Type} : 90 | ∀t : btree α, is_full (mirror t) → is_full t := 91 | begin 92 | intros t fmt, 93 | have fmmt : is_full (mirror (mirror t)) := is_full_mirror _ fmt, 94 | rw mirror_mirror at fmmt, 95 | assumption 96 | end 97 | 98 | /- 2.2. Define a function that counts the number of constructors (`empty` or 99 | `node`) in a tree. -/ 100 | 101 | def count {α : Type} : btree α → ℕ 102 | | empty := 1 103 | | (node _ l r) := count l + count r + 1 104 | 105 | end LoVe 106 | -------------------------------------------------------------------------------- /lean/love05_inductive_predicates_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 5: Inductive Predicates -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Lambda-Terms -/ 9 | 10 | /- Recall the following type of λ-terms from question 3 of exercise 4. -/ 11 | 12 | inductive lam : Type 13 | | var : string → lam 14 | | abs : string → lam → lam 15 | | app : lam → lam → lam 16 | 17 | export lam (var abs app) 18 | 19 | /- 1.1. Define an inductive predicate `is_abs` that returns true if and only if 20 | its argument has an `abs` constructor at the top level. -/ 21 | 22 | -- enter your definition here 23 | 24 | /- 1.2. Define an inductive predicate `is_βnf` that determines whether a lambda 25 | term is in β-normal form (https://en.wikipedia.org/wiki/Beta_normal_form). 26 | 27 | Hint: Use `is_abs` somewhere. -/ 28 | 29 | -- enter your definition here 30 | 31 | 32 | /- Question 2: Transitive Closure -/ 33 | 34 | /- In mathematics, the transitive closure `R+` of a binary relation `R` over a 35 | set `A` can be defined as the smallest solution satisfying the following rules: 36 | 37 | (base) for all a, b ∈ A, if a R b, then a R+ b; 38 | (step) for all a, b, c ∈ A, if a R b and b R+ c, then a R+ c. 39 | 40 | In Lean, we can define this concept as follows, by identifying the set `A` with 41 | the type `α`: -/ 42 | 43 | inductive tc₁ {α : Type} (r : α → α → Prop) : α → α → Prop 44 | | base : ∀a b, r a b → tc₁ a b 45 | | step : ∀a b c, r a b → tc₁ b c → tc₁ a c 46 | 47 | /- 2.1. Rule `(step)` makes it convenient to extend transitive chains by adding 48 | links to the left. Another way to define the transitive closure `R+` would use 49 | the following rule instead of `(step)`, with a preference for the right: 50 | 51 | (pets) for all a, b, c ∈ A, if a R+ b and b R c, then a R+ c. 52 | 53 | Define a predicate `tc₂` that embodies this alternative definition. 
-/ 54 | 55 | -- enter your definition here 56 | 57 | /- 2.2. Yet another definition of the transitive closure `R+` would use the 58 | following symmetric rule instead of `(step)` or `(pets)`: 59 | 60 | (trans) for all a, b, c ∈ A, if a R+ b and b R+ c, then a R+ c. 61 | 62 | Define a predicate `tc₃` that embodies this alternative definition. -/ 63 | 64 | -- enter your definition here 65 | 66 | /- 2.3. Prove that `(step)` also holds as a lemma about `tc₃`. -/ 67 | 68 | lemma tc₃_step {α : Type} (r : α → α → Prop) (a b c : α) (rab : r a b) 69 | (tbc : tc₃ r b c) : 70 | tc₃ r a c := 71 | sorry 72 | 73 | /- 2.4 (**optional**). Prove by rule induction on the `tc₁ r a b` premise below 74 | that `(pets)` also holds as a lemma about `tc₁` as defined above. -/ 75 | 76 | lemma tc₁_pets {α : Type} (r : α → α → Prop) (c : α) : 77 | ∀a b, tc₁ r a b → r b c → tc₁ r a c := 78 | sorry 79 | 80 | /- 2.5 (**optional**). Prove by rule induction that `(trans)` also holds as a 81 | lemma about `tc₁`. -/ 82 | 83 | lemma tc₁_trans {α : Type} (r : α → α → Prop) (c : α) : 84 | ∀a b : α, tc₁ r a b → tc₁ r b c → tc₁ r a c := 85 | sorry 86 | 87 | end LoVe 88 | -------------------------------------------------------------------------------- /lean/love05_inductive_predicates_homework_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 5: Inductive Predicates -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 2: Transitive Closure -/ 9 | 10 | /- In mathematics, the transitive closure `R+` of a binary relation `R` over a 11 | set `A` can be defined as the smallest solution satisfying the following rules: 12 | 13 | (base) for all a, b ∈ A, if a R b, then a R+ b; 14 | (step) for all a, b, c ∈ A, if a R b and b R+ c, then a R+ c. 15 | 16 | In Lean, we can define this concept as follows, by identifying the set `A` with 17 | the type `α`: -/ 18 | 19 | inductive tc₁ {α : Type} (r : α → α → Prop) : α → α → Prop 20 | | base : ∀a b, r a b → tc₁ a b 21 | | step : ∀a b c, r a b → tc₁ b c → tc₁ a c 22 | 23 | /- 2.4 (**optional**). Prove by rule induction on the `tc₁ r a b` premise below 24 | that `(pets)` also holds as a lemma about `tc₁` as defined above. -/ 25 | 26 | lemma tc₁_pets {α : Type} (r : α → α → Prop) (c : α) : 27 | ∀a b, tc₁ r a b → r b c → tc₁ r a c := 28 | begin 29 | intros a b tab rbc, 30 | induction tab, 31 | case tc₁.base : x y rxy ryc { 32 | exact tc₁.step _ _ _ rxy (tc₁.base _ _ ryc) }, 33 | case tc₁.step : x y z rxy tyz tyc_of_rzc rzc { 34 | exact tc₁.step _ _ _ rxy (tyc_of_rzc rzc) } 35 | end 36 | 37 | /- 2.5 (**optional**). Prove by rule induction that `(trans)` also holds as a 38 | lemma about `tc₁`. 
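As in question 2.4, the proof below proceeds by rule induction on the derivation of `tc₁ r a b`, rebuilding the chain one `(step)` link at a time.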
-/ 39 | 40 | lemma tc₁_trans {α : Type} (r : α → α → Prop) (c : α) : 41 | ∀a b : α, tc₁ r a b → tc₁ r b c → tc₁ r a c := 42 | begin 43 | intros a b tab tbc, 44 | induction tab, 45 | case tc₁.base : x y rxy tyc { 46 | exact tc₁.step _ _ _ rxy tyc }, 47 | case tc₁.step : x y z rxy tyz tyc_of_tzc tzc { 48 | exact tc₁.step _ _ _ rxy (tyc_of_tzc tzc) } 49 | end 50 | 51 | end LoVe 52 | -------------------------------------------------------------------------------- /lean/love06_monads_demo.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Demo 6: Monads -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Motivating Example -/ 9 | 10 | def sum_2_5_7₁ (ns : list ℕ) : option ℕ := 11 | match list.nth ns 1 with 12 | | none := none 13 | | some n2 := 14 | match list.nth ns 4 with 15 | | none := none 16 | | some n5 := 17 | match list.nth ns 6 with 18 | | none := none 19 | | some n7 := some (n2 + n5 + n7) 20 | end 21 | end 22 | end 23 | 24 | def bind_option {α : Type} {β : Type} : 25 | option α → (α → option β) → option β 26 | | none f := none 27 | | (some a) f := f a 28 | 29 | def sum_2_5_7₂ (ns : list ℕ) : option ℕ := 30 | bind_option (list.nth ns 1) 31 | (λn2, bind_option (list.nth ns 4) 32 | (λn5, bind_option (list.nth ns 6) 33 | (λn7, some (n2 + n5 + n7)))) 34 | 35 | #check bind 36 | 37 | def sum_2_5_7₃ (ns : list ℕ) : option ℕ := 38 | bind (list.nth ns 1) 39 | (λn2, bind (list.nth ns 4) 40 | (λn5, bind (list.nth ns 6) 41 | (λn7, some (n2 + n5 + n7)))) 42 | 43 | #check (>>=) 44 | 45 | def sum_2_5_7₄ (ns : list ℕ) : option ℕ := 46 | list.nth ns 1 >>= 47 | λn2, list.nth ns 4 >>= 48 | λn5, list.nth ns 6 >>= 49 | λn7, some (n2 + n5 + n7) 50 | 51 | def sum_2_5_7₅ (ns : list ℕ) : option ℕ := 52 | do n2 ← list.nth ns 1, 53 | do n5 ← list.nth ns 4, 54 | do n7 ← list.nth ns 6, 55 | some (n2 + n5 + n7) 56 | 57 | def sum_2_5_7₆ (ns : list ℕ) : option ℕ := 58 | do 59 | n2 ← list.nth ns 1, 60 | n5 ← list.nth ns 4, 61 | n7 ← list.nth ns 6, 62 | some (n2 + n5 + n7) 63 | 64 | 65 | /- A Type Class of Monads -/ 66 | 67 | class lawful_monad (m : Type → Type) 68 | extends has_bind m, has_pure m : Type 1 := 69 | (pure_bind {α β : Type} (a : α) (f : α → m β) : 70 | (pure a >>= f) = f a) 71 | (bind_pure {α : Type} (ma : m α) : 72 | ma >>= pure = ma) 73 | (bind_assoc {α β γ : Type} (f : α → m β) (g : β → m γ) 74 | (ma : m α) : 75 | ((ma >>= f) >>= g) = (ma >>= (λa, f a >>= g))) 76 | 77 | attribute [simp] 78 | LoVe.lawful_monad.bind_pure 79 | LoVe.lawful_monad.bind_assoc 80 | LoVe.lawful_monad.pure_bind 81 | 82 | open LoVe.lawful_monad 83 | 84 | #print monad 85 | #print is_lawful_monad 86 | 87 | 88 | /- The Option Monad -/ 89 | 90 | namespace option 91 | 92 | def pure {α : Type} : α → option α := 93 | option.some 94 | 95 | def bind {α β : Type} : option α → (α → option β) → option β 96 | | none f := none 97 | | (some a) f := f a 98 | 99 | instance lawful_monad_option : lawful_monad option := 100 | { pure := @option.pure, 101 | bind := @option.bind, 102 | pure_bind := 103 | begin 104 | intros α β a f, 105 | refl 106 | end, 107 | bind_pure := 108 | begin 109 | intros α m, 110 | cases m; refl 111 | end, 112 | bind_assoc := 113 | begin 114 | intros α β γ f g m, 115 | cases m; refl 116 | end } 117 | 118 | end option 119 | 120 | 121 | /- The State Monad -/ 122 | 123 | namespace state 124 | 125 | def state (σ : Type) (α : Type) := 126 | σ → α × σ 127 | 128 | end state 129 | 130 | namespace state 131 | 132 | def read {σ : Type} : state σ σ 133 | | s := (s, s) 134 | 135 | 
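/- `read` (above) returns the current state as the result and leaves the state
unchanged; `write s` (below) discards the current state, replaces it with `s`,
and returns `()`. -/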
def write {σ : Type} (s : σ) : state σ unit 136 | | _ := ((), s) 137 | 138 | def pure {σ α : Type} (a : α) : state σ α 139 | | s := (a, s) 140 | 141 | def bind {σ : Type} {α β : Type} (ma : state σ α) 142 | (f : α → state σ β) : state σ β 143 | | s := 144 | match ma s with 145 | | (a, s') := f a s' 146 | end 147 | 148 | instance {σ : Type} : lawful_monad (state σ) := 149 | { pure := @state.pure σ, 150 | bind := @state.bind σ, 151 | pure_bind := 152 | begin 153 | intros α β a f, 154 | apply funext, 155 | intro s, 156 | refl 157 | end, 158 | bind_pure := 159 | begin 160 | intros α m, 161 | apply funext, 162 | intro s, 163 | simp [bind], 164 | cases m s, 165 | refl 166 | end, 167 | bind_assoc := 168 | begin 169 | intros α β γ f g m, 170 | apply funext, 171 | intro s, 172 | simp [bind], 173 | cases m s, 174 | refl 175 | end } 176 | 177 | end state 178 | 179 | namespace state 180 | 181 | def diff_list : list ℕ → state ℕ (list ℕ) 182 | | [] := pure [] 183 | | (n :: ns) := 184 | do 185 | prev ← read, 186 | if n < prev then 187 | diff_list ns 188 | else 189 | do 190 | write n, 191 | ns' ← diff_list ns, 192 | pure (n :: ns') 193 | 194 | #eval diff_list [1, 2, 3, 2] 0 195 | #eval diff_list [1, 2, 3, 2, 4, 5, 2] 0 196 | 197 | end state 198 | 199 | 200 | /- Example: Generic Iteration over a List -/ 201 | 202 | def mmap {m : Type → Type} [lawful_monad m] {α β : Type} 203 | (f : α → m β) : list α → m (list β) 204 | | [] := pure [] 205 | | (a :: as) := 206 | do 207 | b ← f a, 208 | bs ← mmap as, 209 | pure (b :: bs) 210 | 211 | lemma mmap_append {m : Type → Type} [lawful_monad m] 212 | {α β : Type} (f : α → m β) : 213 | ∀as as' : list α, mmap f (as ++ as') = 214 | do 215 | bs ← mmap f as, 216 | bs' ← mmap f as', 217 | pure (bs ++ bs') 218 | | [] _ := by simp [mmap] 219 | | (a :: as) as' := by simp [mmap, mmap_append as as'] 220 | 221 | def nths {α : Type} (xss : list (list α)) (n : ℕ) : 222 | option (list α) := 223 | mmap (λxs, list.nth xs n) xss 224 | 225 | #eval nths 226 | [[11, 12, 13, 14], 227 | [21, 22, 23], 228 | [31, 32, 33]] 2 229 | 230 | end LoVe 231 | -------------------------------------------------------------------------------- /lean/love06_monads_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 6: Monads -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: `map` for Monads 9 | 10 | Define `map` for monads. This is the generalization of `map` on lists. Use the 11 | monad operations to define `map`. The functorial properties (`map_id` and 12 | `map_map`) are derived from the monad laws. 13 | 14 | This time, we use Lean's monad definition. In combination, `monad` and 15 | `is_lawful_monad` include the same constants, laws, and syntactic sugar as the 16 | `lawful_monad` type class from the lecture. -/ 17 | 18 | section map 19 | 20 | /- We fix a lawful monad `m`: -/ 21 | 22 | variables {m : Type → Type} [monad m] [is_lawful_monad m] 23 | 24 | /- 1.1. Define `map` on `m`. 25 | 26 | **Hint:** The challenge is to find a way to create `m β`. Follow the types. 27 | One way to proceed is to list all the arguments and operations 28 | available (e.g., `pure`, `>>=`) with their types and see if you can plug 29 | them together like Lego blocks. -/ 30 | 31 | def map {α β} (f : α → β) (ma : m α) : m β := 32 | := sorry 33 | 34 | /- 1.2. Prove the identity law for `map`. 35 | 36 | **Hint**: You will need the `bind_pure` property of monads. 
-/ 37 | 38 | lemma map_id {α} (ma : m α) : map id ma = ma := 39 | sorry 40 | 41 | /- 1.3. Prove the composition law for `map`. -/ 42 | 43 | lemma map_map {α β γ} (f : α → β) (g : β → γ) (ma : m α) : 44 | map g (map f ma) = map (g ∘ f) ma := 45 | sorry 46 | 47 | end map 48 | 49 | 50 | /- Question 2 **optional**: Monadic Structure on Lists -/ 51 | 52 | /- `list` can be seen as a monad, similar to `option` but with several possible 53 | outcomes. It is also similar to `set`, but the results are ordered and finite. 54 | The code below sets `list` up as a monad. -/ 55 | 56 | namespace list 57 | 58 | protected def bind {α β : Type} : list α → (α → list β) → list β 59 | | [] f := [] 60 | | (a :: as) f := f a ++ bind as f 61 | 62 | protected def pure {α : Type} (a : α) : list α := 63 | [a] 64 | 65 | lemma pure_eq_singleton {α : Type} (a : α) : 66 | pure a = [a] := 67 | by refl 68 | 69 | instance : monad list := 70 | { pure := @list.pure, 71 | bind := @list.bind } 72 | 73 | /- 2.1 **optional**. Prove the following properties of `bind` under the empty 74 | list (`[]`), the list constructor (`::`), and `++`. -/ 75 | 76 | @[simp] lemma bind_nil {α β : Type} (f : α → list β) : 77 | [] >>= f = [] := 78 | sorry 79 | 80 | @[simp] lemma bind_cons {α β : Type} (f : α → list β) (a : α) (as : list α) : 81 | list.cons a as >>= f = f a ++ (as >>= f) := 82 | sorry 83 | 84 | @[simp] lemma bind_append {α β : Type} (f : α → list β) : 85 | ∀as as' : list α, (as ++ as') >>= f = (as >>= f) ++ (as' >>= f) 86 | := sorry 87 | 88 | /- 2.2. Prove the monadic laws for `list`. 89 | 90 | **Hint:** The simplifier cannot see through the type class definition of `pure`. 91 | You can use `pure_eq_singleton` to unfold the definition or `show` to state the 92 | lemma statement using `bind` and `[…]`. -/ 93 | 94 | lemma pure_bind {α β : Type} (a : α) (f : α → list β) : 95 | (pure a >>= f) = f a := 96 | sorry 97 | 98 | lemma bind_pure {α : Type} : 99 | ∀as : list α, as >>= pure = as 100 | := sorry 101 | 102 | lemma bind_assoc {α β γ : Type} (f : α → list β) (g : β → list γ) : 103 | ∀as : list α, (as >>= f) >>= g = as >>= (λa, f a >>= g) 104 | := sorry 105 | 106 | lemma bind_pure_comp_eq_map {α β : Type} {f : α → β} : 107 | ∀as : list α, as >>= (pure ∘ f) = list.map f as 108 | := sorry 109 | 110 | end list 111 | 112 | end LoVe 113 | -------------------------------------------------------------------------------- /lean/love06_monads_homework_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 6: Monads -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 2 **optional**: Monadic Structure on Lists -/ 9 | 10 | /- `list` can be seen as a monad, similar to `option` but with several possible 11 | outcomes. It is also similar to `set`, but the results are ordered and finite. 12 | The code below sets `list` up as a monad. -/ 13 | 14 | namespace list 15 | 16 | protected def bind {α β : Type} : list α → (α → list β) → list β 17 | | [] f := [] 18 | | (a :: as) f := f a ++ bind as f 19 | 20 | protected def pure {α : Type} (a : α) : list α := 21 | [a] 22 | 23 | lemma pure_eq_singleton {α : Type} (a : α) : 24 | pure a = [a] := 25 | by refl 26 | 27 | instance : monad list := 28 | { pure := @list.pure, 29 | bind := @list.bind } 30 | 31 | /- 2.1 **optional**. Prove the following properties of `bind` under the empty 32 | list (`[]`), the list constructor (`::`), and `++`. 
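In the solutions below, `bind_nil` and `bind_cons` hold by `refl`, essentially because `>>=` unfolds to `list.bind`; `bind_append` additionally requires structural induction on its first list argument.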
-/ 33 | 34 | @[simp] lemma bind_nil {α β : Type} (f : α → list β) : 35 | [] >>= f = [] := 36 | by refl 37 | 38 | @[simp] lemma bind_cons {α β : Type} (f : α → list β) (a : α) (as : list α) : 39 | list.cons a as >>= f = f a ++ (as >>= f) := 40 | by refl 41 | 42 | @[simp] lemma bind_append {α β : Type} (f : α → list β) : 43 | ∀as as' : list α, (as ++ as') >>= f = (as >>= f) ++ (as' >>= f) 44 | | [] as' := by refl 45 | | (a :: as) as' := by simp [bind_append as as'] 46 | 47 | /- 2.2 **optional**. Prove the monadic laws for `list`. 48 | 49 | Hint: The simplifier cannot see through the type class definition of `pure`. You 50 | can use `pure_eq_singleton` to unfold the definition or `show` to state the 51 | lemma statement using `bind` and `[…]`. -/ 52 | 53 | lemma pure_bind {α β : Type} (a : α) (f : α → list β) : 54 | (pure a >>= f) = f a := 55 | show ([a] >>= f) = f a, 56 | by simp 57 | 58 | lemma bind_pure {α : Type} : 59 | ∀as : list α, as >>= pure = as 60 | | [] := by refl 61 | | (a :: l) := by simp [pure_eq_singleton, bind_pure l] 62 | 63 | lemma bind_assoc {α β γ : Type} (f : α → list β) (g : β → list γ) : 64 | ∀as : list α, (as >>= f) >>= g = as >>= (λa, f a >>= g) 65 | | [] := by refl 66 | | (a :: as) := by simp [bind_append, bind_assoc as] 67 | 68 | lemma bind_pure_comp_eq_map {α β : Type} {f : α → β} : 69 | ∀as : list α, as >>= (pure ∘ f) = list.map f as 70 | | [] := by refl 71 | | (a :: as) := by simp [bind_pure_comp_eq_map as, pure_eq_singleton] 72 | 73 | end list 74 | 75 | end LoVe 76 | -------------------------------------------------------------------------------- /lean/love07_metaprogramming_demo.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Demo 7: Metaprogramming -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- The Tactic Monad -/ 9 | 10 | example : 11 | true := 12 | by tactic.triv 13 | 14 | example : 15 | true := 16 | by do 17 | tactic.trace "Hello, Metacosmos!", 18 | tactic.triv 19 | 20 | meta def hello_world : tactic unit := 21 | do 22 | tactic.trace "Hello, Metacosmos!", 23 | tactic.triv 24 | 25 | example : 26 | true := 27 | by hello_world 28 | 29 | run_cmd tactic.trace "Hello, Metacosmos!" 
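/- A small additional example along the same lines: `run_cmd` executes an
arbitrary `tactic unit` at the top level, so it can also trace computed values,
not just string literals. -/

run_cmd do
  let n : ℕ := 2 + 3,
  tactic.trace ("2 + 3 = " ++ to_string n)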
30 | 31 | open tactic 32 | 33 | example (α : Type) (a : α) : 34 | true := 35 | by do 36 | trace "local context:", 37 | local_context >>= trace, 38 | trace "goals:", 39 | get_goals >>= trace, 40 | trace "target:", 41 | target >>= trace, 42 | triv 43 | 44 | meta def exact_list : list expr → tactic unit 45 | | [] := fail "no matching expression found" 46 | | (e :: es) := 47 | (do 48 | trace "trying ", 49 | trace e, 50 | exact e) 51 | <|> exact_list es 52 | 53 | meta def find_assumption : tactic unit := do 54 | es ← local_context, 55 | exact_list es 56 | 57 | example {p : Prop} {α : Type} (a : α) (h : p) : 58 | p := 59 | by do find_assumption 60 | 61 | example {p : Prop} (h : p) : 62 | p := 63 | by do 64 | p_proof ← get_local `h, 65 | trace "p_proof:", 66 | trace p_proof, 67 | trace (expr.to_raw_fmt p_proof), 68 | trace "type of p_proof:", 69 | infer_type p_proof >>= trace, 70 | trace "type of type of p_proof:", 71 | infer_type p_proof >>= infer_type >>= trace, 72 | apply p_proof 73 | 74 | 75 | /- Names and Expressions -/ 76 | 77 | #print expr 78 | 79 | #check expr tt -- elaborated expressions 80 | #check expr ff -- unelaborated expressions (pre-expressions) 81 | 82 | #print name 83 | 84 | #check (expr.const `ℕ [] : expr) 85 | #check expr.sort level.zero -- Sort 0, i.e., Prop 86 | #check expr.sort (level.succ level.zero) 87 | -- Sort 1, i.e., Type 0 (Type) 88 | #check expr.var 0 -- bound variable with De Bruijn index 0 89 | #check (expr.local_const `uniq_name `pp_name binder_info.default 90 | `(ℕ) : expr) 91 | #check (expr.mvar `uniq_name `pp_name `(ℕ) : expr) 92 | #check (expr.pi `pp_name binder_info.default `(ℕ) 93 | (expr.sort level.zero) : expr) 94 | #check (expr.lam `pp_name binder_info.default `(ℕ) 95 | (expr.var 0) : expr) 96 | #check expr.elet 97 | #check expr.macro 98 | 99 | run_cmd do 100 | let e : expr := `(list.map (λn : ℕ, n + 1) [1, 2, 3]), 101 | trace e 102 | 103 | run_cmd do 104 | let e : expr := `(list.map _ [1, 2, 3]), 105 | -- fails (holes are disallowed) 106 | skip 107 | 108 | run_cmd do 109 | let e₁ : pexpr := ``(list.map (λn, n + 1) [1, 2, 3]), 110 | let e₂ : pexpr := ``(list.map _ [1, 2, 3]), 111 | trace e₁, 112 | trace e₂ 113 | 114 | run_cmd do 115 | let e := ```(some_silly_name), 116 | trace e 117 | 118 | run_cmd trace `some.silly.name 119 | run_cmd trace ``true 120 | run_cmd trace ``some.silly.name -- fails (not found) 121 | 122 | run_cmd do 123 | let x : expr := `(2 : ℕ), 124 | let e : expr := `(%%x + 1), 125 | trace e 126 | 127 | run_cmd do 128 | let x : expr := `(@id ℕ), 129 | let e := ``(list.map %%x), 130 | trace e 131 | 132 | run_cmd do 133 | let x : expr := `(@id ℕ), 134 | let e := ```(a _ %%x), 135 | trace e 136 | 137 | example : 138 | 1 + 2 = 3 := 139 | by do 140 | `(%%a + %%b = %%c) ← target, 141 | trace a, 142 | trace b, 143 | trace c, 144 | `(@eq %%α %%l %%r) ← target, 145 | trace α, 146 | trace l, 147 | trace r, 148 | exact `(refl _ : 3 = 3) 149 | 150 | 151 | /- A Simple Tactic: `destruct_and` -/ 152 | 153 | example {a b c d : Prop} (h : a ∧ (b ∧ c) ∧ d) : 154 | a := 155 | and.elim_left h 156 | 157 | example {a b c d : Prop} (h : a ∧ (b ∧ c) ∧ d) : 158 | b := 159 | and.elim_left (and.elim_left (and.elim_right h)) 160 | 161 | example {a b c d : Prop} (h : a ∧ (b ∧ c) ∧ d) : 162 | b ∧ c := 163 | and.elim_left (and.elim_right h) 164 | 165 | meta def destruct_and_helper : expr → expr → tactic unit 166 | | `(%%a ∧ %%b) h := 167 | exact h 168 | <|> (do 169 | ha ← to_expr ``(and.elim_left %%h), 170 | destruct_and_helper a ha) 171 | <|> (do 172 | hb ← to_expr 
``(and.elim_right %%h), 173 | destruct_and_helper b hb) 174 | | _ h := exact h 175 | 176 | meta def destruct_and (nam : name) : tactic unit := 177 | do 178 | h ← get_local nam, 179 | t ← infer_type h, 180 | destruct_and_helper t h 181 | 182 | example {a b c d : Prop} (h : a ∧ b ∧ c) : 183 | a := 184 | by destruct_and `h 185 | 186 | example {a b c d : Prop} (h : a ∧ b ∧ c) : 187 | c := 188 | by destruct_and `h 189 | 190 | example {a b c d : Prop} (h : a ∧ b ∧ c) : 191 | b ∧ c := 192 | by destruct_and `h 193 | 194 | example {a b c d : Prop} (h : a ∧ b ∧ c) : 195 | a ∧ c := 196 | by destruct_and `h -- fails 197 | 198 | 199 | /- Example: A Solvability Advisor -/ 200 | 201 | meta def is_theorem : declaration → bool 202 | | (declaration.defn _ _ _ _ _ _) := ff 203 | | (declaration.thm _ _ _ _) := tt 204 | | (declaration.cnst _ _ _ _) := ff 205 | | (declaration.ax _ _ _) := tt 206 | 207 | meta def get_all_theorems : tactic (list name) := 208 | do 209 | env ← get_env, 210 | pure (environment.fold env [] (λdecl nams, 211 | if is_theorem decl then declaration.to_name decl :: nams 212 | else nams)) 213 | 214 | meta def solve_with_name (nam : name) : tactic unit := 215 | do 216 | cst ← mk_const nam, 217 | apply cst 218 | ({ md := transparency.reducible, unify := ff } : apply_cfg), 219 | all_goals assumption 220 | 221 | meta def solve_direct : tactic unit := 222 | do 223 | nams ← get_all_theorems, 224 | list.mfirst (λnam, 225 | do 226 | solve_with_name nam, 227 | trace ("directly solved by " ++ to_string nam)) 228 | nams 229 | 230 | example {x y : ℕ} (h : x = y) : 231 | y = x := 232 | by solve_direct 233 | 234 | meta def solve_direct_symm : tactic unit := 235 | solve_direct 236 | <|> (do 237 | cst ← mk_const `eq.symm, 238 | apply cst, 239 | solve_direct) 240 | 241 | example {n : ℕ} : 242 | n + 0 = n := 243 | by solve_direct_symm 244 | 245 | example {n : ℕ} : 246 | n = n + 0 := 247 | by solve_direct_symm 248 | 249 | end LoVe 250 | -------------------------------------------------------------------------------- /lean/love07_metaprogramming_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 7: Metaprogramming -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | open expr 8 | open tactic 9 | open declaration 10 | 11 | 12 | /- Question 1: A `safe` Tactic -/ 13 | 14 | /- We develop a tactic that applies all safe introduction and elimination rules 15 | for the connectives and quantifiers exhaustively. A rule is said to be _safe_ if 16 | it always gives rise to provable subgoals. In addition, we will require that 17 | safe rules do not introduce metavariables (which can easily be instantiated 18 | accidentally with the wrong terms.) 19 | 20 | We proceed in three steps. -/ 21 | 22 | /- 1.1. Develop a `safe_intros` tactic that applies the introduction rules for 23 | `true`, `¬`, `∧`, `↔`, and `→`/`∀`. The tactic generalizes `intro_ands` from the 24 | exercise. 25 | 26 | **Hint**: You can use `tactic.intro` or `tactic.intro1` for some of these. 27 | 28 | **Hint**: You can use the `<|>` operator between the rules/tactics for different 29 | symbols. 
-/ 30 | 31 | meta def safe_intros : tactic unit := 32 | sorry 33 | 34 | example {a b c d : Prop} : 35 | a → ¬ b ∧ (c ↔ d) := 36 | begin 37 | safe_intros, 38 | /- The proof state should be roughly as follows: 39 | 40 | a b c d : Prop, 41 | a_1 : a, 42 | a_2 : b 43 | ⊢ false 44 | 45 | a b c d : Prop, 46 | a_1 : a, 47 | a_2 : c 48 | ⊢ d 49 | 50 | a b c d : Prop, 51 | a_1 : a, 52 | a_2 : d 53 | ⊢ c -/ 54 | repeat { sorry } 55 | end 56 | 57 | /- 1.2. Develop a `safe_destructs` tactic that eliminates `false`, `∧`, `∨`, 58 | `↔`, and `∃`. The tactic generalizes `destruct_ands` from the exercise. -/ 59 | 60 | meta def safe_destructs : tactic unit := 61 | sorry 62 | 63 | example {a b c d e f : Prop} {p : ℕ → Prop} 64 | (hneg: ¬ a) (hand : a ∧ b ∧ c) (hor : c ∨ d) (himp : b → e) (hiff : e ↔ f) 65 | (hex : ∃x, p x) : 66 | false := 67 | begin 68 | safe_destructs, 69 | /- The proof state should be roughly as follows: 70 | 71 | 2 goals 72 | a b c d e f : Prop, 73 | p : ℕ → Prop, 74 | hneg : ¬a, 75 | himp : b → e, 76 | hand_left : a, 77 | hor : c, 78 | hiff_mp : e → f, 79 | hiff_mpr : f → e, 80 | hex_w : ℕ, 81 | hex_h : p hex_w, 82 | hand_right_left : b, 83 | hand_right_right : c 84 | ⊢ false 85 | 86 | a b c d e f : Prop, 87 | p : ℕ → Prop, 88 | hneg : ¬a, 89 | himp : b → e, 90 | hand_left : a, 91 | hor : d, 92 | hiff_mp : e → f, 93 | hiff_mpr : f → e, 94 | hex_w : ℕ, 95 | hex_h : p hex_w, 96 | hand_right_left : b, 97 | hand_right_right : c 98 | ⊢ false -/ 99 | repeat { sorry } 100 | end 101 | 102 | /- 1.3. Implement a `safe` tactic that first performs introduction, then 103 | elimination, and finally proves all the subgoals that can be discharged directly 104 | by `assumption`. The tactic generalizes `destro_and` from the exercise. 105 | 106 | **Hint**: The `try` tactic combinator might be useful. -/ 107 | 108 | meta def safe : tactic unit := 109 | sorry 110 | 111 | example {a b c d e f : Prop} {p : ℕ → Prop} 112 | (hneg: ¬ a) (hand : a ∧ b ∧ c) (hor : c ∨ d) (himp : b → e) (hiff : e ↔ f) 113 | (hex : ∃x, p x) : 114 | a → ¬ b ∧ (c ↔ d) := 115 | begin 116 | safe, 117 | /- The proof state should be roughly as follows: 118 | 119 | 3 goals 120 | a b c d e f : Prop, 121 | p : ℕ → Prop, 122 | hneg : ¬a, 123 | himp : b → e, 124 | a_1 : a, 125 | a_2 : b, 126 | hand_left : a, 127 | hor : c, 128 | hiff_mp : e → f, 129 | hiff_mpr : f → e, 130 | hex_w : ℕ, 131 | hex_h : p hex_w, 132 | hand_right_left : b, 133 | hand_right_right : c 134 | ⊢ false 135 | 136 | a b c d e f : Prop, 137 | p : ℕ → Prop, 138 | hneg : ¬a, 139 | himp : b → e, 140 | a_1 : a, 141 | a_2 : b, 142 | hand_left : a, 143 | hor : d, 144 | hiff_mp : e → f, 145 | hiff_mpr : f → e, 146 | hex_w : ℕ, 147 | hex_h : p hex_w, 148 | hand_right_left : b, 149 | hand_right_right : c 150 | ⊢ false 151 | 152 | a b c d e f : Prop, 153 | p : ℕ → Prop, 154 | hneg : ¬a, 155 | himp : b → e, 156 | a_1 : a, 157 | a_2 : c, 158 | hand_left : a, 159 | hor : c, 160 | hiff_mp : e → f, 161 | hiff_mpr : f → e, 162 | hex_w : ℕ, 163 | hex_h : p hex_w, 164 | hand_right_left : b, 165 | hand_right_right : c 166 | ⊢ d -/ 167 | repeat { sorry } 168 | end 169 | 170 | 171 | /- Question 2 **optional**: An `auto` Tactic -/ 172 | 173 | /- 2.1 **optional**. Develop an Isabelle-style `auto` tactic. 174 | 175 | This tactic would apply all safe introduction and elimination rules. In 176 | addition, it would try unsafe rules (such as `or.intro_left` and `false.elim`) 177 | but backtrack at some point (or try several possibilities in parallel). 
178 | Iterative deepening may be a valid approach, or best-first search, or 179 | breadth-first search. The tactic should also attempt to apply assumptions whose 180 | conclusion matches the goal, but backtrack if necessary. 181 | 182 | See also "Automatic Proof and Disproof in Isabelle/HOL" 183 | (https://www.cs.vu.nl/~jbe248/frocos2011-dis-proof.pdf) by Blanchette, Bulwahn, 184 | and Nipkow, and the references they give. -/ 185 | 186 | /- 2.2 **optional**. Test your tactic on some benchmarks. 187 | 188 | You can try your tactic on logic puzzles of the kinds we proved in exercise 2 189 | and homework 2. Please include these below. -/ 190 | 191 | end LoVe 192 | -------------------------------------------------------------------------------- /lean/love08_operational_semantics_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 8: Operational Semantics -/ 2 | 3 | import .love08_operational_semantics_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Program Equivalence -/ 9 | 10 | /- For this question, we introduce the notation of program equivalence 11 | `p₁ ≈ p₂`. -/ 12 | 13 | def program_equiv (S₁ S₂ : program) : Prop := 14 | ∀s t, (S₁, s) ⟹ t ↔ (S₂, s) ⟹ t 15 | 16 | local infix ` ≈ ` := program_equiv 17 | 18 | /- Program equivalence is a equivalence relation, i.e., it is reflexive, 19 | symmetric, and transitive. -/ 20 | 21 | @[refl] lemma program_equiv.refl {S} : 22 | S ≈ S := 23 | assume s t, 24 | show (S, s) ⟹ t ↔ (S, s) ⟹ t, 25 | by refl 26 | 27 | @[symm] lemma program_equiv.symm {S₁ S₂}: 28 | S₁ ≈ S₂ → S₂ ≈ S₁ := 29 | assume h s t, 30 | show (S₂, s) ⟹ t ↔ (S₁, s) ⟹ t, 31 | from iff.symm (h s t) 32 | 33 | @[trans] lemma program_equiv.trans {S₁ S₂ S₃} (h₁₂ : S₁ ≈ S₂) (h₂₃ : S₂ ≈ S₃) : 34 | S₁ ≈ S₃ := 35 | assume s t, 36 | show (S₁, s) ⟹ t ↔ (S₃, s) ⟹ t, 37 | from iff.trans (h₁₂ s t) (h₂₃ s t) 38 | 39 | 40 | /- 1.1. Prove the following program equivalences. -/ 41 | 42 | lemma program_equiv.seq_skip_left {S} : 43 | skip ;; S ≈ S := 44 | sorry 45 | 46 | lemma program_equiv.seq_skip_right {S} : 47 | S ;; skip ≈ S := 48 | sorry 49 | 50 | lemma program_equiv.seq_congr {S₁ S₂ T₁ T₂} (hS : S₁ ≈ S₂) (hT : T₁ ≈ T₂) : 51 | S₁ ;; T₁ ≈ S₂ ;; T₂ := 52 | sorry 53 | 54 | lemma program_equiv.ite_seq_while {b S} : 55 | ite b (S ;; while b S) skip ≈ while b S := 56 | sorry 57 | 58 | /- 1.2. Prove one more equivalence. -/ 59 | 60 | lemma program_equiv.skip_assign_id {x} : 61 | assign x (λs, s x) ≈ skip := 62 | sorry 63 | 64 | 65 | /- Question 2: Guarded Command Language (GCL) -/ 66 | 67 | /- In 1976, E. W. Dijkstra introduced the guarded command language, a 68 | minimalistic imperative language with built-in nondeterminism. A grammar for one 69 | of its variants is given below: 70 | 71 | S ::= x := e -- assignment 72 | | assert b -- assertion 73 | | S ; S -- sequential composition 74 | | S | ⋯ | S -- nondeterministic choice 75 | | loop S -- nondeterministic iteration 76 | 77 | Assignment and sequential composition are as in the WHILE language. The other 78 | statements have the following semantics: 79 | 80 | * `assert b` aborts if `b` evaluates to false; otherwise, the command is a 81 | no-op. 82 | 83 | * `S | ⋯ | S` chooses **any** of the branches and executes it, ignoring the 84 | other branches. 85 | 86 | * `loop S` executes `S` **any** number of times. 
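For instance, under these rules the program `(x := 0 | x := 1) ; loop (x := x + 1)` may terminate with `x` holding any natural number: the choice selects either branch, and the loop may then execute its body any number of times.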
87 | 88 | In Lean, GCL is captured by the following inductive type: -/ 89 | 90 | inductive gcl (σ : Type) : Type 91 | | assign : string → (σ → ℕ) → gcl 92 | | assert : (σ → Prop) → gcl 93 | | seq : gcl → gcl → gcl 94 | | choice : list gcl → gcl 95 | | loop : gcl → gcl 96 | 97 | infixr ` ;; `:90 := gcl.seq 98 | 99 | namespace gcl 100 | 101 | /- The parameter `σ` abstracts over the state type. It is necessary to work 102 | around a bug in Lean. 103 | 104 | The big-step semantics is defined as follows: -/ 105 | 106 | inductive big_step : (gcl state × state) → state → Prop 107 | | assign {x a s} : 108 | big_step (assign x a, s) (s{x ↦ a s}) 109 | | assert {b : state → Prop} {s} (hcond : b s) : 110 | big_step (assert b, s) s 111 | | seq {S T s t u} (h₁ : big_step (S, s) t) (h₂ : big_step (T, t) u) : 112 | big_step (S ;; T, s) u 113 | | choice {Ss : list (gcl state)} {s t} (i : ℕ) (hless : i < list.length Ss) 114 | (hbody : big_step (list.nth_le Ss i hless, s) t) : 115 | big_step (choice Ss, s) t 116 | | loop_base {S s} : 117 | big_step (loop S, s) s 118 | | loop_step {S s u} (t) (hbody : big_step (S, s) t) 119 | (hrest : big_step (loop S, t) u) : 120 | big_step (loop S, s) u 121 | 122 | /- Convenience syntax: -/ 123 | 124 | infix ` ~~> `:110 := big_step 125 | 126 | /- 2.1. Prove the following inversion rules, as we did in the lecture for the 127 | WHILE language. -/ 128 | 129 | @[simp] lemma big_step_assign_iff {x a s t} : 130 | (assign x a, s) ~~> t ↔ t = s{x ↦ a s} := 131 | sorry 132 | 133 | @[simp] lemma big_step_assert {b s t} : 134 | (assert b, s) ~~> t ↔ t = s ∧ b s := 135 | sorry 136 | 137 | @[simp] lemma big_step_seq_iff {S₁ S₂ s t} : 138 | (S₁ ;; S₂, s) ~~> t ↔ (∃u, (S₁, s) ~~> u ∧ (S₂, u) ~~> t) := 139 | sorry 140 | 141 | lemma big_step_loop {S s u} : 142 | (loop S, s) ~~> u ↔ (s = u ∨ (∃t, (S, s) ~~> t ∧ (loop S, t) ~~> u)) := 143 | sorry 144 | 145 | @[simp] lemma big_step_choice {Ss s t} : 146 | (choice Ss, s) ~~> t ↔ 147 | (∃(i : ℕ) (hless : i < list.length Ss), 148 | (list.nth_le Ss i hless, s) ~~> t) := 149 | sorry 150 | 151 | /- 2.2. Complete the translation below of a deterministic program to a GCL 152 | program, by filling in the `sorry` placeholders below. -/ 153 | 154 | def of_program : program → gcl state 155 | | program.skip := assert (λ_, true) 156 | | (program.assign x f) := 157 | sorry 158 | | (program.seq S₁ S₂) := 159 | sorry 160 | | (program.ite b S₁ S₂) := 161 | choice [seq (assert b) (of_program S₁), 162 | seq (assert (λs, ¬ b s)) (of_program S₂)] 163 | | (program.while b S) := 164 | seq (loop (seq (assert b) (of_program S))) (assert (λs, ¬ b s)) 165 | 166 | /- 2.3. In the definition of `of_program` above, `skip` is translated to 167 | `assert (λ_, true)`. Looking at the big-step semantics of both constructs, we 168 | can convince ourselves that it makes sense. Can you think of other correct ways 169 | to define the `skip` case? -/ 170 | 171 | -- enter your answer here 172 | 173 | end gcl 174 | 175 | end LoVe 176 | -------------------------------------------------------------------------------- /lean/love08_operational_semantics_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 8: Operational Semantics -/ 2 | 3 | import .lovelib 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Semantics of Regular Expressions 9 | 10 | Regular expression are a very popular tool for software development. Often, when 11 | textual input needs to be analyzed it is matched against a regular expression. 
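For example, a regular expression matching the character `a` followed by any number of `b`s accepts the strings "a", "ab", "abb", and so on.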
12 | In this homework, we define the syntax of regular expressions and what it means 13 | that a regular expression matches a string. 14 | 15 | We define `regex` to represent the following grammar: 16 | 17 | R ::= c — `char`: accepts one character `c` 18 | | ∅ — `nothing`: accepts nothing 19 | | ε — `empty`: accepts the empty string 20 | | R ⬝ R — `concat`: accepts the concatenation of two regexes 21 | | R + R — `alt`: accepts either of two regexes 22 | | R* — `star`: accept arbitrary many repetitions of a regex 23 | 24 | Notice the rough correspondence with a WHILE language: 25 | 26 | `char` ~ assignment 27 | `empty` ~ `skip` 28 | `concat` ~ sequential composition 29 | `alt` ~ conditional statement 30 | `star` ~ while loop -/ 31 | 32 | inductive regex : Type 33 | | char : char → regex 34 | | nothing : regex 35 | | empty : regex 36 | | concat : regex → regex → regex 37 | | alt : regex → regex → regex 38 | | star : regex → regex 39 | 40 | /- The `accept r s` predicate indicates that the regular expression `r` accepts 41 | the string `s`. -/ 42 | 43 | inductive accept : regex → list char → Prop 44 | /- accept one character -/ 45 | | char (c : char) : 46 | accept (regex.char c) [c] 47 | /- accept the empty string -/ 48 | | empty : 49 | accept regex.empty [] 50 | /- accept two concatenated regexes -/ 51 | | concat {r₁ r₂ : regex} (s₁ s₂ : list char) (h₁ : accept r₁ s₁) 52 | (h₂ : accept r₂ s₂) : 53 | accept (regex.concat r₁ r₂) (s₁ ++ s₂) 54 | /- accept the left alternative -/ 55 | | alt_left {r₁ r₂ : regex} (s : list char) (h : accept r₁ s) : 56 | accept (regex.alt r₁ r₂) s 57 | /- accept the right alternative -/ 58 | | alt_right {r₁ r₂ : regex} (s : list char) (h : accept r₂ s) : 59 | accept (regex.alt r₁ r₂) s 60 | /- accepts the empty string; this is the base case of `R*` -/ 61 | | star_base {r : regex} : accept (regex.star r) [] 62 | /- accepts `R` followed again by `R*`; this is the induction step of `R*` -/ 63 | | star_step {r : regex} (s s' : list char) (h₁ : accept r s) 64 | (h₂ : accept (regex.star r) s') : 65 | accept (regex.star r) (s ++ s') 66 | 67 | /- 1.1. Explain why there is no rule for `nothing`. -/ 68 | 69 | -- enter your answer here 70 | 71 | /- 1.2. Prove the following inversion rules. 72 | 73 | These proofs are very similar to the inversion rules in the lecture and the 74 | exercise. 
-/ 75 | 76 | @[simp] lemma accept_char {s : list char} {c : char} : 77 | accept (regex.char c) s ↔ s = [c] := 78 | sorry 79 | 80 | @[simp] lemma accept_nothing {s : list char} : 81 | ¬ accept regex.nothing s := 82 | sorry 83 | 84 | @[simp] lemma accept_empty {s : list char} : 85 | accept regex.empty s ↔ s = [] := 86 | sorry 87 | 88 | @[simp] lemma accept_concat {s : list char} {r₁ r₂ : regex} : 89 | accept (regex.concat r₁ r₂) s 90 | ↔ (∃s₁ s₂, accept r₁ s₁ ∧ accept r₂ s₂ ∧ s = s₁ ++ s₂) := 91 | sorry 92 | 93 | @[simp] lemma accept_alt {s : list char} {r₁ r₂ : regex} : 94 | accept (regex.alt r₁ r₂) s ↔ (accept r₁ s ∨ accept r₂ s) := 95 | sorry 96 | 97 | lemma accept_star {s : list char} {r : regex} : 98 | accept (regex.star r) s ↔ 99 | (s = [] ∨ (∃s₁ s₂, accept r s₁ ∧ accept (regex.star r) s₂ ∧ s = s₁ ++ s₂)) := 100 | sorry 101 | 102 | end LoVe 103 | -------------------------------------------------------------------------------- /lean/love09_hoare_logic_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 9: Hoare Logic -/ 2 | 3 | import .love09_hoare_logic_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Program Verification -/ 9 | 10 | section GAUSS 11 | 12 | /- The following WHILE program is intended to compute the Gaussian sum up to 13 | `n`, leaving the result in `r`. -/ 14 | 15 | def GAUSS : program := 16 | assign "r" (λs, 0) ;; 17 | while (λs, s "n" ≠ 0) 18 | (assign "r" (λs, s "r" + s "n") ;; 19 | assign "n" (λs, s "n" - 1)) 20 | 21 | /- The summation function: -/ 22 | 23 | def sum_upto : ℕ → ℕ 24 | | 0 := 0 25 | | (n + 1) := n + 1 + sum_upto n 26 | 27 | /- 1.1. Prove the correctness of `GAUSS`, using `vcg`. The main challenge is to 28 | figure out which invariant to use for the while loop. The invariant should 29 | capture both the work that has been done already (the intermediate result) and 30 | the work that remains to be done. -/ 31 | 32 | lemma GAUSS_correct (n : ℕ) : 33 | {* λs, s "n" = n *} GAUSS {* λs, s "r" = sum_upto n *} := 34 | sorry 35 | 36 | end GAUSS 37 | 38 | section MUL 39 | 40 | /- The following WHILE program is intended to compute the product of `n` and 41 | `m`, leaving the result in `r`. -/ 42 | 43 | def MUL : program := 44 | assign "r" (λs, 0) ;; 45 | while (λs, s "n" ≠ 0) 46 | (assign "r" (λs, s "r" + s "m") ;; 47 | assign "n" (λs, s "n" - 1)) 48 | 49 | /- 1.2 Prove the correctness of `MUL`, using `vcg`. 50 | 51 | Hint: If a variable `x` does not change in a program, it might be useful to 52 | record this in the invariant, by adding a conjunct `s "x" = x`. -/ 53 | 54 | lemma MUL_correct (n m : ℕ) : 55 | {* λs, s "n" = n ∧ s "m" = m *} MUL {* λs, s "r" = n * m *} := 56 | sorry 57 | 58 | end MUL 59 | 60 | 61 | /- Question 2: Hoare Triples for Total Correctness -/ 62 | 63 | def total_hoare (P : state → Prop) (p : program) (Q : state → Prop) : Prop := 64 | ∀s, P s → ∃t, (p, s) ⟹ t ∧ Q t 65 | 66 | notation `[* ` P : 1 ` *] ` p : 1 ` [* ` Q : 1 ` *]` := total_hoare P p Q 67 | 68 | namespace total_hoare 69 | 70 | variables {P P' P₁ P₂ P₃ Q Q' : state → Prop} {x : string} 71 | variables {S S₀ S₁ S₂ : program} 72 | variables {b : state → Prop} {a : state → ℕ} {s s₀ s₁ s₂ t u : state} 73 | 74 | /- 2.1. Prove the consequence rule. -/ 75 | 76 | lemma consequence (h : [* P *] S [* Q *]) (hp : ∀s, P' s → P s) 77 | (hq : ∀s, Q s → Q' s) : 78 | [* P' *] S [* Q' *] := 79 | sorry 80 | 81 | /- 2.2. Prove the rule for `skip`. 
-/ 82 | 83 | lemma skip_intro : 84 | [* P *] skip [* P *] := 85 | sorry 86 | 87 | /- 2.3. Prove the rule for `assign`. -/ 88 | 89 | lemma assign_intro (P : state → Prop) : 90 | [* λs, P (s{x ↦ a s}) *] assign x a [* P *] := 91 | sorry 92 | 93 | /- 2.4. Prove the rule for `seq`. -/ 94 | 95 | lemma seq_intro (h₁ : [* P₁ *] S₁ [* P₂ *]) 96 | (h₂ : [* P₂ *] S₂ [* P₃ *]) : 97 | [* P₁ *] S₁ ;; S₂ [* P₃ *] := 98 | sorry 99 | 100 | /- 2.5 **optional**. Prove the rule for `ite`. This requires `b s ∨ ¬ b s`. 101 | `classical.em (b s)` provides a proof, even when `b` is not decidable. -/ 102 | 103 | lemma ite_intro (h₁ : [* λs, P s ∧ b s *] S₁ [* Q *]) 104 | (h₂ : [* λs, P s ∧ ¬ b s *] S₂ [* Q *]) : 105 | [* P *] ite b S₁ S₂ [* Q *] := 106 | sorry 107 | 108 | /- 2.6 **optional**. Try to prove the rule for `while`. 109 | 110 | Before we prove our final goal, we introduce an auxiliary proof. This proof 111 | requires well-founded induction. When using `while_intro.aux` as induction 112 | hypothesis we recommend to do it directly after proving that the argument is 113 | less than `n`: 114 | 115 | have ih : ∃u, (while c p, t) ⟹ u ∧ I u ∧ ¬ c u := 116 | have M < n := …, 117 | -- necessary for Lean to figure out the well-founded induction 118 | while_intro.aux M …, 119 | 120 | Similar to `ite`, this requires `c s ∨ ¬ c s`. `classical.em (c s)` provides a 121 | proof. -/ 122 | 123 | lemma while_intro.aux 124 | (I : state → Prop) 125 | (V : state → ℕ) 126 | (h_inv : ∀n, [* λs, I s ∧ b s ∧ V s = n *] S [* λs, I s ∧ V s < n *]) : 127 | ∀n s, V s = n → I s → ∃t, (while b S, s) ⟹ t ∧ I t ∧ ¬ b t 128 | | n s V_eq hs := 129 | sorry 130 | 131 | lemma while_intro 132 | (I : state → Prop) -- invariant in the loop 133 | (V : state → ℕ) -- variant in the loop body (a.k.a. termination measure) 134 | (h_inv : ∀n, [* λs, I s ∧ b s ∧ V s = n *] S [* λs, I s ∧ V s < n *]) : 135 | [* I *] while b S [* λs, I s ∧ ¬ b s *] := 136 | sorry 137 | 138 | end total_hoare 139 | 140 | end LoVe 141 | -------------------------------------------------------------------------------- /lean/love09_hoare_logic_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 9: Hoare Logic -/ 2 | 3 | import .love09_hoare_logic_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Program Verification -/ 9 | 10 | section GAUSS 11 | 12 | /- The following WHILE program is intended to compute the Gaussian sum up to 13 | `n`, leaving the result in `r`. -/ 14 | 15 | def GAUSS : program := 16 | assign "r" (λs, 0) ;; 17 | while (λs, s "n" ≠ 0) 18 | (assign "r" (λs, s "r" + s "n") ;; 19 | assign "n" (λs, s "n" - 1)) 20 | 21 | /- The summation function: -/ 22 | 23 | def sum_upto : ℕ → ℕ 24 | | 0 := 0 25 | | (n + 1) := n + 1 + sum_upto n 26 | 27 | /- 1.1. Prove the correctness of `GAUSS`, using `vcg`. The main challenge is to 28 | figure out which invariant to use for the while loop. The invariant should 29 | capture both the work that has been done already (the intermediate result) and 30 | the work that remains to be done. 
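Concretely, the invariant used in the proof below states that the result accumulated so far, plus the sum that remains to be computed, equals the desired total: `s "r" + sum_upto (s "n") = sum_upto n`.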
-/ 31 | 32 | lemma GAUSS_correct (n : ℕ) : 33 | {* λs, s "n" = n *} GAUSS {* λs, s "r" = sum_upto n *} := 34 | show 35 | {* λs, s "n" = n *} 36 | assign "r" (λs, 0) ;; 37 | while_inv (λs, s "r" + sum_upto (s "n") = sum_upto n) (λs, s "n" ≠ 0) 38 | (assign "r" (λs, s "r" + s "n") ;; 39 | assign "n" (λs, s "n" - 1)) 40 | {* λs, s "r" = sum_upto n *}, 41 | begin 42 | vcg; 43 | simp [sum_upto] { contextual := tt }, 44 | intro s, 45 | cases s "n", 46 | { simp }, 47 | { simp [nat.succ_eq_add_one, sum_upto, mul_assoc] { contextual := tt } } 48 | end 49 | 50 | end GAUSS 51 | 52 | section MUL 53 | 54 | /- The following WHILE program is intended to compute the product of `n` and 55 | `m`, leaving the result in `r`. -/ 56 | 57 | def MUL : program := 58 | assign "r" (λs, 0) ;; 59 | while (λs, s "n" ≠ 0) 60 | (assign "r" (λs, s "r" + s "m") ;; 61 | assign "n" (λs, s "n" - 1)) 62 | 63 | /- 1.2 Prove the correctness of `MUL`, using `vcg`. 64 | 65 | Hint: If a variable `x` does not change in a program, it might be useful to 66 | record this in the invariant, by adding a conjunct `s "x" = x`. -/ 67 | 68 | lemma MUL_correct (n m : ℕ) : 69 | {* λs, s "n" = n ∧ s "m" = m *} MUL {* λs, s "r" = n * m *} := 70 | show 71 | {* λs, s "n" = n ∧ s "m" = m *} 72 | assign "r" (λs, 0) ;; 73 | while_inv (λs, s "m" = m ∧ s "r" + s "n" * s "m" = n * s "m") (λs, s "n" ≠ 0) 74 | (assign "r" (λs, s "r" + s "m") ;; 75 | assign "n" (λs, s "n" - 1)) 76 | {* λs, s "r" = n * m *}, 77 | begin 78 | vcg; 79 | simp { contextual := tt }, 80 | intro s, 81 | cases s "n", 82 | { simp }, 83 | { simp [nat.succ_eq_add_one, add_mul] { contextual := tt } } 84 | end 85 | 86 | end MUL 87 | 88 | 89 | /- Question 2: Hoare Triples for Total Correctness -/ 90 | 91 | def total_hoare (P : state → Prop) (p : program) (Q : state → Prop) : Prop := 92 | ∀s, P s → ∃t, (p, s) ⟹ t ∧ Q t 93 | 94 | notation `[* ` P : 1 ` *] ` p : 1 ` [* ` Q : 1 ` *]` := total_hoare P p Q 95 | 96 | namespace total_hoare 97 | 98 | variables {P P' P₁ P₂ P₃ Q Q' : state → Prop} {x : string} 99 | variables {S S₀ S₁ S₂ : program} 100 | variables {b : state → Prop} {a : state → ℕ} {s s₀ s₁ s₂ t u : state} 101 | 102 | /- 2.1. Prove the consequence rule. -/ 103 | 104 | lemma consequence (h : [* P *] S [* Q *]) (hp : ∀s, P' s → P s) 105 | (hq : ∀s, Q s → Q' s) : 106 | [* P' *] S [* Q' *] := 107 | begin 108 | intros s hs, 109 | specialize h s (hp s hs), 110 | cases h with t ht, 111 | use t, 112 | apply and.intro, 113 | { exact and.elim_left ht }, 114 | { exact hq t (and.elim_right ht) } 115 | end 116 | 117 | /- 2.2. Prove the rule for `skip`. -/ 118 | 119 | lemma skip_intro : 120 | [* P *] skip [* P *] := 121 | begin 122 | intros s hs, 123 | use s, 124 | apply and.intro big_step.skip hs 125 | end 126 | 127 | /- 2.3. Prove the rule for `assign`. -/ 128 | 129 | lemma assign_intro (P : state → Prop) : 130 | [* λs, P (s{x ↦ a s}) *] assign x a [* P *] := 131 | begin 132 | intros s hs, 133 | use s{x ↦ a s}, 134 | exact and.intro big_step.assign hs 135 | end 136 | 137 | /- 2.4. Prove the rule for `seq`. -/ 138 | 139 | lemma seq_intro (h₁ : [* P₁ *] S₁ [* P₂ *]) 140 | (h₂ : [* P₂ *] S₂ [* P₃ *]) : 141 | [* P₁ *] S₁ ;; S₂ [* P₃ *] := 142 | begin 143 | intros s hs, 144 | specialize h₁ s hs, 145 | cases h₁ with t h₁, 146 | specialize h₂ t (and.elim_right h₁), 147 | cases h₂ with u h₂, 148 | use u, 149 | apply and.intro, 150 | { exact big_step.seq (and.elim_left h₁) (and.elim_left h₂) }, 151 | { exact and.elim_right h₂ } 152 | end 153 | 154 | /- 2.5 **optional**. Prove the rule for `ite`. 
This requires `b s ∨ ¬ b s`. 155 | `classical.em (b s)` provides a proof, even when `b` is not decidable. -/ 156 | 157 | lemma ite_intro (h₁ : [* λs, P s ∧ b s *] S₁ [* Q *]) 158 | (h₂ : [* λs, P s ∧ ¬ b s *] S₂ [* Q *]) : 159 | [* P *] ite b S₁ S₂ [* Q *] := 160 | begin 161 | intros s hs, 162 | cases classical.em (b s), 163 | { cases h₁ s (and.intro hs h) with t ht, 164 | use t, 165 | apply and.intro, 166 | { exact big_step.ite_true h (and.elim_left ht) }, 167 | { exact and.elim_right ht } }, 168 | { cases h₂ s (and.intro hs h) with t ht, 169 | use t, 170 | apply and.intro, 171 | { exact big_step.ite_false h (and.elim_left ht) }, 172 | { exact and.elim_right ht } } 173 | end 174 | 175 | /- 2.6 **optional**. Try to prove the rule for `while`. 176 | 177 | Before we prove our final goal, we introduce an auxiliary proof. This proof 178 | requires well-founded induction. When using `while_intro.aux` as induction 179 | hypothesis we recommend to do it directly after proving that the argument is 180 | less than `n`: 181 | 182 | have ih : ∃u, (while c p, t) ⟹ u ∧ I u ∧ ¬ c u := 183 | have M < n := …, 184 | -- necessary for Lean to figure out the well-founded induction 185 | while_intro.aux M …, 186 | 187 | Similar to `ite`, this requires `c s ∨ ¬ c s`. `classical.em (c s)` provides a 188 | proof. -/ 189 | 190 | lemma while_intro.aux 191 | (I : state → Prop) 192 | (V : state → ℕ) 193 | (h_inv : ∀n, [* λs, I s ∧ b s ∧ V s = n *] S [* λs, I s ∧ V s < n *]) : 194 | ∀n s, V s = n → I s → ∃t, (while b S, s) ⟹ t ∧ I t ∧ ¬ b t 195 | | n s V_eq hs := 196 | begin 197 | cases classical.em (b s) with hcs hncs, 198 | { have h_inv : ∃ t, (S, s) ⟹ t ∧ I t ∧ V t < n := 199 | h_inv n s (and.intro hs (and.intro hcs V_eq)), 200 | cases h_inv with t ht, 201 | have ih : ∃u, (while b S, t) ⟹ u ∧ I u ∧ ¬ b u := 202 | have V t < n := and.elim_right (and.elim_right ht), 203 | while_intro.aux (V t) t rfl (and.elim_left (and.elim_right ht)), 204 | cases ih with u hu, 205 | use u, 206 | apply and.intro, 207 | { exact big_step.while_true hcs (and.elim_left ht) (and.elim_left hu) }, 208 | { exact and.elim_right hu } }, 209 | { use s, 210 | apply and.intro, 211 | { exact big_step.while_false hncs }, 212 | { exact and.intro hs hncs } } 213 | end 214 | 215 | lemma while_intro 216 | (I : state → Prop) -- invariant in the loop 217 | (V : state → ℕ) -- variant in the loop body (a.k.a. 
termination measure) 218 | (h_inv : ∀n, [* λs, I s ∧ b s ∧ V s = n *] S [* λs, I s ∧ V s < n *]) : 219 | [* I *] while b S [* λs, I s ∧ ¬ b s *] := 220 | begin 221 | intros s hs, 222 | exact while_intro.aux I V h_inv (V s) s rfl hs 223 | end 224 | 225 | end total_hoare 226 | 227 | end LoVe 228 | -------------------------------------------------------------------------------- /lean/love09_hoare_logic_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 9: Hoare Logic -/ 2 | 3 | import .love08_operational_semantics_exercise_sheet 4 | import .love09_hoare_logic_demo 5 | 6 | namespace LoVe 7 | 8 | 9 | /- Question 1: Hoare Logic for Dijkstra's Guarded Command Language -/ 10 | 11 | /- Recall the definition of GCL from exercise 8: -/ 12 | 13 | #check gcl 14 | 15 | namespace gcl 16 | 17 | #check big_step 18 | 19 | /- The definition of Hoare triples for partial correctness is unsurprising: -/ 20 | 21 | def partial_hoare (P : state → Prop) (S : gcl state) (Q : state → Prop) : 22 | Prop := 23 | ∀s t, P s → (S, s) ~~> t → Q t 24 | 25 | local notation `{* ` P : 1 ` *} ` S : 1 ` {* ` Q : 1 ` *}` := 26 | partial_hoare P S Q 27 | 28 | namespace partial_hoare 29 | 30 | variables {P P' P₁ P₂ P₃ Q Q' : state → Prop} {x : string} {a : state → ℕ} 31 | variables {S S₀ S₁ S₂ : gcl state} {Ss : list (gcl state)} 32 | 33 | /- 1.1. Prove the consequence rule. -/ 34 | 35 | lemma consequence (h : {* P *} S {* Q *}) (hp : ∀s, P' s → P s) 36 | (hq : ∀s, Q s → Q' s) : 37 | {* P' *} S {* Q' *} := 38 | sorry 39 | 40 | /- 1.2. Prove the rule for `assign`. -/ 41 | 42 | lemma assign_intro (P : state → Prop) : 43 | {* λs : state, P (s{x ↦ a s}) *} assign x a {* P *} := 44 | sorry 45 | 46 | /- 1.3. Prove the rule for `assert`. -/ 47 | 48 | lemma assert_intro : 49 | {* λs, Q s → P s *} assert Q {* P *} := 50 | sorry 51 | 52 | /- 1.4. Prove the rule for `seq`. -/ 53 | 54 | lemma seq_intro (h₁ : {* P₁ *} S₁ {* P₂ *}) (h₂ : {* P₂ *} S₂ {* P₃ *}) : 55 | {* P₁ *} seq S₁ S₂ {* P₃ *} := 56 | sorry 57 | 58 | /- 1.5. Prove the rule for `choice`. -/ 59 | 60 | lemma choice_intro 61 | (h : ∀i (hi : i < list.length Ss), 62 | {* λs, P s *} list.nth_le Ss i hi {* Q *}) : 63 | {* P *} choice Ss {* Q *} := 64 | sorry 65 | 66 | /- 1.6. State the rule for `loop`. 67 | 68 | lemma loop_intro … : 69 | {* … *} loop p {* … *} := 70 | … -/ 71 | 72 | /- 1.7 **optional**. Prove the rule you stated for `loop`. 73 | 74 | Hint: This one is difficult and requires some generalization, as explained in 75 | Section 5.8 ("Induction Pitfalls") of the lecture notes. -/ 76 | 77 | -- enter your answer to question 1.6 here 78 | -- enter your answer to question 1.7 here 79 | 80 | end partial_hoare 81 | 82 | end gcl 83 | 84 | 85 | /- Question 2: Factorial -/ 86 | 87 | section FACT 88 | 89 | /- The following WHILE program is intended to compute the factorial of `n`, 90 | leaving the result in `r`. -/ 91 | 92 | def FACT : program := 93 | assign "r" (λs, 1) ;; 94 | while (λs, s "n" ≠ 0) 95 | (assign "r" (λs, s "r" * s "n") ;; 96 | assign "n" (λs, s "n" - 1)) 97 | 98 | /- 2.1. Define the factorial function. -/ 99 | 100 | def fact : ℕ → ℕ 101 | := sorry 102 | 103 | /- 2.2. Prove the correctness of `FACT` using `vcg`. 
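One possible route, following the pattern used for `GAUSS_correct` and `MUL_correct` in exercise 9: restate the goal with `show`, replacing `while` by `while_inv` and supplying an invariant that relates the current values of `"r"` and `"n"` to `fact n`. The sketch below only fixes the shape; the invariant and the remaining tactic work are left open:

    lemma FACT_correct (n : ℕ) :
      {* λs, s "n" = n *} FACT {* λs, s "r" = fact n *} :=
    show
      {* λs, s "n" = n *}
      assign "r" (λs, 1) ;;
      while_inv (λs, sorry) (λs, s "n" ≠ 0)
        (assign "r" (λs, s "r" * s "n") ;;
         assign "n" (λs, s "n" - 1))
      {* λs, s "r" = fact n *},
    begin
      vcg; sorry
    end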
-/ 104 | 105 | lemma FACT_correct (n : ℕ) : 106 | {* λs, s "n" = n *} FACT {* λs, s "r" = fact n *} := 107 | sorry 108 | 109 | end FACT 110 | 111 | end LoVe 112 | -------------------------------------------------------------------------------- /lean/love09_hoare_logic_homework_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 9: Hoare Logic -/ 2 | 3 | import .love08_operational_semantics_exercise_sheet 4 | import .love09_hoare_logic_demo 5 | 6 | namespace LoVe 7 | 8 | 9 | /- Question 1: Hoare Logic for Dijkstra's Guarded Command Language -/ 10 | 11 | /- Recall the definition of GCL from exercise 8: -/ 12 | 13 | #check gcl 14 | 15 | namespace gcl 16 | 17 | #check big_step 18 | 19 | /- The definition of Hoare triples for partial correctness is unsurprising: -/ 20 | 21 | def partial_hoare (P : state → Prop) (S : gcl state) (Q : state → Prop) : 22 | Prop := 23 | ∀s t, P s → (S, s) ~~> t → Q t 24 | 25 | local notation `{* ` P : 1 ` *} ` S : 1 ` {* ` Q : 1 ` *}` := 26 | partial_hoare P S Q 27 | 28 | namespace partial_hoare 29 | 30 | variables {P : state → Prop} {S : gcl state} 31 | 32 | /- 1.7 **optional**. Prove the rule you stated for `loop`. 33 | 34 | Hint: This one is difficult and requires some generalization, as explained in 35 | Section 5.8 ("Induction Pitfalls") of the lecture notes. -/ 36 | 37 | lemma loop_intro (h : {* P *} S {* P *}) : 38 | {* P *} loop S {* P *} := 39 | begin 40 | intros s t hs, 41 | generalize eq : (loop S, s) = ps, 42 | intro hst, 43 | induction hst generalizing s; 44 | cases eq, 45 | { assumption }, 46 | { clear eq, rename hst_s s, rename hst_t t, rename hst_u u, 47 | apply hst_ih_hrest t, 48 | apply h s, 49 | repeat { assumption }, 50 | { refl } } 51 | end 52 | 53 | end partial_hoare 54 | 55 | end gcl 56 | 57 | end LoVe 58 | -------------------------------------------------------------------------------- /lean/love10_denotational_semantics_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 10: Denotational Semantics -/ 2 | 3 | import .love10_denotational_semantics_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Monotonicity -/ 9 | 10 | /- Prove the following two lemmas from the lecture. -/ 11 | 12 | lemma monotone_comp {α β : Type} [partial_order α] (f g : α → set (β × β)) 13 | (hf : monotone f) (hg : monotone g) : 14 | monotone (λa, f a ◯ g a) := 15 | sorry 16 | 17 | lemma monotone_restrict {α β : Type} [partial_order α] (f : α → set (β × β)) 18 | (p : β → Prop) (hf : monotone f) : 19 | monotone (λa, f a ⇃ p) := 20 | sorry 21 | 22 | 23 | /- Question 2: Kleene's Theorem -/ 24 | 25 | /- We can compute the fixpoint by iteration by taking the union of all finite 26 | iterations of `f`: 27 | 28 | lfp f = ⋃n, f^^[n] ∅ 29 | 30 | where 31 | 32 | f^^[n] = f ∘ ⋯ ∘ f ∘ id 33 | 34 | iterates the function `f` `n` times. However, the above characterization of 35 | `lfp` only holds for continuous functions, a concept we will introduce below. -/ 36 | 37 | def iterate {α : Type} (f : α → α) : ℕ → α → α 38 | | 0 a := a 39 | | (n + 1) a := f (iterate n a) 40 | 41 | notation f`^^[`n`]` := iterate f n 42 | 43 | /- 2.1. Fill in the missing proofs below. 
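For intuition about the notation introduced above: `f^^[n]` applies `f` to its argument `n` times, so, for instance, `(λk : ℕ, k + 1)^^[3] 0` should reduce to `3` (an illustrative check only, not part of the exercise):

    #reduce (λk : ℕ, k + 1)^^[3] 0   -- expected: 3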
-/ 44 | 45 | def Union {α : Type} (s : ℕ → set α) : set α := 46 | {a | ∃n, a ∈ s n} 47 | 48 | lemma Union_le {α : Type} {s : ℕ → set α} (a : set α) (h : ∀i, s i ≤ a) : 49 | Union s ≤ a := 50 | sorry 51 | 52 | /- A continuous function `f` is a function that commutes with the union of any 53 | monotone sequence `s`: -/ 54 | 55 | def continuous {α : Type} (f : set α → set α) : Prop := 56 | ∀s : ℕ → set α, monotone s → f (Union s) = Union (λn, f (s n)) 57 | 58 | /- We need to prove that each continuous function is monotone. To achieve this, 59 | we will need the following sequence: -/ 60 | 61 | def bi_seq {α : Type} (a₁ a₂ : set α) : ℕ → set α 62 | | 0 := a₁ 63 | | (n + 1) := a₂ 64 | 65 | /- For example, `bi_seq 0 1` is the sequence 0, 1, 1, 1, etc. -/ 66 | 67 | lemma monotone_bi_seq {α : Type} (a₁ a₂ : set α) (h : a₁ ≤ a₂) : 68 | monotone (bi_seq a₁ a₂) 69 | | 0 0 _ := le_refl _ 70 | | 0 (n + 1) _ := h 71 | | (n + 1) (m + 1) _ := le_refl _ 72 | 73 | lemma Union_bi_seq {α : Type} (a₁ a₂ : set α) (ha : a₁ ≤ a₂) : 74 | Union (bi_seq a₁ a₂) = a₂ := 75 | sorry 76 | 77 | lemma monotone_of_continuous {α : Type} (f : set α → set α) 78 | (hf : continuous f) : 79 | monotone f := 80 | sorry 81 | 82 | /- 2.2. Provide the following proof, using a similar case distinction as for 83 | `monotone_bi_seq` above. -/ 84 | 85 | lemma monotone_iterate {α : Type} (f : set α → set α) (hf : monotone f) : 86 | monotone (λn, f^^[n] ∅) 87 | := sorry 88 | 89 | /- 2.3. Prove the main theorem. A proof sketch is given below. 90 | 91 | We break the proof into two proofs of inclusion. 92 | 93 | Case 1. lfp f ≤ Union (λn, f^[n] ∅): The key is to use the lemma `lfp_le` 94 | together with continuity of `f`. 95 | 96 | Case 2. Union (λn, f^[n] ∅) ≤ lfp f: The lemma `Union_le` gives us a natural 97 | number `i`, on which you can perform induction. You will also need the lemma 98 | `lfp_eq` to unfold one iteration of `lfp f`. -/ 99 | 100 | lemma lfp_Kleene {α : Type} (f : set α → set α) (hf : continuous f) : 101 | complete_lattice.lfp f = Union (λn, f^^[n] ∅) := 102 | sorry 103 | 104 | 105 | /- Question 3 (**optional**): Regular Expressions -/ 106 | 107 | inductive regex (α : Type) : Type 108 | | empty {} : regex 109 | | nothing {} : regex 110 | | atom (a : α) : regex 111 | | concat : regex → regex → regex 112 | | alt : regex → regex → regex 113 | | star : regex → regex 114 | 115 | /- 3.1 (**optional**). Define a translation of regular expressions to relations. 116 | The idea is that an atom corresponds to a relation, concatenation corresponds to 117 | composition of relations, and alternation is union. -/ 118 | 119 | def rel_of_regex {α : Type} : regex (set (α × α)) → set (α × α) 120 | | regex.empty := Id α 121 | | _ := sorry 122 | 123 | /- 3.2 (**optional**). Prove the following recursive equation about your 124 | definition. -/ 125 | 126 | lemma rel_of_regex_star {α : Type} (r : regex (set (α × α))) : 127 | rel_of_regex (regex.star r) = 128 | rel_of_regex (regex.alt (regex.concat r (regex.star r)) regex.empty) := 129 | sorry 130 | 131 | end LoVe 132 | -------------------------------------------------------------------------------- /lean/love10_denotational_semantics_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 10: Denotational Semantics -/ 2 | 3 | import .love10_denotational_semantics_demo 4 | 5 | namespace LoVe 6 | 7 | 8 | /- Question 1: Monotonicity -/ 9 | 10 | /- Prove the following two lemmas from the lecture. 
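Both proofs below follow the same pattern: `monotone f` unfolds to `∀a₁ a₂, a₁ ≤ a₂ → f a₁ ≤ f a₂`, and on sets `≤` is inclusion, so after `intros a₁ a₂ ha` the goal is an inclusion between sets of pairs. This can be attacked pointwise with `intros b hb`, after which `cases` takes the witness provided by `hb` apart.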
-/ 11 | 12 | lemma monotone_comp {α β : Type} [partial_order α] (f g : α → set (β × β)) 13 | (hf : monotone f) (hg : monotone g) : 14 | monotone (λa, f a ◯ g a) := 15 | begin 16 | intros a₁ a₂ ha b hb, 17 | cases hb with m hm, 18 | cases hm, 19 | use m, 20 | apply and.intro, 21 | { exact hf _ _ ha hm_left }, 22 | { exact hg _ _ ha hm_right } 23 | end 24 | 25 | lemma monotone_restrict {α β : Type} [partial_order α] (f : α → set (β × β)) 26 | (p : β → Prop) (hf : monotone f) : 27 | monotone (λa, f a ⇃ p) := 28 | begin 29 | intros a₁ a₂ ha b hb, 30 | cases hb, 31 | apply and.intro, 32 | { exact hb_left }, 33 | { apply hf _ _ ha, 34 | exact hb_right } 35 | end 36 | 37 | 38 | /- Question 2: Kleene's Theorem -/ 39 | 40 | /- We can compute the fixpoint by iteration by taking the union of all finite 41 | iterations of `f`: 42 | 43 | lfp f = ⋃n, f^^[n] ∅ 44 | 45 | where 46 | 47 | f^^[n] = f ∘ ⋯ ∘ f ∘ id 48 | 49 | iterates the function `f` `n` times. However, the above characterization of 50 | `lfp` only holds for continuous functions, a concept we will introduce below. -/ 51 | 52 | def iterate {α : Type} (f : α → α) : ℕ → α → α 53 | | 0 a := a 54 | | (n + 1) a := f (iterate n a) 55 | 56 | notation f`^^[`n`]` := iterate f n 57 | 58 | /- 2.1. Fill in the missing proofs below. -/ 59 | 60 | def Union {α : Type} (s : ℕ → set α) : set α := 61 | {a | ∃n, a ∈ s n} 62 | 63 | lemma Union_le {α : Type} {s : ℕ → set α} (a : set α) (h : ∀i, s i ≤ a) : 64 | Union s ≤ a := 65 | begin 66 | intros x hx, 67 | cases hx with i hi, 68 | exact h i hi 69 | end 70 | 71 | /- A continuous function `f` is a function that commutes with the union of any 72 | monotone sequence `s`: -/ 73 | 74 | def continuous {α : Type} (f : set α → set α) : Prop := 75 | ∀s : ℕ → set α, monotone s → f (Union s) = Union (λn, f (s n)) 76 | 77 | /- We need to prove that each continuous function is monotone. To achieve this, 78 | we will need the following sequence: -/ 79 | 80 | def bi_seq {α : Type} (a₁ a₂ : set α) : ℕ → set α 81 | | 0 := a₁ 82 | | (n + 1) := a₂ 83 | 84 | /- For example, `bi_seq 0 1` is the sequence 0, 1, 1, 1, etc. -/ 85 | 86 | lemma monotone_bi_seq {α : Type} (a₁ a₂ : set α) (h : a₁ ≤ a₂) : 87 | monotone (bi_seq a₁ a₂) 88 | | 0 0 _ := le_refl _ 89 | | 0 (n + 1) _ := h 90 | | (n + 1) (m + 1) _ := le_refl _ 91 | 92 | lemma Union_bi_seq {α : Type} (a₁ a₂ : set α) (ha : a₁ ≤ a₂) : 93 | Union (bi_seq a₁ a₂) = a₂ := 94 | begin 95 | apply le_antisymm, 96 | { apply Union_le, 97 | intros n a h, 98 | cases n, 99 | { exact ha h }, 100 | { exact h } }, 101 | { intros a h, 102 | use 1, 103 | exact h } 104 | end 105 | 106 | lemma monotone_of_continuous {α : Type} (f : set α → set α) 107 | (hf : continuous f) : 108 | monotone f := 109 | begin 110 | intros a₁ a₂ ha, 111 | rw [← Union_bi_seq a₁ a₂ ha, hf _ (monotone_bi_seq a₁ a₂ ha)], 112 | { intros a ha, 113 | rw Union, 114 | use 0, 115 | exact ha } 116 | end 117 | 118 | /- 2.2. Provide the following proof, using a similar case distinction as for 119 | `monotone_bi_seq` above. -/ 120 | 121 | lemma monotone_iterate {α : Type} (f : set α → set α) (hf : monotone f) : 122 | monotone (λn, f^^[n] ∅) 123 | | 0 0 _ := le_refl _ 124 | | 0 (m + 1) _ := 125 | assume h, 126 | false.elim 127 | | (n + 1) (m + 1) h := 128 | hf _ _ (monotone_iterate n m (nat.le_of_succ_le_succ h)) 129 | 130 | /- 2.3. Prove the main theorem. A proof sketch is given below. 131 | 132 | We break the proof into two proofs of inclusion. 133 | 134 | Case 1. 
lfp f ≤ Union (λn, f^[n] ∅): The key is to use the lemma `lfp_le` 135 | together with continuity of `f`. 136 | 137 | Case 2. Union (λn, f^[n] ∅) ≤ lfp f: The lemma `Union_le` gives us a natural 138 | number `i`, on which you can perform induction. You will also need the lemma 139 | `lfp_eq` to unfold one iteration of `lfp f`. -/ 140 | 141 | lemma lfp_Kleene {α : Type} (f : set α → set α) (hf : continuous f) : 142 | complete_lattice.lfp f = Union (λn, f^^[n] ∅) := 143 | begin 144 | apply le_antisymm, 145 | { apply complete_lattice.lfp_le _ _ _, 146 | rw [hf], 147 | { apply Union_le, 148 | intros n a hn, 149 | use n + 1, 150 | exact hn }, 151 | { exact monotone_iterate f (monotone_of_continuous f hf) } }, 152 | { apply Union_le (complete_lattice.lfp f), 153 | intro i, 154 | induction i, 155 | { intros a, 156 | exact false.elim }, 157 | { rw [nat.succ_eq_add_one, 158 | complete_lattice.lfp_eq f (monotone_of_continuous f hf)], 159 | exact (monotone_of_continuous f hf) _ _ i_ih } } 160 | end 161 | 162 | 163 | /- Question 3 (**optional**): Regular Expressions -/ 164 | 165 | inductive regex (α : Type) : Type 166 | | empty {} : regex 167 | | nothing {} : regex 168 | | atom (a : α) : regex 169 | | concat : regex → regex → regex 170 | | alt : regex → regex → regex 171 | | star : regex → regex 172 | 173 | /- 3.1 (**optional**). Define a translation of regular expressions to relations. 174 | The idea is that an atom corresponds to a relation, concatenation corresponds to 175 | composition of relations, and alternation is union. -/ 176 | 177 | def rel_of_regex {α : Type} : regex (set (α × α)) → set (α × α) 178 | | regex.empty := Id α 179 | | regex.nothing := ∅ 180 | | (regex.atom r) := r 181 | | (regex.concat r₁ r₂) := rel_of_regex r₁ ◯ rel_of_regex r₂ 182 | | (regex.alt r₁ r₂) := rel_of_regex r₁ ∪ rel_of_regex r₂ 183 | | (regex.star r) := complete_lattice.lfp (λf, (rel_of_regex r ◯ f) ∪ Id α) 184 | 185 | /- 3.2 (**optional**). Prove the following recursive equation about your 186 | definition. -/ 187 | 188 | lemma rel_of_regex_star {α : Type} (r : regex (set (α × α))) : 189 | rel_of_regex (regex.star r) = 190 | rel_of_regex (regex.alt (regex.concat r (regex.star r)) regex.empty) := 191 | begin 192 | apply complete_lattice.lfp_eq _ _, 193 | apply monotone.union, 194 | { apply monotone_comp, 195 | { exact monotone.const _ }, 196 | { exact monotone.id } }, 197 | { exact monotone.const _ } 198 | end 199 | 200 | end LoVe 201 | -------------------------------------------------------------------------------- /lean/love10_denotational_semantics_homework_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 10: Denotational Semantics -/ 2 | 3 | import .love10_denotational_semantics_demo 4 | 5 | namespace LoVe 6 | 7 | /- Denotational semantics are well suited to functional programming. In this 8 | exercise, we will study some representations of functional programs in Lean and 9 | their denotational semantics. -/ 10 | 11 | /- The `nondet` type represents functional programs that can perform 12 | nondeterministic computations: A program can choose between many different 13 | computation paths / return values. Returning no results at all is represented by 14 | `fail`, and nondeterministic choice between two alternatives (identified by the 15 | `bool` values `tt` and `ff`) is represented by `choice`. 
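For example, using the constructors introduced below, a program that nondeterministically returns either `1` or `2` can be written as `choice (λb, if b then pure 1 else pure 2)`, and a program that either fails or returns `0` as `choice (λb, if b then fail else pure 0)` (illustrative values only).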
-/ 16 | 17 | inductive nondet (α : Type) : Type 18 | | pure : α → nondet 19 | | fail {} : nondet 20 | | choice : (bool → nondet) → nondet 21 | 22 | namespace nondet 23 | 24 | 25 | /- Question 1: The `nondet` Monad -/ 26 | 27 | /- The `nondet` inductive type forms a monad. The `pure` operator is 28 | `nondet.pure`; `bind` is as follows: -/ 29 | 30 | def bind {α β : Type} : nondet α → (α → nondet β) → nondet β 31 | | (pure x) f := f x 32 | | fail f := fail 33 | | (choice k) f := choice (λb, bind (k b) f) 34 | 35 | instance : has_pure nondet := { pure := @pure } 36 | instance : has_bind nondet := { bind := @bind } 37 | 38 | /- 1.1. Prove the monadic laws (lecture 6) for `nondet`. 39 | 40 | Hint: To prove `f = g` from `∀x, f x = g x`, use the theorem `funext`. -/ 41 | 42 | lemma pure_bind {α β : Type} (x : α) (f : α → nondet β) : 43 | pure x >>= f = f x := 44 | sorry 45 | 46 | lemma bind_pure {α : Type} : 47 | ∀mx : nondet α, mx >>= pure = mx 48 | := sorry 49 | 50 | lemma bind_assoc {α β γ : Type} : 51 | ∀(mx : nondet α) (f : α → nondet β) (g : β → nondet γ), 52 | ((mx >>= f) >>= g) = (mx >>= (λa, f a >>= g)) 53 | := sorry 54 | 55 | /- The function `portmanteau` computes a portmanteau of two lists: A portmanteau 56 | of `xs` and `ys` has `xs` as a prefix and `ys` as a suffix, and they overlap. We 57 | use `starts_with xs ys` to test that `ys` has `xs` as a prefix. -/ 58 | 59 | def starts_with : list ℕ → list ℕ → bool 60 | | (x :: xs) [] := ff 61 | | [] ys := tt 62 | | (x :: xs) (y :: ys) := (x = y) && starts_with xs ys 63 | 64 | #eval starts_with [1, 2] [1, 2, 3] 65 | #eval starts_with [1, 2, 3] [1, 2] 66 | 67 | def portmanteau : list ℕ → list ℕ → list (list ℕ) 68 | | [] ys := [] 69 | | (x :: xs) ys := 70 | list.map (list.cons x) (portmanteau xs ys) ++ 71 | (if starts_with (x :: xs) ys then [ys] else []) 72 | 73 | /- Here are some examples of portmanteaux: -/ 74 | 75 | #reduce portmanteau [0, 1, 2, 3] [2, 3, 4] 76 | #reduce portmanteau [0, 1] [2, 3, 4] 77 | #reduce portmanteau [0, 1, 2, 1, 2] [1, 2, 1, 2, 3, 4] 78 | 79 | /- 1.2 (**optional**). Translate the `portmanteau` program from the `list` monad 80 | to the `nondet` monad. -/ 81 | 82 | def nondet_portmanteau : list ℕ → list ℕ → nondet (list ℕ) 83 | := sorry 84 | 85 | 86 | /- Question 2: Nondeterminism, Denotationally -/ 87 | 88 | /- 2.1. Give a denotational semantics for `nondet`, mapping it into a `list` of 89 | all results. `pure` returns one result, `fail` returns zero, and `choice` 90 | combines the results of either alternative. -/ 91 | 92 | def list_sem {α : Type} : nondet α → list α 93 | := sorry 94 | 95 | /- Check that the following lines give the same output as for `portmanteau`: -/ 96 | 97 | #reduce list_sem (nondet_portmanteau [0, 1, 2, 3] [2, 3, 4]) 98 | #reduce list_sem (nondet_portmanteau [0, 1] [2, 3, 4]) 99 | #reduce list_sem (nondet_portmanteau [0, 1, 2, 1, 2] [1, 2, 1, 2, 3, 4]) 100 | 101 | /- 2.2. Often, we are not interested in getting all outcomes, just the first 102 | successful one. Give a semantics for `nondet` that produces the first successful 103 | result, if any. -/ 104 | 105 | def option_sem {α : Type} : nondet α → option α 106 | := sorry 107 | 108 | /- 2.3. Prove the theorem `list_option_compat` below, showing that the two 109 | semantics you defined are compatible. 
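One possible approach is structural recursion on `mx`, mirroring the way `list_sem` and `option_sem` are defined. If `option_sem (choice k)` is defined with `<|>`, the helper lemma below matches the `choice` case, where `list_sem` appends the results of the two branches. A sketch of the skeleton only, with all cases left open:

    theorem list_option_compat {α : Type} :
      ∀mx : nondet α, option_sem mx = list.head' (list_sem mx)
    | (pure x) := sorry
    | fail := sorry
    | (choice k) := sorry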
-/ 110 | 111 | lemma head'_orelse_eq_head'_append {α : Type} (xs ys : list α) : 112 | (list.head' xs <|> list.head' ys) = list.head' (xs ++ ys) := 113 | by induction xs; simp 114 | 115 | theorem list_option_compat {α : Type} : 116 | ∀mx : nondet α, option_sem mx = list.head' (list_sem mx) 117 | := sorry 118 | 119 | 120 | /- Question 3 (**optional**). Nondeterminism, Operationally -/ 121 | 122 | /- We can define the following big-step operational semantics for `nondet`: -/ 123 | 124 | inductive big_step {α : Type} : nondet α → α → Prop 125 | | pure {x : α} : 126 | big_step (pure x) x 127 | | choice_l {k : bool → nondet α} {x : α} : 128 | big_step (k ff) x → big_step (choice k) x 129 | | choice_r {k : bool → nondet α} {x : α} : 130 | big_step (k tt) x → big_step (choice k) x 131 | -- there is no case for `fail` 132 | 133 | notation mx `⟹` x := big_step mx x 134 | 135 | /- 3.1 (**optional**). Prove the following lemma. 136 | 137 | The lemma states that `choice` has the semantics of "angelic nondeterminism": If 138 | there is a computational path that leads to some `x`, the `choice` operator will 139 | produce this `x`. -/ 140 | 141 | lemma choice_existential {α : Type} (x : α) (k : bool → nondet α) : 142 | nondet.choice k ⟹ x ↔ ∃b, k b ⟹ x := 143 | sorry 144 | 145 | /- 3.2 (**optional**). Prove the compatibility between denotational and 146 | operational semantics. -/ 147 | 148 | theorem den_op_compat {α : Type} : 149 | ∀(x : α) (mx : nondet α), x ∈ list_sem mx ↔ mx ⟹ x 150 | := sorry 151 | 152 | end nondet 153 | 154 | end LoVe 155 | -------------------------------------------------------------------------------- /lean/love10_denotational_semantics_homework_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Homework 10: Denotational Semantics -/ 2 | 3 | import .love10_denotational_semantics_demo 4 | 5 | namespace LoVe 6 | 7 | /- Denotational semantics are well suited to functional programming. In this 8 | exercise, we will study some representations of functional programs in Lean and 9 | their denotational semantics. -/ 10 | 11 | /- The `nondet` type represents functional programs that can perform 12 | nondeterministic computations: A program can choose between many different 13 | computation paths / return values. Returning no results at all is represented by 14 | `fail`, and nondeterministic choice between two alternatives (identified by the 15 | `bool` values `tt` and `ff`) is represented by `choice`. -/ 16 | 17 | inductive nondet (α : Type) : Type 18 | | pure : α → nondet 19 | | fail {} : nondet 20 | | choice : (bool → nondet) → nondet 21 | 22 | namespace nondet 23 | 24 | 25 | /- Question 1: The `nondet` Monad -/ 26 | 27 | def bind {α β : Type} : nondet α → (α → nondet β) → nondet β 28 | | (pure x) f := f x 29 | | fail f := fail 30 | | (choice k) f := choice (λb, bind (k b) f) 31 | 32 | instance : has_pure nondet := { pure := @pure } 33 | instance : has_bind nondet := { bind := @bind } 34 | 35 | def starts_with : list ℕ → list ℕ → bool 36 | | (x :: xs) [] := ff 37 | | [] ys := tt 38 | | (x :: xs) (y :: ys) := (x = y) && starts_with xs ys 39 | 40 | /- 1.2 (**optional**). Translate the `portmanteau` program from the `list` monad 41 | to the `nondet` monad. 
-/ 42 | 43 | def nondet_portmanteau : list ℕ → list ℕ → nondet (list ℕ) 44 | | [] ys := fail 45 | | (x :: xs) ys := 46 | choice (λb, if b then (if starts_with (x :: xs) ys then pure ys else fail) 47 | else nondet_portmanteau xs ys >>= λzs, pure (list.cons x zs)) 48 | -- this line could also be `else (list.cons x <$> nondet_portmanteau xs ys)` 49 | 50 | 51 | /- Question 2: Nondeterminism, Denotationally -/ 52 | 53 | def list_sem {α : Type} : nondet α → list α 54 | | (pure x) := [x] 55 | | fail := [] 56 | | (choice k) := list_sem (k ff) ++ list_sem (k tt) 57 | 58 | 59 | /- Question 3 (**optional**). Nondeterminism, Operationally -/ 60 | 61 | /- We can define the following big-step operational semantics for `nondet`: -/ 62 | 63 | inductive big_step {α : Type} : nondet α → α → Prop 64 | | pure {x : α} : 65 | big_step (pure x) x 66 | | choice_l {k : bool → nondet α} {x : α} : 67 | big_step (k ff) x → big_step (choice k) x 68 | | choice_r {k : bool → nondet α} {x : α} : 69 | big_step (k tt) x → big_step (choice k) x 70 | -- there is no case for `fail` 71 | 72 | notation mx `⟹` x := big_step mx x 73 | 74 | /- 3.1 (**optional**). Prove the following lemma. 75 | 76 | The lemma states that `choice` has the semantics of "angelic nondeterminism": If 77 | there is a computational path that leads to some `x`, the `choice` operator will 78 | produce this `x`. -/ 79 | 80 | lemma choice_existential {α : Type} (x : α) (k : bool → nondet α) : 81 | nondet.choice k ⟹ x ↔ ∃b, k b ⟹ x := 82 | begin 83 | apply iff.intro, 84 | { intro h, 85 | cases h, 86 | { use ff, 87 | assumption }, 88 | { use tt, 89 | assumption } }, 90 | { intro h, 91 | cases h, 92 | cases h_w, 93 | { apply big_step.choice_l, 94 | assumption }, 95 | { apply big_step.choice_r, 96 | assumption } } 97 | end 98 | 99 | /- 3.2 (**optional**). Prove the compatibility between denotational and 100 | operational semantics. 
-/ 101 | 102 | theorem den_op_compat {α : Type} : 103 | ∀(x : α) (mx : nondet α), x ∈ list_sem mx ↔ mx ⟹ x 104 | | x (pure x') := 105 | begin 106 | apply iff.intro, 107 | { intro h, 108 | cases h; 109 | cases h, 110 | exact big_step.pure }, 111 | { intro h, 112 | cases h, 113 | apply iff.elim_right list.mem_singleton, 114 | refl } 115 | end 116 | | x fail := 117 | begin 118 | apply iff.intro; 119 | intro h; 120 | cases h 121 | end 122 | | x (choice k) := 123 | begin 124 | apply iff.intro, 125 | { intro h, 126 | cases iff.elim_left list.mem_append h, 127 | { apply big_step.choice_l, 128 | apply iff.elim_left (den_op_compat x (k ff)), 129 | assumption }, 130 | { apply big_step.choice_r, 131 | apply iff.elim_left (den_op_compat x (k tt)), 132 | assumption } }, 133 | { intro h, 134 | cases h; 135 | apply iff.elim_right list.mem_append, 136 | { apply or.intro_left, 137 | apply iff.elim_right (den_op_compat x (k ff)), 138 | assumption }, 139 | { apply or.intro_right, 140 | apply iff.elim_right (den_op_compat x (k tt)), 141 | assumption } } 142 | end 143 | 144 | end nondet 145 | 146 | end LoVe 147 | -------------------------------------------------------------------------------- /lean/love11_logical_foundations_of_mathematics_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 11: Logical Foundations of Mathematics -/ 2 | 3 | import .love11_logical_foundations_of_mathematics_demo 4 | 5 | namespace LoVe 6 | 7 | universe variable u 8 | 9 | set_option pp.beta true 10 | 11 | 12 | /- Question 1: Subtypes -/ 13 | 14 | namespace my_vector 15 | 16 | /- Recall the definition of vectors from the lecture: -/ 17 | 18 | #check vector 19 | 20 | /- The following function adds two lists of integers elementwise. If one 21 | function is longer than the other, the tail of the longer function is 22 | truncated. -/ 23 | 24 | def list_add : list ℤ → list ℤ → list ℤ 25 | | [] [] := [] 26 | | (x :: xs) (y :: ys) := (x + y) :: list_add xs ys 27 | | [] (y :: ys) := [] 28 | | (x :: xs) [] := [] 29 | 30 | /- 1.1. Show that if the lists have the same length, the resulting list also has 31 | that length. -/ 32 | 33 | lemma length_list_add : 34 | ∀(xs : list ℤ) (ys : list ℤ) (h : list.length xs = list.length ys), 35 | list.length (list_add xs ys) = list.length xs 36 | | [] [] := 37 | sorry 38 | | (x :: xs) (y :: ys) := 39 | sorry 40 | | [] (y :: ys) := 41 | sorry 42 | | (x :: xs) [] := 43 | sorry 44 | 45 | /- 1.2. Define componentwise addition on vectors using `list_add` and 46 | `length_list_add`. -/ 47 | 48 | def add {n : ℕ} : vector ℤ n → vector ℤ n → vector ℤ n := 49 | sorry 50 | 51 | /- 1.3. Show that `list_add` and `add` are commutative. -/ 52 | 53 | lemma list_add_comm : 54 | ∀(xs : list ℤ) (ys : list ℤ), list_add xs ys = list_add ys xs 55 | sorry 56 | 57 | lemma add_comm {n : ℕ} (x y : vector ℤ n) : 58 | add x y = add y x := 59 | sorry 60 | 61 | end my_vector 62 | 63 | 64 | /- Question 2: Integers as Quotients -/ 65 | 66 | /- Recall the construction of integers from the lecture: -/ 67 | 68 | #check myℤ.rel 69 | #check rel_iff 70 | #check myℤ 71 | 72 | /- 2.1. Define negation using `quotient.lift`. -/ 73 | 74 | def neg : myℤ → myℤ := 75 | sorry 76 | 77 | /- 2.2. Prove the following lemmas. 
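One possible route: `neg_mk` should follow by computation (`refl`) once `neg` is defined via `quotient.lift`, and for `myℤ.neg_neg` the lemma `quotient.induction_on` reduces a statement about an arbitrary element of `myℤ` to one about a representative pair, to which `neg_mk` applies. A sketch of the shape only, with the actual reasoning left open:

    lemma myℤ.neg_neg (a : myℤ) :
      neg (neg a) = a :=
    quotient.induction_on a (λpn, sorry)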
-/ 78 | 79 | lemma neg_mk (p n : ℕ) : 80 | neg ⟦(p, n)⟧ = ⟦(n, p)⟧ := 81 | sorry 82 | 83 | lemma myℤ.neg_neg (a : myℤ) : 84 | neg (neg a) = a := 85 | sorry 86 | 87 | 88 | /- Question 3: Nonempty Types -/ 89 | 90 | /- In the lecture, we saw the inductive predicate `nonempty` that states that a 91 | type has at least one element: -/ 92 | 93 | #print nonempty 94 | 95 | /- 3.1. The purpose of this exercise is to think about what would happen if all 96 | types had at least one element. To investigate this, we introduce this fact as 97 | an axiom as follows. Introducing axioms should be generally avoided or done 98 | with great care, since they can easily lead to contradictions, as we will 99 | see. -/ 100 | 101 | axiom sort_nonempty (α : Sort u) : 102 | nonempty α 103 | 104 | /- This axiom gives us a fact `sort_nonempty` without having to prove it. It 105 | resembles a lemma proved by sorry, just without the warning. -/ 106 | 107 | #check sort_nonempty 108 | 109 | /- Prove that this axiom leads to a contradiction, i.e., lets us derive 110 | `false`. -/ 111 | 112 | lemma proof_of_false : false := 113 | sorry 114 | 115 | /- 3.2 (**optional**). Prove that even the following weaker axiom leads to a 116 | contradiction. Of course, you may not use the axiom or the lemma from 3.1. 117 | 118 | Hint: Subtypes can help. -/ 119 | 120 | axiom all_nonempty_Type (α : Type u) : nonempty α 121 | 122 | lemma proof_of_false₂ : false := 123 | sorry 124 | 125 | 126 | /- Question 4 (**optional**): Hilbert Choice -/ 127 | 128 | /- The following command enables noncomputable decidability on every `Prop`. The 129 | `priority 0` attribute ensures this is used only when necessary; otherwise, it 130 | would make some computable definitions noncomputable for Lean. -/ 131 | 132 | local attribute [instance, priority 0] classical.prop_decidable 133 | 134 | /- 4.1 (**optional**). Prove the following lemma. -/ 135 | 136 | lemma exists_minimal_arg.aux (f : ℕ → ℕ) : 137 | ∀x n, f n = x → ∃n, ∀i, f n ≤ f i 138 | | x n eq := 139 | begin 140 | -- this works thanks to `classical.prop_decidable` 141 | by_cases (∃n', f n' < x), 142 | repeat { sorry } 143 | end 144 | 145 | /- Now this interesting lemma falls off: -/ 146 | 147 | lemma exists_minimal_arg (f : ℕ → ℕ) : 148 | ∃n : ℕ, ∀i : ℕ, f n ≤ f i := 149 | exists_minimal_arg.aux f _ 0 rfl 150 | 151 | /- 4.2 (**optional**). Use what you learned in the lecture notes to define the 152 | following function, which returns the (or an) index of the minimal element in 153 | `f`'s image. -/ 154 | 155 | noncomputable def minimal_arg (f : ℕ → ℕ) : ℕ := 156 | sorry 157 | 158 | /- 4.3 (**optional**). Prove the following characteristic lemma about your 159 | definition. -/ 160 | 161 | lemma minimal_arg_spec (f : ℕ → ℕ) : 162 | ∀i : ℕ, f (minimal_arg f) ≤ f i := 163 | sorry 164 | 165 | end LoVe 166 | -------------------------------------------------------------------------------- /lean/love11_logical_foundations_of_mathematics_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 11: Logical Foundations of Mathematics -/ 2 | 3 | import .love11_logical_foundations_of_mathematics_demo 4 | 5 | namespace LoVe 6 | 7 | universe variable u 8 | 9 | set_option pp.beta true 10 | 11 | 12 | /- Question 1: Subtypes -/ 13 | 14 | namespace my_vector 15 | 16 | /- Recall the definition of vectors from the lecture: -/ 17 | 18 | #check vector 19 | 20 | /- The following function adds two lists of integers elementwise. 
If one 21 | function is longer than the other, the tail of the longer function is 22 | truncated. -/ 23 | 24 | def list_add : list ℤ → list ℤ → list ℤ 25 | | [] [] := [] 26 | | (x :: xs) (y :: ys) := (x + y) :: list_add xs ys 27 | | [] (y :: ys) := [] 28 | | (x :: xs) [] := [] 29 | 30 | /- 1.1. Show that if the lists have the same length, the resulting list also has 31 | that length. -/ 32 | 33 | lemma length_list_add : 34 | ∀(xs : list ℤ) (ys : list ℤ) (h : list.length xs = list.length ys), 35 | list.length (list_add xs ys) = list.length xs 36 | | [] [] := 37 | by simp [list_add] 38 | | (x :: xs) (y :: ys) := 39 | begin 40 | simp [list_add, length], 41 | intro h, 42 | rw length_list_add xs ys h 43 | end 44 | | [] (y :: ys) := 45 | begin 46 | intro h, 47 | cases h 48 | end 49 | | (x :: xs) [] := 50 | begin 51 | intro h, 52 | cases h 53 | end 54 | 55 | /- 1.2. Define componentwise addition on vectors using `list_add` and 56 | `length_list_add`. -/ 57 | 58 | def add {n : ℕ} : vector ℤ n → vector ℤ n → vector ℤ n := 59 | λx y, subtype.mk (list_add (subtype.val x) (subtype.val y)) 60 | begin 61 | rw length_list_add, 62 | { exact subtype.property x }, 63 | { rw [subtype.property x, subtype.property y] } 64 | end 65 | 66 | /- 1.3. Show that `list_add` and `add` are commutative. -/ 67 | 68 | lemma list_add_comm : 69 | ∀(xs : list ℤ) (ys : list ℤ), list_add xs ys = list_add ys xs 70 | | [] [] := by refl 71 | | (x :: xs) (y :: ys) := by simp [list_add]; rw list_add_comm 72 | | [] (y :: ys) := by refl 73 | | (x :: xs) [] := by refl 74 | 75 | lemma add_comm {n : ℕ} (x y : vector ℤ n) : 76 | add x y = add y x := 77 | begin 78 | apply subtype.eq, 79 | simp [add], 80 | apply list_add_comm 81 | end 82 | 83 | end my_vector 84 | 85 | 86 | /- Question 2: Integers as Quotients -/ 87 | 88 | /- Recall the construction of integers from the lecture: -/ 89 | 90 | #check myℤ.rel 91 | #check rel_iff 92 | #check myℤ 93 | 94 | /- 2.1. Define negation using `quotient.lift`. -/ 95 | 96 | def neg : myℤ → myℤ := 97 | quotient.lift (λpn, ⟦(prod.snd pn, prod.fst pn)⟧) 98 | begin 99 | intros a b h, 100 | cases a, 101 | cases b, 102 | apply quotient.sound, 103 | simp [rel_iff] at h ⊢, 104 | linarith 105 | end 106 | 107 | /- 2.2. Prove the following lemmas. -/ 108 | 109 | lemma neg_mk (p n : ℕ) : 110 | neg ⟦(p, n)⟧ = ⟦(n, p)⟧ := 111 | by refl 112 | 113 | lemma myℤ.neg_neg (a : myℤ) : 114 | neg (neg a) = a := 115 | begin 116 | apply quotient.induction_on a, 117 | intro a, 118 | cases a, 119 | simp [neg_mk] 120 | end 121 | 122 | 123 | /- Question 3: Nonempty Types -/ 124 | 125 | /- In the lecture, we saw the inductive predicate `nonempty` that states that a 126 | type has at least one element: -/ 127 | 128 | #print nonempty 129 | 130 | /- 3.1. The purpose of this exercise is to think about what would happen if all 131 | types had at least one element. To investigate this, we introduce this fact as 132 | an axiom as follows. Introducing axioms should be generally avoided or done 133 | with great care, since they can easily lead to contradictions, as we will 134 | see. -/ 135 | 136 | axiom sort_nonempty (α : Sort u) : 137 | nonempty α 138 | 139 | /- This axiom gives us a fact `sort_nonempty` without having to prove it. It 140 | resembles a lemma proved by sorry, just without the warning. -/ 141 | 142 | #check sort_nonempty 143 | 144 | /- Prove that this axiom leads to a contradiction, i.e., lets us derive 145 | `false`. 
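The key observation, used in both proofs below, is that `false` itself lives in `Prop`, hence in `Sort u`, so the axiom applies to it and yields a proof of `nonempty false`; extracting the witness, either with `classical.choice` or by case analysis on the `nonempty` value, produces the desired proof of `false`.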
-/ 146 | 147 | lemma proof_of_false : 148 | false := 149 | by exact classical.choice (sort_nonempty false) 150 | 151 | -- alternative proof: 152 | lemma proof_of_false' : 153 | false := 154 | begin 155 | cases sort_nonempty false with h, 156 | exact h 157 | end 158 | 159 | /- 3.2 (**optional**). Prove that even the following weaker axiom leads to a 160 | contradiction. Of course, you may not use the axiom or the lemma from 3.1. 161 | 162 | Hint: Subtypes can help. -/ 163 | 164 | axiom all_nonempty_Type (α : Type u) : 165 | nonempty α 166 | 167 | lemma proof_of_false₂ : false := 168 | begin 169 | let t : Type := {a : ℕ // false}, 170 | have h : nonempty t := sort_nonempty t, 171 | let x : t := classical.choice h, 172 | exact subtype.property x 173 | end 174 | 175 | -- alternative proof: 176 | lemma proof_of_false₂' : false := 177 | begin 178 | let t : Type := {a : ℕ // false}, 179 | cases all_nonempty_Type t with x, 180 | exact subtype.property x 181 | end 182 | 183 | 184 | /- Question 4 (**optional**): Hilbert Choice -/ 185 | 186 | /- The following command enables noncomputable decidability on every `Prop`. The 187 | `priority 0` attribute ensures this is used only when necessary; otherwise, it 188 | would make some computable definitions noncomputable for Lean. -/ 189 | 190 | local attribute [instance, priority 0] classical.prop_decidable 191 | 192 | /- 4.1 (**optional**). Prove the following lemma. -/ 193 | 194 | lemma exists_minimal_arg.aux (f : ℕ → ℕ) : 195 | ∀x n, f n = x → ∃n, ∀i, f n ≤ f i 196 | | x n eq := 197 | begin 198 | -- this works thanks to `classical.prop_decidable` 199 | by_cases (∃n', f n' < x), 200 | { cases h with n' h, 201 | exact exists_minimal_arg.aux _ n' rfl }, 202 | { have h' : ∀n', x ≤ f n', 203 | { intro n', 204 | apply le_of_not_gt _, 205 | intro h', 206 | apply h, 207 | use n', 208 | exact h' }, 209 | apply exists.intro n, 210 | rw eq, 211 | exact h' } 212 | end 213 | 214 | /- Now this interesting lemma falls off: -/ 215 | 216 | lemma exists_minimal_arg (f : ℕ → ℕ) : 217 | ∃n : ℕ, ∀i : ℕ, f n ≤ f i := 218 | exists_minimal_arg.aux f _ 0 rfl 219 | 220 | /- 4.2 (**optional**). Use what you learned in the lecture notes to define the 221 | following function, which returns the (or an) index of the minimal element in 222 | `f`'s image. -/ 223 | 224 | noncomputable def minimal_arg (f : ℕ → ℕ) : ℕ := 225 | classical.some (exists_minimal_arg f) 226 | 227 | /- 4.3 (**optional**). Prove the following characteristic lemma about your 228 | definition. -/ 229 | 230 | lemma minimal_arg_spec (f : ℕ → ℕ) : 231 | ∀i : ℕ, f (minimal_arg f) ≤ f i := 232 | classical.some_spec (exists_minimal_arg f) 233 | 234 | end LoVe 235 | -------------------------------------------------------------------------------- /lean/love12_basic_mathematical_structures_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 12: Basic Mathematical Structures -/ 2 | 3 | import .lovelib 4 | import .love12_basic_mathematical_structures_demo 5 | 6 | namespace LoVe 7 | 8 | set_option pp.beta true 9 | 10 | 11 | /- Question 1: Type Classes -/ 12 | 13 | namespace btree 14 | 15 | /- Recall the datatype `btree` we introduced earlier: -/ 16 | 17 | #check btree 18 | 19 | /- The following function takes two trees and attaches copies of the second 20 | tree to each leaf of the first tree. 
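For example, `append (node 1 empty empty) (node 2 empty empty)` attaches a copy of the second tree to both leaves of the first tree, yielding `node 1 (node 2 empty empty) (node 2 empty empty)`, as the `#reduce` command below confirms.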
-/ 21 | 22 | def append {α : Type} : btree α -> btree α -> btree α 23 | | empty y := y 24 | | (node a x₁ x₂) y := node a (append x₁ y) (append x₂ y) 25 | 26 | #reduce append (node 1 empty empty) (node 2 empty empty) 27 | 28 | /- 1.1. Prove the following two lemmas by induction on `x`. -/ 29 | 30 | lemma append_assoc {α : Type} (x y z : btree α) : 31 | append (append x y) z = append x (append y z) := 32 | sorry 33 | 34 | lemma append_empty {α : Type} (x : btree α) : 35 | append x empty = x := 36 | sorry 37 | 38 | /- 1.2. Declare btree an instance of `add_monoid` using `append` as addition 39 | operator. -/ 40 | 41 | #print add_monoid 42 | 43 | instance {α : Type} : add_monoid (btree α) := 44 | sorry 45 | 46 | /- 1.3. Explain why `btree` with `append` as addition cannot be declared an 47 | instance of `add_group`. -/ 48 | 49 | #print add_group 50 | 51 | /- 1.4. (**optional**) Prove the following lemma illustrating why this does not 52 | constitute an `add_group`. -/ 53 | 54 | lemma example_no_inverse : 55 | ∃x : btree ℕ, ∀ y : btree ℕ, append y x ≠ empty := 56 | sorry 57 | 58 | end btree 59 | 60 | 61 | /- Question 2: Multisets and Finsets -/ 62 | 63 | /- Recall the following definitions from the lecture: -/ 64 | 65 | #check nodes_multiset 66 | #check nodes_finset 67 | #check nodes_list 68 | 69 | /- 2.1. Prove that the multiset of nodes does not change when mirroring a tree. 70 | 71 | Hint: Use induction on t and `ac_refl`. -/ 72 | 73 | lemma nodes_multiset_mirror (t : btree ℕ) : 74 | nodes_multiset (mirror t) = nodes_multiset t := 75 | sorry 76 | 77 | /- 2.2. Prove that the finset of nodes does not change when mirroring a tree. 78 | 79 | Hint: Use induction on t and `ac_refl`. -/ 80 | 81 | example (t : btree ℕ) : 82 | nodes_finset (mirror t) = nodes_finset t := 83 | sorry 84 | 85 | /- 2.3. Prove that this does not hold for the list of nodes by providing an 86 | example of a btree `t` for which `nodes_list t ≠ nodes_list (mirror t)`. 87 | 88 | Hint: If you define a suitable counterexample, the proof below will succeed 89 | without modifying it. -/ 90 | 91 | def counterexample : btree ℕ := 92 | sorry 93 | 94 | #reduce nodes_list counterexample 95 | #reduce nodes_list (mirror counterexample) 96 | 97 | example : 98 | ∃t : btree ℕ, nodes_list t ≠ nodes_list (mirror t) := 99 | begin 100 | use counterexample, 101 | exact dec_trivial 102 | end 103 | 104 | end LoVe 105 | -------------------------------------------------------------------------------- /lean/love12_basic_mathematical_structures_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 12: Basic Mathematical Structures -/ 2 | 3 | import .lovelib 4 | import .love12_basic_mathematical_structures_demo 5 | 6 | namespace LoVe 7 | 8 | set_option pp.beta true 9 | 10 | 11 | /- Question 1: Type Classes -/ 12 | 13 | namespace btree 14 | 15 | /- Recall the datatype `btree` we introduced earlier: -/ 16 | 17 | #check btree 18 | 19 | /- The following function takes two trees and attaches copies of the second 20 | tree to each leaf of the first tree. -/ 21 | 22 | def append {α : Type} : btree α -> btree α -> btree α 23 | | empty y := y 24 | | (node a x₁ x₂) y := node a (append x₁ y) (append x₂ y) 25 | 26 | #reduce append (node 1 empty empty) (node 2 empty empty) 27 | 28 | /- 1.1. Prove the following two lemmas by induction on `x`. 
-/ 29 | 30 | lemma append_assoc {α : Type} (x y z : btree α) : 31 | append (append x y) z = append x (append y z) := 32 | begin 33 | induction x, 34 | case btree.empty { 35 | refl }, 36 | case btree.node : a x₁ x₂ ih₁ ih₂ { 37 | simp [append, ih₁, ih₂] } 38 | end 39 | 40 | lemma append_empty {α : Type} (x : btree α) : 41 | append x empty = x := 42 | begin 43 | induction x, 44 | case btree.empty { 45 | refl }, 46 | case btree.node : a x₁ x₂ ih₁ ih₂ { 47 | simp [append, ih₁, ih₂] } 48 | end 49 | 50 | /- 1.2. Declare btree an instance of `add_monoid` using `append` as addition 51 | operator. -/ 52 | 53 | #print add_monoid 54 | 55 | instance {α : Type} : add_monoid (btree α) := 56 | { add := append, 57 | add_assoc := append_assoc, 58 | zero := empty, 59 | add_zero := append_empty, 60 | zero_add := begin intro x, refl end } 61 | 62 | /- 1.3. Explain why `btree` with `append` as addition cannot be declared an 63 | instance of `add_group`. -/ 64 | 65 | #print add_group 66 | 67 | /- If `t₁` is a non-empty tree, `append t₁ t₂` will always yield a nonempty 68 | tree. Therefore, there is no inverse of a non-empty tree. No matter what we 69 | choose `neg` to be, we will not be able to prove `add_left_neg`. -/ 70 | 71 | /- 1.4. (**optional**) Prove the following lemma illustrating why this does not 72 | constitute an `add_group`. -/ 73 | 74 | lemma example_no_inverse : 75 | ∃x : btree ℕ, ∀ y : btree ℕ, append y x ≠ empty := 76 | begin 77 | use node 0 empty empty, 78 | intros y hy, 79 | cases y, 80 | cases hy, 81 | cases hy 82 | end 83 | 84 | end btree 85 | 86 | 87 | /- Question 2: Multisets and Finsets -/ 88 | 89 | /- Recall the following definitions from the lecture: -/ 90 | 91 | #check nodes_multiset 92 | #check nodes_finset 93 | #check nodes_list 94 | 95 | /- 2.1. Prove that the multiset of nodes does not change when mirroring a tree. 96 | 97 | Hint: Use induction on t and `ac_refl`. -/ 98 | 99 | lemma nodes_multiset_mirror (t : btree ℕ) : 100 | nodes_multiset (mirror t) = nodes_multiset t := 101 | begin 102 | induction t with a t₁ t₂ h₁ h₂, 103 | refl, 104 | rw nodes_multiset, 105 | rw mirror, 106 | rw [←h₁, ←h₂], 107 | rw nodes_multiset, 108 | ac_refl 109 | end 110 | 111 | /- 2.2. Prove that the finset of nodes does not change when mirroring a tree. 112 | 113 | Hint: Use induction on t and `ac_refl`. -/ 114 | 115 | example (t : btree ℕ) : 116 | nodes_finset (mirror t) = nodes_finset t := 117 | begin 118 | induction t with a t₁ t₂ h₁ h₂, 119 | refl, 120 | rw nodes_finset, 121 | rw mirror, 122 | rw [←h₁, ←h₂], 123 | rw nodes_finset, 124 | ac_refl 125 | end 126 | 127 | /- 2.3. Prove that this does not hold for the list of nodes by providing an 128 | example of a btree `t` for which `nodes_list t ≠ nodes_list (mirror t)`. 129 | 130 | Hint: If you define a suitable counterexample, the proof below will succeed 131 | without modifying it.
-/ 132 | 133 | def counterexample : btree ℕ := 134 | node 0 (node 1 empty empty) (node 2 empty empty) 135 | 136 | #reduce nodes_list counterexample 137 | #reduce nodes_list (mirror counterexample) 138 | 139 | example : 140 | ∃t : btree ℕ, nodes_list t ≠ nodes_list (mirror t) := 141 | begin 142 | use counterexample, 143 | exact dec_trivial 144 | end 145 | 146 | end LoVe 147 | -------------------------------------------------------------------------------- /lean/love13_rational_and_real_numbers_exercise_sheet.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 13: Rational and Real Numbers -/ 2 | 3 | import .love05_inductive_predicates_demo 4 | import .love13_rational_and_real_numbers_demo 5 | 6 | namespace LoVe 7 | 8 | set_option pp.beta true 9 | 10 | 11 | /- Question 1: Rationals -/ 12 | 13 | /- 1.1. Prove the following lemma. 14 | 15 | Hint: The lemma `fraction.mk.inj_eq` might be useful. -/ 16 | 17 | #check fraction.mk.inj_eq 18 | 19 | lemma fraction.ext (a b : fraction) (h : fraction.num a = fraction.num b) 20 | (h': fraction.denom a = fraction.denom b) : 21 | a = b := 22 | sorry 23 | 24 | /- 1.2. Extending the `fraction.has_mul` instance from the lecture, declare 25 | `fraction` an instance of `semigroup`. 26 | 27 | Hint: Use the lemma `fraction.ext` above, and possibly `fraction.mul_num`, and 28 | `fraction.mul_denom`. -/ 29 | 30 | #check fraction.ext 31 | #check fraction.mul_num 32 | #check fraction.mul_denom 33 | 34 | instance fraction.semigroup : semigroup fraction := 35 | { mul_assoc := 36 | sorry, 37 | ..fraction.has_mul } 38 | 39 | /- 1.3. Extending the `myℚ.has_mul` instance from the lecture, declare `myℚ` an 40 | instance of `semigroup`. 41 | 42 | Hint: The lemma `quotient.induction_on₃` might be useful. -/ 43 | 44 | #check quotient.induction_on₃ 45 | 46 | instance myℚ.semigroup : semigroup myℚ := 47 | { mul_assoc := 48 | sorry, 49 | ..myℚ.has_mul } 50 | 51 | 52 | /- Question 2: Structural Induction on Paper -/ 53 | 54 | /- This and the next question will exercise your understanding of induction, 55 | especially if you need to perform induction proofs on a whiteboard (or on paper 56 | at the exam). 57 | 58 | Guidelines for paper proofs: 59 | 60 | We expect detailed, rigorous, mathematical proofs. You are welcome to use 61 | standard mathematical notation or Lean structured commands (e.g., `assume`, 62 | `have`, `show`, `calc`). You can also use tactical proofs (e.g., `intro`, 63 | `apply`), but then please indicate some of the intermediate goals, so that we 64 | can follow the chain of reasoning. 65 | 66 | Major proof steps, including applications of induction and invocation of the 67 | induction hypothesis, must be stated explicitly. For each case of a proof by 68 | induction, you must list the inductive hypotheses assumed (if any) and the goal 69 | to be proved. Minor proof steps corresponding to `refl`, `simp`, or `linarith` 70 | need not be justified if you think they are obvious (to humans), but you should 71 | say which key lemmas they follow from. You should be explicit whenever you use a 72 | function definition or an introduction rule for an inductive predicate. -/ 73 | 74 | /- 2.1. 
Recall the following inductive datatype for binary trees from lecture 4: 75 | 76 | inductive btree (α : Type) : Type 77 | | empty {} : btree 78 | | node : α → btree → btree → btree 79 | 80 | We defined a function `mirror` on these binary trees as follows: 81 | 82 | def mirror {α : Type} : btree α → btree α 83 | | empty := empty 84 | | (node a l r) := node a (mirror r) (mirror l) 85 | 86 | Prove the following lemma by structural induction, as a paper proof: 87 | 88 | lemma mirror_mirror {α : Type} : 89 | ∀t : btree α, mirror (mirror t) = t -/ 90 | 91 | -- enter your "paper" proof here 92 | 93 | /- 2.2. Prove the same lemma in Lean and compare it with your paper proof. -/ 94 | 95 | lemma mirror_mirror₂ {α : Type} : 96 | ∀t : btree α, mirror (mirror t) = t := 97 | sorry 98 | 99 | 100 | /- Question 3: Rule Induction on Paper -/ 101 | 102 | /- 3.1. Recall the following inductive predicate from lecture 5: 103 | 104 | inductive even : ℕ → Prop 105 | | zero : even 0 106 | | add_two : ∀n, even n → even (n + 2) 107 | 108 | Prove the following lemma by rule induction, as a paper proof, following the 109 | guidelines given at the beginning of question 2. This is a good exercise to 110 | develop a deeper understanding of how rule induction works (and is good 111 | practice for the final exam). 112 | 113 | lemma exists_of_even (n : ℕ) (h : even n) : 114 | ∃k : ℕ, n = 2 * k -/ 115 | 116 | -- enter your "paper" proof here 117 | 118 | /- 3.2. Prove the same lemma in Lean and compare it with your paper proof. -/ 119 | 120 | lemma exists_of_even (n : ℕ) (h : even n) : 121 | ∃k : ℕ, n = 2 * k := 122 | sorry 123 | 124 | end LoVe 125 | -------------------------------------------------------------------------------- /lean/love13_rational_and_real_numbers_exercise_solution.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Exercise 13: Rational and Real Numbers -/ 2 | 3 | import .love05_inductive_predicates_demo 4 | import .love13_rational_and_real_numbers_demo 5 | 6 | namespace LoVe 7 | 8 | set_option pp.beta true 9 | 10 | 11 | /- Question 1: Rationals -/ 12 | 13 | /- 1.1. Prove the following lemma. 14 | 15 | Hint: The lemma `fraction.mk.inj_eq` might be useful. -/ 16 | 17 | #check fraction.mk.inj_eq 18 | 19 | lemma fraction.ext (a b : fraction) (h : fraction.num a = fraction.num b) 20 | (h': fraction.denom a = fraction.denom b) : 21 | a = b := 22 | begin 23 | cases a, 24 | cases b, 25 | rw fraction.mk.inj_eq, 26 | exact and.intro h h' 27 | end 28 | 29 | /- 1.2. Extending the `fraction.has_mul` instance from the lecture, declare 30 | `fraction` an instance of `semigroup`. 31 | 32 | Hint: Use the lemma `fraction.ext` above, and possibly `fraction.mul_num`, and 33 | `fraction.mul_denom`. -/ 34 | 35 | #check fraction.ext 36 | #check fraction.mul_num 37 | #check fraction.mul_denom 38 | 39 | instance fraction.semigroup : semigroup fraction := 40 | { mul_assoc := 41 | begin 42 | intros, 43 | apply fraction.ext, 44 | repeat { 45 | simp [fraction.mul_num, fraction.mul_denom], 46 | ac_refl } 47 | end, 48 | ..fraction.has_mul } 49 | 50 | /- 1.3. Extending the `myℚ.has_mul` instance from the lecture, declare `myℚ` an 51 | instance of `semigroup`. 52 | 53 | Hint: The lemma `quotient.induction_on₃` might be useful. 
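The proof below follows exactly this route: `quotient.induction_on₃` replaces the three arbitrary elements of `myℚ` by representative fractions, after which `quotient.sound` reduces the goal to associativity of multiplication on `fraction`, established in question 1.2.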
-/ 54 | 55 | #check quotient.induction_on₃ 56 | 57 | instance myℚ.semigroup : semigroup myℚ := 58 | { mul_assoc := 59 | begin 60 | intros a b c, 61 | apply quotient.induction_on₃ a b c, 62 | intros x y z, 63 | apply quotient.sound, 64 | rw mul_assoc, 65 | end, 66 | ..myℚ.has_mul } 67 | 68 | 69 | /- Question 2: Structural Induction on Paper -/ 70 | 71 | /- This and the next question will exercise your understanding of induction, 72 | especially if you need to perform induction proofs on a whiteboard (or on paper 73 | at the exam). 74 | 75 | Guidelines for paper proofs: 76 | 77 | We expect detailed, rigorous, mathematical proofs. You are welcome to use 78 | standard mathematical notation or Lean structured commands (e.g., `assume`, 79 | `have`, `show`, `calc`). You can also use tactical proofs (e.g., `intro`, 80 | `apply`), but then please indicate some of the intermediate goals, so that we 81 | can follow the chain of reasoning. 82 | 83 | Major proof steps, including applications of induction and invocation of the 84 | induction hypothesis, must be stated explicitly. For each case of a proof by 85 | induction, you must list the inductive hypotheses assumed (if any) and the goal 86 | to be proved. Minor proof steps corresponding to `refl`, `simp`, or `linarith` 87 | need not be justified if you think they are obvious (to humans), but you should 88 | say which key lemmas they follow from. You should be explicit whenever you use a 89 | function definition or an introduction rule for an inductive predicate. -/ 90 | 91 | /- 2.1. Recall the following inductive datatype for binary trees from lecture 4: 92 | 93 | inductive btree (α : Type) : Type 94 | | empty {} : btree 95 | | node : α → btree → btree → btree 96 | 97 | We defined a function `mirror` on these binary trees as follows: 98 | 99 | def mirror {α : Type} : btree α → btree α 100 | | empty := empty 101 | | (node a l r) := node a (mirror r) (mirror l) 102 | 103 | Prove the following lemma by structural induction, as a paper proof: 104 | 105 | lemma mirror_mirror {α : Type} : 106 | ∀t : btree α, mirror (mirror t) = t -/ 107 | 108 | /- We perform the proof by structural induction on `t`. 109 | 110 | Case `empty`: The goal is `mirror (mirror empty) = empty`. This holds by the 111 | definition of `mirror`. 112 | 113 | Case `node a l r`: The induction hypotheses are `mirror (mirror l) = l` and 114 | `mirror (mirror r) = r`. The goal is 115 | `mirror (mirror (node a l r)) = node a l r`. 116 | 117 | We have: 118 | 119 | mirror (mirror (node a l r)) 120 | = mirror (node a (mirror r) (mirror l)) -- by definition of mirror 121 | = node a (mirror (mirror l)) (mirror (mirror r)) -- by definition of mirror 122 | = node a l r -- by the IHs 123 | 124 | QED -/ 125 | 126 | /- 2.2. Prove the same lemma in Lean and compare it with your paper proof. -/ 127 | 128 | lemma mirror_mirror₂ {α : Type} : 129 | ∀t : btree α, mirror (mirror t) = t := 130 | begin 131 | intro t, 132 | induction t with a l r ihl ihr, 133 | { rw mirror, 134 | rw mirror }, 135 | { rw mirror, 136 | rw mirror, 137 | rw ihl, 138 | rw ihr } 139 | end 140 | 141 | 142 | /- Question 3: Rule Induction on Paper -/ 143 | 144 | /- 3.1. Recall the following inductive predicate from lecture 5: 145 | 146 | inductive even : ℕ → Prop 147 | | zero : even 0 148 | | add_two : ∀n, even n → even (n + 2) 149 | 150 | Prove the following lemma by rule induction, as a paper proof, following the 151 | guidelines given at the beginning of question 2. 
This is a good exercise to 152 | develop a deeper understanding of how rule induction works (and is good 153 | practice for the final exam). 154 | 155 | lemma exists_of_even (n : ℕ) (h : even n) : 156 | ∃k : ℕ, n = 2 * k -/ 157 | 158 | /- We perform the proof by rule induction on `h`. 159 | 160 | Case `zero`: The goal is `∃k : ℕ, 0 = 2 * k`. We use k = 0. The remaining goal 161 | is `0 = 2 * 0`, which is obviously true. 162 | 163 | Case `add_two`: The induction hypothesis is `∃k : ℕ, n = 2 * k`. The goal is 164 | `∃k : ℕ, n + 2 = 2 * k`. 165 | 166 | The induction hypothesis gives us a number `k`, such that `n = 2 * k`. We use 167 | `k + 1` to instantiate the existential quantifier in the goal. This yields the 168 | goal `n + 2 = 2 * (k + 1)`. 169 | 170 | Using the fact `n = 2 * k`, we can rewrite the goal into 171 | `(2 * k) + 2 = 2 * (k + 1)`, which is obviously true. QED -/ 172 | 173 | /- 3.2. Prove the same lemma in Lean and compare it with your paper proof. -/ 174 | 175 | lemma exists_of_even (n : ℕ) (h : even n) : 176 | ∃k : ℕ, n = 2 * k := 177 | begin 178 | induction h with n h ih, 179 | { use 0, 180 | refl }, 181 | { cases ih with k hk, 182 | use k + 1, 183 | rw hk, 184 | refl } 185 | end 186 | 187 | end LoVe 188 | -------------------------------------------------------------------------------- /lean/lovelib.lean: -------------------------------------------------------------------------------- 1 | /- LoVe Library -/ 2 | 3 | import tactic.explode 4 | import tactic.find 5 | import tactic.linarith 6 | import tactic.rewrite 7 | import tactic.tidy 8 | import tactic.where 9 | import logic.basic 10 | import algebra 11 | import order 12 | import data.real.basic 13 | 14 | namespace LoVe 15 | 16 | /- Options -/ 17 | 18 | set_option pp.beta true 19 | 20 | 21 | /- Logical connectives -/ 22 | 23 | attribute [pattern] or.intro_left or.intro_right 24 | 25 | meta def tactic.dec_trivial := `[exact dec_trivial] 26 | 27 | @[simp] lemma not_not_iff (a : Prop) [decidable a] : ¬¬ a ↔ a := 28 | by by_cases a; simp [h] 29 | 30 | @[simp] lemma and_imp_distrib (a b c : Prop) : (a ∧ b → c) ↔ (a → b → c) := 31 | iff.intro 32 | (assume h ha hb, h ⟨ha, hb⟩) 33 | (assume h ⟨ha, hb⟩, h ha hb) 34 | 35 | @[simp] lemma or_imp_distrib {a b c : Prop} : a ∨ b → c ↔ (a → c) ∧ (b → c) := 36 | iff.intro 37 | (assume h, 38 | ⟨assume ha, h (or.intro_left _ ha), assume hb, h (or.intro_right _ hb)⟩) 39 | (assume ⟨ha, hb⟩ h, match h with or.inl h := ha h | or.inr h := hb h end) 40 | 41 | @[simp] lemma exists_imp_distrib {α : Sort*} {p : α → Prop} {a : Prop} : 42 | ((∃x, p x) → a) ↔ (∀x, p x → a) := 43 | iff.intro 44 | (assume h hp ha, h ⟨hp, ha⟩) 45 | (assume h ⟨hp, ha⟩, h hp ha) 46 | 47 | lemma and_exists {α : Sort*} {p : α → Prop} {a : Prop} : 48 | (a ∧ (∃x, p x)) ↔ (∃x, a ∧ p x) := 49 | iff.intro 50 | (assume ⟨ha, x, hp⟩, ⟨x, ha, hp⟩) 51 | (assume ⟨x, ha, hp⟩, ⟨ha, x, hp⟩) 52 | 53 | @[simp] lemma exists_false {α : Sort*} : (∃x : α, false) ↔ false := 54 | iff.intro (assume ⟨a, f⟩, f) (assume h, h.elim) 55 | 56 | 57 | /- Reflexive transitive closure of a relation -/ 58 | 59 | inductive refl_trans {α : Sort*} (r : α → α → Prop) (a : α) : α → Prop 60 | | refl {} : refl_trans a 61 | | tail {b c} : refl_trans b → r b c → refl_trans c 62 | 63 | attribute [refl] refl_trans.refl 64 | 65 | namespace refl_trans 66 | 67 | variables {α : Sort*} {r : α → α → Prop} {a b c d : α} 68 | 69 | @[trans] lemma trans (hab : refl_trans r a b) (hbc : refl_trans r b c) : 70 | refl_trans r a c := 71 | begin 72 | induction hbc, 73 | case 
refl_trans.refl { assumption }, 74 | case refl_trans.tail : c d hbc hcd hac { exact hac.tail hcd } 75 | end 76 | 77 | lemma single (hab : r a b) : refl_trans r a b := 78 | refl.tail hab 79 | 80 | lemma head (hab : r a b) (hbc : refl_trans r b c) : refl_trans r a c := 81 | begin 82 | induction hbc, 83 | case refl_trans.refl { exact refl.tail hab }, 84 | case refl_trans.tail : c d hbc hcd hac { exact hac.tail hcd } 85 | end 86 | 87 | lemma head_induction_on {α : Sort*} {r : α → α → Prop} {b : α} 88 | {P : ∀a : α, refl_trans r a b → Prop} {a : α} (h : refl_trans r a b) 89 | (refl : P b refl) 90 | (head : ∀{a c} (h' : r a c) (h : refl_trans r c b), P c h → P a (h.head h')) : 91 | P a h := 92 | begin 93 | induction h generalizing P, 94 | case refl_trans.refl { exact refl }, 95 | case refl_trans.tail : b c hab hbc ih { 96 | apply ih, 97 | show P b _, from head hbc _ refl, 98 | show ∀a a', r a a' → refl_trans r a' b → P a' _ → P a _, 99 | from assume a a' hab hbc, head hab _ } 100 | end 101 | 102 | lemma trans_induction_on {α : Sort*} {r : α → α → Prop} 103 | {P : ∀{a b : α}, refl_trans r a b → Prop} 104 | {a b : α} (h : refl_trans r a b) 105 | (ih₁ : ∀a, @P a a refl) 106 | (ih₂ : ∀{a b} (h : r a b), P (single h)) 107 | (ih₃ : ∀{a b c} (h₁ : refl_trans r a b) (h₂ : refl_trans r b c), P h₁ → P h₂ → 108 | P (h₁.trans h₂)) : 109 | P h := 110 | begin 111 | induction h, 112 | case refl_trans.refl { exact ih₁ a }, 113 | case refl_trans.tail : b c hab hbc ih { 114 | exact ih₃ hab (single hbc) ih (ih₂ hbc) } 115 | end 116 | 117 | lemma lift {β : Sort*} {p : β → β → Prop} (f : α → β) 118 | (h : ∀a b, r a b → p (f a) (f b)) (hab : refl_trans r a b) : 119 | refl_trans p (f a) (f b) := 120 | hab.trans_induction_on 121 | (assume a, refl) 122 | (assume a b, single ∘ h _ _) 123 | (assume a b c _ _, trans) 124 | 125 | lemma mono {p : α → α → Prop} : 126 | (∀a b, r a b → p a b) → refl_trans r a b → refl_trans p a b := 127 | lift id 128 | 129 | lemma refl_trans_refl_trans_eq : refl_trans (refl_trans r) = refl_trans r := 130 | funext $ assume a, funext $ assume b, propext $ 131 | iff.intro 132 | (assume h, begin induction h, { refl }, { transitivity; assumption } end) 133 | (refl_trans.mono (assume a b, single)) 134 | 135 | end refl_trans 136 | 137 | 138 | /- States -/ 139 | 140 | def state := string → ℕ 141 | 142 | def state.update (name : string) (val : ℕ) (s : state) : 143 | state := 144 | λname', if name' = name then val else s name' 145 | 146 | notation s `{` name ` ↦ ` val `}` := state.update name val s 147 | 148 | instance : has_emptyc state := ⟨λ_, 0⟩ 149 | 150 | @[simp] lemma update_apply (name : string) (val : ℕ) 151 | (s : state) : s{name ↦ val} name = val := 152 | if_pos rfl 153 | 154 | @[simp] lemma update_apply_ne (name name' : string) (val : ℕ) 155 | (s : state) (h : name' ≠ name . tactic.dec_trivial) : 156 | s{name ↦ val} name' = s name' := 157 | if_neg h 158 | 159 | @[simp] lemma update_override (name : string) (val₁ val₂ : ℕ) 160 | (s : state) : s{name ↦ val₂}{name ↦ val₁} = s{name ↦ val₁} := 161 | begin 162 | apply funext, 163 | intro name', 164 | by_cases name' = name; 165 | simp [h] 166 | end 167 | 168 | @[simp] lemma update_swap (name₁ name₂ : string) (val₁ val₂ : ℕ) 169 | (s : state) (h : name₁ ≠ name₂ . 
tactic.dec_trivial) : 170 | s{name₂ ↦ val₂}{name₁ ↦ val₁} = 171 | s{name₁ ↦ val₁}{name₂ ↦ val₂} := 172 | begin 173 | apply funext, 174 | intro name', 175 | by_cases name' = name₁; 176 | by_cases name' = name₂; 177 | simp * at * 178 | end 179 | 180 | @[simp] lemma update_id (name : string) (s : state) : 181 | s{name ↦ s name} = s := 182 | begin 183 | apply funext, 184 | intro name', 185 | by_cases name' = name; 186 | simp * at * 187 | end 188 | 189 | example (s : state) : 190 | s{"a" ↦ 0}{"a" ↦ 2} = s{"a" ↦ 2} := 191 | by simp 192 | 193 | example (s : state) : 194 | s{"a" ↦ 0}{"b" ↦ 2} = s{"b" ↦ 2}{"a" ↦ 0} := 195 | by simp 196 | 197 | example (s : state) : 198 | s{"a" ↦ s "a"}{"b" ↦ 0} = s{"b" ↦ 0} := 199 | by simp 200 | 201 | end LoVe 202 | -------------------------------------------------------------------------------- /leanpkg.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "logical_verification_2019" 3 | version = "1.0" 4 | lean_version = "3.4.2" 5 | path = "lean" 6 | 7 | [dependencies] 8 | mathlib = {git = "https://github.com/leanprover-community/mathlib", rev = "81a31ca4a8c0287bf0b0ce40f1a0df16543b7abe"} -------------------------------------------------------------------------------- /logical_verification_in_lean.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blanchette/logical_verification_2019/7c5f1c90f6e5a0b221a02c8fa4e36c0c66393036/logical_verification_in_lean.pdf -------------------------------------------------------------------------------- /md/love02_tactical_proofs.md: -------------------------------------------------------------------------------- 1 | # LoVe Lecture 2: Tactical Proofs 2 | 3 | We see how to prove Lean lemmas using tactics, and we review the most important Lean tactics 4 | 5 | 6 | ## Tactic Mode 7 | 8 | A tactic operates on a proof goal and either solves it or creates new subgoals 9 | 10 | Tactics are a _backward_ (or _bottom-up_) proof mechanism: they start from the goal and break it down 11 | 12 | Multiple tactics in sequence: `begin` _tactic1_`,` …`,` _tacticN_ `end` 13 | 14 | Terminal tactic invocation: `by` _tactic_ 15 | 16 | Tactic composition: _tactic1_ `;` _tactic2_, where _tactic2_ is applied to all subgoals emerging from _tactic1_ 17 | 18 | The `{` … `}` combinator focuses on the first subgoal; the tactic inside must (fully) solve it 19 | 20 | 21 | ## Demo 22 | 23 | [`love02_tactical_proofs_demo.lean`](../lean/love02_tactical_proofs_demo.lean) 24 | 25 | 26 | ## Basic Tactics 27 | 28 | ### `intro`(`s`) 29 | 30 | * `intro` [_name_] 31 | * `intros` [_name1_ … _nameN_] 32 | 33 | Moves `∀`-quantified variables, or the assumptions of implications `→`, from the goal into the goal's hypotheses 34 | 35 | ### `apply` 36 | 37 | * `apply` _lemma_ 38 | 39 | Matches the goal's conclusion with the conclusion of the specified lemma and adds the lemma's hypotheses as new goals 40 | 41 | ### `exact` 42 | 43 | * `exact` _lemma_ 44 | 45 | Matches the goal's conclusion with the specified lemma, closing the goal 46 | 47 | We can often use `apply` in such situations, but `exact` communicates our intentions better 48 | 49 | ### `assumption` 50 | 51 | Finds a hypothesis from the local context that matches the goal's conclusion and applies it to solve the goal 52 | 53 | ### `refl` 54 | 55 | Proves `l = r`, where the two sides are equal up to computation 56 | 57 | Computation means unfolding of definitions, β-reduction (application of λ to 
an argument), projections, `let`, and more 58 | 59 | The calculus of inductive constructions is a _computational logic_: logical formulas have computational content 60 | 61 | ### `ac_refl` 62 | 63 | Proves `l = r`, where the two sides are equal up to associativity and commutativity 64 | 65 | This works for binary operations that are registered as associative and commutative, e.g., `+` and `*` on `ℕ` 66 | 67 | ### `use` 68 | 69 | * `use` [_term_] 70 | 71 | Allows us to supply a witness for an existential quantifier 72 | 73 | 74 | ## Rewriting Tactics 75 | 76 | The tactics below take an optional _position_ as argument: 77 | 78 | * `at` `⊢`: applies to the conclusion 79 | * `at` _hypothesis1_ … _hypothesisN_: applies to the specified hypotheses 80 | * `at` `*`: applies to all possible hypotheses and to the conclusion 81 | 82 | ### `rw` 83 | 84 | * `rw` _lemma_ [`at` _position_] 85 | 86 | Applies a single equation as a left-to-right rewrite rule, once 87 | 88 | To apply an equation right-to-left, prefix its name with `←` 89 | 90 | * `rw` `[`_lemma1_`,` …`, `_lemmaN_`]` [`at` _position_] 91 | 92 | Abbreviates `rw` _lemma1_`,` …`,` `rw` _lemmaN_ 93 | 94 | ### `simp` 95 | 96 | * `simp` [`at` _position_] 97 | 98 | Applies a standard set of rewrite rules (the _simp set_) exhaustively 99 | 100 | The set can be extended using the `@[simp]` attribute 101 | 102 | It is generally more powerful than `rw` because it can rewrite terms containing variables bound by `λ`, `∀`, `∃`, etc. 103 | 104 | * `simp` `[`_lemma1_`,` …`,`_lemmaN_`]` [`at` _position_] 105 | 106 | Same as above, except that the specified lemmas are temporarily added to the simp set 107 | 108 | `*` (as a lemma name) represents all local hypotheses 109 | 110 | `-` in front of a lemma name temporarily removes the lemma from the simp set 111 | 112 | ### `dunfold` 113 | 114 | * `dunfold` _constant1_ … _constantN_ [`at` _position_] 115 | 116 | Expands the definition of one or more constants that are specified without pattern matching (e.g., `not`) 117 | 118 | 119 | ## Induction Tactic 120 | 121 | ### `induction` 122 | 123 | * `induction` _variable_ 124 | 125 | Performs structural induction on the specified variable 126 | 127 | Gives rise to as many subgoals as there are constructors in the definition of the variable's type 128 | 129 | Induction hypotheses are available as hypotheses in the subgoals corresponding to recursive constructors (e.g., `nat.succ`) 130 | 131 | 132 | ## Goal Management Tactics 133 | 134 | ### `rename` 135 | 136 | * `rename` _constant-or-hypothesis_ _new-name_ 137 | 138 | Renames a local constant or hypothesis 139 | 140 | ### `clear` 141 | 142 | * `clear` _constant-or-hypothesis1_ … _constant-or-hypothesisN_ 143 | 144 | Removes the specified local constants and hypotheses, as long as they are not used anywhere else in the goal 145 | 146 | 147 | ### `revert` 148 | 149 | * `revert` _constant-or-hypothesis1_ … _constant-or-hypothesisN_ 150 | 151 | Performs the opposite of `intros`: Moves the specified local constants and hypotheses into the goal’s conclusion using universal quantification (`∀`) for constants and implication (`→`) for hypotheses 152 | 153 | 154 | ## Hints on How to Write Backward Proofs "Mindlessly" 155 | 156 | For logic puzzles, we advocate a "mindless", "video game" style of backward reasoning that relies mostly on `intro`(`s`) and `apply` 157 | 158 | Some heuristics that often work: 159 | 160 | * If the goal's conclusion is an implication `φ → ψ`, invoke `intro hφ` to move `φ` into your hypotheses: `… (hφ : 
φ) ⊢ ψ` 161 | 162 | * If the goal's conclusion is a universal quantification `∀x : σ, φ`, invoke `intro x` to move it into the local context: `… (x : σ) ⊢ φ` 163 | 164 | * Otherwise, look for a hypothesis or a lemma whose conclusion has the same shape as the goal's conclusion (possibly containing variables that can be matched against the goal), and `apply` it; for example, if the goal's conclusion is `⊢ ψ` and you have a hypothesis `hφψ : φ → ψ`, try `apply hφψ` 165 | 166 | * A negated goal `⊢ ¬ φ` is definitionally equal to `⊢ φ → false`, so you can invoke `intro hφ` to produce the subgoal `hφ : φ ⊢ false`; expanding negation's definition by invoking `dunfold not` is often a good strategy 167 | 168 | * Sometimes you can make progress by replacing the goal by `false`, by entering `apply false.elim`; as a next step, you would typically `apply` a hypothesis of the form `φ → false` or `¬ φ` 169 | 170 | * When you face several choices (e.g., between `or.intro_left` and `or.intro_right`), remember which choices you have made, and backtrack when you reach a dead end or have the impression you are not making any progress 171 | 172 | * If you have difficulties carrying out a proof, it can be a good idea to check whether the goal actually is provable under the given assumptions; be also aware that even if you started with a provable lemma statement, it is possible that the goal is not provable (e.g., if you used "unsafe" rules such as `or.intro_left`) 173 | 174 | It is hard to teach some of these things in class; there is no better way for you to learn this than by doing it, hence the importance of the exercises 175 | -------------------------------------------------------------------------------- /md/love04_functional_programming.md: -------------------------------------------------------------------------------- 1 | # LoVe Lecture 4: Functional Programming 2 | 3 | ## Inductive Types 4 | 5 | Recall the definition of type `nat` (= `ℕ`): 6 | 7 | inductive nat : Type 8 | | zero : nat 9 | | succ : nat → nat 10 | 11 | Mottos: 12 | 13 | * **No junk**: The type contains no values beyond those expressible using the constructors 14 | 15 | * **No confusion**: Values built in different ways are different 16 | 17 | For `nat` (= `ℕ`): 18 | 19 | * "No junk" means that there exist no special values like, say, –1 or ε, that cannot be expressed using a finite combination of `zero` and `succ` 20 | 21 | * "No confusion" is what ensures that `zero` ≠ `succ x` 22 | 23 | In addition, inductive types are always finite; `succ (succ (succ …))` is not a value, because there exists no `x` such that `x = succ x` 24 | 25 | 26 | ## Example: Lists 27 | 28 | An inductive polymorphic type constructed from `nil` and `cons`: 29 | 30 | inductive list (α : Type) : Type 31 | | nil {} : list 32 | | cons : α → list → list 33 | 34 | Aliases: 35 | 36 | > `[]` = `nil` 37 | 38 | > _x_ `::` _xs_ = `cons` _x_ _xs_ 39 | 40 | > `[` _x1_, …, _xN_ `]` = `cons` _x1_ (… (`cons` _xN_ `nil`) …) 41 | 42 | Primitive recursion: 43 | 44 | def f : list α → … 45 | | [] := … 46 | | (x :: xs) := … x … xs … (f xs) … 47 | 48 | Structural induction: 49 | 50 | lemma l : 51 | ∀x : list α, … 52 | | [] := … 53 | | (x :: xs) := … x … xs … (l xs) … 54 | 55 | Pattern matching: 56 | 57 | match xs with 58 | | [] := … 59 | | x :: xs := … 60 | end 61 | 62 | 63 | ## General Pattern Matching within Terms 64 | 65 | > `match` _term1_`,` …`,` _termM_ `with` 66 | > 67 | > `|` _pattern11_`,` …`,` _pattern1M_ `:=` _result1_ 68 | > 69 | > ⋮ 70 | > 71 | > `|` _patternN1_`,` …`,` 
_patternNM_ `:=` _resultN_ 72 | > 73 | > `end` 74 | 75 | `match` allows nonrecursive pattern matching in terms 76 | 77 | Example: 78 | 79 | match n, xs with 80 | | 0, _ := … 81 | | n + 1, [] := … 82 | | n + 1, x :: xs := … 83 | end 84 | 85 | In contrast to pattern matching after `lemma` or `def`, the patterns are separated by commas (`,`), so parentheses are optional 86 | 87 | 88 | ## Example: Trees 89 | 90 | Inductive types with constructors taking several recursive arguments define tree-like objects 91 | 92 | _Binary trees_ have nodes with at most two children 93 | 94 | Example: 95 | 96 | inductive btree (α : Type) : Type 97 | | empty {} : btree 98 | | node : α → btree → btree → btree 99 | 100 | The type `aexp` of arithmetic expressions was also an example of a tree data structure 101 | 102 | The nodes of a tree, whether inner nodes or leaf nodes, often carry labels or other annotations 103 | 104 | Inductive trees contain **no infinite branches**, not even cycles 105 | 106 | This is less expressive than pointer- or reference-based data structures (in imperative languages) but easier to reason about 107 | 108 | Recursive definitions (and proofs by induction) work roughly as for lists, but we may need to recurse (or invoke the induction hypothesis) on several child nodes 109 | 110 | 111 | ## New Tactics 112 | 113 | ### `by_cases` 114 | 115 | > `by_cases` _proposition_ 116 | 117 | Performs a case analysis on a proposition 118 | 119 | It is useful to reason about the condition in an `if` condition 120 | 121 | ### `cases` 122 | 123 | > `cases` _variable_ 124 | 125 | Performs a case distinction on the specified variable, giving rise to as many subgoals as there are constructors in the definition of the variable's type. 126 | 127 | Unlike `induction`, it does not produce induction hypotheses 128 | 129 | 130 | ## Demo 131 | 132 | [`love04_functional_programming_demo.lean`](../lean/love04_functional_programming_demo.lean) 133 | -------------------------------------------------------------------------------- /md/love05_inductive_predicates.md: -------------------------------------------------------------------------------- 1 | # LoVe Lecture 5: Inductive Predicates 2 | 3 | We introduce inductive predicates, which correspond to proof trees 4 | 5 | Inductive predicates are reminiscent of the Horn clauses of Prolog, but Lean offers a much stronger logic 6 | 7 | A possible view of Lean: 8 | 9 | > Lean = typed functional programming + logic programming + more logic 10 | 11 | 12 | ## Introductory Examples 13 | 14 | _Inductive predicates_, or (more precisely) _inductively defined propositions_, are familiar from mathematics—e.g. 
15 | 16 | > The set `E` of even natural numbers is defined as the smallest set closed under the following rules: 17 | > * `0 ∈ E` 18 | > * for every `n ∈ ℕ`, if `n ∈ E`, then `n + 2 ∈ E` 19 | 20 | In Lean, we write 21 | 22 | inductive even : ℕ → Prop 23 | | zero : even 0 24 | | add_two : ∀n : ℕ, even n → even (n + 2) 25 | 26 | If this looks familiar, it is because it should 27 | 28 | The command introduces a new unary predicate `even` (or, equivalently, a `ℕ`-indexed family of propositions) 29 | 30 | By Curry–Howard, what we have effectively done is introduce a new unary type constructor, `even` 31 | 32 | `even` is equipped with two constructors, `zero` and `add_two`, which can be used to build proof terms 33 | 34 | `even` can be seen as a tree type, the trees being the corresponding proof terms (or proof trees) 35 | 36 | Thanks to the **no junk** guarantee of inductive definitions, `zero` and `add_two` are the only two ways to construct `even` 37 | 38 | 39 | ## Logical Symbols 40 | 41 | The truth values `false` and `true`, the connectives `∧` and `∨`, the `∃` quantifier, and the equality predicate `=` are all defined as inductive propositions or predicates: 42 | 43 | inductive false : Prop 44 | 45 | inductive true : Prop 46 | | intro : true 47 | 48 | inductive and (a b : Prop) : Prop 49 | | intro : a → b → and 50 | 51 | inductive or (a b : Prop) : Prop 52 | | intro_left : a → or 53 | | intro_right : b → or 54 | 55 | inductive Exists {α : Type} (p : α → Prop) : Prop 56 | | intro : ∀a : α, p a → Exists 57 | 58 | inductive eq {α : Type} : α → α → Prop 59 | | refl (a : α) : eq a a 60 | 61 | The notations `∃x : α, p` and `x = y` are syntactic sugar for `Exists (λx : α, p)` and `eq x y`, respectively 62 | 63 | In contrast, `∀` (= `Π`) and `→` are built directly into the logic 64 | 65 | 66 | ## Introduction and Elimination Rules 67 | 68 | We saw in lecture 2 that the logical connectives and the `∃` quantifier are equipped with introduction and elimination rules 69 | 70 | The same is true for arbitrary inductive predicates `p` 71 | 72 | `p`’s constructors are introduction rules; they typically have the form `∀…, … → p …` and can be used to prove goals of the form `p …` 73 | 74 | Elimination works the other way around: It extracts information from a lemma or 75 | hypothesis of the form `p …` 76 | 77 | Elimination takes various forms: pattern matching (at the top-level of a definition or lemma, or with `match`), the `cases` and `induction` tactics, or custom elimination rules (e.g., `and.elim_left`) 78 | 79 | ## Rule Induction 80 | 81 | Just as we can perform induction on a term, we can perform induction on a proof term 82 | 83 | This is sometimes called _rule induction_, because the induction is on the introduction rules (i.e., the constructors of the proof term) 84 | 85 | Thanks to Curry–Howard, this works as expected 86 | 87 | 88 | ## Rule Inversion 89 | 90 | Often it is convenient to rewrite concrete terms of the form `p (c …)`, where `c` is typically a constructor 91 | 92 | We can state and prove an _inversion rule_ to support such eliminative reasoning 93 | 94 | Format: 95 | 96 | ∀x y, p (c x y) → (∃…, … ∧ …) ∨ … ∨ (∃…, … ∧ …) 97 | 98 | It can be useful to combine introduction and elimination into one lemma, which can be used for rewriting both hypotheses and goals: 99 | 100 | ∀x y, p (c x y) ↔ (∃…, … ∧ …) ∨ … ∨ (∃…, … ∧ …) 101 | 102 | Example: 103 | 104 | ∀n : ℕ, even n ↔ n = 0 ∨ ∃m : ℕ, n = m + 2 ∧ even m 105 | 106 | 107 | ## Induction Pitfalls 108 | 109 | Inductive predicates often have 
parameters that evolve through the induction 110 | 111 | Pattern matching and `cases` handles this gracefully, but some care is necessary with `induction` 112 | 113 | Please read Section 5.8 ("Induction Pitfalls") of the lecture notes for details 114 | 115 | 116 | ## Demo 117 | 118 | [`love05_inductive_predicates_demo.lean`](../lean/love05_inductive_predicates_demo.lean) 119 | -------------------------------------------------------------------------------- /md/love07_metaprogramming.md: -------------------------------------------------------------------------------- 1 | # LoVe Lecture 7: Metaprogramming 2 | 3 | Users can extend Lean with custom monadic tactics and tools 4 | 5 | This kind of programming—programming the prover—is called metaprogramming 6 | 7 | 8 | ## Overview 9 | 10 | Lean’s metaprogramming framework uses mostly the same notions and syntax as Lean’s input language itself 11 | 12 | Abstract syntax trees _reflect_ internal data structures, e.g. for expressions (terms) 13 | 14 | The prover's C++ internals are exposed through Lean interfaces, which we can use for accessing the current context and goal, unifying expressions, querying and modifying the environment, and setting attributes (e.g., `@[simp]`) 15 | 16 | Most of Lean's predefined tactics are implemented in Lean (and not in C++) 17 | 18 | Example applications: 19 | 20 | * **proof goal transformations** (e.g., apply all safe introduction rules for connectives, put the goal in negation normal form) 21 | 22 | * **heuristic proof search** (e.g., apply unsafe introduction rules for connectives and hypotheses with backtracking) 23 | 24 | * **decision procedures** (e.g., for propositional logic, linear arithmetic) 25 | 26 | * **definition generators** (e.g., Haskell-style `derive` for inductive types) 27 | 28 | * **advisor tools** (e.g., lemma finders, counterexample generators) 29 | 30 | * **exporters** (e.g., documentation generators) 31 | 32 | * **ad hoc automation** (to avoid boilerplate or duplication) 33 | 34 | Advantages of Lean's metaprogramming framework: 35 | 36 | * Users (e.g. mathematicians) do not need to learn another programming language to write metaprograms; they can work with the same constructs and notation used to define ordinary objects in the prover's library 37 | 38 | * Everything in that library is available for metaprogramming purposes (e.g. `ℤ`, `list`, algebraic structures) 39 | 40 | * Metaprograms can be written and debugged in the same interactive environment, encouraging a style where formal libraries and supporting automation are developed at the same time 41 | 42 | ## Metaprograms and Metaconstants 43 | 44 | Any executable Lean definition can be used as a metaprogram 45 | 46 | In addition, we can put `meta` in front of a definition to indicate that is a _metadefinition_; these need not terminate but cannot be used in non-`meta` contexts 47 | 48 | Metaprograms (whether defined with `meta` or not) can communicate with Lean through _metaconstants_, which are implemented in C++ and have no logical meaning (i.e., they are opaque names) 49 | 50 | Important types: 51 | 52 | * `tactic`: the tactic monad, which contains the proof state, the environment, etc. 
53 | 54 | * `name`: hierarchical names 55 | 56 | * `expr`: terms, types, proofs are represented as abstract syntax trees 57 | 58 | 59 | ## The Tactic Monad 60 | 61 | Tactics have access to 62 | 63 | * the list of **goals** as metavariables (each metavariables has a type and a local context (hypothesis); they can optionally be instantiated) 64 | 65 | * the **elaborator** (to elaborate expressions and compute their type) 66 | 67 | * the **attributes** (e.g., the list of `simp` rules) 68 | 69 | * the **environment**, containing all declarations and inductive types 70 | 71 | Tactics can also produce trace messages 72 | 73 | The tactic monad is an `alternative`, with `fail` and `<|>` (exercise 6) 74 | 75 | 76 | ## Expressions and Names 77 | 78 | The reflected expression type: 79 | 80 | meta inductive expr : Type 81 | | var {} : nat → expr 82 | | sort {} : level → expr 83 | | const {} : name → list level → expr 84 | | mvar : name → name → expr → expr 85 | | local_const : name → name → binder_info → expr → expr 86 | | app : expr → expr → expr 87 | | lam : name → binder_info → expr → expr → expr 88 | | pi : name → binder_info → expr → expr → expr 89 | | elet : name → expr → expr → expr → expr 90 | | macro : macro_def → list expr → expr 91 | 92 | We can create literal expressions conveniently using backticks (`): 93 | 94 | * Expressions with a single backtick, `(e), must be fully elaborated 95 | 96 | * Expressions with two backticks, ``(e), are pre-expressions: They may contain some holes to be filled in later, based on some context 97 | 98 | * Expressions with three backticks, ```(e), are pre-expressions without name checking 99 | 100 | For names: 101 | 102 | * Names with a single backtick, `n, are not checked for existence 103 | 104 | * Names with two backticks, ``n, are checked 105 | 106 | 107 | ## Demo 108 | 109 | [`love07_metaprogramming_demo.lean`](../lean/love07_metaprogramming_demo.lean) 110 | -------------------------------------------------------------------------------- /md/love10_denotational_semantics.md: -------------------------------------------------------------------------------- 1 | # LoVe Lecture 10: Denotational Semantics 2 | 3 | We review a third way to specify the semantics of a programming language: denotational semantics 4 | 5 | Denotational semantics attempts to directly specify the semantics of programs 6 | 7 | 8 | ## Motivation 9 | 10 | A _denotational semantics_ defines the meaning of each program as a mathematical object: 11 | 12 | ⟦ ⟧ : syntax → semantics 13 | 14 | A key property of denotational semantics is _compositionality_: The meaning of a compound statement should be defined in terms of the meaning of its components 15 | 16 | This disqualifies 17 | 18 | ⟦c⟧ = {st | (c, st.1) ⟹ st.2} 19 | 20 | because operational semantics are not defined in a compositional way 21 | 22 | In short, we want: 23 | 24 | ⟦c ; c'⟧ = … ⟦c⟧ … ⟦c'⟧ … 25 | ⟦if b then c else c'⟧ = … ⟦c⟧ … ⟦c'⟧ … 26 | ⟦while b do c⟧ = … ⟦c⟧ … 27 | 28 | An evaluation function on arithmetic expressions (`eval : aexp → ((string → ℤ) → ℤ)`) is a denotational semantics; now we want the same for imperative programs 29 | 30 | We can represent the semantics of an imperative program as a function from initial state to final state or more generally as a relation between initial state and final state 31 | 32 | From the operational semantics we can derive a relation of type `state × state → Prop` 33 | 34 | We represent this as a mathematical object by collecting the pairs in a set 35 | 36 | For `skip`, `:=`, `;`, and 
`ite`, the denotational semantics is easy: 37 | 38 | def den : program → set (state × state) 39 | | skip := Id state 40 | | (assign n a) := {x | x.2 = x.1{n ↦ a x.1}} 41 | | (seq S₁ S₂) := den S₁ ◯ den S₂ 42 | | (ite b S₁ S₂) := (den S₁ ⇃ b) ∪ (den S₂ ⇃ λs, ¬ b s) 43 | 44 | We write `⟦S⟧` for `den S` 45 | 46 | For `while`, we would like to write: 47 | 48 | | (while b S) := ((den S ◯ den (while b S)) ⇃ b) ∪ (Id state ⇃ λs, ¬ b s) 49 | 50 | but this is not well founded due to the unmodified recursive call to `while b S` 51 | 52 | What we are looking for is a solution for `X` in the equation 53 | 54 | X = ((den S ◯ X) ◯ c) ∪ (Id state ◯ λs, ¬ c s)) 55 | 56 | In other words, we are looking for a fixpoint 57 | 58 | The rest of this lecture is concerned with building a fixpoint operator `lfp` that will allow us to define the while case as well: 59 | 60 | | (while b p) := lfp (λX, ((den p ◯ X) ◯ b) ∪ (Id state ◯ λs, ¬ b s)) 61 | 62 | 63 | ## Fixpoints 64 | 65 | A _fixpoint_ (or _fixed point_) of `f` is a solution for `X` in the equation 66 | 67 | X = f X 68 | 69 | In general, fixpoints may not exist at all (cf. `f = nat.succ`) or there may be many different fixpoints (cf. `f = id`) 70 | 71 | But under some conditions on `f`, a (unique) _least fixpoint_ and a _greatest fixpoint_ are guaranteed to exist 72 | 73 | Consider this _fixpoint equation_: 74 | 75 | X = (λ(p : ℕ → Prop) (n : ℕ), n = 0 ∨ ∃m : ℕ, n = m + 2 ∧ p m) X 76 | = λn : ℕ, n = 0 ∨ ∃m : ℕ, n = m + 2 ∧ X m 77 | 78 | where `X : ℕ → Prop` and `f = λ(p : ℕ → Prop) (n : ℕ), n = 0 ∨ ∃m : ℕ, n = m + 2 ∧ p m` 79 | 80 | The above example admits only one fixpoint; the fixpoint equation uniquely specifies `X` as the set of even numbers 81 | 82 | In general, the least and greatest fixpoint may be different: 83 | 84 | X = id X 85 | = X 86 | 87 | Here, the least fixpoint is `(λ_, False)` and the greatest fixpoint is `(λ_, True)` 88 | 89 | Conventionally, `False < True`, and thus `(λ_, False) < (λ_, True)` 90 | 91 | Similarly, `∅ < @set.univ α` if `α` is inhabited 92 | 93 | **Key observation**: Inductive predicates correspond to least fixpoints, but they are built into Lean's logic (the calculus of inductive constructions) 94 | 95 | We used this observation when we defined the operational semantics as an inductive data type 96 | 97 | 98 | ## Least Fixpoints 99 | 100 | For the semantics of programming languages: 101 | 102 | * `X` will have type `set (state × state)` (or e.g. `state → state → Prop`), representing relations between states 103 | 104 | * `f` will correspond to either taking one extra iteration of the loop (if the condition `b` is true) or the identity (if `b` is false) 105 | 106 | _Kleene fixpoint theorem_: `f^0(∅) ∪ f^1(∅) ∪ f^2(∅) ∪ ... = lfp f` 107 | 108 | The **least fixpoint** corresponds to **finite executions** of a program, which is all we care about 109 | 110 | 111 | ## Monotone Functions 112 | 113 | Let `α` and `β` be types with partial order `≤` 114 | 115 | A function `f : α → β` is _monotone_ if 116 | 117 | a ≤ b → f a ≤ f b for all a, b 118 | 119 | Many operations on sets (e.g. `∪`), relations (e.g. `◯`), and functions (e.g. `const`) are monotone 120 | 121 | The set of monotone functions is also well behaved: The identity function is monotone, the composition of monotone functions is again monotone, etc. 
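For instance, `∪` is monotone in each argument. Here is a minimal Lean sketch of monotonicity in the second argument (not part of the lecture demo; the lemma name `union_mono_right` is ours, and the proof only assumes the basic `set` operations already available through `lovelib.lean`):

    lemma union_mono_right {α : Type} (A s t : set α)
        (hst : s ⊆ t) :
      A ∪ s ⊆ A ∪ t :=
    begin
      intros x hx,
      -- membership in `A ∪ s` reduces to a disjunction
      cases hx with hA hs,
      { exact or.intro_left _ hA },         -- case `x ∈ A`
      { exact or.intro_right _ (hst hs) }   -- case `x ∈ s`
    end

The same elementary style of proof works for the other operations mentioned above
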
122 | 123 | All monotone functions `f : α → α`, where `α` is a **complete lattice**, admit least and greatest fixpoints 124 | 125 | ### Example for a nonmonotone function on sets 126 | 127 | ⎧ s ∪ {a} if a ∉ s 128 | f s = ⎨ 129 | ⎩ ∅ otherwise 130 | 131 | so `∅ ⊆ {a}`, but `f ∅ = {a} ⊈ ∅ = f {a}` 132 | 133 | 134 | ## Complete Lattices 135 | 136 | To define the (least) fixpoints on sets, we only need one operation: intersection `⋂` 137 | 138 | *Complete lattices* capture this concept abstractly 139 | 140 | A complete lattice `α` is an ordered type for which each `set α` has an infimum 141 | 142 | A complete lattice consists of: a partial order `≤ : α → α → Prop` (i.e. reflexive, transitive, and antisymmetric), and an operator `⨅ : set α → α`, called *infimum*, or *greatest lower bound* (*glb*) 143 | 144 | `⨅s` satisfies (and is unique such that): 145 | 146 | * `⨅s` is a lower bound of `s`: 147 | 148 | `⨅s ≤ b` for all `b ∈ s` 149 | 150 | * `⨅s` is the greatest lower bound: 151 | 152 | `b ≤ ⨅s` for all `b`, s.t. `∀x∈s, b ≤ x` 153 | 154 | **Warning:** `⨅s` is not necessarily an element of `s` 155 | 156 | `set α` is an instance w.r.t. `⊆` and `⋂` for all types `α` 157 | 158 | `Prop` is an instance w.r.t. `→` and `∀`, i.e. `⨅s := ∀p ∈ s, p` 159 | 160 | We define: 161 | 162 | lfp f := ⨅{x | f x ≤ x} 163 | 164 | _Knaster-Tarski theorem_: for any monotone function `f`: 165 | 166 | * `lfp f` is a fixpoint: `lfp f = f (lfp f)` 167 | 168 | * `lfp f` is smaller than any other fixpoint: `X = f X → lfp f ≤ X` 169 | 170 | ### Finite Example 171 | 172 | X ⨅{} = ? 173 | / \ ⨅{X} = ? 174 | A B ⨅{A, B} = ? 175 | \ / ⨅{X, A} = ? 176 | Y ⨅{X, A, B, Y} = ? 177 | 178 | ### Other Examples 179 | 180 | enat := ℕ ∪ {∞} 181 | ereal := ℝ ∪ {- ∞, ∞} 182 | … 183 | 184 | For `α` a complete lattice, then also `β → α` is a complete lattice 185 | 186 | For `α`, `β` complete lattices, then also `α × β` is a complete lattice 187 | 188 | ### Non-Examples 189 | 190 | * `ℕ`, `ℤ`, `ℚ`, `ℝ`: no infimum for `∅`, `⨅ℕ`, etc. 191 | 192 | * `erat := ℚ ∪ {- ∞, ∞}`, for example `⨅{q | 2 < q * q} = sqrt 2` is not in `ℚ` 193 | 194 | 195 | ## Demo 196 | 197 | [`love10_denotational_semantics_demo.lean`](../lean/love10_denotational_semantics_demo.lean) 198 | -------------------------------------------------------------------------------- /md/love11_logical_foundations_of_mathematics.md: -------------------------------------------------------------------------------- 1 | # LoVe Lecture 11: Logical Foundations of Mathematics 2 | 3 | We dive deeper into the logical foundations of Lean 4 | 5 | 6 | ## Type universes 7 | 8 | Not only terms have a type, but also types have a type 9 | 10 | For example, according the the Curry-Howard correspondence (PAT principle): 11 | 12 | @and.intro : ∀a b, a → b → a ∧ b 13 | 14 | In this sense, `∀a b, a → b → a ∧ b` is a type, which in turn is of type `Prop`: 15 | 16 | ∀a b, a → b → a ∧ b : Prop 17 | 18 | What is the type of `Prop`? 19 | `Prop` has the same type as virtually all other types we 20 | have constructed so far, namely `Type`: 21 | 22 | Prop : Type 23 | ℕ : Type 24 | 25 | What is the type of `Type`? 
26 | 27 | The typing `Type : Type` would lead to a contradiction, 28 | called Girard's paradox, resembling Russell's paradox 29 | 30 | Type : Type 1 31 | Type 1 : Type 2 32 | Type 2 : Type 3 33 | ⋮ 34 | 35 | Aliases: 36 | 37 | * In fact, `Type` is an abbreviation for `Type 0` 38 | 39 | * We can also write `Sort 0` for `Prop` and `Sort (u + 1)` for `Type u` 40 | 41 | Terminology: 42 | 43 | * `Sort u`, `Type u` and `Prop` are called (*type*) *universes* 44 | 45 | * The `u` in the expression `Sort u` is a *universe level* 46 | 47 | The hierarchy is captured by the following typing judgment: 48 | 49 | —————————————————————————————————— Sort 50 | Γ ⊢ Sort u : Sort (u + 1) 51 | 52 | 53 | ## The Peculiarities of Prop 54 | 55 | `Prop` is different from the other type universes! 56 | 57 | ### Impredicativity 58 | 59 | The function type `σ → τ` is put into the larger one of the 60 | type universes that `σ` and `τ` live in: 61 | 62 | Γ ⊢ σ : Type u Γ ⊢ τ : Type v 63 | —————————————————————————————————— Pi₀ 64 | Γ ⊢ σ → τ : Type (max u v) 65 | 66 | For dependent types this generalizes to: 67 | 68 | Γ ⊢ σ : Type u Γ, x : σ ⊢ τ[x] : Type v 69 | —————————————————————————————————————————————— Pi₁ 70 | Γ ⊢ (Пx : σ, τ[x]) : Type (max u v) 71 | 72 | This behavior of the universes `Type v` is called *predicativity* 73 | 74 | To force expressions such as `∀p : Prop, p → p` 75 | (which is the same as `Пp : Prop, p → p`) 76 | to be of type `Prop` anyway, we have a special typing rule for `Prop`: 77 | 78 | Γ ⊢ σ : Sort u Γ, x : σ ⊢ τ[x] : Prop 79 | ———————————————————————————————————————— Pi₂ 80 | Γ ⊢ Пx : σ, τ[x] : Prop 81 | 82 | This behavior of `Prop` is called *impredicativity* 83 | 84 | The rules `Pi₀`, `Pi₁`, and `Pi₂` can be generalized into one rule: 85 | 86 | Γ ⊢ σ : Sort u Γ, x : σ ⊢ τ[x] : Sort v 87 | ————————————————————————————————————————— Pi 88 | Γ ⊢ Пx : σ, τ[x] : Sort (imax u v) 89 | 90 | where 91 | 92 | imax u 0 = 0 93 | imax u v = max u v if v ≠ 0 94 | 95 | In other systems, such as Agda, all type universes are predicative 96 | 97 | ### Proof Irrelevance 98 | 99 | A second difference between `Prop` and `Type u` is *proof irrelevance*: 100 | 101 | ∀(p : Prop) (h₁ h₂ : p), h₁ = h₂ 102 | 103 | This is called _proof irrelevance_ and makes reasoning about dependent types easier 104 | 105 | When viewing a proposition as a type and a proof as an element of that 106 | type, proof irrelevance means that a proposition is either an empty type or has 107 | exactly one inhabitant 108 | 109 | Proof irrelevance can be proved `by refl` 110 | 111 | In contrast: 112 | 113 | * Agda and Coq are _proof relevant_ by default (but are compatible with proof irrelevance) 114 | 115 | * Homotopy type theory and other _constructive_ or _intuitionistic_ type theories build on data in equality proofs and therefore are incompatible with proof irrelevance 116 | 117 | ### Large and Small Elimination 118 | 119 | A further difference between `Prop` and `Type u` is that `Prop` does not allow 120 | *large elimination*, meaning that it is impossible to extract data from a proof of 121 | a proposition 122 | 123 | This is necessary to allow proof irrelevance 124 | 125 | 126 | ## The Axiom of Choice 127 | 128 | Consider the following inductive predicate: 129 | 130 | inductive nonempty (α : Sort u) : Prop 131 | | intro (val : α) : nonempty 132 | 133 | The predicate states that `α` has at least one element 134 | 135 | To prove `nonempty α`, we must provide an element of `α` to the `intro` rule: 136 | 137 | lemma nat.nonempty : 138 | nonempty 
ℕ := 139 | nonempty.intro 0 140 | 141 | Since `nonempty` lives in `Prop`, large elimination is not available, and 142 | thus we cannot extract the element that was used from a proof of 143 | `nonempty α` 144 | 145 | The axiom of choice allows us to obtain some element of type `α` if we can 146 | show `nonempty α`: 147 | 148 | axiom classical.choice {α : Sort u} : 149 | nonempty α → α 150 | 151 | It will just give us an arbitrary element of `α`; we have no way of knowing whether this is the element that was used to prove `nonempty α` 152 | 153 | The constant `classical.choice` is noncomputable, 154 | one of the reasons why some logicians prefer to work without this axiom 155 | 156 | This principle is not built into the 157 | Lean kernel; it is only an axiom in Lean's core library, giving users the 158 | freedom to work with or without it 159 | 160 | 161 | ## Subtypes 162 | 163 | Subtyping is a mechanism to create new types from existing ones 164 | 165 | Given a predicate on the elements of the original type, 166 | the new type contains only those elements of the original type 167 | that fulfill this property 168 | 169 | **Example 1.** Subtype of full binary trees: 170 | 171 | def full_btree (α : Type) : Type := 172 | { t : btree α // is_full t } 173 | 174 | 175 | **Example 2.** Subtype of lists of a given length: 176 | 177 | def vector (α : Type u) (n : ℕ) : Type := 178 | { l : list α // l.length = n } 179 | 180 | 181 | ## Quotient Types 182 | 183 | Quotients are a powerful construction in mathematics used to construct `ℤ`, `ℚ`, `ℝ`, and many other types 184 | 185 | Just like subtypes, quotient types construct a new type 186 | from an existing type 187 | 188 | Unlike a subtype, a quotient type contains all of the 189 | elements of the underlying type, but some elements that were different in the 190 | underlying type are considered equal in the quotient type 191 | 192 | To define a quotient type, we need to provide a type that it is derived from 193 | and a equivalence relation on it that determines which elements are considered equal 194 | 195 | **Example:** The integers `ℤ` 196 | 197 | Quotient over pairs of natural numbers `ℕ × ℕ` 198 | 199 | A pair `(m, n)` of natural numbers represents the integer `m - n` 200 | * Nonnegative integers `m` can be represented by `(m, 0)` 201 | * Negative integers `-n` can be represented by `(0, n)` 202 | * Many representations of the same integer, e.g., `(2, 1)`, 203 | `(3, 2)`, and `(4, 3)` all represent the integer `1` 204 | 205 | Which equivalence relation can we use? 
206 | * We want two pairs `(k, l)` and `(m, n)` to 207 | be equal when `k - l` and `m - n` yield the same integer 208 | * The condition `k - l = m - n` does not work because the negation on `ℕ` does 209 | not behave like the negation on integers, e.g., `0 - 1 = 0` 210 | * Instead, we use the condition `k + n = m + l`, which contains only addition 211 | 212 | 213 | ## Demo 214 | 215 | [`love11_logical_foundations_of_mathematics_demo.lean`](../lean/love11_logical_foundations_of_mathematics_demo.lean) 216 | -------------------------------------------------------------------------------- /md/love12_basic_mathematical_structures.md: -------------------------------------------------------------------------------- 1 | # LoVe Lecture 12: Basic Mathematical Structures 2 | 3 | Today we introduce definitions and proofs about basic mathematical structures 4 | 5 | 6 | ## Type Classes 7 | 8 | A type class is a collection of abstract constants and their properties 9 | 10 | A type can be declared an instance of a type class by providing concrete definitions for the constants and proving that the properties hold 11 | 12 | We have seen some examples of type classes already: 13 | * `is_associative` / `is_commutative` 14 | * `lawful_monad` 15 | * `complete_lattice` 16 | * `setoid` 17 | 18 | The syntax to define a type class is 19 | 20 | class name_of_type_class (α : Type) := 21 | (constant₁ : type_of_constant₁) 22 | (constant₂ : type_of_constant₂) 23 | ⋮ 24 | (property₁ : statement_of_property₁) 25 | (property₂ : statement_of_property₂) 26 | ⋮ 27 | 28 | To instantiate a type class, we write 29 | 30 | instance : name_of_type_class name_of_type := 31 | { constant₁ := definition_of_constant₁, 32 | constant₂ := definition_of_constant₂, 33 | ⋮ 34 | property₁ := proof_of_property₁, 35 | property₂ := proof_of_property₂, 36 | ⋮ } 37 | 38 | ## Groups 39 | 40 | Mathematicians would define a group as follows: 41 | 42 | > A group is a set `G` with a binary operator `• : G ⨉ G → G` fulfilling the following properties, called group axioms: 43 | > * Associativity: For all `a, b, c ∈ G`, we have `(a • b) • c = a • (b • c)` 44 | > * Identity element: There exists an element `e ∈ G` such that for all `a ∈ G`, we have `e • a = a` 45 | > * Inverse element: For each `a ∈ G`, there exists an inverse element `inv(a) ∈ G` such that `inv(a) • a = e` 46 | 47 | Examples of groups are: 48 | * `ℤ` with `+` 49 | * `ℝ` with `+` 50 | * `ℝ \ {0}` with `*` 51 | 52 | In Lean, a type class for groups can be defined as: 53 | 54 | class group (α : Type) := 55 | (mul : α → α → α) 56 | (mul_assoc : ∀a b c, mul (mul a b) c = mul a (mul b c)) 57 | (one : α) 58 | (one_mul : ∀a, mul one a = a) 59 | (inv : α → α) 60 | (mul_left_inv : ∀a, mul (inv a) a = one) 61 | 62 | In mathlib, however, group is part of a larger hierarchy of algebraic structures: 63 | 64 | Type class | Properties 65 | ------------------------ | ------------ 66 | `semigroup` | associativity 67 | `monoid` | `semigroup` with neutral element 68 | `left_cancel_semigroup` | `semigroup` with `x * a = x * b → a = b` 69 | `right_cancel_semigroup` | `semigroup` with `a * x = b * x → a = b` 70 | `group` | `monoid` with inverse elements 71 | 72 | Most of these structures have commutative versions: `comm_semigroup`, `comm_monoid`, `comm_group` 73 | 74 | The _multiplicative_ structures (over `*`, `1`, `⁻¹`) are copied to produce _additive_ versions (over `+`, `0`, `-`): `add_semigroup`, `add_monoid`, `add_group` `add_comm_semigroup`, … 75 | 76 | 77 | ## Fields 78 | 79 | Mathematicians would define 
a field as follows: 80 | 81 | > A field is a set `F` such that 82 | >* `F` forms a commutative group under an operator `+`, called 83 | > addition, with identity element `0` 84 | >* `F\{0}` forms a commutative group under an operator `*`, called multiplication 85 | >* Multiplication distributes over addition, i.e., 86 | > `a * (b + c) = a * b + a * c` for all `a, b, c ∈ F` 87 | 88 | In mathlib, fields are also part of a larger hierarchy: 89 | 90 | Structure | Properties | Examples 91 | -----------------|------------------------------------------------------------|--------------------- 92 | `semiring` | `monoid` and `add_comm_monoid` with distributivity | `ℝ`, `ℚ`, `ℤ`, `ℕ` 93 | `comm_semiring` | `semiring` with commutativity of `*` | `ℝ`, `ℚ`, `ℤ`, `ℕ` 94 | `ring` | `monoid` and `add_comm_group` with distributivity | `ℝ`, `ℚ`, `ℤ` 95 | `comm_ring` | `ring` with commutativity of `*` | `ℝ`, `ℚ`, `ℤ` 96 | `division_ring` | `ring` with multiplicative inverse `⁻¹` | `ℝ`, `ℚ` 97 | `field` | `division_ring` with commutativity of `*` | `ℝ`, `ℚ` 98 | `discrete_field` | `field` with decidable equality and `∀n, n / 0 = 0` | `ℝ`, `ℚ` 99 | 100 | 101 | ## Coercions 102 | 103 | When dealing with different numbers form `ℕ`, `ℤ`, `ℚ`, and `ℝ` at the same time, we might want to cast from one type to another 104 | 105 | Lean has a mechanism to automatically introduce coercions, represented by `coe` or `↑` 106 | 107 | The coercion operator can be set up to provide implicit coercions between arbitrary types 108 | 109 | Many coercions are already in place, including the following: 110 | * `coe : ℕ → α` casts `ℕ` into another semiring `α` 111 | * `coe : ℤ → α` casts `ℤ` into another ring `α` 112 | * `coe : ℚ → α` casts `ℚ` into another division ring `α` 113 | 114 | 115 | ## Lists, Multisets, and Finite Sets 116 | 117 | For finite collections of elements different structures are available: 118 | * Lists: number of occurrences and order matter 119 | * Multisets: number of occurrences matters, order doesn't 120 | * Finsets: number of occurrences and order don't matter 121 | 122 | ## Demo 123 | 124 | [`love12_basic_mathematical_structures_demo.lean`](../lean/love12_basic_mathematical_structures_demo.lean) 125 | -------------------------------------------------------------------------------- /md/love12_basic_mathematical_structures_hierarchy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/blanchette/logical_verification_2019/7c5f1c90f6e5a0b221a02c8fa4e36c0c66393036/md/love12_basic_mathematical_structures_hierarchy.png -------------------------------------------------------------------------------- /md/love13_rational_and_real_numbers.md: -------------------------------------------------------------------------------- 1 | # LoVe Lecture 13: Rational and Real Numbers 2 | 3 | Today we will go through the construction of `ℚ` and `ℝ` as quotient types 4 | 5 | A general recipe to construct new types with specific properties is as follows: 6 | 7 | 1. Create a new type that can represent all elements, but not necessarily in a unique manner 8 | 9 | 2. Quotient this representation, equating elements that should be considered 10 | equal 11 | 12 | 3. 
Define operators on the quotient type by lifting functions from the base 13 | type, and prove that they respect the quotient relation 14 | 15 | We have used this approach before to construct `ℤ`; it can be used to 16 | construct `ℚ` and `ℝ` as well 17 | 18 | ## Rational Numbers 19 | 20 | **Step 1.** A rational number is a number that can be expressed as a fraction 21 | `n/d` of integers `n` and `d ≠ 0`: 22 | 23 | structure fraction := 24 | (num : ℤ) 25 | (denom : ℤ) 26 | (denom_ne_zero : denom ≠ 0) 27 | 28 | 29 | The number `n` is called the numerator, and the number `d` is 30 | called the denominator 31 | 32 | The representation of a rational number as a fraction 33 | is not unique, e.g., `1/2 = 2/4 = -1/-2` 34 | 35 | **Step 2.** Two fractions `n₁/d₁` and `n₂/d₂` represent the same rational 36 | number if the ratio between numerator and denominator is the same, i.e., 37 | `n₁ * d₂ = n₂ * d₁` 38 | 39 | This will be our equivalence relation `≈` on fractions 40 | 41 | **Step 3.** Define `0 := 0 / 1`, `1 := 1 / 1`, and addition, multiplication, etc: 42 | 43 | n₁ / d₁ + n₂ / d₂ := (n₁ * d₂ + n₂ * d₁) / (d₁ * d₂) 44 | (n₁ / d₁) * (n₂ / d₂) := (n₁ * n₂) / (d₁ * d₂) 45 | 46 | Then show that they respect the relation `≈` 47 | 48 | 49 | **Alternative definitions of `ℚ`:** 50 | 51 | * Like above, but with an additional property 52 | `cop : coprime num denom`, which states that the numerator 53 | and denominator do not have a common divisor (except `1` and `-1`): 54 | 55 | structure rat := 56 | (num : ℤ) 57 | (denom : ℕ) 58 | (pos : 0 < denom) 59 | (cop : coprime num denom) 60 | 61 | This is the definition used in `mathlib` 62 | 63 | Advantages: no quotient required; more efficient computation; more theorems are definitional equalities 64 | 65 | Disadvantage: more complicated function definitions 66 | 67 | * Define all elements syntactically, including the desired operations: 68 | 69 | inductive pre_rat : Type 70 | | zero : pre_rat 71 | | one : pre_rat 72 | | add : pre_rat → pre_rat → pre_rat 73 | | sub : pre_rat → pre_rat → pre_rat 74 | | mul : pre_rat → pre_rat → pre_rat 75 | | div : pre_rat → pre_rat → pre_rat 76 | 77 | Define `≈` to enforce congruence rules and the field axioms: 78 | 79 | inductive equiv : pre_rat → pre_rat → Prop 80 | | add_congr {a b c d : pre_rat} : 81 | equiv a b → equiv c d → equiv (add a c) (add b d) 82 | | add_assoc {a b c : pre_rat} : 83 | equiv (add a (add b c)) (add (add a b) c) 84 | | zero_add {a : pre_rat} : equiv (add zero a) a 85 | | add_comm {a b : pre_rat} : equiv (add a b) (add b a) 86 | | etc : equiv sorry sorry 87 | 88 | Advantages: does not require `ℤ`; easy to prove the `field` axioms; general recipe reusable for other algebraic constructions (e.g. 
free monoids, free groups) 89 | 90 | Disadvantage: the definition of orders and lemmas about them are more complicated 91 | 92 | 93 | ## Real Numbers 94 | 95 | 96 | There are sequences of rational numbers that seem to converge because the 97 | numbers in the sequence get closer and closer to each other, and yet do not 98 | converge to a rational number 99 | 100 | Example: Let `aₙ` be the largest number with `n` digits after the decimal point 101 | such that `aₙ² < 2` 102 | 103 | a₀ = 1 104 | a₁ = 1.4 105 | a₂ = 1.41 106 | a₃ = 1.414 107 | a₄ = 1.4142 108 | a₅ = 1.41421 109 | a₆ = 1.414213 110 | a₇ = 1.4142135 111 | a₈ = 1.41421356 112 | 113 | This sequence 114 | seems to converge because each `aₙ` is at most 115 | `10⁻ⁿ` away from any of the following numbers 116 | 117 | But the limit is `√2`, which is not a rational number 118 | 119 | In that sense, the rational numbers are **incomplete**, and the reals are their 120 | **completion** 121 | 122 | To construct the reals, we need to fill in the gaps that are 123 | revealed by these sequences that seem to converge, but do not 124 | 125 | 126 | > A sequence `a₀, a₁, ...` of rational numbers is _Cauchy_ if 127 | > for any `ε > 0`, there exists an `N ∈ ℕ` 128 | > such that for all `m ≥ N`, we have 129 | > `|a_N - aₘ| < ε`. 130 | 131 | In 132 | other words, no matter how small we choose `ε`, we can always find a 133 | point in the sequence from which all following numbers do not deviate more 134 | than `ε` 135 | 136 | In Lean: 137 | 138 | def is_cau_seq (f : ℕ → ℚ) : Prop := 139 | ∀ε > 0, ∃N, ∀m ≥ N, abs (f N - f m) < ε 140 | 141 | We define a type of Cauchy sequences as a subtype: 142 | 143 | def cau_seq : Type := 144 | {f : ℕ → ℚ // is_cau_seq f} 145 | 146 | Idea: Cauchy sequences represent real numbers 147 | 148 | * `aₙ = 1/n` represents the real number `0` 149 | * `1, 1.4, 1.41, ...` represents the real number `√2` 150 | * `aₙ = 0` also represents the real number `0` 151 | 152 | Since different Cauchy sequences can represent the same real number, we 153 | need to take the quotient over sequences representing the same real number 154 | 155 | Formally, two sequences represent the same real number when their difference 156 | converges to zero: 157 | 158 | instance equiv : setoid cau_seq := 159 | { r := λf g, ∀ε > 0, ∃N, ∀m ≥ N, abs (f.val m - g.val m) < ε, 160 | iseqv := sorry } 161 | 162 | The real numbers are the quotient: 163 | 164 | def my_real : Type := 165 | quotient cau_seq.equiv 166 | 167 | 168 | ### Alternative Definitions of the Real Numbers 169 | 170 | * Dedekind cuts: `r : ℝ` is represented essentially as `{x : ℚ | x < r}` 171 | 172 | * Binary sequences `ℕ → bool` for the interval [0, 1] 173 | 174 | 175 | ## Demo 176 | 177 | [`love13_rational_and_real_numbers_demo.lean`](../lean/love13_rational_and_real_numbers_demo.lean) 178 | --------------------------------------------------------------------------------