├── README.md ├── hitchhikers_guide_2024_desktop.pdf ├── hitchhikers_guide_2024_tablet.pdf └── lean ├── LoVe ├── LICENSE.txt ├── LoVe01_TypesAndTerms_Demo.lean ├── LoVe01_TypesAndTerms_ExerciseSheet.lean ├── LoVe01_TypesAndTerms_HomeworkSheet.lean ├── LoVe02_ProgramsAndTheorems_Demo.lean ├── LoVe02_ProgramsAndTheorems_ExerciseSheet.lean ├── LoVe02_ProgramsAndTheorems_HomeworkSheet.lean ├── LoVe03_BackwardProofs_Demo.lean ├── LoVe03_BackwardProofs_ExerciseSheet.lean ├── LoVe03_BackwardProofs_HomeworkSheet.lean ├── LoVe04_ForwardProofs_Demo.lean ├── LoVe04_ForwardProofs_ExerciseSheet.lean ├── LoVe04_ForwardProofs_HomeworkSheet.lean ├── LoVe05_FunctionalProgramming_Demo.lean ├── LoVe05_FunctionalProgramming_ExerciseSheet.lean ├── LoVe05_FunctionalProgramming_HomeworkSheet.lean ├── LoVe06_InductivePredicates_Demo.lean ├── LoVe06_InductivePredicates_ExerciseSheet.lean ├── LoVe06_InductivePredicates_HomeworkSheet.lean ├── LoVe07_EffectfulProgramming_Demo.lean ├── LoVe07_EffectfulProgramming_ExerciseSheet.lean ├── LoVe07_EffectfulProgramming_HomeworkSheet.lean ├── LoVe08_Metaprogramming_Demo.lean ├── LoVe08_Metaprogramming_ExerciseSheet.lean ├── LoVe08_Metaprogramming_HomeworkSheet.lean ├── LoVe09_OperationalSemantics_Demo.lean ├── LoVe09_OperationalSemantics_ExerciseSheet.lean ├── LoVe09_OperationalSemantics_HomeworkSheet.lean ├── LoVe10_HoareLogic_Demo.lean ├── LoVe10_HoareLogic_ExerciseSheet.lean ├── LoVe10_HoareLogic_HomeworkSheet.lean ├── LoVe11_DenotationalSemantics_Demo.lean ├── LoVe11_DenotationalSemantics_ExerciseSheet.lean ├── LoVe11_DenotationalSemantics_HomeworkSheet.lean ├── LoVe12_LogicalFoundationsOfMathematics_Demo.lean ├── LoVe12_LogicalFoundationsOfMathematics_ExerciseSheet.lean ├── LoVe12_LogicalFoundationsOfMathematics_HomeworkSheet.lean ├── LoVe13_BasicMathematicalStructures_Demo.lean ├── LoVe13_BasicMathematicalStructures_ExerciseSheet.lean ├── LoVe14_RationalAndRealNumbers_Demo.lean ├── LoVe14_RationalAndRealNumbers_ExerciseSheet.lean └── LoVelib.lean ├── lake-manifest.json ├── lakefile.lean └── lean-toolchain /README.md: -------------------------------------------------------------------------------- 1 | # Logical Verification 2024 2 | 3 | Files associated with the Hitchhiker's Guide to Logical Verification (2024 Edition). 4 | 5 | 6 | ## Installation 7 | 8 | The Hitchhiker's Guide PDF document is generated from the Lean demo files in 9 | the folder `lean/LoVe`. The same folder also contains an exercise sheet and a 10 | homework sheet for each chapter. 11 | 12 | To edit the Lean files, open the `lean` folder as a Lean 4 project [as described 13 | here](https://leanprover-community.github.io/install/project.html#working-on-an-existing-project). 
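Once the project is open in your editor, a quick way to confirm that the setup works is to create a scratch file in `lean/LoVe` along the following lines (a minimal sketch; the file name and its contents are arbitrary):

```lean
import LoVe.LoVelib

-- If the project builds correctly, `#check` reports the type of `Nat.add`
-- and `#eval` computes with it.
#check Nat.add     -- ℕ → ℕ → ℕ
#eval Nat.add 3 4  -- 7
```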
14 | -------------------------------------------------------------------------------- /hitchhikers_guide_2024_desktop.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lean-forward/logical_verification_2024/4e5e78a040dd34c98339f13db2b5357918dda32a/hitchhikers_guide_2024_desktop.pdf -------------------------------------------------------------------------------- /hitchhikers_guide_2024_tablet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lean-forward/logical_verification_2024/4e5e78a040dd34c98339f13db2b5357918dda32a/hitchhikers_guide_2024_tablet.pdf -------------------------------------------------------------------------------- /lean/LoVe/LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, Johannes 2 | Hölzl, and Jannis Limperg 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation and/or 12 | other materials provided with the distribution. 13 | 14 | 3. Neither the name of the copyright holder nor the names of its contributors 15 | may be used to endorse or promote products derived from this software without 16 | specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 19 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 22 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 24 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 25 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe01_TypesAndTerms_Demo.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVelib 5 | 6 | 7 | /- # LoVe Preface 8 | 9 | ## Proof Assistants 10 | 11 | Proof assistants (also called interactive theorem provers) 12 | 13 | * check and help develop formal proofs; 14 | * can be used to prove big theorems, not only logic puzzles; 15 | * can be tedious to use; 16 | * are highly addictive (think video games). 17 | 18 | A selection of proof assistants, classified by logical foundations: 19 | 20 | * set theory: Isabelle/ZF, Metamath, Mizar; 21 | * simple type theory: HOL4, HOL Light, Isabelle/HOL; 22 | * **dependent type theory**: Agda, Coq, **Lean**, Matita, PVS. 
23 | 24 | 25 | ## Success Stories 26 | 27 | Mathematics: 28 | 29 | * the four-color theorem (in Coq); 30 | * the Kepler conjecture (in HOL Light and Isabelle/HOL); 31 | * the definition of perfectoid spaces (in Lean). 32 | 33 | Computer science: 34 | 35 | * hardware; 36 | * operating systems; 37 | * programming language theory; 38 | * compilers; 39 | * security. 40 | 41 | 42 | ## Lean 43 | 44 | Lean is a proof assistant developed primarily by Leonardo de Moura (Amazon Web 45 | Services) since 2012. 46 | 47 | Its mathematical library, `mathlib`, is developed by a large community of 48 | contributors. 49 | 50 | We use the community version of Lean 4. We use its basic libraries, `mathlib4`, 51 | and `LoVelib`, among others. Lean is a research project. 52 | 53 | Strengths: 54 | 55 | * highly expressive logic based on a dependent type theory called the 56 | **calculus of inductive constructions**; 57 | * extended with classical axioms and quotient types; 58 | * metaprogramming framework; 59 | * modern user interface; 60 | * documentation; 61 | * open source; 62 | * endless source of puns (Lean Forward, Lean Together, Boolean, …). 63 | 64 | 65 | ## Our Goal 66 | 67 | We want you to 68 | 69 | * master fundamental theory and techniques in interactive theorem proving; 70 | * get familiarized with some application areas; 71 | * develop some practical skills you can apply on a larger project (as a hobby, 72 | for an MSc or PhD, or in industry); 73 | * feel ready to move to another proof assistant and apply what you have learned; 74 | * understand the domain well enough to start reading scientific papers. 75 | 76 | This course is neither a pure logical foundations course nor a Lean tutorial. 77 | Lean is our means, not an end of itself. 78 | 79 | 80 | # LoVe Demo 1: Types and Terms 81 | 82 | We start our journey by studying the basics of Lean, starting with terms 83 | (expressions) and their types. -/ 84 | 85 | 86 | set_option autoImplicit false 87 | set_option tactic.hygienic false 88 | 89 | namespace LoVe 90 | 91 | 92 | /- ## A View of Lean 93 | 94 | In a first approximation: 95 | 96 | Lean = functional programming + logic 97 | 98 | In today's lecture, we cover the syntax of types and terms, which are similar to 99 | those of the simply typed λ-calculus or typed functional programming languages 100 | (ML, OCaml, Haskell). 101 | 102 | 103 | ## Types 104 | 105 | Types `σ`, `τ`, `υ`: 106 | 107 | * type variables `α`; 108 | * basic types `T`; 109 | * complex types `T σ1 … σN`. 110 | 111 | Some type constructors `T` are written infix, e.g., `→` (function type). 112 | 113 | The function arrow is right-associative: 114 | `σ₁ → σ₂ → σ₃ → τ` = `σ₁ → (σ₂ → (σ₃ → τ))`. 115 | 116 | Polymorphic types are also possible. In Lean, the type variables must be bound 117 | using `∀`, e.g., `∀α, α → α`. 118 | 119 | 120 | ## Terms 121 | 122 | Terms `t`, `u`: 123 | 124 | * constants `c`; 125 | * variables `x`; 126 | * applications `t u`; 127 | * anonymous functions `fun x ↦ t` (also called λ-expressions). 128 | 129 | __Currying__: functions can be 130 | 131 | * fully applied (e.g., `f x y z` if `f` can take at most 3 arguments); 132 | * partially applied (e.g., `f x y`, `f x`); 133 | * left unapplied (e.g., `f`). 134 | 135 | Application is left-associative: `f x y z` = `((f x) y) z`. 136 | 137 | `#check` reports the type of its argument. 
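For instance, with the opaque constants `a b : ℤ` and `g : ℤ → ℤ → ℤ` declared
further below, a curried function can be inspected at every stage of
application (a small illustrative sketch):

    #check g       -- `ℤ → ℤ → ℤ` (left unapplied)
    #check g a     -- `ℤ → ℤ` (partially applied)
    #check g a b   -- `ℤ` (fully applied)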
-/ 138 | 139 | #check ℕ 140 | #check ℤ 141 | 142 | #check Empty 143 | #check Unit 144 | #check Bool 145 | 146 | #check ℕ → ℤ 147 | #check ℤ → ℕ 148 | #check Bool → ℕ → ℤ 149 | #check (Bool → ℕ) → ℤ 150 | #check ℕ → (Bool → ℕ) → ℤ 151 | 152 | #check fun x : ℕ ↦ x 153 | #check fun f : ℕ → ℕ ↦ fun g : ℕ → ℕ ↦ fun h : ℕ → ℕ ↦ 154 | fun x : ℕ ↦ h (g (f x)) 155 | #check fun (f g h : ℕ → ℕ) (x : ℕ) ↦ h (g (f x)) 156 | 157 | /- `opaque` defines an arbitrary constant of the specified type. -/ 158 | 159 | opaque a : ℤ 160 | opaque b : ℤ 161 | opaque f : ℤ → ℤ 162 | opaque g : ℤ → ℤ → ℤ 163 | 164 | #check fun x : ℤ ↦ g (f (g a x)) (g x b) 165 | #check fun x ↦ g (f (g a x)) (g x b) 166 | 167 | #check fun x ↦ x 168 | 169 | 170 | /- ## Type Checking and Type Inference 171 | 172 | Type checking and type inference are decidable problems (although this property is 173 | quickly lost if features such as overloading or subtyping are added). 174 | 175 | Type judgment: `C ⊢ t : σ`, meaning `t` has type `σ` in local context `C`. 176 | 177 | Typing rules: 178 | 179 | —————————— Cst if c is globally declared with type σ 180 | C ⊢ c : σ 181 | 182 | —————————— Var if x : σ is the rightmost occurrence of x in C 183 | C ⊢ x : σ 184 | 185 | C ⊢ t : σ → τ C ⊢ u : σ 186 | ——————————————————————————— App 187 | C ⊢ t u : τ 188 | 189 | C, x : σ ⊢ t : τ 190 | ——————————————————————————— Fun 191 | C ⊢ (fun x : σ ↦ t) : σ → τ 192 | 193 | If the same variable `x` occurs multiple times in the context C, the rightmost 194 | occurrence shadows the other ones. 195 | 196 | 197 | ## Type Inhabitation 198 | 199 | Given a type `σ`, the __type inhabitation__ problem consists of finding a term 200 | of that type. Type inhabitation is undecidable. 201 | 202 | Recursive procedure: 203 | 204 | 1. If `σ` is of the form `τ → υ`, a candidate inhabitant is an anonymous 205 | function of the form `fun x ↦ _`. 206 | 207 | 2. Alternatively, you can use any constant or variable `x : τ₁ → ⋯ → τN → σ` to 208 | build the term `x _ … _`. -/ 209 | 210 | opaque α : Type 211 | opaque β : Type 212 | opaque γ : Type 213 | 214 | def someFunOfType : (α → β → γ) → ((β → α) → β) → α → γ := 215 | fun f g a ↦ f a (g (fun b ↦ a)) 216 | 217 | end LoVe 218 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe01_TypesAndTerms_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe01_TypesAndTerms_Demo 5 | 6 | 7 | /- # LoVe Exercise 1: Types and Terms 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: Terms 19 | 20 | Complete the following definitions, by replacing the `sorry` markers by terms 21 | of the expected type. 22 | 23 | Hint: A procedure for doing so systematically is described in Section 1.4 of 24 | the Hitchhiker's Guide. As explained there, you can use `_` as a placeholder 25 | while constructing a term. By hovering over `_`, you will see the current 26 | logical context. -/ 27 | 28 | def I : α → α := 29 | fun a ↦ a 30 | 31 | def K : α → β → α := 32 | fun a b ↦ a 33 | 34 | def C : (α → β → γ) → β → α → γ := 35 | sorry 36 | 37 | def projFst : α → α → α := 38 | sorry 39 | 40 | /- Give a different answer than for `projFst`. 
-/ 41 | 42 | def projSnd : α → α → α := 43 | sorry 44 | 45 | def someNonsense : (α → β → γ) → α → (α → γ) → β → γ := 46 | sorry 47 | 48 | 49 | /- ## Question 2: Typing Derivation 50 | 51 | Show the typing derivation for your definition of `C` above, on paper or using 52 | ASCII or Unicode art. You might find the characters `–` (to draw horizontal 53 | bars) and `⊢` useful. -/ 54 | 55 | -- write your solution in a comment here or on paper 56 | 57 | end LoVe 58 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe01_TypesAndTerms_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVelib 5 | 6 | 7 | /- # LoVe Homework 1 (10 points): Types and Terms 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Question 1 (6 points): Terms 21 | 22 | We start by declaring four new opaque types. -/ 23 | 24 | opaque α : Type 25 | opaque β : Type 26 | opaque γ : Type 27 | opaque δ : Type 28 | 29 | /- 1.1 (4 points). Complete the following definitions, by providing terms with 30 | the expected type. 31 | 32 | Please use reasonable names for the bound variables, e.g., `a : α`, `b : β`, 33 | `c : γ`. 34 | 35 | Hint: A procedure for doing so systematically is described in Section 1.4 of the 36 | Hitchhiker's Guide. As explained there, you can use `_` as a placeholder while 37 | constructing a term. By hovering over `_`, you will see the current logical 38 | context. -/ 39 | 40 | def B : (α → β) → (γ → α) → γ → β := 41 | sorry 42 | 43 | def S : (α → β → γ) → (α → β) → α → γ := 44 | sorry 45 | 46 | def moreNonsense : ((α → β) → γ → δ) → γ → β → δ := 47 | sorry 48 | 49 | def evenMoreNonsense : (α → β) → (α → γ) → α → β → γ := 50 | sorry 51 | 52 | /- 1.2 (2 points). Complete the following definition. 53 | 54 | This one looks more difficult, but it should be fairly straightforward if you 55 | follow the procedure described in the Hitchhiker's Guide. 56 | 57 | Note: Peirce is pronounced like the English word "purse". -/ 58 | 59 | def weakPeirce : ((((α → β) → α) → α) → β) → β := 60 | sorry 61 | 62 | /- ## Question 2 (4 points): Typing Derivation 63 | 64 | Show the typing derivation for your definition of `B` above, using ASCII or 65 | Unicode art. You might find the characters `–` (to draw horizontal bars) and `⊢` 66 | useful. 67 | 68 | Feel free to introduce abbreviations to avoid repeating large contexts `C`. -/ 69 | 70 | -- write your solution here 71 | 72 | end LoVe 73 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe02_ProgramsAndTheorems_Demo.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVelib 5 | 6 | 7 | /- # LoVe Demo 2: Programs and Theorems 8 | 9 | We continue our study of the basics of Lean, focusing on programs and theorems, 10 | without carrying out any proofs yet. We review how to define new types and 11 | functions and how to state their intended properties as theorems. 
-/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Type Definitions 21 | 22 | An __inductive type__ (also called __inductive datatype__, 23 | __algebraic datatype__, or just __datatype__) is a type that consists all the 24 | values that can be built using a finite number of applications of its 25 | __constructors__, and only those. 26 | 27 | 28 | ### Natural Numbers -/ 29 | 30 | namespace MyNat 31 | 32 | /- Definition of type `Nat` (= `ℕ`) of natural numbers, using unary notation: -/ 33 | 34 | inductive Nat : Type where 35 | | zero : Nat 36 | | succ : Nat → Nat 37 | 38 | #check Nat 39 | #check Nat.zero 40 | #check Nat.succ 41 | 42 | /- `#print` outputs the definition of its argument. -/ 43 | 44 | #print Nat 45 | 46 | end MyNat 47 | 48 | /- Outside namespace `MyNat`, `Nat` refers to the type defined in the Lean core 49 | library unless it is qualified by the `MyNat` namespace. -/ 50 | 51 | #print Nat 52 | #print MyNat.Nat 53 | 54 | /- ### Arithmetic Expressions -/ 55 | 56 | inductive AExp : Type where 57 | | num : ℤ → AExp 58 | | var : String → AExp 59 | | add : AExp → AExp → AExp 60 | | sub : AExp → AExp → AExp 61 | | mul : AExp → AExp → AExp 62 | | div : AExp → AExp → AExp 63 | 64 | 65 | /- ### Lists -/ 66 | 67 | namespace MyList 68 | 69 | inductive List (α : Type) where 70 | | nil : List α 71 | | cons : α → List α → List α 72 | 73 | #check List 74 | #check List.nil 75 | #check List.cons 76 | #print List 77 | 78 | end MyList 79 | 80 | #print List 81 | #print MyList.List 82 | 83 | 84 | /- ## Function Definitions 85 | 86 | The syntax for defining a function operating on an inductive type is very 87 | compact: We define a single function and use __pattern matching__ to extract the 88 | arguments to the constructors. -/ 89 | 90 | def fib : ℕ → ℕ 91 | | 0 => 0 92 | | 1 => 1 93 | | n + 2 => fib (n + 1) + fib n 94 | 95 | /- When there are multiple arguments, separate the patterns by `,`: -/ 96 | 97 | def add : ℕ → ℕ → ℕ 98 | | m, Nat.zero => m 99 | | m, Nat.succ n => Nat.succ (add m n) 100 | 101 | /- `#eval` and `#reduce` evaluate and output the value of a term. -/ 102 | 103 | #eval add 2 7 104 | #reduce add 2 7 105 | 106 | def mul : ℕ → ℕ → ℕ 107 | | _, Nat.zero => Nat.zero 108 | | m, Nat.succ n => add m (mul m n) 109 | 110 | #eval mul 2 7 111 | 112 | #print mul 113 | 114 | def power : ℕ → ℕ → ℕ 115 | | _, Nat.zero => 1 116 | | m, Nat.succ n => mul m (power m n) 117 | 118 | #eval power 2 5 119 | 120 | /- `add`, `mul`, and `power` are artificial examples. These operations are 121 | already available in Lean as `+`, `*`, and `^`. 122 | 123 | If it is not necessary to pattern-match on an argument, it can be moved to 124 | the left of the `:` and made a named argument: -/ 125 | 126 | def powerParam (m : ℕ) : ℕ → ℕ 127 | | Nat.zero => 1 128 | | Nat.succ n => mul m (powerParam m n) 129 | 130 | #eval powerParam 2 5 131 | 132 | def iter (α : Type) (z : α) (f : α → α) : ℕ → α 133 | | Nat.zero => z 134 | | Nat.succ n => f (iter α z f n) 135 | 136 | #check iter 137 | 138 | def powerIter (m n : ℕ) : ℕ := 139 | iter ℕ 1 (mul m) n 140 | 141 | #eval powerIter 2 5 142 | 143 | def append (α : Type) : List α → List α → List α 144 | | List.nil, ys => ys 145 | | List.cons x xs, ys => List.cons x (append α xs ys) 146 | 147 | /- Because `append` must work for any type of list, the type of its elements is 148 | provided as an argument. As a result, the type must be provided in every call 149 | (or use `_` if Lean can infer the type). 
-/ 150 | 151 | #check append 152 | #eval append ℕ [3, 1] [4, 1, 5] 153 | #eval append _ [3, 1] [4, 1, 5] 154 | 155 | /- If the type argument is enclosed in `{ }` rather than `( )`, it is implicit 156 | and need not be provided in every call (provided Lean can infer it). -/ 157 | 158 | def appendImplicit {α : Type} : List α → List α → List α 159 | | List.nil, ys => ys 160 | | List.cons x xs, ys => List.cons x (appendImplicit xs ys) 161 | 162 | #eval appendImplicit [3, 1] [4, 1, 5] 163 | 164 | /- Prefixing a definition name with `@` gives the corresponding definition in 165 | which all implicit arguments have been made explicit. This is useful in 166 | situations where Lean cannot work out how to instantiate the implicit 167 | arguments. -/ 168 | 169 | #check @appendImplicit 170 | #eval @appendImplicit ℕ [3, 1] [4, 1, 5] 171 | #eval @appendImplicit _ [3, 1] [4, 1, 5] 172 | 173 | /- Aliases: 174 | 175 | `[]` := `List.nil` 176 | `x :: xs` := `List.cons x xs` 177 | `[x₁, …, xN]` := `x₁ :: … :: xN :: []` -/ 178 | 179 | def appendPretty {α : Type} : List α → List α → List α 180 | | [], ys => ys 181 | | x :: xs, ys => x :: appendPretty xs ys 182 | 183 | def reverse {α : Type} : List α → List α 184 | | [] => [] 185 | | x :: xs => reverse xs ++ [x] 186 | 187 | def eval (env : String → ℤ) : AExp → ℤ 188 | | AExp.num i => i 189 | | AExp.var x => env x 190 | | AExp.add e₁ e₂ => eval env e₁ + eval env e₂ 191 | | AExp.sub e₁ e₂ => eval env e₁ - eval env e₂ 192 | | AExp.mul e₁ e₂ => eval env e₁ * eval env e₂ 193 | | AExp.div e₁ e₂ => eval env e₁ / eval env e₂ 194 | 195 | #eval eval (fun x ↦ 7) (AExp.div (AExp.var "y") (AExp.num 0)) 196 | 197 | /- Lean only accepts the function definitions for which it can prove 198 | termination. In particular, it accepts __structurally recursive__ functions, 199 | which peel off exactly one constructor at a time. 200 | 201 | 202 | ## Theorem Statements 203 | 204 | Notice the similarity with `def` commands. `theorem` is like `def` except that 205 | the result is a proposition rather than data or a function. -/ 206 | 207 | namespace SorryTheorems 208 | 209 | theorem add_comm (m n : ℕ) : 210 | add m n = add n m := 211 | sorry 212 | 213 | theorem add_assoc (l m n : ℕ) : 214 | add (add l m) n = add l (add m n) := 215 | sorry 216 | 217 | theorem mul_comm (m n : ℕ) : 218 | mul m n = mul n m := 219 | sorry 220 | 221 | theorem mul_assoc (l m n : ℕ) : 222 | mul (mul l m) n = mul l (mul m n) := 223 | sorry 224 | 225 | theorem mul_add (l m n : ℕ) : 226 | mul l (add m n) = add (mul l m) (mul l n) := 227 | sorry 228 | 229 | theorem reverse_reverse {α : Type} (xs : List α) : 230 | reverse (reverse xs) = xs := 231 | sorry 232 | 233 | /- Axioms are like theorems but without proofs. Opaque declarations are like 234 | definitions but without bodies. -/ 235 | 236 | opaque a : ℤ 237 | opaque b : ℤ 238 | 239 | axiom a_less_b : 240 | a < b 241 | 242 | end SorryTheorems 243 | 244 | end LoVe 245 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe02_ProgramsAndTheorems_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe02_ProgramsAndTheorems_Demo 5 | 6 | 7 | /- # LoVe Exercise 2: Programs and Theorems 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. 
-/ 10 | 11 | set_option autoImplicit false 12 | set_option tactic.hygienic false 13 | 14 | namespace LoVe 15 | 16 | 17 | /- ## Question 1: Predecessor Function 18 | 19 | 1.1. Define the function `pred` of type `ℕ → ℕ` that returns the predecessor of 20 | its argument, or 0 if the argument is 0. For example: 21 | 22 | `pred 7 = 6` 23 | `pred 0 = 0` -/ 24 | 25 | def pred : ℕ → ℕ := 26 | sorry 27 | 28 | /- 1.2. Check that your function works as expected. -/ 29 | 30 | #eval pred 0 -- expected: 0 31 | #eval pred 1 -- expected: 0 32 | #eval pred 2 -- expected: 1 33 | #eval pred 3 -- expected: 2 34 | #eval pred 10 -- expected: 9 35 | #eval pred 99 -- expected: 98 36 | 37 | 38 | /- ## Question 2: Arithmetic Expressions 39 | 40 | Consider the type `AExp` from the lecture and the function `eval` that 41 | computes the value of an expression. You will find the definitions in the file 42 | `LoVe02_ProgramsAndTheorems_Demo.lean`. One way to find them quickly is to 43 | 44 | 1. hold the Control (on Linux and Windows) or Command (on macOS) key pressed; 45 | 2. move the cursor to the identifier `AExp` or `eval`; 46 | 3. click the identifier. -/ 47 | 48 | #check AExp 49 | #check eval 50 | 51 | /- 2.1. Test that `eval` behaves as expected. Make sure to exercise each 52 | constructor at least once. You can use the following environment in your tests. 53 | What happens if you divide by zero? 54 | 55 | Note that `#eval` (Lean's evaluation command) and `eval` (our evaluation 56 | function on `AExp`) are unrelated. -/ 57 | 58 | def someEnv : String → ℤ 59 | | "x" => 3 60 | | "y" => 17 61 | | _ => 201 62 | 63 | #eval eval someEnv (AExp.var "x") -- expected: 3 64 | -- invoke `#eval` here 65 | 66 | /- 2.2. The following function simplifies arithmetic expressions involving 67 | addition. It simplifies `0 + e` and `e + 0` to `e`. Complete the definition so 68 | that it also simplifies expressions involving the other three binary 69 | operators. -/ 70 | 71 | def simplify : AExp → AExp 72 | | AExp.add (AExp.num 0) e₂ => simplify e₂ 73 | | AExp.add e₁ (AExp.num 0) => simplify e₁ 74 | -- insert the missing cases here 75 | -- catch-all cases below 76 | | AExp.num i => AExp.num i 77 | | AExp.var x => AExp.var x 78 | | AExp.add e₁ e₂ => AExp.add (simplify e₁) (simplify e₂) 79 | | AExp.sub e₁ e₂ => AExp.sub (simplify e₁) (simplify e₂) 80 | | AExp.mul e₁ e₂ => AExp.mul (simplify e₁) (simplify e₂) 81 | | AExp.div e₁ e₂ => AExp.div (simplify e₁) (simplify e₂) 82 | 83 | /- 2.3. Is the `simplify` function correct? In fact, what would it mean for it 84 | to be correct or not? Intuitively, for `simplify` to be correct, it must 85 | return an arithmetic expression that yields the same numeric value when 86 | evaluated as the original expression. 87 | 88 | Given an environment `env` and an expression `e`, state (without proving it) 89 | the property that the value of `e` after simplification is the same as the 90 | value of `e` before. -/ 91 | 92 | theorem simplify_correct (env : String → ℤ) (e : AExp) : 93 | True := -- replace `True` by your theorem statement 94 | sorry -- leave `sorry` alone 95 | 96 | 97 | /- ## Question 3 (**optional**): Map 98 | 99 | 3.1 (**optional**). Define a generic `map` function that applies a function to 100 | every element in a list. -/ 101 | 102 | def map {α : Type} {β : Type} (f : α → β) : List α → List β := 103 | sorry 104 | 105 | #eval map (fun n ↦ n + 10) [1, 2, 3] -- expected: [11, 12, 13] 106 | 107 | /- 3.2 (**optional**). 
State (without proving them) the so-called functorial 108 | properties of `map` as theorems. Schematically: 109 | 110 | map (fun x ↦ x) xs = xs 111 | map (fun x ↦ g (f x)) xs = map g (map f xs) 112 | 113 | Try to give meaningful names to your theorems. Also, make sure to state the 114 | second property as generally as possible, for arbitrary types. -/ 115 | 116 | -- enter your theorem statements here 117 | 118 | end LoVe 119 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe02_ProgramsAndTheorems_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe02_ProgramsAndTheorems_Demo 5 | 6 | 7 | /- # LoVe Homework 2 (10 points): Programs and Theorems 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Question 1 (4 points): Snoc 21 | 22 | 1.1 (3 points). Define the function `snoc` that appends a single element to the 23 | end of a list. Your function should be defined by recursion and not using `++` 24 | (`List.append`). -/ 25 | 26 | def snoc {α : Type} : List α → α → List α := 27 | sorry 28 | 29 | /- 1.2 (1 point). Convince yourself that your definition of `snoc` works by 30 | testing it on a few examples. -/ 31 | 32 | #eval snoc [1] 2 33 | -- invoke `#eval` or `#reduce` here 34 | 35 | 36 | /- ## Question 2 (6 points): Sum 37 | 38 | 2.1 (3 points). Define a `sum` function that computes the sum of all the numbers 39 | in a list. -/ 40 | 41 | def sum : List ℕ → ℕ := 42 | sorry 43 | 44 | #eval sum [1, 12, 3] -- expected: 16 45 | 46 | /- 2.2 (3 points). State (without proving them) the following properties of 47 | `sum` as theorems. Schematically: 48 | 49 | sum (snoc ms n) = n + sum ms 50 | sum (ms ++ ns) = sum ms + sum ns 51 | sum (reverse ns) = sum ns 52 | 53 | Try to give meaningful names to your theorems. Use `sorry` as the proof. -/ 54 | 55 | -- enter your theorem statements here 56 | 57 | end LoVe 58 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe03_BackwardProofs_Demo.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe02_ProgramsAndTheorems_Demo 5 | 6 | 7 | /- # LoVe Demo 3: Backward Proofs 8 | 9 | A __tactic__ operates on a proof goal and either proves it or creates new 10 | subgoals. Tactics are a __backward__ proof mechanism: They start from the goal 11 | and work towards the available hypotheses and theorems. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | namespace BackwardProofs 20 | 21 | 22 | /- ## Tactic Mode 23 | 24 | Syntax of tactical proofs: 25 | 26 | by 27 | _tactic₁_ 28 | … 29 | _tacticN 30 | 31 | The keyword `by` indicates to Lean the proof is tactical. -/ 32 | 33 | theorem fst_of_two_props : 34 | ∀a b : Prop, a → b → a := 35 | by 36 | intro a b 37 | intro ha hb 38 | apply ha 39 | 40 | /- Note that `a → b → a` is parsed as `a → (b → a)`. 41 | 42 | Propositions in Lean are terms of type `Prop`. 
`Prop` is a type, just like `Nat` 43 | and `List Bool`. In fact there is a close correspondence between propositions 44 | and types, which will be explained in lecture 4. 45 | 46 | 47 | ## Basic Tactics 48 | 49 | `intro` moves `∀`-quantified variables, or the assumptions of implications `→`, 50 | from the goal's conclusion (after `⊢`) into the goal's hypotheses (before `⊢`). 51 | 52 | `apply` matches the goal's conclusion with the conclusion of the specified 53 | theorem and adds the theorem's hypotheses as new goals. -/ 54 | 55 | theorem fst_of_two_props_params (a b : Prop) (ha : a) (hb : b) : 56 | a := 57 | by apply ha 58 | 59 | theorem prop_comp (a b c : Prop) (hab : a → b) (hbc : b → c) : 60 | a → c := 61 | by 62 | intro ha 63 | apply hbc 64 | apply hab 65 | apply ha 66 | 67 | /- The above proof step by step: 68 | 69 | * Assume we have a proof of `a`. 70 | * The goal is `c`, which we can show if we prove `b` (from `hbc`). 71 | * The goal is `b`, which we can show if we prove `a` (from `hab`). 72 | * We already know `a` (from `ha`). 73 | 74 | Next, `exact` matches the goal's conclusion with the specified theorem, closing 75 | the goal. We can often use `apply` in such situations, but `exact` communicates 76 | our intentions better. -/ 77 | 78 | theorem fst_of_two_props_exact (a b : Prop) (ha : a) (hb : b) : 79 | a := 80 | by exact ha 81 | 82 | /- `assumption` finds a hypothesis from the local context that matches the 83 | goal's conclusion and applies it to prove the goal. -/ 84 | 85 | theorem fst_of_two_props_assumption (a b : Prop) 86 | (ha : a) (hb : b) : 87 | a := 88 | by assumption 89 | 90 | /- `rfl` proves `l = r`, where the two sides are syntactically equal up to 91 | computation. Computation means unfolding of definitions, β-reduction 92 | (application of `fun` to an argument), `let`, and more. -/ 93 | 94 | theorem α_example {α β : Type} (f : α → β) : 95 | (fun x ↦ f x) = (fun y ↦ f y) := 96 | by rfl 97 | 98 | theorem β_example {α β : Type} (f : α → β) (a : α) : 99 | (fun x ↦ f x) a = f a := 100 | by rfl 101 | 102 | def double (n : ℕ) : ℕ := 103 | n + n 104 | 105 | theorem δ_example : 106 | double 5 = 5 + 5 := 107 | by rfl 108 | 109 | /- `let` introduces a definition that is locally scoped. Below, `n := 2` is only 110 | in scope in the expression `n + n`. -/ 111 | 112 | theorem ζ_example : 113 | (let n : ℕ := 2 114 | n + n) = 4 := 115 | by rfl 116 | 117 | theorem η_example {α β : Type} (f : α → β) : 118 | (fun x ↦ f x) = f := 119 | by rfl 120 | 121 | /- `(a, b)` is the pair whose first component is `a` and whose second component 122 | is `b`. `Prod.fst` is a so-called projection that extracts the first component 123 | of a pair. -/ 124 | 125 | theorem ι_example {α β : Type} (a : α) (b : β) : 126 | Prod.fst (a, b) = a := 127 | by rfl 128 | 129 | 130 | /- ## Reasoning about Logical Connectives and Quantifiers 131 | 132 | Introduction rules: -/ 133 | 134 | #check True.intro 135 | #check And.intro 136 | #check Or.inl 137 | #check Or.inr 138 | #check Iff.intro 139 | #check Exists.intro 140 | 141 | /- Elimination rules: -/ 142 | 143 | #check False.elim 144 | #check And.left 145 | #check And.right 146 | #check Or.elim 147 | #check Iff.mp 148 | #check Iff.mpr 149 | #check Exists.elim 150 | 151 | /- Definition of `¬` and related theorems: -/ 152 | 153 | #print Not 154 | #check Classical.em 155 | #check Classical.byContradiction 156 | 157 | /- There are no explicit rules for `Not` (`¬`) since `¬ p` is defined as 158 | `p → False`. 
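In practice, this means a negated goal can be attacked with `intro`, and a
negated hypothesis can be `apply`-ed once the goal is `False`. A small
illustrative sketch combining this with `False.elim` (the theorem name is
arbitrary):

    theorem Not_elim_sketch (a b : Prop) (ha : a) (hna : ¬ a) :
        b :=
      by
        apply False.elim
        apply hna
        exact ha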
-/ 159 | 160 | theorem And_swap (a b : Prop) : 161 | a ∧ b → b ∧ a := 162 | by 163 | intro hab 164 | apply And.intro 165 | apply And.right 166 | exact hab 167 | apply And.left 168 | exact hab 169 | 170 | /- The above proof step by step: 171 | 172 | * Assume we know `a ∧ b`. 173 | * The goal is `b ∧ a`. 174 | * Show `b`, which we can if we can show a conjunction with `b` on the right. 175 | * We can, we already have `a ∧ b`. 176 | * Show `a`, which we can if we can show a conjunction with `a` on the left. 177 | * We can, we already have `a ∧ b`. 178 | 179 | The `{ … }` combinator focuses on a specific subgoal. The tactic inside must 180 | fully prove it. In the proof below, `{ … }` is used for each of the two subgoals 181 | to give more structure to the proof. -/ 182 | 183 | theorem And_swap_braces : 184 | ∀a b : Prop, a ∧ b → b ∧ a := 185 | by 186 | intro a b hab 187 | apply And.intro 188 | { exact And.right hab } 189 | { exact And.left hab } 190 | 191 | /- Notice above how we pass the hypothesis `hab` directly to the theorems 192 | `And.right` and `And.left`, instead of waiting for the theorems' assumptions to 193 | appear as new subgoals. This is a small forward step in an otherwise backward 194 | proof. -/ 195 | 196 | opaque f : ℕ → ℕ 197 | 198 | theorem f5_if (h : ∀n : ℕ, f n = n) : 199 | f 5 = 5 := 200 | by exact h 5 201 | 202 | theorem Or_swap (a b : Prop) : 203 | a ∨ b → b ∨ a := 204 | by 205 | intro hab 206 | apply Or.elim hab 207 | { intro ha 208 | exact Or.inr ha } 209 | { intro hb 210 | exact Or.inl hb } 211 | 212 | theorem modus_ponens (a b : Prop) : 213 | (a → b) → a → b := 214 | by 215 | intro hab ha 216 | apply hab 217 | exact ha 218 | 219 | theorem Not_Not_intro (a : Prop) : 220 | a → ¬¬ a := 221 | by 222 | intro ha hna 223 | apply hna 224 | exact ha 225 | 226 | theorem Exists_double_iden : 227 | ∃n : ℕ, double n = n := 228 | by 229 | apply Exists.intro 0 230 | rfl 231 | 232 | 233 | /- ## Reasoning about Equality -/ 234 | 235 | #check Eq.refl 236 | #check Eq.symm 237 | #check Eq.trans 238 | #check Eq.subst 239 | 240 | /- The above rules can be used directly: -/ 241 | 242 | theorem Eq_trans_symm {α : Type} (a b c : α) 243 | (hab : a = b) (hcb : c = b) : 244 | a = c := 245 | by 246 | apply Eq.trans 247 | { exact hab } 248 | { apply Eq.symm 249 | exact hcb } 250 | 251 | /- `rw` applies a single equation as a left-to-right rewrite rule, once. To 252 | apply an equation right-to-left, prefix its name with `←`. -/ 253 | 254 | theorem Eq_trans_symm_rw {α : Type} (a b c : α) 255 | (hab : a = b) (hcb : c = b) : 256 | a = c := 257 | by 258 | rw [hab] 259 | rw [hcb] 260 | 261 | /- `rw` can expand a definition. Below, `¬¬ a` becomes `¬ a → False`, and `¬ a` 262 | becomes `a → False`. -/ 263 | 264 | theorem a_proof_of_negation (a : Prop) : 265 | a → ¬¬ a := 266 | by 267 | rw [Not] 268 | rw [Not] 269 | intro ha 270 | intro hna 271 | apply hna 272 | exact ha 273 | 274 | /- `simp` applies a standard set of rewrite rules (the __simp set__) 275 | exhaustively. The set can be extended using the `@[simp]` attribute. Theorems 276 | can be temporarily added to the simp set with the syntax 277 | `simp [_theorem₁_, …, _theoremN_]`. -/ 278 | 279 | theorem cong_two_args_1p1 {α : Type} (a b c d : α) 280 | (g : α → α → ℕ → α) (hab : a = b) (hcd : c = d) : 281 | g a c (1 + 1) = g b d 2 := 282 | by simp [hab, hcd] 283 | 284 | /- `ac_rfl` is similar to `rfl`, but it can reason up to associativity and 285 | commutativity of `+`, `*`, and other binary operators. 
-/ 286 | 287 | theorem abc_Eq_cba (a b c : ℕ) : 288 | a + b + c = c + b + a := 289 | by ac_rfl 290 | 291 | 292 | /- ## Proofs by Mathematical Induction 293 | 294 | `induction` performs induction on the specified variable. It gives rise to one 295 | named subgoal per constructor. -/ 296 | 297 | theorem add_zero (n : ℕ) : 298 | add 0 n = n := 299 | by 300 | induction n with 301 | | zero => rfl 302 | | succ n' ih => simp [add, ih] 303 | 304 | theorem add_succ (m n : ℕ) : 305 | add (Nat.succ m) n = Nat.succ (add m n) := 306 | by 307 | induction n with 308 | | zero => rfl 309 | | succ n' ih => simp [add, ih] 310 | 311 | theorem add_comm (m n : ℕ) : 312 | add m n = add n m := 313 | by 314 | induction n with 315 | | zero => simp [add, add_zero] 316 | | succ n' ih => simp [add, add_succ, ih] 317 | 318 | theorem add_assoc (l m n : ℕ) : 319 | add (add l m) n = add l (add m n) := 320 | by 321 | induction n with 322 | | zero => rfl 323 | | succ n' ih => simp [add, ih] 324 | 325 | /- `ac_rfl` is extensible. We can register `add` as a commutative and 326 | associative operator using the type class instance mechanism (explained in 327 | lecture 5). This is useful for the `ac_rfl` invocation below. -/ 328 | 329 | instance IsAssociative_add : IsAssociative ℕ add := 330 | { assoc := add_assoc } 331 | 332 | instance IsCommutative_add : IsCommutative ℕ add := 333 | { comm := add_comm } 334 | 335 | theorem mul_add (l m n : ℕ) : 336 | mul l (add m n) = add (mul l m) (mul l n) := 337 | by 338 | induction n with 339 | | zero => rfl 340 | | succ n' ih => 341 | simp [add, mul, ih] 342 | ac_rfl 343 | 344 | 345 | /- ## Cleanup Tactics 346 | 347 | `clear` removes unused variables or hypotheses. 348 | 349 | `rename` changes the name of a variable or hypothesis. -/ 350 | 351 | theorem cleanup_example (a b c : Prop) (ha : a) (hb : b) 352 | (hab : a → b) (hbc : b → c) : 353 | c := 354 | by 355 | clear ha hab a 356 | apply hbc 357 | clear hbc c 358 | rename b => h 359 | exact h 360 | 361 | end BackwardProofs 362 | 363 | end LoVe 364 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe03_BackwardProofs_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe03_BackwardProofs_Demo 5 | 6 | 7 | /- # LoVe Exercise 3: Backward Proofs 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | namespace BackwardProofs 18 | 19 | 20 | /- ## Question 1: Connectives and Quantifiers 21 | 22 | 1.1. Carry out the following proofs using basic tactics. 23 | 24 | Hint: Some strategies for carrying out such proofs are described at the end of 25 | Section 3.3 in the Hitchhiker's Guide. -/ 26 | 27 | theorem I (a : Prop) : 28 | a → a := 29 | sorry 30 | 31 | theorem K (a b : Prop) : 32 | a → b → b := 33 | sorry 34 | 35 | theorem C (a b c : Prop) : 36 | (a → b → c) → b → a → c := 37 | sorry 38 | 39 | theorem proj_fst (a : Prop) : 40 | a → a → a := 41 | sorry 42 | 43 | /- Please give a different answer than for `proj_fst`: -/ 44 | 45 | theorem proj_snd (a : Prop) : 46 | a → a → a := 47 | sorry 48 | 49 | theorem some_nonsense (a b c : Prop) : 50 | (a → b → c) → a → (a → c) → b → c := 51 | sorry 52 | 53 | /- 1.2. Prove the contraposition rule using basic tactics. 
-/ 54 | 55 | theorem contrapositive (a b : Prop) : 56 | (a → b) → ¬ b → ¬ a := 57 | sorry 58 | 59 | /- 1.3. Prove the distributivity of `∀` over `∧` using basic tactics. 60 | 61 | Hint: This exercise is tricky, especially the right-to-left direction. Some 62 | forward reasoning, like in the proof of `and_swap_braces` in the lecture, might 63 | be necessary. -/ 64 | 65 | theorem forall_and {α : Type} (p q : α → Prop) : 66 | (∀x, p x ∧ q x) ↔ (∀x, p x) ∧ (∀x, q x) := 67 | sorry 68 | 69 | 70 | /- ## Question 2: Natural Numbers 71 | 72 | 2.1. Prove the following recursive equations on the first argument of the 73 | `mul` operator defined in lecture 1. -/ 74 | 75 | #check mul 76 | 77 | theorem mul_zero (n : ℕ) : 78 | mul 0 n = 0 := 79 | sorry 80 | 81 | #check add_succ 82 | theorem mul_succ (m n : ℕ) : 83 | mul (Nat.succ m) n = add (mul m n) n := 84 | sorry 85 | 86 | /- 2.2. Prove commutativity and associativity of multiplication using the 87 | `induction` tactic. Choose the induction variable carefully. -/ 88 | 89 | theorem mul_comm (m n : ℕ) : 90 | mul m n = mul n m := 91 | sorry 92 | 93 | theorem mul_assoc (l m n : ℕ) : 94 | mul (mul l m) n = mul l (mul m n) := 95 | sorry 96 | 97 | /- 2.3. Prove the symmetric variant of `mul_add` using `rw`. To apply 98 | commutativity at a specific position, instantiate the rule by passing some 99 | arguments (e.g., `mul_comm _ l`). -/ 100 | 101 | theorem add_mul (l m n : ℕ) : 102 | mul (add l m) n = add (mul n l) (mul n m) := 103 | sorry 104 | 105 | 106 | /- ## Question 3 (**optional**): Intuitionistic Logic 107 | 108 | Intuitionistic logic is extended to classical logic by assuming a classical 109 | axiom. There are several possibilities for the choice of axiom. In this 110 | question, we are concerned with the logical equivalence of three different 111 | axioms: -/ 112 | 113 | def ExcludedMiddle : Prop := 114 | ∀a : Prop, a ∨ ¬ a 115 | 116 | def Peirce : Prop := 117 | ∀a b : Prop, ((a → b) → a) → a 118 | 119 | def DoubleNegation : Prop := 120 | ∀a : Prop, (¬¬ a) → a 121 | 122 | /- For the proofs below, avoid using theorems from Lean's `Classical` namespace. 123 | 124 | 3.1 (**optional**). Prove the following implication using tactics. 125 | 126 | Hint: You will need `Or.elim` and `False.elim`. You can use 127 | `rw [ExcludedMiddle]` to unfold the definition of `ExcludedMiddle`, 128 | and similarly for `Peirce`. -/ 129 | 130 | theorem Peirce_of_EM : 131 | ExcludedMiddle → Peirce := 132 | sorry 133 | 134 | /- 3.2 (**optional**). Prove the following implication using tactics. -/ 135 | 136 | theorem DN_of_Peirce : 137 | Peirce → DoubleNegation := 138 | sorry 139 | 140 | /- We leave the remaining implication for the homework: -/ 141 | 142 | namespace SorryTheorems 143 | 144 | theorem EM_of_DN : 145 | DoubleNegation → ExcludedMiddle := 146 | sorry 147 | 148 | end SorryTheorems 149 | 150 | end BackwardProofs 151 | 152 | end LoVe 153 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe03_BackwardProofs_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe03_BackwardProofs_ExerciseSheet 5 | 6 | 7 | /- # LoVe Homework 3 (10 points): Backward Proofs 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. 
-/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | namespace BackwardProofs 20 | 21 | 22 | /- ## Question 1 (5 points): Connectives and Quantifiers 23 | 24 | 1.1 (4 points). Complete the following proofs using basic tactics such as 25 | `intro`, `apply`, and `exact`. 26 | 27 | Hint: Some strategies for carrying out such proofs are described at the end of 28 | Section 3.3 in the Hitchhiker's Guide. -/ 29 | 30 | theorem B (a b c : Prop) : 31 | (a → b) → (c → a) → c → b := 32 | sorry 33 | 34 | theorem S (a b c : Prop) : 35 | (a → b → c) → (a → b) → a → c := 36 | sorry 37 | 38 | theorem more_nonsense (a b c d : Prop) : 39 | ((a → b) → c → d) → c → b → d := 40 | sorry 41 | 42 | theorem even_more_nonsense (a b c : Prop) : 43 | (a → b) → (a → c) → a → b → c := 44 | sorry 45 | 46 | /- 1.2 (1 point). Prove the following theorem using basic tactics. -/ 47 | 48 | theorem weak_peirce (a b : Prop) : 49 | ((((a → b) → a) → a) → b) → b := 50 | sorry 51 | 52 | 53 | /- ## Question 2 (5 points): Logical Connectives 54 | 55 | 2.1 (1 point). Prove the following property about double negation using basic 56 | tactics. 57 | 58 | Hints: 59 | 60 | * Keep in mind that `¬ a` is defined as `a → False`. You can start by invoking 61 | `simp [Not]` if this helps you. 62 | 63 | * You will need to apply the elimination rule for `False` at a key point in the 64 | proof. -/ 65 | 66 | theorem herman (a : Prop) : 67 | ¬¬ (¬¬ a → a) := 68 | sorry 69 | 70 | /- 2.2 (2 points). Prove the missing link in our chain of classical axiom 71 | implications. 72 | 73 | Hints: 74 | 75 | * One way to find the definitions of `DoubleNegation` and `ExcludedMiddle` 76 | quickly is to 77 | 78 | 1. hold the Control (on Linux and Windows) or Command (on macOS) key pressed; 79 | 2. move the cursor to the identifier `DoubleNegation` or `ExcludedMiddle`; 80 | 3. click the identifier. 81 | 82 | * You can use `rw DoubleNegation` to unfold the definition of 83 | `DoubleNegation`, and similarly for the other definitions. 84 | 85 | * You will need to apply the double negation hypothesis for `a ∨ ¬ a`. You will 86 | also need the left and right introduction rules for `∨` at some point. -/ 87 | 88 | #check DoubleNegation 89 | #check ExcludedMiddle 90 | 91 | theorem EM_of_DN : 92 | DoubleNegation → ExcludedMiddle := 93 | sorry 94 | 95 | /- 2.3 (2 points). We have proved three of the six possible implications 96 | between `ExcludedMiddle`, `Peirce`, and `DoubleNegation`. State and prove the 97 | three missing implications, exploiting the three theorems we already have. -/ 98 | 99 | #check Peirce_of_EM 100 | #check DN_of_Peirce 101 | #check EM_of_DN 102 | 103 | -- enter your solution here 104 | 105 | end BackwardProofs 106 | 107 | end LoVe 108 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe04_ForwardProofs_Demo.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe02_ProgramsAndTheorems_Demo 5 | 6 | 7 | /- # LoVe Demo 4: Forward Proofs 8 | 9 | When developing a proof, often it makes sense to work __forward__: to start with 10 | what we already know and proceed step by step towards our goal. Lean's 11 | structured proofs and raw proof terms are two styles that support forward 12 | reasoning. 
-/ 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | namespace ForwardProofs 20 | 21 | 22 | /- ## Structured Constructs 23 | 24 | Structured proofs are syntactic sugar sprinkled on top of Lean's 25 | __proof terms__. 26 | 27 | The simplest kind of structured proof is the name of a theorem, possibly with 28 | arguments. -/ 29 | 30 | theorem add_comm (m n : ℕ) : 31 | add m n = add n m := 32 | sorry 33 | 34 | theorem add_comm_zero_left (n : ℕ) : 35 | add 0 n = add n 0 := 36 | add_comm 0 n 37 | 38 | /- The equivalent backward proof: -/ 39 | 40 | theorem add_comm_zero_left_by_exact (n : ℕ) : 41 | add 0 n = add n 0 := 42 | by exact add_comm 0 n 43 | 44 | /- `fix` and `assume` move `∀`-quantified variables and assumptions from the 45 | goal into the local context. They can be seen as structured versions of the 46 | `intro` tactic. 47 | 48 | `show` repeats the goal to prove. It is useful as documentation or to rephrase 49 | the goal (up to computation). -/ 50 | 51 | theorem fst_of_two_props : 52 | ∀a b : Prop, a → b → a := 53 | fix a b : Prop 54 | assume ha : a 55 | assume hb : b 56 | show a from 57 | ha 58 | 59 | theorem fst_of_two_props_show (a b : Prop) (ha : a) (hb : b) : 60 | a := 61 | show a from 62 | ha 63 | 64 | theorem fst_of_two_props_no_show (a b : Prop) (ha : a) (hb : b) : 65 | a := 66 | ha 67 | 68 | /- `have` proves an intermediate theorem, which can refer to the local 69 | context. -/ 70 | 71 | theorem prop_comp (a b c : Prop) (hab : a → b) (hbc : b → c) : 72 | a → c := 73 | assume ha : a 74 | have hb : b := 75 | hab ha 76 | have hc : c := 77 | hbc hb 78 | show c from 79 | hc 80 | 81 | theorem prop_comp_inline (a b c : Prop) (hab : a → b) 82 | (hbc : b → c) : 83 | a → c := 84 | assume ha : a 85 | show c from 86 | hbc (hab ha) 87 | 88 | 89 | /- ## Forward Reasoning about Connectives and Quantifiers -/ 90 | 91 | theorem And_swap (a b : Prop) : 92 | a ∧ b → b ∧ a := 93 | assume hab : a ∧ b 94 | have ha : a := 95 | And.left hab 96 | have hb : b := 97 | And.right hab 98 | show b ∧ a from 99 | And.intro hb ha 100 | 101 | theorem or_swap (a b : Prop) : 102 | a ∨ b → b ∨ a := 103 | assume hab : a ∨ b 104 | show b ∨ a from 105 | Or.elim hab 106 | (assume ha : a 107 | show b ∨ a from 108 | Or.inr ha) 109 | (assume hb : b 110 | show b ∨ a from 111 | Or.inl hb) 112 | 113 | def double (n : ℕ) : ℕ := 114 | n + n 115 | 116 | theorem nat_exists_double_iden : 117 | ∃n : ℕ, double n = n := 118 | Exists.intro 0 119 | (show double 0 = 0 from 120 | by rfl) 121 | 122 | theorem nat_exists_double_iden_no_show : 123 | ∃n : ℕ, double n = n := 124 | Exists.intro 0 (by rfl) 125 | 126 | theorem modus_ponens (a b : Prop) : 127 | (a → b) → a → b := 128 | assume hab : a → b 129 | assume ha : a 130 | show b from 131 | hab ha 132 | 133 | theorem not_not_intro (a : Prop) : 134 | a → ¬¬ a := 135 | assume ha : a 136 | assume hna : ¬ a 137 | show False from 138 | hna ha 139 | 140 | /- Just as you can apply forward reasoning inside a backward proof, you can 141 | apply backward reasoning inside a forward proof (indicated with `by`): -/ 142 | 143 | theorem Forall.one_point {α : Type} (t : α) (P : α → Prop) : 144 | (∀x, x = t → P x) ↔ P t := 145 | Iff.intro 146 | (assume hall : ∀x, x = t → P x 147 | show P t from 148 | by 149 | apply hall t 150 | rfl) 151 | (assume hp : P t 152 | fix x : α 153 | assume heq : x = t 154 | show P x from 155 | by 156 | rw [heq] 157 | exact hp) 158 | 159 | theorem beast_666 (beast : ℕ) : 160 | (∀n, n = 666 → beast ≥ n) ↔ beast ≥ 666 
:= 161 | Forall.one_point _ _ 162 | 163 | #print beast_666 164 | 165 | theorem Exists.one_point {α : Type} (t : α) (P : α → Prop) : 166 | (∃x : α, x = t ∧ P x) ↔ P t := 167 | Iff.intro 168 | (assume hex : ∃x, x = t ∧ P x 169 | show P t from 170 | Exists.elim hex 171 | (fix x : α 172 | assume hand : x = t ∧ P x 173 | have hxt : x = t := 174 | And.left hand 175 | have hpx : P x := 176 | And.right hand 177 | show P t from 178 | by 179 | rw [←hxt] 180 | exact hpx)) 181 | (assume hp : P t 182 | show ∃x : α, x = t ∧ P x from 183 | Exists.intro t 184 | (have tt : t = t := 185 | by rfl 186 | show t = t ∧ P t from 187 | And.intro tt hp)) 188 | 189 | 190 | /- ## Calculational Proofs 191 | 192 | In informal mathematics, we often use transitive chains of equalities, 193 | inequalities, or equivalences (e.g., `a ≥ b ≥ c`). In Lean, such calculational 194 | proofs are supported by `calc`. 195 | 196 | Syntax: 197 | 198 | calc 199 | _term₀_ _op₁_ _term₁_ := 200 | _proof₁_ 201 | _ _op₂_ _term₂_ := 202 | _proof₂_ 203 | ⋮ 204 | _ _opN_ _termN_ := 205 | _proofN_ -/ 206 | 207 | theorem two_mul_example (m n : ℕ) : 208 | 2 * m + n = m + n + m := 209 | calc 210 | 2 * m + n = m + m + n := 211 | by rw [Nat.two_mul] 212 | _ = m + n + m := 213 | by ac_rfl 214 | 215 | /- `calc` saves some repetition, some `have` labels, and some transitive 216 | reasoning: -/ 217 | 218 | theorem two_mul_example_have (m n : ℕ) : 219 | 2 * m + n = m + n + m := 220 | have hmul : 2 * m + n = m + m + n := 221 | by rw [Nat.two_mul] 222 | have hcomm : m + m + n = m + n + m := 223 | by ac_rfl 224 | show _ from 225 | Eq.trans hmul hcomm 226 | 227 | 228 | /- ## Forward Reasoning with Tactics 229 | 230 | The `have`, `let`, and `calc` structured proof commands are also available as a 231 | tactic. Even in tactic mode, it can be useful to state intermediate results and 232 | definitions in a forward fashion. -/ 233 | 234 | theorem prop_comp_tactical (a b c : Prop) (hab : a → b) 235 | (hbc : b → c) : 236 | a → c := 237 | by 238 | intro ha 239 | have hb : b := 240 | hab ha 241 | let c' := c 242 | have hc : c' := 243 | hbc hb 244 | exact hc 245 | 246 | 247 | /- ## Dependent Types 248 | 249 | Dependent types are the defining feature of the dependent type theory family of 250 | logics. 251 | 252 | Consider a function `pick` that take a number `n : ℕ` and that returns a number 253 | between 0 and `n`. Conceptually, `pick` has a dependent type, namely 254 | 255 | `(n : ℕ) → {i : ℕ // i ≤ n}` 256 | 257 | We can think of this type as a `ℕ`-indexed family, where each member's type may 258 | depend on the index: 259 | 260 | `pick n : {i : ℕ // i ≤ n}` 261 | 262 | But a type may also depend on another type, e.g., `List` (or `fun α ↦ List α`) 263 | and `fun α ↦ α → α`. 264 | 265 | A term may depend on a type, e.g., `fun α ↦ fun (x : α) ↦ x` (a polymorphic 266 | identity function). 267 | 268 | Of course, a term may also depend on a term. 269 | 270 | Unless otherwise specified, a __dependent type__ means a type depending on a 271 | term. This is what we mean when we say that simple type theory does not support 272 | dependent types. 273 | 274 | In summary, there are four cases for `fun x ↦ t` in the calculus of inductive 275 | constructions (cf. 
Barendregt's `λ`-cube): 276 | 277 | Body (`t`) | | Argument (`x`) | Description 278 | ---------- | ------------ | -------------- | ---------------------------------- 279 | A term | depending on | a term | Simply typed anonymous function 280 | A type | depending on | a term | Dependent type (strictly speaking) 281 | A term | depending on | a type | Polymorphic term 282 | A type | depending on | a type | Type constructor 283 | 284 | Revised typing rules: 285 | 286 | C ⊢ t : (x : σ) → τ[x] C ⊢ u : σ 287 | ———————————————————————————————————— App' 288 | C ⊢ t u : τ[u] 289 | 290 | C, x : σ ⊢ t : τ[x] 291 | ———————————————————————————————————— Fun' 292 | C ⊢ (fun x : σ ↦ t) : (x : σ) → τ[x] 293 | 294 | These two rules degenerate to `App` and `Fun` if `x` does not occur in `τ[x]` 295 | 296 | Example of `App'`: 297 | 298 | ⊢ pick : (n : ℕ) → {i : ℕ // i ≤ n} ⊢ 5 : ℕ 299 | ——————————————————————————————————————————————— App' 300 | ⊢ pick 5 : {i : ℕ // i ≤ 5} 301 | 302 | Example of `Fun'`: 303 | 304 | α : Type, x : α ⊢ x : α 305 | —————————————————————————————————— Fun or Fun' 306 | α : Type ⊢ (fun x : α ↦ x) : α → α 307 | ————————————————————————————————————————————————————— Fun' 308 | ⊢ (fun α : Type ↦ fun x : α ↦ x) : (α : Type) → α → α 309 | 310 | Remarkably, universal quantification is simply an alias for a dependent type: 311 | 312 | `∀x : σ, τ` := `(x : σ) → τ` 313 | 314 | This will become clearer below. 315 | 316 | 317 | ## The PAT Principle 318 | 319 | `→` is used both as the implication symbol and as the type constructor of 320 | functions. The two pairs of concepts not only look the same, they are the same, 321 | by the PAT principle: 322 | 323 | * PAT = propositions as types; 324 | * PAT = proofs as terms. 325 | 326 | Types: 327 | 328 | * `σ → τ` is the type of total functions from `σ` to `τ`. 329 | * `(x : σ) → τ[x]` is the dependent function type from `x : σ` to `τ[x]`. 330 | 331 | Propositions: 332 | 333 | * `P → Q` can be read as "`P` implies `Q`", or as the type of functions mapping 334 | proofs of `P` to proofs of `Q`. 335 | * `∀x : σ, Q[x]` can be read as "for all `x`, `Q[x]`", or as the type of 336 | functions of type `(x : σ) → Q[x]`, mapping values `x` of type `σ` to proofs 337 | of `Q[x]`. 338 | 339 | Terms: 340 | 341 | * A constant is a term. 342 | * A variable is a term. 343 | * `t u` is the application of function `t` to value `u`. 344 | * `fun x ↦ t[x]` is a function mapping `x` to `t[x]`. 345 | 346 | Proofs: 347 | 348 | * A theorem or hypothesis name is a proof. 349 | * `H t`, which instantiates the leading parameter or quantifier of proof `H`' 350 | statement with term `t`, is a proof. 351 | * `H G`, which discharges the leading assumption of `H`'s statement with 352 | proof `G`, is a proof. 353 | * `fun h : P ↦ H[h]` is a proof of `P → Q`, assuming `H[h]` is a proof of `Q` 354 | for `h : P`. 355 | * `fun x : σ ↦ H[x]` is a proof of `∀x : σ, Q[x]`, assuming `H[x]` is a proof 356 | of `Q[x]` for `x : σ`. -/ 357 | 358 | theorem And_swap_raw (a b : Prop) : 359 | a ∧ b → b ∧ a := 360 | fun hab : a ∧ b ↦ And.intro (And.right hab) (And.left hab) 361 | 362 | theorem And_swap_tactical (a b : Prop) : 363 | a ∧ b → b ∧ a := 364 | by 365 | intro hab 366 | apply And.intro 367 | apply And.right 368 | exact hab 369 | apply And.left 370 | exact hab 371 | 372 | /- Tactical proofs are reduced to proof terms. 
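As one more illustration of the PAT principle, a `∀`-statement is proved by a
`fun` over its bound variable, and instantiating the theorem is simply function
application (a small sketch; the theorem name is arbitrary):

    theorem Nat_eq_self : ∀n : ℕ, n = n :=
      fun n : ℕ ↦ Eq.refl n

    #check Nat_eq_self 5   -- a proof of `5 = 5`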
-/ 373 | 374 | #print And_swap 375 | #print And_swap_raw 376 | #print And_swap_tactical 377 | 378 | end ForwardProofs 379 | 380 | 381 | /- ## Induction by Pattern Matching and Recursion 382 | 383 | By the PAT principle, a proof by induction is the same as a recursively 384 | specified proof term. Thus, as alternative to the `induction` tactic, induction 385 | can also be done by pattern matching and recursion: 386 | 387 | * the induction hypothesis is then available under the name of the theorem we 388 | are proving; 389 | 390 | * well-foundedness of the argument is often proved automatically. -/ 391 | 392 | #check reverse 393 | 394 | theorem reverse_append {α : Type} : 395 | ∀xs ys : List α, 396 | reverse (xs ++ ys) = reverse ys ++ reverse xs 397 | | [], ys => by simp [reverse] 398 | | x :: xs, ys => by simp [reverse, reverse_append xs] 399 | 400 | theorem reverse_append_tactical {α : Type} (xs ys : List α) : 401 | reverse (xs ++ ys) = reverse ys ++ reverse xs := 402 | by 403 | induction xs with 404 | | nil => simp [reverse] 405 | | cons x xs' ih => simp [reverse, ih] 406 | 407 | theorem reverse_reverse {α : Type} : 408 | ∀xs : List α, reverse (reverse xs) = xs 409 | | [] => by rfl 410 | | x :: xs => 411 | by simp [reverse, reverse_append, reverse_reverse xs] 412 | 413 | end LoVe 414 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe04_ForwardProofs_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVelib 5 | 6 | 7 | /- # LoVe Exercise 4: Forward Proofs -/ 8 | 9 | 10 | set_option autoImplicit false 11 | set_option tactic.hygienic false 12 | 13 | namespace LoVe 14 | 15 | 16 | /- ## Question 1: Connectives and Quantifiers 17 | 18 | 1.1. Supply structured proofs of the following theorems. -/ 19 | 20 | theorem I (a : Prop) : 21 | a → a := 22 | sorry 23 | 24 | theorem K (a b : Prop) : 25 | a → b → b := 26 | sorry 27 | 28 | theorem C (a b c : Prop) : 29 | (a → b → c) → b → a → c := 30 | sorry 31 | 32 | theorem proj_fst (a : Prop) : 33 | a → a → a := 34 | sorry 35 | 36 | /- Please give a different answer than for `proj_fst`. -/ 37 | 38 | theorem proj_snd (a : Prop) : 39 | a → a → a := 40 | sorry 41 | 42 | theorem some_nonsense (a b c : Prop) : 43 | (a → b → c) → a → (a → c) → b → c := 44 | sorry 45 | 46 | /- 1.2. Supply a structured proof of the contraposition rule. -/ 47 | 48 | theorem contrapositive (a b : Prop) : 49 | (a → b) → ¬ b → ¬ a := 50 | sorry 51 | 52 | /- 1.3. Supply a structured proof of the distributivity of `∀` over `∧`. -/ 53 | 54 | theorem forall_and {α : Type} (p q : α → Prop) : 55 | (∀x, p x ∧ q x) ↔ (∀x, p x) ∧ (∀x, q x) := 56 | sorry 57 | 58 | /- 1.4 (**optional**). Supply a structured proof of the following property, 59 | which can be used to pull a `∀` quantifier past an `∃` quantifier. -/ 60 | 61 | theorem forall_exists_of_exists_forall {α : Type} (p : α → α → Prop) : 62 | (∃x, ∀y, p x y) → (∀y, ∃x, p x y) := 63 | sorry 64 | 65 | 66 | /- ## Question 2: Chain of Equalities 67 | 68 | 2.1. Write the following proof using `calc`. 69 | 70 | (a + b) * (a + b) 71 | = a * (a + b) + b * (a + b) 72 | = a * a + a * b + b * a + b * b 73 | = a * a + a * b + a * b + b * b 74 | = a * a + 2 * a * b + b * b 75 | 76 | Hint: This is a difficult question. 
You might need the tactics `simp` and 77 | `ac_rfl` and some of the theorems `mul_add`, `add_mul`, `add_comm`, `add_assoc`, 78 | `mul_comm`, `mul_assoc`, , and `Nat.two_mul`. -/ 79 | 80 | theorem binomial_square (a b : ℕ) : 81 | (a + b) * (a + b) = a * a + 2 * a * b + b * b := 82 | sorry 83 | 84 | /- 2.2 (**optional**). Prove the same argument again, this time as a structured 85 | proof, with `have` steps corresponding to the `calc` equations. Try to reuse as 86 | much of the above proof idea as possible, proceeding mechanically. -/ 87 | 88 | theorem binomial_square₂ (a b : ℕ) : 89 | (a + b) * (a + b) = a * a + 2 * a * b + b * b := 90 | sorry 91 | 92 | 93 | /- ## Question 3 (**optional**): One-Point Rules 94 | 95 | 3.1 (**optional**). Prove that the following wrong formulation of the one-point 96 | rule for `∀` is inconsistent, using a structured proof. -/ 97 | 98 | axiom All.one_point_wrong {α : Type} (t : α) (P : α → Prop) : 99 | (∀x : α, x = t ∧ P x) ↔ P t 100 | 101 | theorem All.proof_of_False : 102 | False := 103 | sorry 104 | 105 | /- 3.2 (**optional**). Prove that the following wrong formulation of the 106 | one-point rule for `∃` is inconsistent, using a structured proof. -/ 107 | 108 | axiom Exists.one_point_wrong {α : Type} (t : α) (P : α → Prop) : 109 | (∃x : α, x = t → P x) ↔ P t 110 | 111 | theorem Exists.proof_of_False : 112 | False := 113 | sorry 114 | 115 | end LoVe 116 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe04_ForwardProofs_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe03_BackwardProofs_ExerciseSheet 5 | 6 | 7 | /- # LoVe Homework 4 (10 points): Forward Proofs 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Question 1 (4 points): Logic Puzzles 21 | 22 | Consider the following tactical proof: -/ 23 | 24 | theorem about_Impl : 25 | ∀a b : Prop, ¬ a ∨ b → a → b := 26 | by 27 | intros a b hor ha 28 | apply Or.elim hor 29 | { intro hna 30 | apply False.elim 31 | apply hna 32 | exact ha } 33 | { intro hb 34 | exact hb } 35 | 36 | /- 1.1 (2 points). Prove the same theorem again, this time by providing a proof 37 | term. 38 | 39 | Hint: There is an easy way. -/ 40 | 41 | theorem about_Impl_term : 42 | ∀a b : Prop, ¬ a ∨ b → a → b := 43 | sorry 44 | 45 | /- 1.2 (2 points). Prove the same theorem again, this time by providing a 46 | structured proof, with `fix`, `assume`, and `show`. -/ 47 | 48 | theorem about_Impl_struct : 49 | ∀a b : Prop, ¬ a ∨ b → a → b := 50 | sorry 51 | 52 | 53 | /- ## Question 2 (6 points): Connectives and Quantifiers 54 | 55 | 2.1 (3 points). Supply a structured proof of the commutativity of `∨` under a 56 | `∀` quantifier, using no other theorems than the introduction and elimination 57 | rules for `∀`, `∨`, and `↔`. -/ 58 | 59 | theorem Or_comm_under_All {α : Type} (p q : α → Prop) : 60 | (∀x, p x ∨ q x) ↔ (∀x, q x ∨ p x) := 61 | sorry 62 | 63 | /- 2.2 (3 points). We have proved or stated three of the six possible 64 | implications between `ExcludedMiddle`, `Peirce`, and `DoubleNegation` in the 65 | exercise of lecture 3. 
Prove the three missing implications using structured 66 | proofs, exploiting the three theorems we already have. -/ 67 | 68 | namespace BackwardProofs 69 | 70 | #check Peirce_of_EM 71 | #check DN_of_Peirce 72 | #check SorryTheorems.EM_of_DN 73 | 74 | theorem Peirce_of_DN : 75 | DoubleNegation → Peirce := 76 | sorry 77 | 78 | theorem EM_of_Peirce : 79 | Peirce → ExcludedMiddle := 80 | sorry 81 | 82 | theorem dn_of_em : 83 | ExcludedMiddle → DoubleNegation := 84 | sorry 85 | 86 | end BackwardProofs 87 | 88 | end LoVe 89 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe05_FunctionalProgramming_Demo.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVelib 5 | 6 | 7 | /- # LoVe Demo 5: Functional Programming 8 | 9 | We take a closer look at the basics of typed functional programming: inductive 10 | types, proofs by induction, recursive functions, pattern matching, structures 11 | (records), and type classes. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Inductive Types 21 | 22 | Recall the definition of type `Nat`: -/ 23 | 24 | #print Nat 25 | 26 | /- Mottos: 27 | 28 | * **No junk**: The type contains no values beyond those expressible using the 29 | constructors. 30 | 31 | * **No confusion**: Values built in a different ways are different. 32 | 33 | For `Nat`: 34 | 35 | * "No junk" means that there are no special values, say, `–1` or `ε`, that 36 | cannot be expressed using a finite combination of `Nat.zero` and `Nat.succ`. 37 | 38 | * "No confusion" is what ensures that `Nat.zero` ≠ `Nat.succ n`. 39 | 40 | In addition, inductive types are always finite. `Nat.succ (Nat.succ …)` is not a 41 | value. 42 | 43 | 44 | ## Structural Induction 45 | 46 | __Structural induction__ is a generalization of mathematical induction to 47 | inductive types. To prove a property `P[n]` for all natural numbers `n`, it 48 | suffices to prove the base case 49 | 50 | `P[0]` 51 | 52 | and the induction step 53 | 54 | `∀k, P[k] → P[k + 1]` 55 | 56 | For lists, the base case is 57 | 58 | `P[[]]` 59 | 60 | and the induction step is 61 | 62 | `∀y ys, P[ys] → P[y :: ys]` 63 | 64 | In general, there is one subgoal per constructor, and induction hypotheses are 65 | available for all constructor arguments of the type we are doing the induction 66 | on. -/ 67 | 68 | theorem Nat.succ_neq_self (n : ℕ) : 69 | Nat.succ n ≠ n := 70 | by 71 | induction n with 72 | | zero => simp 73 | | succ n' ih => simp [ih] 74 | 75 | 76 | /- ## Structural Recursion 77 | 78 | __Structural recursion__ is a form of recursion that allows us to peel off 79 | one or more constructors from the value on which we recurse. Such functions are 80 | guaranteed to call themselves only finitely many times before the recursion 81 | stops. This is a prerequisite for establishing that the function terminates. -/ 82 | 83 | def fact : ℕ → ℕ 84 | | 0 => 1 85 | | n + 1 => (n + 1) * fact n 86 | 87 | def factThreeCases : ℕ → ℕ 88 | | 0 => 1 89 | | 1 => 1 90 | | n + 1 => (n + 1) * factThreeCases n 91 | 92 | /- For structurally recursive functions, Lean can automatically prove 93 | termination. For more general recursive schemes, the termination check may fail. 
94 | Sometimes it does so for a good reason, as in the following example: -/ 95 | 96 | /- 97 | -- fails 98 | def illegal : ℕ → ℕ 99 | | n => illegal n + 1 100 | -/ 101 | 102 | opaque immoral : ℕ → ℕ 103 | 104 | axiom immoral_eq (n : ℕ) : 105 | immoral n = immoral n + 1 106 | 107 | theorem proof_of_False : 108 | False := 109 | have hi : immoral 0 = immoral 0 + 1 := 110 | immoral_eq 0 111 | have him : 112 | immoral 0 - immoral 0 = immoral 0 + 1 - immoral 0 := 113 | by rw [←hi] 114 | have h0eq1 : 0 = 1 := 115 | by simp at him 116 | show False from 117 | by simp at h0eq1 118 | 119 | 120 | /- ## Pattern Matching Expressions 121 | 122 | `match` _term₁_, …, _termM_ `with` 123 | | _pattern₁₁_, …, _pattern₁M_ => _result₁_ 124 | ⋮ 125 | | _patternN₁_, …, _patternNM_ => _resultN_ 126 | 127 | `match` allows nonrecursive pattern matching within terms. -/ 128 | 129 | def bcount {α : Type} (p : α → Bool) : List α → ℕ 130 | | [] => 0 131 | | x :: xs => 132 | match p x with 133 | | true => bcount p xs + 1 134 | | false => bcount p xs 135 | 136 | def min (a b : ℕ) : ℕ := 137 | if a ≤ b then a else b 138 | 139 | 140 | /- ## Structures 141 | 142 | Lean provides a convenient syntax for defining records, or structures. These are 143 | essentially nonrecursive, single-constructor inductive types. -/ 144 | 145 | structure RGB where 146 | red : ℕ 147 | green : ℕ 148 | blue : ℕ 149 | 150 | #check RGB.mk 151 | #check RGB.red 152 | #check RGB.green 153 | #check RGB.blue 154 | 155 | namespace RGB_as_inductive 156 | 157 | /- The RGB structure definition is equivalent to the following set of 158 | definitions: -/ 159 | 160 | inductive RGB : Type where 161 | | mk : ℕ → ℕ → ℕ → RGB 162 | 163 | def RGB.red : RGB → ℕ 164 | | RGB.mk r _ _ => r 165 | 166 | def RGB.green : RGB → ℕ 167 | | RGB.mk _ g _ => g 168 | 169 | def RGB.blue : RGB → ℕ 170 | | RGB.mk _ _ b => b 171 | 172 | end RGB_as_inductive 173 | 174 | /- A new structure can be created by extending an existing structure: -/ 175 | 176 | structure RGBA extends RGB where 177 | alpha : ℕ 178 | 179 | /- An `RGBA` is a `RGB` with the extra field `alpha : ℕ`. -/ 180 | 181 | #print RGBA 182 | 183 | def pureRed : RGB := 184 | RGB.mk 0xff 0x00 0x00 185 | 186 | def pureGreen : RGB := 187 | { red := 0x00 188 | green := 0xff 189 | blue := 0x00 } 190 | 191 | def semitransparentGreen : RGBA := 192 | { pureGreen with 193 | alpha := 0x7f } 194 | 195 | #print pureRed 196 | #print pureGreen 197 | #print semitransparentGreen 198 | 199 | def shuffle (c : RGB) : RGB := 200 | { red := RGB.green c 201 | green := RGB.blue c 202 | blue := RGB.red c } 203 | 204 | /- Alternative definition using pattern matching: -/ 205 | 206 | def shufflePattern : RGB → RGB 207 | | RGB.mk r g b => RGB.mk g b r 208 | 209 | theorem shuffle_shuffle_shuffle (c : RGB) : 210 | shuffle (shuffle (shuffle c)) = c := 211 | by rfl 212 | 213 | 214 | /- ## Type Classes 215 | 216 | A __type class__ is a structure type combining abstract constants and their 217 | properties. A type can be declared an instance of a type class by providing 218 | concrete definitions for the constants and proving that the properties hold. 219 | Based on the type, Lean retrieves the relevant instance. 
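For instance, the `Inhabited` type class printed below can be thought of, roughly, as a structure with a single abstract constant and no properties (the actual definition in Lean's core library is universe-polymorphic; this is only a simplified sketch):

    class Inhabited (α : Type) where
      default : α

The instances declared below supply a concrete `default` value for specific types such as `ℕ` and `List α`.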
-/ 220 | 221 | #print Inhabited 222 | 223 | instance Nat.Inhabited : Inhabited ℕ := 224 | { default := 0 } 225 | 226 | instance List.Inhabited {α : Type} : Inhabited (List α) := 227 | { default := [] } 228 | 229 | #eval (Inhabited.default : ℕ) 230 | #eval (Inhabited.default : List Int) 231 | 232 | def head {α : Type} [Inhabited α] : List α → α 233 | | [] => Inhabited.default 234 | | x :: _ => x 235 | 236 | theorem head_head {α : Type} [Inhabited α] (xs : List α) : 237 | head [head xs] = head xs := 238 | by rfl 239 | 240 | #eval head ([] : List ℕ) 241 | 242 | #check List.head 243 | 244 | instance Fun.Inhabited {α β : Type} [Inhabited β] : 245 | Inhabited (α → β) := 246 | { default := fun a : α ↦ Inhabited.default } 247 | 248 | instance Prod.Inhabited {α β : Type} 249 | [Inhabited α] [Inhabited β] : 250 | Inhabited (α × β) := 251 | { default := (Inhabited.default, Inhabited.default) } 252 | 253 | /- We encountered these type classes in lecture 3: -/ 254 | 255 | #print IsCommutative 256 | #print IsAssociative 257 | 258 | 259 | /- ## Lists 260 | 261 | `List` is an inductive polymorphic type constructed from `List.nil` and 262 | `List.cons`: -/ 263 | 264 | #print List 265 | 266 | /- `cases` performs a case distinction on the specified term. This gives rise 267 | to as many subgoals as there are constructors in the definition of the term's 268 | type. The tactic behaves the same as `induction` except that it does not 269 | produce induction hypotheses. Here is a contrived example: -/ 270 | 271 | theorem head_head_cases {α : Type} [Inhabited α] 272 | (xs : List α) : 273 | head [head xs] = head xs := 274 | by 275 | cases xs with 276 | | nil => rfl 277 | | cons x xs' => rfl 278 | 279 | /- `match` is the structured equivalent: -/ 280 | 281 | theorem head_head_match {α : Type} [Inhabited α] 282 | (xs : List α) : 283 | head [head xs] = head xs := 284 | match xs with 285 | | List.nil => by rfl 286 | | List.cons x xs' => by rfl 287 | 288 | /- `cases` can also be used on a hypothesis of the form `l = r`. It matches `r` 289 | against `l` and replaces all occurrences of the variables occurring in `r` with 290 | the corresponding terms in `l` everywhere in the goal. -/ 291 | 292 | theorem injection_example {α : Type} (x y : α) (xs ys : List α) 293 | (h : x :: xs = y :: ys) : 294 | x = y ∧ xs = ys := 295 | by 296 | cases h 297 | simp 298 | 299 | /- If `r` fails to match `l`, no subgoals emerge; the proof is complete. 
-/ 300 | 301 | theorem distinctness_example {α : Type} (y : α) (ys : List α) 302 | (h : [] = y :: ys) : 303 | false := 304 | by cases h 305 | 306 | def map {α β : Type} (f : α → β) : List α → List β 307 | | [] => [] 308 | | x :: xs => f x :: map f xs 309 | 310 | def mapArgs {α β : Type} : (α → β) → List α → List β 311 | | _, [] => [] 312 | | f, x :: xs => f x :: mapArgs f xs 313 | 314 | #check List.map 315 | 316 | theorem map_ident {α : Type} (xs : List α) : 317 | map (fun x ↦ x) xs = xs := 318 | by 319 | induction xs with 320 | | nil => rfl 321 | | cons x xs' ih => simp [map, ih] 322 | 323 | theorem map_comp {α β γ : Type} (f : α → β) (g : β → γ) 324 | (xs : List α) : 325 | map g (map f xs) = map (fun x ↦ g (f x)) xs := 326 | by 327 | induction xs with 328 | | nil => rfl 329 | | cons x xs' ih => simp [map, ih] 330 | 331 | theorem map_append {α β : Type} (f : α → β) 332 | (xs ys : List α) : 333 | map f (xs ++ ys) = map f xs ++ map f ys := 334 | by 335 | induction xs with 336 | | nil => rfl 337 | | cons x xs' ih => simp [map, ih] 338 | 339 | def tail {α : Type} : List α → List α 340 | | [] => [] 341 | | _ :: xs => xs 342 | 343 | def headOpt {α : Type} : List α → Option α 344 | | [] => Option.none 345 | | x :: _ => Option.some x 346 | 347 | def headPre {α : Type} : (xs : List α) → xs ≠ [] → α 348 | | [], hxs => by simp at * 349 | | x :: _, hxs => x 350 | 351 | #eval headOpt [3, 1, 4] 352 | #eval headPre [3, 1, 4] (by simp) 353 | 354 | def zip {α β : Type} : List α → List β → List (α × β) 355 | | x :: xs, y :: ys => (x, y) :: zip xs ys 356 | | [], _ => [] 357 | | _ :: _, [] => [] 358 | 359 | #check List.zip 360 | 361 | def length {α : Type} : List α → ℕ 362 | | [] => 0 363 | | x :: xs => length xs + 1 364 | 365 | #check List.length 366 | 367 | /- `cases` can also be used to perform a case distinction on a proposition, in 368 | conjunction with `Classical.em`. Two cases emerge: one in which the proposition 369 | is true and one in which it is false. -/ 370 | 371 | #check Classical.em 372 | 373 | theorem min_add_add (l m n : ℕ) : 374 | min (m + l) (n + l) = min m n + l := 375 | by 376 | cases Classical.em (m ≤ n) with 377 | | inl h => simp [min, h] 378 | | inr h => simp [min, h] 379 | 380 | theorem min_add_add_match (l m n : ℕ) : 381 | min (m + l) (n + l) = min m n + l := 382 | match Classical.em (m ≤ n) with 383 | | Or.inl h => by simp [min, h] 384 | | Or.inr h => by simp [min, h] 385 | 386 | theorem min_add_add_if (l m n : ℕ) : 387 | min (m + l) (n + l) = min m n + l := 388 | if h : m ≤ n then 389 | by simp [min, h] 390 | else 391 | by simp [min, h] 392 | 393 | theorem length_zip {α β : Type} (xs : List α) (ys : List β) : 394 | length (zip xs ys) = min (length xs) (length ys) := 395 | by 396 | induction xs generalizing ys with 397 | | nil => simp [min, length] 398 | | cons x xs' ih => 399 | cases ys with 400 | | nil => rfl 401 | | cons y ys' => simp [zip, length, ih ys', min_add_add] 402 | 403 | theorem map_zip {α α' β β' : Type} (f : α → α') 404 | (g : β → β') : 405 | ∀xs ys, 406 | map (fun ab : α × β ↦ (f (Prod.fst ab), g (Prod.snd ab))) 407 | (zip xs ys) = 408 | zip (map f xs) (map g ys) 409 | | x :: xs, y :: ys => by simp [zip, map, map_zip f g xs ys] 410 | | [], _ => by rfl 411 | | _ :: _, [] => by rfl 412 | 413 | 414 | /- ## Binary Trees 415 | 416 | Inductive types with constructors taking several recursive arguments define 417 | tree-like objects. __Binary trees__ have nodes with at most two children. 
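The `Tree` type printed below is provided by `LoVelib`. Roughly, it is an inductive type with two constructors, one for the empty tree and one for a labeled node with two subtrees (a simplified sketch, not a verbatim copy of the library definition):

    inductive Tree (α : Type) : Type where
      | nil  : Tree α
      | node : α → Tree α → Tree α → Tree α

Thus `Tree.node a l r` is a node carrying the label `a` with left subtree `l` and right subtree `r`, exactly as used by `mirror` below.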
-/ 418 | 419 | #print Tree 420 | 421 | /- The type `AExp` of arithmetic expressions was also an example of a tree data 422 | structure. 423 | 424 | The nodes of a tree, whether inner nodes or leaf nodes, often carry labels or 425 | other annotations. 426 | 427 | Inductive trees contain no infinite branches, not even cycles. This is less 428 | expressive than pointer- or reference-based data structures (in imperative 429 | languages) but easier to reason about. 430 | 431 | Recursive definitions (and proofs by induction) work roughly as for lists, but 432 | we may need to recurse (or invoke the induction hypothesis) on several child 433 | nodes. -/ 434 | 435 | def mirror {α : Type} : Tree α → Tree α 436 | | Tree.nil => Tree.nil 437 | | Tree.node a l r => Tree.node a (mirror r) (mirror l) 438 | 439 | theorem mirror_mirror {α : Type} (t : Tree α) : 440 | mirror (mirror t) = t := 441 | by 442 | induction t with 443 | | nil => rfl 444 | | node a l r ih_l ih_r => simp [mirror, ih_l, ih_r] 445 | 446 | theorem mirror_mirror_calc {α : Type} : 447 | ∀t : Tree α, mirror (mirror t) = t 448 | | Tree.nil => by rfl 449 | | Tree.node a l r => 450 | calc 451 | mirror (mirror (Tree.node a l r)) 452 | = mirror (Tree.node a (mirror r) (mirror l)) := 453 | by rfl 454 | _ = Tree.node a (mirror (mirror l)) 455 | (mirror (mirror r)) := 456 | by rfl 457 | _ = Tree.node a l (mirror (mirror r)) := 458 | by rw [mirror_mirror_calc l] 459 | _ = Tree.node a l r := 460 | by rw [mirror_mirror_calc r] 461 | 462 | theorem mirror_Eq_nil_Iff {α : Type} : 463 | ∀t : Tree α, mirror t = Tree.nil ↔ t = Tree.nil 464 | | Tree.nil => by simp [mirror] 465 | | Tree.node _ _ _ => by simp [mirror] 466 | 467 | 468 | /- ## Dependent Inductive Types (**optional**) -/ 469 | 470 | inductive Vec (α : Type) : ℕ → Type where 471 | | nil : Vec α 0 472 | | cons (a : α) {n : ℕ} (v : Vec α n) : Vec α (n + 1) 473 | 474 | #check Vec.nil 475 | #check Vec.cons 476 | 477 | def listOfVec {α : Type} : ∀{n : ℕ}, Vec α n → List α 478 | | _, Vec.nil => [] 479 | | _, Vec.cons a v => a :: listOfVec v 480 | 481 | def vecOfList {α : Type} : 482 | ∀xs : List α, Vec α (List.length xs) 483 | | [] => Vec.nil 484 | | x :: xs => Vec.cons x (vecOfList xs) 485 | 486 | theorem length_listOfVec {α : Type} : 487 | ∀(n : ℕ) (v : Vec α n), List.length (listOfVec v) = n 488 | | _, Vec.nil => by rfl 489 | | _, Vec.cons a v => 490 | by simp [listOfVec, length_listOfVec _ v] 491 | 492 | end LoVe 493 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe05_FunctionalProgramming_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe04_ForwardProofs_Demo 5 | 6 | 7 | /- # LoVe Exercise 5: Functional Programming 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: Reverse of a List 19 | 20 | We define an accumulator-based variant of `reverse`. The first argument, `as`, 21 | serves as the accumulator. This definition is __tail-recursive__, meaning that 22 | compilers and interpreters can easily optimize the recursion away, resulting in 23 | more efficient code. 
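To see how the accumulator `as` builds up the reversed list, it helps to unfold a small example by hand, using the defining equations below:

    reverseAccu [] [1, 2, 3]
    = reverseAccu [1] [2, 3]
    = reverseAccu [2, 1] [3]
    = reverseAccu [3, 2, 1] []
    = [3, 2, 1]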
-/ 24 | 25 | def reverseAccu {α : Type} : List α → List α → List α 26 | | as, [] => as 27 | | as, x :: xs => reverseAccu (x :: as) xs 28 | 29 | /- 1.1. Our intention is that `reverseAccu [] xs` should be equal to 30 | `reverse xs`. But if we start an induction, we quickly see that the induction 31 | hypothesis is not strong enough. Start by proving the following generalization 32 | (using the `induction` tactic or pattern matching): -/ 33 | 34 | theorem reverseAccu_Eq_reverse_append {α : Type} : 35 | ∀as xs : List α, reverseAccu as xs = reverse xs ++ as := 36 | sorry 37 | 38 | /- 1.2. Derive the desired equation. -/ 39 | 40 | theorem reverseAccu_eq_reverse {α : Type} (xs : List α) : 41 | reverseAccu [] xs = reverse xs := 42 | sorry 43 | 44 | /- 1.3. Prove the following property. 45 | 46 | Hint: A one-line inductionless proof is possible. -/ 47 | 48 | theorem reverseAccu_reverseAccu {α : Type} (xs : List α) : 49 | reverseAccu [] (reverseAccu [] xs) = xs := 50 | sorry 51 | 52 | /- 1.4. Prove the following theorem by structural induction, as a "paper" 53 | proof. This is a good exercise to develop a deeper understanding of how 54 | structural induction works (and is good practice for the final exam). 55 | 56 | theorem reverseAccu_Eq_reverse_append {α : Type} : 57 | ∀as xs : list α, reverseAccu as xs = reverse xs ++ as 58 | 59 | Guidelines for paper proofs: 60 | 61 | We expect detailed, rigorous, mathematical proofs. You are welcome to use 62 | standard mathematical notation or Lean structured commands (e.g., `assume`, 63 | `have`, `show`, `calc`). You can also use tactical proofs (e.g., `intro`, 64 | `apply`), but then please indicate some of the intermediate goals, so that we 65 | can follow the chain of reasoning. 66 | 67 | Major proof steps, including applications of induction and invocation of the 68 | induction hypothesis, must be stated explicitly. For each case of a proof by 69 | induction, you must list the induction hypotheses assumed (if any) and the goal 70 | to be proved. Minor proof steps corresponding to `rfl` or `simp` need not be 71 | justified if you think they are obvious (to humans), but you should say which 72 | key theorems they depend on. You should be explicit whenever you use a function 73 | definition. -/ 74 | 75 | -- enter your paper proof here 76 | 77 | 78 | /- ## Question 2: Drop and Take 79 | 80 | The `drop` function removes the first `n` elements from the front of a list. -/ 81 | 82 | def drop {α : Type} : ℕ → List α → List α 83 | | 0, xs => xs 84 | | _ + 1, [] => [] 85 | | m + 1, _ :: xs => drop m xs 86 | 87 | /- 2.1. Define the `take` function, which returns a list consisting of the first 88 | `n` elements at the front of a list. 89 | 90 | To avoid unpleasant surprises in the proofs, we recommend that you follow the 91 | same recursion pattern as for `drop` above. -/ 92 | 93 | def take {α : Type} : ℕ → List α → List α := 94 | sorry 95 | 96 | #eval take 0 [3, 7, 11] -- expected: [] 97 | #eval take 1 [3, 7, 11] -- expected: [3] 98 | #eval take 2 [3, 7, 11] -- expected: [3, 7] 99 | #eval take 3 [3, 7, 11] -- expected: [3, 7, 11] 100 | #eval take 4 [3, 7, 11] -- expected: [3, 7, 11] 101 | 102 | #eval take 2 ["a", "b", "c"] -- expected: ["a", "b"] 103 | 104 | /- 2.2. Prove the following theorems, using `induction` or pattern matching. 105 | Notice that they are registered as simplification rules thanks to the `@[simp]` 106 | attribute. 
-/ 107 | 108 | @[simp] theorem drop_nil {α : Type} : 109 | ∀n : ℕ, drop n ([] : List α) = [] := 110 | sorry 111 | 112 | @[simp] theorem take_nil {α : Type} : 113 | ∀n : ℕ, take n ([] : List α) = [] := 114 | sorry 115 | 116 | /- 2.3. Follow the recursion pattern of `drop` and `take` to prove the 117 | following theorems. In other words, for each theorem, there should be three 118 | cases, and the third case will need to invoke the induction hypothesis. 119 | 120 | Hint: Note that there are three variables in the `drop_drop` theorem (but only 121 | two arguments to `drop`). For the third case, `← add_assoc` might be useful. -/ 122 | 123 | theorem drop_drop {α : Type} : 124 | ∀(m n : ℕ) (xs : List α), drop n (drop m xs) = drop (n + m) xs 125 | | 0, n, xs => by rfl 126 | -- supply the two missing cases here 127 | 128 | theorem take_take {α : Type} : 129 | ∀(m : ℕ) (xs : List α), take m (take m xs) = take m xs := 130 | sorry 131 | 132 | theorem take_drop {α : Type} : 133 | ∀(n : ℕ) (xs : List α), take n xs ++ drop n xs = xs := 134 | sorry 135 | 136 | 137 | /- ## Question 3: A Type of Terms 138 | 139 | 3.1. Define an inductive type corresponding to the terms of the untyped 140 | λ-calculus, as given by the following grammar: 141 | 142 | Term ::= `var` String -- variable (e.g., `x`) 143 | | `lam` String Term -- λ-expression (e.g., `λx. t`) 144 | | `app` Term Term -- application (e.g., `t u`) -/ 145 | 146 | -- enter your definition here 147 | 148 | /- 3.2 (**optional**). Register a textual representation of the type `term` as 149 | an instance of the `Repr` type class. Make sure to supply enough parentheses to 150 | guarantee that the output is unambiguous. -/ 151 | 152 | def Term.repr : Term → String 153 | -- enter your answer here 154 | 155 | instance Term.Repr : Repr Term := 156 | { reprPrec := fun t prec ↦ Term.repr t } 157 | 158 | /- 3.3 (**optional**). Test your textual representation. The following command 159 | should print something like `(λx. ((y x) x))`. -/ 160 | 161 | #eval (Term.lam "x" (Term.app (Term.app (Term.var "y") (Term.var "x")) 162 | (Term.var "x"))) 163 | 164 | end LoVe 165 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe05_FunctionalProgramming_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe05_FunctionalProgramming_Demo 5 | 6 | 7 | /- # LoVe Homework 5 (10 points + 2 bonus points): Functional Programming 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Question 1 (6 points): Huffman Trees 21 | 22 | Consider the following type of weighted binary trees: -/ 23 | 24 | inductive HTree (α : Type) 25 | | leaf : ℕ → α → HTree α 26 | | inner : ℕ → HTree α → HTree α → HTree α 27 | 28 | /- Each constructor corresponds to a kind of node. An `HTree.leaf` node stores a 29 | numeric weight and a label of some type `α`, and an `HTree.inner` node stores a 30 | numeric weight, a left subtree, and a right subtree. 31 | 32 | 1.1 (1 point). 
Define a polymorphic Lean function called `weight` that takes a 33 | tree over some type variable `α` and that returns the weight component of the 34 | root node of the tree: -/ 35 | 36 | def weight {α : Type} : HTree α → ℕ := 37 | sorry 38 | 39 | /- 1.2 (1 point). Define a polymorphic Lean function called `unite` that takes 40 | two trees `l, r : HTree α` and that returns a new tree such that (1) its left 41 | child is `l`; (2) its right child is `r`; and (3) its weight is the sum of the 42 | weights of `l` and `r`. -/ 43 | 44 | def unite {α : Type} : HTree α → HTree α → HTree α := 45 | sorry 46 | 47 | /- 1.3 (2 points). Consider the following `insort` function, which inserts a 48 | tree `u` in a list of trees that is sorted by increasing weight and which 49 | preserves the sorting. (If the input list is not sorted, the result is not 50 | necessarily sorted.) -/ 51 | 52 | def insort {α : Type} (u : HTree α) : List (HTree α) → List (HTree α) 53 | | [] => [u] 54 | | t :: ts => if weight u ≤ weight t then u :: t :: ts else t :: insort u ts 55 | 56 | /- Prove that `insort`ing a tree into a list cannot yield the empty list: -/ 57 | 58 | theorem insort_Neq_nil {α : Type} (t : HTree α) : 59 | ∀ts : List (HTree α), insort t ts ≠ [] := 60 | sorry 61 | 62 | /- 1.4 (2 points). Prove the same property as above again, this time as a 63 | "paper" proof. Follow the guidelines given in question 1.4 of the exercise. -/ 64 | 65 | -- enter your paper proof here 66 | 67 | 68 | /- ## Question 2 (4 points + 2 bonus points): Gauss's Summation Formula 69 | 70 | `sumUpToOfFun f n = f 0 + f 1 + ⋯ + f n`: -/ 71 | 72 | def sumUpToOfFun (f : ℕ → ℕ) : ℕ → ℕ 73 | | 0 => f 0 74 | | m + 1 => sumUpToOfFun f m + f (m + 1) 75 | 76 | /- 2.1 (2 points). Prove the following theorem, discovered by Carl Friedrich 77 | Gauss as a pupil. 78 | 79 | Hints: 80 | 81 | * The `mul_add` and `add_mul` theorems might be useful to reason about 82 | multiplication. 83 | 84 | * The `linarith` tactic introduced in lecture 6 might be useful to reason about 85 | addition. -/ 86 | 87 | /- PAUL: I can't see any reference to `linarith` in the file. Is this something you 88 | mention in the lecture but not the demo file? It would be worth summarising 89 | what it is, or referring to the Guide. 90 | Actually, I just looked ahead, there is a comment in chapter 6. Maybe copy to here? -/ 91 | 92 | #check mul_add 93 | #check add_mul 94 | 95 | theorem sumUpToOfFun_eq : 96 | ∀m : ℕ, 2 * sumUpToOfFun (fun i ↦ i) m = m * (m + 1) := 97 | sorry 98 | 99 | /- 2.2 (2 points). Prove the following property of `sumUpToOfFun`. -/ 100 | 101 | theorem sumUpToOfFun_mul (f g : ℕ → ℕ) : 102 | ∀n : ℕ, sumUpToOfFun (fun i ↦ f i + g i) n = 103 | sumUpToOfFun f n + sumUpToOfFun g n := 104 | sorry 105 | 106 | /- 2.3 (2 bonus points). Prove `sumUpToOfFun_mul` again as a "paper" proof. 107 | Follow the guidelines given in question 1.4 of the exercise. -/ 108 | 109 | -- enter your paper proof here 110 | 111 | end LoVe 112 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe06_InductivePredicates_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe06_InductivePredicates_Demo 5 | 6 | 7 | /- # LoVe Exercise 6: Inductive Predicates 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. 
-/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: Even and Odd 19 | 20 | The `Even` predicate is `True` for even numbers and `False` for odd numbers. -/ 21 | 22 | #check Even 23 | 24 | /- We define `Odd` as the negation of `Even`: -/ 25 | 26 | def Odd (n : ℕ) : Prop := 27 | ¬ Even n 28 | 29 | /- 1.1. Prove that 1 is odd and register this fact as a simp rule. 30 | 31 | Hint: `cases` or `induction` is useful to reason about hypotheses of the form 32 | `Even …`. -/ 33 | 34 | @[simp] theorem Odd_1 : 35 | Odd 1 := 36 | sorry 37 | 38 | /- 1.2. Prove that 3 and 5 are odd. -/ 39 | 40 | -- enter your answer here 41 | 42 | /- 1.3. Complete the following proof by structural induction. -/ 43 | 44 | theorem Even_two_times : 45 | ∀m : ℕ, Even (2 * m) 46 | | 0 => Even.zero 47 | | m + 1 => 48 | sorry 49 | 50 | 51 | /- ## Question 2: Tennis Games 52 | 53 | Recall the inductive type of tennis scores from the demo: -/ 54 | 55 | #check Score 56 | 57 | /- 2.1. Define an inductive predicate that returns `True` if the server is 58 | ahead of the receiver and that returns `False` otherwise. -/ 59 | 60 | inductive ServAhead : Score → Prop 61 | -- enter the missing cases here 62 | 63 | /- 2.2. Validate your predicate definition by proving the following theorems. -/ 64 | 65 | theorem ServAhead_vs {m n : ℕ} (hgt : m > n) : 66 | ServAhead (Score.vs m n) := 67 | sorry 68 | 69 | theorem ServAhead_advServ : 70 | ServAhead Score.advServ := 71 | sorry 72 | 73 | theorem not_ServAhead_advRecv : 74 | ¬ ServAhead Score.advRecv := 75 | sorry 76 | 77 | theorem ServAhead_gameServ : 78 | ServAhead Score.gameServ := 79 | sorry 80 | 81 | theorem not_ServAhead_gameRecv : 82 | ¬ ServAhead Score.gameRecv := 83 | sorry 84 | 85 | /- 2.3. Compare the above theorem statements with your definition. What do you 86 | observe? -/ 87 | 88 | -- enter your answer here 89 | 90 | 91 | /- ## Question 3: Binary Trees 92 | 93 | 3.1. Prove the converse of `IsFull_mirror`. You may exploit already proved 94 | theorems (e.g., `IsFull_mirror`, `mirror_mirror`). -/ 95 | 96 | #check IsFull_mirror 97 | #check mirror_mirror 98 | 99 | theorem mirror_IsFull {α : Type} : 100 | ∀t : Tree α, IsFull (mirror t) → IsFull t := 101 | sorry 102 | 103 | /- 3.2. Define a `map` function on binary trees, similar to `List.map`. -/ 104 | 105 | def Tree.map {α β : Type} (f : α → β) : Tree α → Tree β := 106 | sorry 107 | 108 | /- 3.3. Prove the following theorem by case distinction. -/ 109 | 110 | theorem Tree.map_eq_empty_iff {α β : Type} (f : α → β) : 111 | ∀t : Tree α, Tree.map f t = Tree.nil ↔ t = Tree.nil := 112 | sorry 113 | 114 | /- 3.4 (**optional**). Prove the following theorem by rule induction. -/ 115 | 116 | theorem map_mirror {α β : Type} (f : α → β) : 117 | ∀t : Tree α, IsFull t → IsFull (Tree.map f t) := 118 | sorry 119 | 120 | end LoVe 121 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe06_InductivePredicates_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVelib 5 | 6 | 7 | /- # LoVe Homework 6 (10 points): Inductive Predicates 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. 
-/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Question 1 (4 points): A Type of Terms 21 | 22 | Recall the type of terms from question 3 of exercise 5: -/ 23 | 24 | inductive Term : Type 25 | | var : String → Term 26 | | lam : String → Term → Term 27 | | app : Term → Term → Term 28 | 29 | /- 1.1 (2 points). Define an inductive predicate `IsLam` that returns `True` if 30 | its argument is of the form `Term.lam …` and that returns `False` otherwise. -/ 31 | 32 | -- enter your definition here 33 | 34 | /- 1.2 (2 points). Validate your answer to question 1.1 by proving the following 35 | theorems: -/ 36 | 37 | theorem IsLam_lam (s : String) (t : Term) : 38 | IsLam (Term.lam s t) := 39 | sorry 40 | 41 | theorem not_IsLamVar (s : String) : 42 | ¬ IsLam (Term.var s) := 43 | sorry 44 | 45 | theorem not_IsLam_app (t u : Term) : 46 | ¬ IsLam (Term.app t u) := 47 | sorry 48 | 49 | 50 | /- ## Question 2 (6 points): Transitive Closure 51 | 52 | In mathematics, the transitive closure `R⁺` of a binary relation `R` over a 53 | set `A` can be defined as the smallest solution satisfying the following rules: 54 | 55 | (base) for all `a, b ∈ A`, if `a R b`, then `a R⁺ b`; 56 | (step) for all `a, b, c ∈ A`, if `a R b` and `b R⁺ c`, then `a R⁺ c`. 57 | 58 | In Lean, we can define this notion as follows, by identifying the set `A` with 59 | the type `α`: -/ 60 | 61 | inductive TCV1 {α : Type} (R : α → α → Prop) : α → α → Prop 62 | | base (a b : α) : R a b → TCV1 R a b 63 | | step (a b c : α) : R a b → TCV1 R b c → TCV1 R a c 64 | 65 | /- 2.1 (2 points). Rule `(step)` makes it convenient to extend transitive chains 66 | by adding links to the left. Another way to define the transitive closure `R⁺` 67 | would use replace `(step)` with the following right-leaning rule: 68 | 69 | (pets) for all `a, b, c ∈ A`, if `a R⁺ b` and `b R c`, then `a R⁺ c`. 70 | 71 | Define a predicate `TCV2` that embodies this alternative definition. -/ 72 | 73 | -- enter your definition here 74 | 75 | /- 2.2 (2 points). Yet another definition of the transitive closure `R⁺` would 76 | use the following symmetric rule instead of `(step)` or `(pets)`: 77 | 78 | (trans) for all `a, b, c ∈ A`, if `a R⁺ b` and `b R⁺ c`, then `a R⁺ c`. 79 | 80 | Define a predicate `TCV3` that embodies this alternative definition. -/ 81 | 82 | -- enter your definition here 83 | 84 | /- 2.3 (1 point). Prove that `(step)` also holds as a theorem about `TCV3`. -/ 85 | 86 | theorem TCV3_step {α : Type} (R : α → α → Prop) (a b c : α) (rab : R a b) 87 | (tbc : TCV3 R b c) : 88 | TCV3 R a c := 89 | sorry 90 | 91 | /- 2.4 (1 point). Prove the following theorem by rule induction: -/ 92 | 93 | theorem TCV1_pets {α : Type} (R : α → α → Prop) (c : α) : 94 | ∀a b, TCV1 R a b → R b c → TCV1 R a c := 95 | sorry 96 | 97 | end LoVe 98 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe07_EffectfulProgramming_Demo.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVelib 5 | 6 | 7 | /- # LoVe Demo 7: Effectful Programming 8 | 9 | Monads are an important functional programming abstraction. They generalize 10 | computation with side effects, offering effectful programming in a pure 11 | functional programming language. 
Haskell has shown that they can be used very 12 | successfully to write imperative programs. For us, they are interesting in their 13 | own right and for two more reasons: 14 | 15 | * They provide a nice example of axiomatic reasoning. 16 | 17 | * They are needed for programming Lean itself (metaprogramming, lecture 8). -/ 18 | 19 | 20 | set_option autoImplicit false 21 | set_option tactic.hygienic false 22 | 23 | namespace LoVe 24 | 25 | 26 | /- ## Introductory Example 27 | 28 | Consider the following programming task: 29 | 30 | Implement a function `sum257 ns` that sums up the second, fifth, and 31 | seventh items of a list `ns` of natural numbers. Use `Option ℕ` for the 32 | result so that if the list has fewer than seven elements, you can return 33 | `Option.none`. 34 | 35 | A straightforward solution follows: -/ 36 | 37 | def nth {α : Type} : List α → Nat → Option α 38 | | [], _ => Option.none 39 | | x :: _, 0 => Option.some x 40 | | _ :: xs, n + 1 => nth xs n 41 | 42 | def sum257 (ns : List ℕ) : Option ℕ := 43 | match nth ns 1 with 44 | | Option.none => Option.none 45 | | Option.some n₂ => 46 | match nth ns 4 with 47 | | Option.none => Option.none 48 | | Option.some n₅ => 49 | match nth ns 6 with 50 | | Option.none => Option.none 51 | | Option.some n₇ => Option.some (n₂ + n₅ + n₇) 52 | 53 | /- The code is ugly, because of all the pattern matching on options. 54 | 55 | We can put all the ugliness in one function, which we call `connect`: -/ 56 | 57 | def connect {α : Type} {β : Type} : 58 | Option α → (α → Option β) → Option β 59 | | Option.none, _ => Option.none 60 | | Option.some a, f => f a 61 | 62 | def sum257Connect (ns : List ℕ) : Option ℕ := 63 | connect (nth ns 1) 64 | (fun n₂ ↦ connect (nth ns 4) 65 | (fun n₅ ↦ connect (nth ns 6) 66 | (fun n₇ ↦ Option.some (n₂ + n₅ + n₇)))) 67 | 68 | /- Instead of defining `connect` ourselves, we can use Lean's predefined 69 | general `bind` operation. We can also use `pure` instead of `Option.some`: -/ 70 | 71 | #check bind 72 | 73 | def sum257Bind (ns : List ℕ) : Option ℕ := 74 | bind (nth ns 1) 75 | (fun n₂ ↦ bind (nth ns 4) 76 | (fun n₅ ↦ bind (nth ns 6) 77 | (fun n₇ ↦ pure (n₂ + n₅ + n₇)))) 78 | 79 | /- By using `bind` and `pure`, `sum257Bind` makes no reference to the 80 | constructors `Option.none` and `Option.some`. 81 | 82 | Syntactic sugar: 83 | 84 | `ma >>= f` := `bind ma f` -/ 85 | 86 | def sum257Op (ns : List ℕ) : Option ℕ := 87 | nth ns 1 >>= 88 | fun n₂ ↦ nth ns 4 >>= 89 | fun n₅ ↦ nth ns 6 >>= 90 | fun n₇ ↦ pure (n₂ + n₅ + n₇) 91 | 92 | /- Syntactic sugar: 93 | 94 | do 95 | let a ← ma 96 | t 97 | := 98 | ma >>= (fun a ↦ t) 99 | 100 | do 101 | ma 102 | t 103 | := 104 | ma >>= (fun _ ↦ t) -/ 105 | 106 | def sum257Dos (ns : List ℕ) : Option ℕ := 107 | do 108 | let n₂ ← nth ns 1 109 | do 110 | let n₅ ← nth ns 4 111 | do 112 | let n₇ ← nth ns 6 113 | pure (n₂ + n₅ + n₇) 114 | 115 | /- The `do`s can be combined: -/ 116 | 117 | def sum257Do (ns : List ℕ) : Option ℕ := 118 | do 119 | let n₂ ← nth ns 1 120 | let n₅ ← nth ns 4 121 | let n₇ ← nth ns 6 122 | pure (n₂ + n₅ + n₇) 123 | 124 | /- Although the notation has an imperative flavor, the function is a pure 125 | functional program. 126 | 127 | 128 | ## Two Operations and Three Laws 129 | 130 | The `Option` type constructor is an example of a monad. 
131 | 132 | In general, a __monad__ is a type constructor `m` that depends on some type 133 | parameter `α` (i.e., `m α`) equipped with two distinguished operations: 134 | 135 | `pure {α : Type} : α → m α` 136 | `bind {α β : Type} : m α → (α → m β) → m β` 137 | 138 | For `Option`: 139 | 140 | `pure` := `Option.some` 141 | `bind` := `connect` 142 | 143 | Intuitively, we can think of a monad as a "box": 144 | 145 | * `pure` puts the data into the box. 146 | 147 | * `bind` allows us to access the data in the box and modify it (possibly even 148 | changing its type, since the result is an `m β` monad, not a `m α` monad). 149 | 150 | There is no general way to extract the data from the monad, i.e., to obtain an 151 | `α` from an `m α`. 152 | 153 | To summarize, `pure a` provides no side effect and simply provides a box 154 | containing the the value `a`, whereas `bind ma f` (also written `ma >>= f`) 155 | executes `ma`, then executes `f` with the boxed result `a` of `ma`. 156 | 157 | The option monad is only one instance among many. 158 | 159 | Type | Effect 160 | -------------------- | ------------------------------------------------------- 161 | `id` | no effects 162 | `Option` | simple exceptions 163 | `fun α ↦ σ → α × σ` | threading through a state of type `σ` 164 | `Set` | nondeterministic computation returning `α` values 165 | `fun α ↦ t → α` | reading elements of type `t` (e.g., a configuration) 166 | `fun α ↦ ℕ × α` | adjoining running time (e.g., to model time complexity) 167 | `fun α ↦ String × α` | adjoining text output (e.g., for logging) 168 | `IO` | interaction with the operating system 169 | `TacticM` | interaction with the proof assistant 170 | 171 | All of the above are unary type constructors `m : Type → Type`. 172 | 173 | Some effects can be combined (e.g., `Option (t → α)`). 174 | 175 | Some effects are not executable (e.g., `Set α`). They are nonetheless useful for 176 | modeling programs abstractly in the logic. 177 | 178 | Specific monads may provide a way to extract the boxed value stored in the monad 179 | without `bind`'s requirement of putting it back in a monad. 180 | 181 | Monads have several benefits, including: 182 | 183 | * They provide the convenient and highly readable `do` notation. 184 | 185 | * They support generic operations, such as 186 | `mmap {α β : Type} : (α → m β) → List α → m (List β)`, which work uniformly 187 | across all monads. 188 | 189 | The `bind` and `pure` operations are normally required to obey three laws. Pure 190 | data as the first program can be simplified away: 191 | 192 | do 193 | let a' ← pure a, 194 | f a' 195 | = 196 | f a 197 | 198 | Pure data as the second program can be simplified away: 199 | 200 | do 201 | let a ← ma 202 | pure a 203 | = 204 | ma 205 | 206 | Nested programs `ma`, `f`, `g` can be flattened using this associativity rule: 207 | 208 | do 209 | let b ← 210 | do 211 | let a ← ma 212 | f a 213 | g b 214 | = 215 | do 216 | let a ← ma 217 | let b ← f a 218 | g b 219 | 220 | 221 | ## A Type Class of Monads 222 | 223 | Monads are a mathematical structure, so we use class to add them as a type 224 | class. We can think of a type class as a structure that is parameterized by a 225 | type, or here, by a type constructor `m : Type → Type`. 
-/ 226 | 227 | class LawfulMonad (m : Type → Type) 228 | extends Pure m, Bind m where 229 | pure_bind {α β : Type} (a : α) (f : α → m β) : 230 | (pure a >>= f) = f a 231 | bind_pure {α : Type} (ma : m α) : 232 | (ma >>= pure) = ma 233 | bind_assoc {α β γ : Type} (f : α → m β) (g : β → m γ) 234 | (ma : m α) : 235 | ((ma >>= f) >>= g) = (ma >>= (fun a ↦ f a >>= g)) 236 | 237 | /- Step by step: 238 | 239 | * We are creating a structure parameterized by a unary type constructor `m`. 240 | 241 | * The structure inherits the fields, and any syntactic sugar, from structures 242 | called `Bind` and `Pure`, which provide the `bind` and `pure` operations on 243 | `m` and some syntactic sugar. 244 | 245 | * The definition adds three fields to those already provided by `Bind` and 246 | `Pure`, to store the proofs of the laws. 247 | 248 | To instantiate this definition with a concrete monad, we must supply the type 249 | constructor `m` (e.g., `Option`), `bind` and `pure` operators, and proofs of the 250 | laws. 251 | 252 | 253 | ## No Effects 254 | 255 | Our first monad is the trivial monad `m := id` (i.e., `m := (fun α ↦ α)`). -/ 256 | 257 | def id.pure {α : Type} : α → id α 258 | | a => a 259 | 260 | def id.bind {α β : Type} : id α → (α → id β) → id β 261 | | a, f => f a 262 | 263 | instance id.LawfulMonad : LawfulMonad id := 264 | { pure := id.pure 265 | bind := id.bind 266 | pure_bind := 267 | by 268 | intro α β a f 269 | rfl 270 | bind_pure := 271 | by 272 | intro α ma 273 | rfl 274 | bind_assoc := 275 | by 276 | intro α β γ f g ma 277 | rfl } 278 | 279 | 280 | /- ## Basic Exceptions 281 | 282 | As we saw above, the option type provides a basic exception mechanism. -/ 283 | 284 | def Option.pure {α : Type} : α → Option α := 285 | Option.some 286 | 287 | def Option.bind {α β : Type} : 288 | Option α → (α → Option β) → Option β 289 | | Option.none, _ => Option.none 290 | | Option.some a, f => f a 291 | 292 | instance Option.LawfulMonad : LawfulMonad Option := 293 | { pure := Option.pure 294 | bind := Option.bind 295 | pure_bind := 296 | by 297 | intro α β a f 298 | rfl 299 | bind_pure := 300 | by 301 | intro α ma 302 | cases ma with 303 | | none => rfl 304 | | some _ => rfl 305 | bind_assoc := 306 | by 307 | intro α β γ f g ma 308 | cases ma with 309 | | none => rfl 310 | | some _ => rfl } 311 | 312 | def Option.throw {α : Type} : Option α := 313 | Option.none 314 | 315 | def Option.catch {α : Type} : Option α → Option α → Option α 316 | | Option.none, ma' => ma' 317 | | Option.some a, _ => Option.some a 318 | 319 | 320 | /- ## Mutable State 321 | 322 | The state monad provides an abstraction corresponding to a mutable state. Some 323 | compilers recognize the state monad to produce efficient imperative code. -/ 324 | 325 | def Action (σ α : Type) : Type := 326 | σ → α × σ 327 | 328 | def Action.read {σ : Type} : Action σ σ 329 | | s => (s, s) 330 | 331 | def Action.write {σ : Type} (s : σ) : Action σ Unit 332 | | _ => ((), s) 333 | 334 | def Action.pure {σ α : Type} (a : α) : Action σ α 335 | | s => (a, s) 336 | 337 | def Action.bind {σ : Type} {α β : Type} (ma : Action σ α) 338 | (f : α → Action σ β) : 339 | Action σ β 340 | | s => 341 | match ma s with 342 | | (a, s') => f a s' 343 | 344 | /- `Action.pure` is like a `return` statement; it does not change the state. 345 | 346 | `Action.bind` is like the sequential composition of two statements with 347 | respect to a state. 
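For illustration, the following small action reads the current state, overwrites it with its successor, and returns the value that was read (this example is not used later; it only shows how the state is threaded through):

    Action.bind Action.read
      (fun s ↦ Action.bind (Action.write (s + 1))
        (fun _ ↦ Action.pure s))

Applied to the initial state `0`, unfolding the definitions above yields the pair `(0, 1)`: the result value `0` together with the final state `1`.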
-/ 348 | 349 | instance Action.LawfulMonad {σ : Type} : 350 | LawfulMonad (Action σ) := 351 | { pure := Action.pure 352 | bind := Action.bind 353 | pure_bind := 354 | by 355 | intro α β a f 356 | rfl 357 | bind_pure := 358 | by 359 | intro α ma 360 | rfl 361 | bind_assoc := 362 | by 363 | intro α β γ f g ma 364 | rfl } 365 | 366 | def increasingly : List ℕ → Action ℕ (List ℕ) 367 | | [] => pure [] 368 | | (n :: ns) => 369 | do 370 | let prev ← Action.read 371 | if n < prev then 372 | increasingly ns 373 | else 374 | do 375 | Action.write n 376 | let ns' ← increasingly ns 377 | pure (n :: ns') 378 | 379 | #eval increasingly [1, 2, 3, 2] 0 380 | #eval increasingly [1, 2, 3, 2, 4, 5, 2] 0 381 | 382 | 383 | /- ## Nondeterminism 384 | 385 | The set monad stores an arbitrary, possibly infinite number of `α` values. -/ 386 | 387 | #check Set 388 | 389 | def Set.pure {α : Type} : α → Set α 390 | | a => {a} 391 | 392 | def Set.bind {α β : Type} : Set α → (α → Set β) → Set β 393 | | A, f => {b | ∃a, a ∈ A ∧ b ∈ f a} 394 | 395 | instance Set.LawfulMonad : LawfulMonad Set := 396 | { pure := Set.pure 397 | bind := Set.bind 398 | pure_bind := 399 | by 400 | intro α β a f 401 | simp [Pure.pure, Bind.bind, Set.pure, Set.bind] 402 | bind_pure := 403 | by 404 | intro α ma 405 | simp [Pure.pure, Bind.bind, Set.pure, Set.bind] 406 | bind_assoc := 407 | by 408 | intro α β γ f g ma 409 | simp [Pure.pure, Bind.bind, Set.pure, Set.bind] 410 | apply Set.ext 411 | aesop } 412 | 413 | /- `aesop` is a general-purpose proof search tactic. Among others, it performs 414 | elimination of the logical symbols `∧`, `∨`, `↔`, and `∃` in hypotheses and 415 | introduction of `∧`, `↔`, and `∃` in the target, and it regularly invokes the 416 | simplifier. It can succeed at proving a goal, fail, or succeed partially, 417 | leaving some unfinished subgoals to the user. 418 | 419 | 420 | ## A Generic Algorithm: Iteration over a List 421 | 422 | We consider a generic effectful program `mmap` that iterates over a list and 423 | applies a function `f` to each element. 
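Below, `nthsFine` maps `nth` over the outer list and keeps each failure locally, producing a `List (Option α)`, whereas `nthsCoarse` uses `mmap` in the `Option` monad, so a single out-of-bounds index makes the whole result `Option.none`. The two pairs of `#eval` commands illustrate the difference.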
-/ 424 | 425 | def nthsFine {α : Type} (xss : List (List α)) (n : ℕ) : 426 | List (Option α) := 427 | List.map (fun xs ↦ nth xs n) xss 428 | 429 | #eval nthsFine [[11, 12, 13, 14], [21, 22, 23]] 2 430 | #eval nthsFine [[11, 12, 13, 14], [21, 22, 23]] 3 431 | 432 | def mmap {m : Type → Type} [LawfulMonad m] {α β : Type} 433 | (f : α → m β) : 434 | List α → m (List β) 435 | | [] => pure [] 436 | | a :: as => 437 | do 438 | let b ← f a 439 | let bs ← mmap f as 440 | pure (b :: bs) 441 | 442 | def nthsCoarse {α : Type} (xss : List (List α)) (n : ℕ) : 443 | Option (List α) := 444 | mmap (fun xs ↦ nth xs n) xss 445 | 446 | #eval nthsCoarse [[11, 12, 13, 14], [21, 22, 23]] 2 447 | #eval nthsCoarse [[11, 12, 13, 14], [21, 22, 23]] 3 448 | 449 | theorem mmap_append {m : Type → Type} [LawfulMonad m] 450 | {α β : Type} (f : α → m β) : 451 | ∀as as' : List α, mmap f (as ++ as') = 452 | do 453 | let bs ← mmap f as 454 | let bs' ← mmap f as' 455 | pure (bs ++ bs') 456 | | [], _ => 457 | by simp [mmap, LawfulMonad.bind_pure, LawfulMonad.pure_bind] 458 | | a :: as, as' => 459 | by simp [mmap, mmap_append _ as as', LawfulMonad.pure_bind, 460 | LawfulMonad.bind_assoc] 461 | 462 | end LoVe 463 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe07_EffectfulProgramming_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe07_EffectfulProgramming_Demo 5 | 6 | 7 | /- # LoVe Exercise 7: Effectful Programming 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: A State Monad with Failure 19 | 20 | We introduce a richer notion of lawful monad that provides an `orelse` 21 | operator satisfying some laws, given below. `emp` denotes failure. `orelse x y` 22 | tries `x` first, falling back on `y` on failure. -/ 23 | 24 | class LawfulMonadWithOrelse (m : Type → Type) 25 | extends LawfulMonad m where 26 | emp {α : Type} : m α 27 | orelse {α : Type} : m α → m α → m α 28 | emp_orelse {α : Type} (a : m α) : 29 | orelse emp a = a 30 | orelse_emp {α : Type} (a : m α) : 31 | orelse a emp = a 32 | orelse_assoc {α : Type} (a b c : m α) : 33 | orelse (orelse a b) c = orelse a (orelse b c) 34 | emp_bind {α β : Type} (f : α → m β) : 35 | (emp >>= f) = emp 36 | bind_emp {α β : Type} (f : m α) : 37 | (f >>= (fun a ↦ (emp : m β))) = emp 38 | 39 | /- 1.1. We set up the `Option` type constructor to be a 40 | `LawfulMonad_with_orelse`. Complete the proofs. 41 | 42 | Hint: Use `simp [Bind.bind]` to unfold the definition of the bind operator and 43 | `simp [Option.orelse]` to unfold the definition of the `orelse` operator. 
-/ 44 | 45 | def Option.orelse {α : Type} : Option α → Option α → Option α 46 | | Option.none, ma' => ma' 47 | | Option.some a, _ => Option.some a 48 | 49 | instance Option.LawfulMonadWithOrelse : 50 | LawfulMonadWithOrelse Option := 51 | { Option.LawfulMonad with 52 | emp := Option.none 53 | orelse := Option.orelse 54 | emp_orelse := 55 | sorry 56 | orelse_emp := 57 | by 58 | intro α ma 59 | simp [Option.orelse] 60 | cases ma 61 | { rfl } 62 | { rfl } 63 | orelse_assoc := 64 | sorry 65 | emp_bind := 66 | by 67 | intro α β f 68 | simp [Bind.bind] 69 | rfl 70 | bind_emp := 71 | sorry 72 | } 73 | 74 | @[simp] theorem Option.some_bind {α β : Type} (a : α) (g : α → Option β) : 75 | (Option.some a >>= g) = g a := 76 | sorry 77 | 78 | /- 1.2. Now we are ready to define `FAction σ`: a monad with an internal state 79 | of type `σ` that can fail (unlike `Action σ`). 80 | 81 | We start with defining `FAction σ α`, where `σ` is the type of the internal 82 | state, and `α` is the type of the value stored in the monad. We use `Option` to 83 | model failure. This means we can also use the monad operations of `Option` when 84 | defining the monad operations on `FAction`. 85 | 86 | Hints: 87 | 88 | * Remember that `FAction σ α` is an alias for a function type, so you can use 89 | pattern matching and `fun s ↦ …` to define values of type `FAction σ α`. 90 | 91 | * `FAction` is very similar to `Action` from the lecture's demo. You can look 92 | there for inspiration. -/ 93 | 94 | def FAction (σ : Type) (α : Type) : Type := 95 | sorry 96 | 97 | /- 1.3. Define the `get` and `set` function for `FAction`, where `get` returns 98 | the state passed along the state monad and `set s` changes the state to `s`. -/ 99 | 100 | def get {σ : Type} : FAction σ σ := 101 | sorry 102 | 103 | def set {σ : Type} (s : σ) : FAction σ Unit := 104 | sorry 105 | 106 | /- We set up the `>>=` syntax on `FAction`: -/ 107 | 108 | def FAction.bind {σ α β : Type} (f : FAction σ α) (g : α → FAction σ β) : 109 | FAction σ β 110 | | s => f s >>= (fun (a, s) ↦ g a s) 111 | 112 | instance FAction.Bind {σ : Type} : Bind (FAction σ) := 113 | { bind := FAction.bind } 114 | 115 | theorem FAction.bind_apply {σ α β : Type} (f : FAction σ α) 116 | (g : α → FAction σ β) (s : σ) : 117 | (f >>= g) s = (f s >>= (fun as ↦ g (Prod.fst as) (Prod.snd as))) := 118 | by rfl 119 | 120 | /- 1.4. Define the operator `pure` for `FAction`, in such a way that it will 121 | satisfy the three laws. -/ 122 | 123 | def FAction.pure {σ α : Type} (a : α) : FAction σ α := 124 | sorry 125 | 126 | /- We set up the syntax for `pure` on `FAction`: -/ 127 | 128 | instance FAction.Pure {σ : Type} : Pure (FAction σ) := 129 | { pure := FAction.pure } 130 | 131 | theorem FAction.pure_apply {σ α : Type} (a : α) (s : σ) : 132 | (pure a : FAction σ α) s = Option.some (a, s) := 133 | by rfl 134 | 135 | /- 1.5. Register `FAction` as a monad. 136 | 137 | Hints: 138 | 139 | * The `funext` theorem is useful when you need to prove equality between two 140 | functions. 141 | 142 | * The theorem `FAction.pure_apply` or `FAction.bind_apply` might prove useful. 
-/ 143 | 144 | instance FAction.LawfulMonad {σ : Type} : LawfulMonad (FAction σ) := 145 | { FAction.Bind, FAction.Pure with 146 | pure_bind := 147 | by 148 | sorry 149 | bind_pure := 150 | by 151 | intro α ma 152 | apply funext 153 | intro s 154 | have bind_pure_helper : 155 | (do 156 | let x ← ma s 157 | pure (Prod.fst x) (Prod.snd x)) = 158 | ma s := 159 | by apply LawfulMonad.bind_pure 160 | aesop 161 | bind_assoc := 162 | sorry 163 | } 164 | 165 | 166 | /- ## Question 2 (**optional**): Kleisli Operator 167 | 168 | The Kleisli operator `>=>` (not to be confused with `>>=`) is useful for 169 | pipelining effectful functions. Note that `fun a ↦ f a >>= g` is to be parsed as 170 | `fun a ↦ (f a >>= g)`, not as `(fun a ↦ f a) >>= g`. -/ 171 | 172 | def kleisli {m : Type → Type} [LawfulMonad m] {α β γ : Type} (f : α → m β) 173 | (g : β → m γ) : α → m γ := 174 | fun a ↦ f a >>= g 175 | 176 | infixr:90 (priority := high) " >=> " => kleisli 177 | 178 | /- 2.1 (**optional**). Prove that `pure` is a left and right unit for the 179 | Kleisli operator. -/ 180 | 181 | theorem pure_kleisli {m : Type → Type} [LawfulMonad m] {α β : Type} 182 | (f : α → m β) : 183 | (pure >=> f) = f := 184 | sorry 185 | 186 | theorem kleisli_pure {m : Type → Type} [LawfulMonad m] {α β : Type} 187 | (f : α → m β) : 188 | (f >=> pure) = f := 189 | sorry 190 | 191 | /- 2.2 (**optional**). Prove that the Kleisli operator is associative. -/ 192 | 193 | theorem kleisli_assoc {m : Type → Type} [LawfulMonad m] {α β γ δ : Type} 194 | (f : α → m β) (g : β → m γ) (h : γ → m δ) : 195 | ((f >=> g) >=> h) = (f >=> (g >=> h)) := 196 | sorry 197 | 198 | end LoVe 199 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe07_EffectfulProgramming_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe07_EffectfulProgramming_Demo 5 | 6 | 7 | /- # LoVe Homework 7 (10 points + 1 bonus point): Monads 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Question 1 (5 points): `map` for Monads 21 | 22 | We will define a `map` function for monads and derive its so-called functorial 23 | properties from the three laws. 24 | 25 | 1.1 (2 points). Define `map` on `m`. This function should not be confused 26 | with `mmap` from the lecture's demo. 27 | 28 | Hint: The challenge is to find a way to create a value of type `m β`. Follow the 29 | types. Inventory all the arguments and operations available (e.g., `pure`, 30 | `>>=`) with their types and see if you can plug them together like Lego 31 | bricks. -/ 32 | 33 | def map {m : Type → Type} [LawfulMonad m] {α β : Type} (f : α → β) (ma : m α) : 34 | m β := := 35 | sorry 36 | 37 | /- 1.2 (1 point). Prove the identity law for `map`. 38 | 39 | Hint: You will need `LawfulMonad.bind_pure`. -/ 40 | 41 | theorem map_id {m : Type → Type} [LawfulMonad m] {α : Type} 42 | (ma : m α) : 43 | map id ma = ma := 44 | sorry 45 | 46 | /- 1.3 (2 points). Prove the composition law for `map`. 
-/ 47 | 48 | theorem map_map {m : Type → Type} [LawfulMonad m] {α β γ : Type} 49 | (f : α → β) (g : β → γ) (ma : m α) : 50 | map g (map f ma) = map (fun x ↦ g (f x)) ma := 51 | sorry 52 | 53 | 54 | /- ## Question 2 (5 points + 1 bonus point): Monadic Structure on Lists 55 | 56 | `List` can be seen as a monad, similar to `Option` but with several possible 57 | outcomes. It is also similar to `Set`, but the results are ordered and finite. 58 | The code below sets `List` up as a monad. -/ 59 | 60 | namespace List 61 | 62 | def bind {α β : Type} : List α → (α → List β) → List β 63 | | [], f => [] 64 | | a :: as, f => f a ++ bind as f 65 | 66 | def pure {α : Type} (a : α) : List α := 67 | [a] 68 | 69 | /- 2.1 (1 point). Prove the following property of `bind` under the append 70 | operation. -/ 71 | 72 | theorem bind_append {α β : Type} (f : α → List β) : 73 | ∀as as' : List α, bind (as ++ as') f = bind as f ++ bind as' f := 74 | sorry 75 | 76 | /- 2.2 (3 points). Prove the three laws for `List`. -/ 77 | 78 | theorem pure_bind {α β : Type} (a : α) (f : α → List β) : 79 | bind (pure a) f = f a := 80 | sorry 81 | 82 | theorem bind_pure {α : Type} : 83 | ∀as : List α, bind as pure = as := 84 | sorry 85 | 86 | theorem bind_assoc {α β γ : Type} (f : α → List β) (g : β → List γ) : 87 | ∀as : List α, bind (bind as f) g = bind as (fun a ↦ bind (f a) g) := 88 | sorry 89 | 90 | /- 2.3 (1 point). Prove the following list-specific law. -/ 91 | 92 | theorem bind_pure_comp_eq_map {α β : Type} {f : α → β} : 93 | ∀as : List α, bind as (fun a ↦ pure (f a)) = List.map f as := 94 | sorry 95 | 96 | /- 2.4 (1 bonus point). Register `List` as a lawful monad: -/ 97 | 98 | instance LawfulMonad : LawfulMonad List := 99 | sorry 100 | 101 | end List 102 | 103 | end LoVe 104 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe08_Metaprogramming_Demo.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe06_InductivePredicates_Demo 5 | 6 | 7 | /- # LoVe Demo 8: Metaprogramming 8 | 9 | Users can extend Lean with custom tactics and tools. This kind of 10 | programming—programming the prover—is called metaprogramming. 11 | 12 | Lean's metaprogramming framework uses mostly the same notions and syntax as 13 | Lean's input language itself. Abstract syntax trees __reflect__ internal data 14 | structures, e.g., for expressions (terms). The prover's internals are exposed 15 | through Lean interfaces, which we can use for 16 | 17 | * accessing the current context and goal; 18 | * unifying expressions; 19 | * querying and modifying the environment; 20 | * setting attributes. 21 | 22 | Most of Lean itself is implemented in Lean. 23 | 24 | Example applications: 25 | 26 | * proof goal transformations; 27 | * heuristic proof search; 28 | * decision procedures; 29 | * definition generators; 30 | * advisor tools; 31 | * exporters; 32 | * ad hoc automation. 33 | 34 | Advantages of Lean's metaprogramming framework: 35 | 36 | * Users do not need to learn another programming language to write 37 | metaprograms; they can work with the same constructs and notation used to 38 | define ordinary objects in the prover's library. 39 | 40 | * Everything in that library is available for metaprogramming purposes. 
41 | 42 | * Metaprograms can be written and debugged in the same interactive environment, 43 | encouraging a style where formal libraries and supporting automation are 44 | developed at the same time. -/ 45 | 46 | 47 | set_option autoImplicit false 48 | set_option tactic.hygienic false 49 | 50 | open Lean 51 | open Lean.Meta 52 | open Lean.Elab.Tactic 53 | open Lean.TSyntax 54 | 55 | namespace LoVe 56 | 57 | 58 | /- ## Tactic Combinators 59 | 60 | When programming our own tactics, we often need to repeat some actions on 61 | several goals, or to recover if a tactic fails. Tactic combinators help in such 62 | cases. 63 | 64 | `repeat'` applies its argument repeatedly on all (sub…sub)goals until it cannot 65 | be applied any further. -/ 66 | 67 | theorem repeat'_example : 68 | Even 4 ∧ Even 7 ∧ Even 3 ∧ Even 0 := 69 | by 70 | repeat' apply And.intro 71 | repeat' apply Even.add_two 72 | repeat' sorry 73 | 74 | /- The "first" combinator `first | ⋯ | ⋯ | ⋯` tries its first argument. If that 75 | fails, it applies its second argument. If that fails, it applies its third 76 | argument. And so on. -/ 77 | 78 | theorem repeat'_first_example : 79 | Even 4 ∧ Even 7 ∧ Even 3 ∧ Even 0 := 80 | by 81 | repeat' apply And.intro 82 | repeat' 83 | first 84 | | apply Even.add_two 85 | | apply Even.zero 86 | repeat' sorry 87 | 88 | /- `all_goals` applies its argument exactly once to each goal. It succeeds only 89 | if the argument succeeds on **all** goals. -/ 90 | 91 | /- 92 | theorem all_goals_example : 93 | Even 4 ∧ Even 7 ∧ Even 3 ∧ Even 0 := 94 | by 95 | repeat' apply And.intro 96 | all_goals apply Even.add_two -- fails 97 | repeat' sorry 98 | -/ 99 | 100 | /- `try` transforms its argument into a tactic that never fails. -/ 101 | 102 | theorem all_goals_try_example : 103 | Even 4 ∧ Even 7 ∧ Even 3 ∧ Even 0 := 104 | by 105 | repeat' apply And.intro 106 | all_goals try apply Even.add_two 107 | repeat sorry 108 | 109 | /- `any_goals` applies its argument exactly once to each goal. It succeeds 110 | if the argument succeeds on **any** goal. -/ 111 | 112 | theorem any_goals_example : 113 | Even 4 ∧ Even 7 ∧ Even 3 ∧ Even 0 := 114 | by 115 | repeat' apply And.intro 116 | any_goals apply Even.add_two 117 | repeat' sorry 118 | 119 | /- `solve | ⋯ | ⋯ | ⋯` is like `first` except that it succeeds only if one of 120 | the arguments fully proves the current goal. -/ 121 | 122 | theorem any_goals_solve_repeat_first_example : 123 | Even 4 ∧ Even 7 ∧ Even 3 ∧ Even 0 := 124 | by 125 | repeat' apply And.intro 126 | any_goals 127 | solve 128 | | repeat' 129 | first 130 | | apply Even.add_two 131 | | apply Even.zero 132 | repeat' sorry 133 | 134 | /- The combinator `repeat'` can easily lead to infinite looping: -/ 135 | 136 | /- 137 | -- loops 138 | theorem repeat'_Not_example : 139 | ¬ Even 1 := 140 | by repeat' apply Not.intro 141 | -/ 142 | 143 | 144 | /- ## Macros -/ 145 | 146 | /- We start with the actual metaprogramming, by coding a custom tactic as a 147 | macro. 
The tactic embodies the behavior we hardcoded in the `solve` example 148 | above: -/ 149 | 150 | macro "intro_and_even" : tactic => 151 | `(tactic| 152 | (repeat' apply And.intro 153 | any_goals 154 | solve 155 | | repeat' 156 | first 157 | | apply Even.add_two 158 | | apply Even.zero)) 159 | 160 | /- Let us apply our custom tactic: -/ 161 | 162 | theorem intro_and_even_example : 163 | Even 4 ∧ Even 7 ∧ Even 3 ∧ Even 0 := 164 | by 165 | intro_and_even 166 | repeat' sorry 167 | 168 | 169 | /- ## The Metaprogramming Monads 170 | 171 | `MetaM` is the low-level metaprogramming monad. `TacticM` extends `MetaM` with 172 | goal management. 173 | 174 | * `MetaM` is a state monad providing access to the global context (including all 175 | definitions and inductive types), notations, and attributes (e.g., the list of 176 | `@[simp]` theorems), among others. `TacticM` additionally provides access to 177 | the list of goals. 178 | 179 | * `MetaM` and `TacticM` behave like an option monad. The metaprogram `failure` 180 | leaves the monad in an error state. 181 | 182 | * `MetaM` and `TacticM` support tracing, so we can use `logInfo` to display 183 | messages. 184 | 185 | * Like other monads, `MetaM` and `TacticM` support imperative constructs such as 186 | `for`–`in`, `continue`, and `return`. -/ 187 | 188 | def traceGoals : TacticM Unit := 189 | do 190 | logInfo m!"Lean version {Lean.versionString}" 191 | logInfo "All goals:" 192 | let goals ← getUnsolvedGoals 193 | logInfo m!"{goals}" 194 | match goals with 195 | | [] => return 196 | | _ :: _ => 197 | logInfo "First goal's target:" 198 | let target ← getMainTarget 199 | logInfo m!"{target}" 200 | 201 | elab "trace_goals" : tactic => 202 | traceGoals 203 | 204 | theorem Even_18_and_Even_20 (α : Type) (a : α) : 205 | Even 18 ∧ Even 20 := 206 | by 207 | apply And.intro 208 | trace_goals 209 | intro_and_even 210 | 211 | 212 | /- ## First Example: An Assumption Tactic 213 | 214 | We define a `hypothesis` tactic that behaves essentially the same as the 215 | predefined `assumption` tactic. -/ 216 | 217 | def hypothesis : TacticM Unit := 218 | withMainContext 219 | (do 220 | let target ← getMainTarget 221 | let lctx ← getLCtx 222 | for ldecl in lctx do 223 | if ! LocalDecl.isImplementationDetail ldecl then 224 | let eq ← isDefEq (LocalDecl.type ldecl) target 225 | if eq then 226 | let goal ← getMainGoal 227 | MVarId.assign goal (LocalDecl.toExpr ldecl) 228 | return 229 | failure) 230 | 231 | elab "hypothesis" : tactic => 232 | hypothesis 233 | 234 | theorem hypothesis_example {α : Type} {p : α → Prop} {a : α} 235 | (hpa : p a) : 236 | p a := 237 | by hypothesis 238 | 239 | 240 | /- ## Expressions 241 | 242 | The metaprogramming framework revolves around the type `Expr` of expressions or 243 | terms. -/ 244 | 245 | #print Expr 246 | 247 | 248 | /- ### Names 249 | 250 | We can create literal names with backticks: 251 | 252 | * Names with a single backtick, `n, are not checked for existence. 253 | 254 | * Names with two backticks, ``n, are resolved and checked. 
-/ 255 | 256 | #check `x 257 | #eval `x 258 | #eval `Even -- wrong 259 | #eval `LoVe.Even -- suboptimal 260 | #eval ``Even 261 | /- 262 | #eval ``EvenThough -- fails 263 | -/ 264 | 265 | 266 | /- ### Constants -/ 267 | 268 | #check Expr.const 269 | 270 | #eval ppExpr (Expr.const ``Nat.add []) 271 | #eval ppExpr (Expr.const ``Nat []) 272 | 273 | 274 | /- ### Sorts (lecture 12) -/ 275 | 276 | #check Expr.sort 277 | 278 | #eval ppExpr (Expr.sort Level.zero) 279 | #eval ppExpr (Expr.sort (Level.succ Level.zero)) 280 | 281 | 282 | /- ### Free Variables -/ 283 | 284 | #check Expr.fvar 285 | 286 | #check FVarId.mk "n" 287 | #eval ppExpr (Expr.fvar (FVarId.mk "n")) 288 | 289 | 290 | /- ### Metavariables -/ 291 | 292 | #check Expr.mvar 293 | 294 | 295 | /- ### Applications -/ 296 | 297 | #check Expr.app 298 | 299 | #eval ppExpr (Expr.app (Expr.const ``Nat.succ []) 300 | (Expr.const ``Nat.zero [])) 301 | 302 | 303 | /- ### Anonymous Functions and Bound Variables -/ 304 | 305 | #check Expr.bvar 306 | #check Expr.lam 307 | 308 | #eval ppExpr (Expr.bvar 0) 309 | 310 | #eval ppExpr (Expr.lam `x (Expr.const ``Nat []) (Expr.bvar 0) 311 | BinderInfo.default) 312 | 313 | #eval ppExpr (Expr.lam `x (Expr.const ``Nat []) 314 | (Expr.lam `y (Expr.const ``Nat []) (Expr.bvar 1) 315 | BinderInfo.default) 316 | BinderInfo.default) 317 | 318 | 319 | /- ### Dependent Function Types -/ 320 | 321 | #check Expr.forallE 322 | 323 | #eval ppExpr (Expr.forallE `n (Expr.const ``Nat []) 324 | (Expr.app (Expr.const ``Even []) (Expr.bvar 0)) 325 | BinderInfo.default) 326 | 327 | #eval ppExpr (Expr.forallE `dummy (Expr.const `Nat []) 328 | (Expr.const `Bool []) BinderInfo.default) 329 | 330 | 331 | /- ### Other Constructors -/ 332 | 333 | #check Expr.letE 334 | #check Expr.lit 335 | #check Expr.mdata 336 | #check Expr.proj 337 | 338 | 339 | /- ## Second Example: A Conjunction-Destructing Tactic 340 | 341 | We define a `destruct_and` tactic that automates the elimination of `∧` in 342 | premises, automating proofs such as these: -/ 343 | 344 | theorem abc_a (a b c : Prop) (h : a ∧ b ∧ c) : 345 | a := 346 | And.left h 347 | 348 | theorem abc_b (a b c : Prop) (h : a ∧ b ∧ c) : 349 | b := 350 | And.left (And.right h) 351 | 352 | theorem abc_bc (a b c : Prop) (h : a ∧ b ∧ c) : 353 | b ∧ c := 354 | And.right h 355 | 356 | theorem abc_c (a b c : Prop) (h : a ∧ b ∧ c) : 357 | c := 358 | And.right (And.right h) 359 | 360 | /- Our tactic relies on a helper function, which takes as argument the 361 | hypothesis `h` to use as an expression: -/ 362 | 363 | partial def destructAndExpr (hP : Expr) : TacticM Bool := 364 | withMainContext 365 | (do 366 | let target ← getMainTarget 367 | let P ← inferType hP 368 | let eq ← isDefEq P target 369 | if eq then 370 | let goal ← getMainGoal 371 | MVarId.assign goal hP 372 | return true 373 | else 374 | match Expr.and? P with 375 | | Option.none => return false 376 | | Option.some (Q, R) => 377 | let hQ ← mkAppM ``And.left #[hP] 378 | let success ← destructAndExpr hQ 379 | if success then 380 | return true 381 | else 382 | let hR ← mkAppM ``And.right #[hP] 383 | destructAndExpr hR) 384 | 385 | #check Expr.and? 386 | 387 | def destructAnd (name : Name) : TacticM Unit := 388 | withMainContext 389 | (do 390 | let h ← getFVarFromUserName name 391 | let success ← destructAndExpr h 392 | if ! 
success then 393 | failure) 394 | 395 | elab "destruct_and" h:ident : tactic => 396 | destructAnd (getId h) 397 | 398 | /- Let us check that our tactic works: -/ 399 | 400 | theorem abc_a_again (a b c : Prop) (h : a ∧ b ∧ c) : 401 | a := 402 | by destruct_and h 403 | 404 | theorem abc_b_again (a b c : Prop) (h : a ∧ b ∧ c) : 405 | b := 406 | by destruct_and h 407 | 408 | theorem abc_bc_again (a b c : Prop) (h : a ∧ b ∧ c) : 409 | b ∧ c := 410 | by destruct_and h 411 | 412 | theorem abc_c_again (a b c : Prop) (h : a ∧ b ∧ c) : 413 | c := 414 | by destruct_and h 415 | 416 | /- 417 | theorem abc_ac (a b c : Prop) (h : a ∧ b ∧ c) : 418 | a ∧ c := 419 | by destruct_and h -- fails 420 | -/ 421 | 422 | 423 | /- ## Third Example: A Direct Proof Finder 424 | 425 | Finally, we implement a `prove_direct` tool that traverses all theorems in the 426 | database and checks whether one of them can be used to prove the current 427 | goal. -/ 428 | 429 | def isTheorem : ConstantInfo → Bool 430 | | ConstantInfo.axiomInfo _ => true 431 | | ConstantInfo.thmInfo _ => true 432 | | _ => false 433 | 434 | def applyConstant (name : Name) : TacticM Unit := 435 | do 436 | let cst ← mkConstWithFreshMVarLevels name 437 | liftMetaTactic (fun goal ↦ MVarId.apply goal cst) 438 | 439 | def andThenOnSubgoals (tac₁ tac₂ : TacticM Unit) : 440 | TacticM Unit := 441 | do 442 | let origGoals ← getGoals 443 | let mainGoal ← getMainGoal 444 | setGoals [mainGoal] 445 | tac₁ 446 | let subgoals₁ ← getUnsolvedGoals 447 | let mut newGoals := [] 448 | for subgoal in subgoals₁ do 449 | let assigned ← MVarId.isAssigned subgoal 450 | if ! assigned then 451 | setGoals [subgoal] 452 | tac₂ 453 | let subgoals₂ ← getUnsolvedGoals 454 | newGoals := newGoals ++ subgoals₂ 455 | setGoals (newGoals ++ List.tail origGoals) 456 | 457 | def proveUsingTheorem (name : Name) : TacticM Unit := 458 | andThenOnSubgoals (applyConstant name) hypothesis 459 | 460 | def proveDirect : TacticM Unit := 461 | do 462 | let origGoals ← getUnsolvedGoals 463 | let goal ← getMainGoal 464 | setGoals [goal] 465 | let env ← getEnv 466 | for (name, info) 467 | in SMap.toList (Environment.constants env) do 468 | if isTheorem info && ! ConstantInfo.isUnsafe info then 469 | try 470 | proveUsingTheorem name 471 | logInfo m!"Proved directly by {name}" 472 | setGoals (List.tail origGoals) 473 | return 474 | catch _ => 475 | continue 476 | failure 477 | 478 | elab "prove_direct" : tactic => 479 | proveDirect 480 | 481 | /- Let us check that our tactic works: -/ 482 | 483 | theorem Nat.symm (x y : ℕ) (h : x = y) : 484 | y = x := 485 | by prove_direct 486 | 487 | theorem Nat.symm_manual (x y : ℕ) (h : x = y) : 488 | y = x := 489 | by 490 | apply symm 491 | hypothesis 492 | 493 | theorem Nat.trans (x y z : ℕ) (hxy : x = y) (hyz : y = z) : 494 | x = z := 495 | by prove_direct 496 | 497 | theorem List.reverse_twice (xs : List ℕ) : 498 | List.reverse (List.reverse xs) = xs := 499 | by prove_direct 500 | 501 | /- Lean has `apply?`: -/ 502 | 503 | theorem List.reverse_twice_apply? (xs : List ℕ) : 504 | List.reverse (List.reverse xs) = xs := 505 | by apply? 506 | 507 | end LoVe 508 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe08_Metaprogramming_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. 
-/ 3 | 4 | import LoVe.LoVe08_Metaprogramming_Demo 5 | 6 | 7 | /- # LoVe Exercise 8: Metaprogramming 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | open Lean 16 | open Lean.Meta 17 | open Lean.Elab.Tactic 18 | open Lean.TSyntax 19 | 20 | namespace LoVe 21 | 22 | 23 | /- ## Question 1: `destruct_and` on Steroids 24 | 25 | Recall from the lecture that `destruct_and` fails on easy goals such as -/ 26 | 27 | theorem abc_ac (a b c : Prop) (h : a ∧ b ∧ c) : 28 | a ∧ c := 29 | sorry 30 | 31 | /- We will now address this by developing a new tactic called `destro_and`, 32 | which applies both **des**truction and in**tro**duction rules for conjunction. 33 | It will also go automatically through the hypotheses instead of taking an 34 | argument. We will develop it in three steps. 35 | 36 | 1.1. Develop a tactic `intro_and` that replaces all goals of the form 37 | `a ∧ b` with two new goals `a` and `b` systematically, until all top-level 38 | conjunctions are gone. Define your tactic as a macro. -/ 39 | 40 | #check repeat' 41 | 42 | -- enter your definition here 43 | 44 | theorem abcd_bd (a b c d : Prop) (h : a ∧ (b ∧ c) ∧ d) : 45 | b ∧ d := 46 | by 47 | intro_and 48 | /- The proof state should be as follows: 49 | 50 | case left 51 | a b c d: Prop 52 | h : a ∧ (b ∧ c) ∧ d 53 | ⊢ b 54 | 55 | case right 56 | a b c d : Prop 57 | h : a ∧ (b ∧ c) ∧ d 58 | ⊢ d -/ 59 | repeat' sorry 60 | 61 | theorem abcd_bacb (a b c d : Prop) (h : a ∧ (b ∧ c) ∧ d) : 62 | b ∧ (a ∧ (c ∧ b)) := 63 | by 64 | intro_and 65 | /- The proof state should be as follows: 66 | 67 | case left 68 | a b c d : Prop 69 | h : a ∧ (b ∧ c) ∧ d 70 | ⊢ b 71 | 72 | case right.left 73 | a b c d : Prop 74 | h : a ∧ (b ∧ c) ∧ d 75 | ⊢ a 76 | 77 | case right.right.left 78 | a b c d : Prop 79 | h : a ∧ (b ∧ c) ∧ d 80 | ⊢ c 81 | 82 | case right.right.right 83 | a b c d : Prop 84 | h : a ∧ (b ∧ c) ∧ d 85 | ⊢ b -/ 86 | repeat' sorry 87 | 88 | /- 1.2. Develop a tactic `cases_and` that replaces hypotheses of the form 89 | `h : a ∧ b` by two new hypotheses `h_left : a` and `h_right : b` systematically, 90 | until all top-level conjunctions are gone. 91 | 92 | Here is some pseudocode that you can follow: 93 | 94 | 1. Wrap the entire `do` block in a call to `withMainContext` to ensure you work 95 | with the right context. 96 | 97 | 2. Retrieve the list of hypotheses from the context. This is provided by 98 | `getLCtx`. 99 | 100 | 3. Find the first hypothesis (= term) with a type (= proposition) of the form 101 | `_ ∧ _`. To iterate, you can use the `for … in … do` syntax. To obtain the 102 | type of a term, you can use `inferType`. To check if a type `ty` has the form 103 | `_ ∧ _`, you can use `Expr.isAppOfArity ty ``And 2` (with two backticks before 104 | `And`). 105 | 106 | 4. Perform a case split on the first found hypothesis. This can be achieved 107 | using the metaprogram `cases` provided in `LoVelib`. To extract the free 108 | variable associated with a hypothesis, use `LocalDecl.fvarId`. 109 | 110 | 5. Repeat (via a recursive call). 111 | 112 | 6. Return. 
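Putting these steps together, the skeleton of the metaprogram could look
roughly as follows (only a sketch assembled from the operations named above;
`LocalDecl.toExpr`, as in the demo's `hypothesis` tactic, turns a declaration
into a term whose type `inferType` can compute, and the actual case split and
recursive call still need to be filled in):

  withMainContext
    (do
      let lctx ← getLCtx
      for ldecl in lctx do
        let ty ← inferType (LocalDecl.toExpr ldecl)
        if Expr.isAppOfArity ty ``And 2 then
          -- case split on `LocalDecl.fvarId ldecl` using `cases` from
          -- `LoVelib`, then recurse and return
          sorry
      return ())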
-/ 113 | 114 | partial def casesAnd : TacticM Unit := 115 | sorry 116 | 117 | elab "cases_and" : tactic => 118 | casesAnd 119 | 120 | theorem abcd_bd_again (a b c d : Prop) : 121 | a ∧ (b ∧ c) ∧ d → b ∧ d := 122 | by 123 | intro h 124 | cases_and 125 | /- The proof state should be as follows: 126 | 127 | case intro.intro.intro 128 | a b c d : Prop 129 | left : a 130 | right : d 131 | left_1 : b 132 | right_1 : c 133 | ⊢ b ∧ d -/ 134 | sorry 135 | 136 | /- 1.3. Implement a `destro_and` tactic that first invokes `cases_and`, then 137 | `intro_and`, before it tries to prove all the subgoals that can be discharged 138 | directly by `assumption`. -/ 139 | 140 | macro "destro_and" : tactic => 141 | sorry 142 | 143 | theorem abcd_bd_over_again (a b c d : Prop) (h : a ∧ (b ∧ c) ∧ d) : 144 | b ∧ d := 145 | by destro_and 146 | 147 | theorem abcd_bacb_again (a b c d : Prop) (h : a ∧ (b ∧ c) ∧ d) : 148 | b ∧ (a ∧ (c ∧ b)) := 149 | by destro_and 150 | 151 | theorem abd_bacb_again (a b c d : Prop) (h : a ∧ b ∧ d) : 152 | b ∧ (a ∧ (c ∧ b)) := 153 | by 154 | destro_and 155 | /- The proof state should be roughly as follows: 156 | 157 | case intro.intro.right.right.left 158 | a b c d : Prop 159 | left : a 160 | left_1 : b 161 | right : d 162 | ⊢ c -/ 163 | sorry -- unprovable 164 | 165 | /- 1.4. Provide some more examples for `destro_and` to convince yourself that 166 | it works as expected also on more complicated examples. -/ 167 | 168 | -- enter your examples here 169 | 170 | 171 | /- ## Question 2 (**optional**): A Theorem Finder 172 | 173 | We will implement a function that allows us to find theorems by constants 174 | appearing in their statements. So given a list of constant names, the function 175 | will list all theorems in which all these constants appear. 176 | 177 | 2.1 (**optional**). Write a function that checks whether an expression contains 178 | a specific constant. 179 | 180 | Hints: 181 | 182 | * You can pattern-match on `e` and proceed recursively. 183 | 184 | * The "or" connective on `Bool` is called `||`, and equality is called `==`. -/ 185 | 186 | def constInExpr (name : Name) (e : Expr) : Bool := 187 | sorry 188 | 189 | /- 2.2 (**optional**). Write a function that checks whether an expression 190 | contains **all** constants in a list. 191 | 192 | Hint: You can either proceed recursively or use `List.and` and `List.map`. -/ 193 | 194 | def constsInExpr (names : List Name) (e : Expr) : Bool := 195 | sorry 196 | 197 | /- 2.3 (**optional**). Develop a tactic that uses `constsInExpr` to print the 198 | name of all theorems that contain all constants `names` in their statement. 199 | 200 | This code should be similar to that of `proveDirect` in the demo file. With 201 | `ConstantInfo.type`, you can extract the proposition associated with a theorem. -/ 202 | 203 | def findConsts (names : List Name) : TacticM Unit := 204 | sorry 205 | 206 | elab "find_consts" "(" names:ident+ ")" : tactic => 207 | findConsts (Array.toList (Array.map getId names)) 208 | 209 | /- Test the solution. 
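If everything works, both `find_consts` invocations below should list
`List.reverse_concat` among the reported theorems (the second invocation merely
filters further by also requiring `List.concat`), and that is precisely the
lemma used to close the goal afterwards.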
-/ 210 | 211 | theorem List.a_property_of_reverse {α : Type} (xs : List α) (a : α) : 212 | List.reverse (List.concat xs a) = a :: List.reverse xs := 213 | by 214 | find_consts (List.reverse) 215 | find_consts (List.reverse List.concat) 216 | apply List.reverse_concat 217 | 218 | end LoVe 219 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe08_Metaprogramming_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVelib 5 | 6 | 7 | /- # LoVe Homework 8 (10 points + 2 bonus points): Metaprogramming 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | open Lean 18 | open Lean.Meta 19 | open Lean.Elab.Tactic 20 | open Lean.TSyntax 21 | 22 | namespace LoVe 23 | 24 | 25 | /- ## Question 1 (10 points): A `safe` Tactic 26 | 27 | You will develop a tactic that applies all safe introduction and elimination 28 | rules for the connectives and quantifiers exhaustively. A rule is said to be 29 | __safe__ if, given a provable goal, it always gives rise to provable subgoals. 30 | In addition, we will require that safe rules do not introduce metavariables 31 | (since these can easily be instantiated accidentally with the wrong terms). 32 | 33 | You will proceed in three steps. 34 | 35 | 1.1 (4 points). Develop a `safe_intros` tactic that repeatedly applies the 36 | introduction rules for `True`, `∧`, and `↔` and that invokes `intro _` for 37 | `→`/`∀`. The tactic generalizes `intro_and` from the exercise. -/ 38 | 39 | macro "safe_intros" : tactic => 40 | sorry 41 | 42 | theorem abcd (a b c d : Prop) : 43 | a → ¬ b ∧ (c ↔ d) := 44 | by 45 | safe_intros 46 | /- The proof state should be roughly as follows: 47 | 48 | case left 49 | a b c d : Prop 50 | a_1 : a 51 | a_2 : b 52 | ⊢ False 53 | 54 | case right.mp 55 | a b c d : Prop 56 | a_1 : a 57 | a_2 : c 58 | ⊢ d 59 | 60 | case right.mpr 61 | a b c d : Prop 62 | a_1 : a 63 | a_2 : d 64 | ⊢ c -/ 65 | repeat' sorry 66 | 67 | /- 1.2 (4 points). Develop a `safe_cases` tactic that performs case 68 | distinctions on `False`, `∧` (`And`), and `∃` (`Exists`). The tactic generalizes 69 | `cases_and` from the exercise. 70 | 71 | Hints: 72 | 73 | * The last argument of `Expr.isAppOfArity` is the number of arguments expected 74 | by the logical symbol. For example, the arity of `∧` is 2. 75 | 76 | * The "or" connective on `Bool` is called `||`. -/ 77 | 78 | #check @False 79 | #check @And 80 | #check @Exists 81 | 82 | partial def safeCases : TacticM Unit := 83 | sorry 84 | 85 | elab "safe_cases" : tactic => 86 | safeCases 87 | 88 | theorem abcdef (a b c d e f : Prop) (P : ℕ → Prop) 89 | (hneg: ¬ a) (hand : a ∧ b ∧ c) (hor : c ∨ d) (himp : b → e) (hiff : e ↔ f) 90 | (hex : ∃x, P x) : 91 | False := 92 | by 93 | safe_cases 94 | /- The proof state should be roughly as follows: 95 | 96 | case intro.intro.intro 97 | a b c d e f : Prop 98 | P : ℕ → Prop 99 | hneg : ¬a 100 | hor : c ∨ d 101 | himp : b → e 102 | hiff : e ↔ f 103 | left : a 104 | w : ℕ 105 | h : P w 106 | left_1 : b 107 | right : c 108 | ⊢ False -/ 109 | sorry 110 | 111 | /- 1.3 (2 points). 
Implement a `safe` tactic that first invokes `safe_intros` 112 | on all goals, then `safe_cases` on all emerging subgoals, before it tries 113 | `assumption` on all emerging subsubgoals. -/ 114 | 115 | macro "safe" : tactic => 116 | sorry 117 | 118 | theorem abcdef_abcd (a b c d e f : Prop) (P : ℕ → Prop) 119 | (hneg: ¬ a) (hand : a ∧ b ∧ c) (hor : c ∨ d) (himp : b → e) (hiff : e ↔ f) 120 | (hex : ∃x, P x) : 121 | a → ¬ b ∧ (c ↔ d) := 122 | by 123 | safe 124 | /- The proof state should be roughly as follows: 125 | 126 | case left.intro.intro.intro 127 | a b c d e f : Prop 128 | P : ℕ → Prop 129 | hneg : ¬a 130 | hor : c ∨ d 131 | himp : b → e 132 | hiff : e ↔ f 133 | a_1 : a 134 | a_2 : b 135 | left : a 136 | w : ℕ 137 | h : P w 138 | left_1 : b 139 | right : c 140 | ⊢ False 141 | 142 | case right.mp.intro.intro.intro 143 | a b c d e f : Prop 144 | P : ℕ → Prop 145 | hneg : ¬a 146 | hor : c ∨ d 147 | himp : b → e 148 | hiff : e ↔ f 149 | a_1 : a 150 | a_2 : c 151 | left : a 152 | w : ℕ 153 | h : P w 154 | left_1 : b 155 | right : c 156 | ⊢ d -/ 157 | repeat' sorry 158 | 159 | 160 | /- ## Question 2 (2 bonus points): An `aesop`-Like Tactic 161 | 162 | 2.1 (1 bonus point). Develop a simple `aesop`-like tactic. 163 | 164 | This tactic should apply all safe introduction and elimination rules. In 165 | addition, it should try potentially unsafe rules (such as `Or.inl` and 166 | `False.elim`) but backtrack at some point (or try several possibilities in 167 | parallel). Iterative deepening may be a valid approach, or best-first search, or 168 | breadth-first search. The tactic should also try to apply assumptions whose 169 | conclusion matches the goal, but backtrack if necessary. 170 | 171 | Hint: The `MonadBacktrack` monad class might be useful. 172 | 173 | 2.2 (1 bonus point). Test your tactic on some benchmarks. 174 | 175 | You can try your tactic on logic puzzles of the kinds we proved in exercise and 176 | homework 3. Please include these below. -/ 177 | 178 | end LoVe 179 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe09_OperationalSemantics_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe09_OperationalSemantics_Demo 5 | 6 | 7 | /- # LoVe Exercise 9: Operational Semantics 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: Guarded Command Language 19 | 20 | In 1976, E. W. Dijkstra introduced the guarded command language (GCL), a 21 | minimalistic imperative language with built-in nondeterminism. A grammar for one 22 | of its variants is given below: 23 | 24 | S ::= x := e -- assignment 25 | | assert B -- assertion 26 | | S ; S -- sequential composition 27 | | S | ⋯ | S -- nondeterministic choice 28 | | loop S -- nondeterministic iteration 29 | 30 | Assignment and sequential composition are as in the WHILE language. The other 31 | statements have the following semantics: 32 | 33 | * `assert B` aborts if `B` evaluates to false; otherwise, the command is a 34 | no-op. 35 | 36 | * `S | ⋯ | S` chooses any of the branches and executes it, ignoring the other 37 | branches. 38 | 39 | * `loop S` executes `S` any number of times. 
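For example, under this informal semantics the program

  loop (n := n - 1) ;
  assert (n = 0)

may decrement `n` any number of times, but only the runs that actually bring
`n` down to `0` survive the final assertion; all other runs are simply ruled
out. This illustrates how nondeterministic iteration and assertions interact.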
40 | 41 | In Lean, GCL is captured by the following inductive type: -/ 42 | 43 | namespace GCL 44 | 45 | inductive Stmt : Type 46 | | assign : String → (State → ℕ) → Stmt 47 | | assert : (State → Prop) → Stmt 48 | | seq : Stmt → Stmt → Stmt 49 | | choice : List Stmt → Stmt 50 | | loop : Stmt → Stmt 51 | 52 | infixr:90 "; " => Stmt.seq 53 | 54 | /- 1.1. Complete the following big-step semantics, based on the informal 55 | specification of GCL above. -/ 56 | 57 | inductive BigStep : (Stmt × State) → State → Prop 58 | -- enter the missing `assign` rule here 59 | | assert (B s) (hB : B s) : 60 | BigStep (Stmt.assert B, s) s 61 | -- enter the missing `seq` rule here 62 | -- below, `Ss[i]'hless` returns element `i` of `Ss`, which exists thanks to 63 | -- condition `hless` 64 | | choice (Ss s t i) (hless : i < List.length Ss) 65 | (hbody : BigStep (Ss[i]'hless, s) t) : 66 | BigStep (Stmt.choice Ss, s) t 67 | -- enter the missing `loop` rules here 68 | 69 | infixl:110 " ⟹ " => BigStep 70 | 71 | /- 1.2. Prove the following inversion rules, as we did in the lecture for the 72 | WHILE language. -/ 73 | 74 | @[simp] theorem BigStep_assign_iff {x a s t} : 75 | (Stmt.assign x a, s) ⟹ t ↔ t = s[x ↦ a s] := 76 | sorry 77 | 78 | @[simp] theorem BigStep_assert {B s t} : 79 | (Stmt.assert B, s) ⟹ t ↔ t = s ∧ B s := 80 | sorry 81 | 82 | @[simp] theorem BigStep_seq_iff {S₁ S₂ s t} : 83 | (Stmt.seq S₁ S₂, s) ⟹ t ↔ (∃u, (S₁, s) ⟹ u ∧ (S₂, u) ⟹ t) := 84 | sorry 85 | 86 | theorem BigStep_loop {S s u} : 87 | (Stmt.loop S, s) ⟹ u ↔ 88 | (s = u ∨ (∃t, (S, s) ⟹ t ∧ (Stmt.loop S, t) ⟹ u)) := 89 | sorry 90 | 91 | /- This one is more difficult: -/ 92 | 93 | @[simp] theorem BigStep_choice {Ss s t} : 94 | (Stmt.choice Ss, s) ⟹ t ↔ 95 | (∃(i : ℕ) (hless : i < List.length Ss), (Ss[i]'hless, s) ⟹ t) := 96 | sorry 97 | 98 | end GCL 99 | 100 | /- 1.3. Complete the translation below of a deterministic program to a GCL 101 | program, by filling in the `sorry` placeholders below. -/ 102 | 103 | def gcl_of : Stmt → GCL.Stmt 104 | | Stmt.skip => 105 | GCL.Stmt.assert (fun _ ↦ True) 106 | | Stmt.assign x a => 107 | sorry 108 | | S; T => 109 | sorry 110 | | Stmt.ifThenElse B S T => 111 | sorry 112 | | Stmt.whileDo B S => 113 | sorry 114 | 115 | /- 1.4. In the definition of `gcl_of` above, `skip` is translated to 116 | `assert (fun _ ↦ True)`. Looking at the big-step semantics of both constructs, 117 | we can convince ourselves that it makes sense. Can you think of other correct 118 | ways to define the `skip` case? -/ 119 | 120 | -- enter your answer here 121 | 122 | 123 | /- ## Question 2: Program Equivalence 124 | 125 | For this question, we introduce the notion of program equivalence: `S₁ ~ S₂`. -/ 126 | 127 | def BigStepEquiv (S₁ S₂ : Stmt) : Prop := 128 | ∀s t, (S₁, s) ⟹ t ↔ (S₂, s) ⟹ t 129 | 130 | infix:50 (priority := high) " ~ " => BigStepEquiv 131 | 132 | /- Program equivalence is an equivalence relation, i.e., it is reflexive, 133 | symmetric, and transitive. -/ 134 | 135 | theorem BigStepEquiv.refl {S} : 136 | S ~ S := 137 | fix s t : State 138 | show (S, s) ⟹ t ↔ (S, s) ⟹ t from 139 | by rfl 140 | 141 | theorem BigStepEquiv.symm {S₁ S₂} : 142 | S₁ ~ S₂ → S₂ ~ S₁ := 143 | assume h : S₁ ~ S₂ 144 | fix s t : State 145 | show (S₂, s) ⟹ t ↔ (S₁, s) ⟹ t from 146 | Iff.symm (h s t) 147 | 148 | theorem BigStepEquiv.trans {S₁ S₂ S₃} (h₁₂ : S₁ ~ S₂) (h₂₃ : S₂ ~ S₃) : 149 | S₁ ~ S₃ := 150 | fix s t : State 151 | show (S₁, s) ⟹ t ↔ (S₃, s) ⟹ t from 152 | Iff.trans (h₁₂ s t) (h₂₃ s t) 153 | 154 | /- 2.1. 
Prove the following program equivalences. -/ 155 | 156 | theorem BigStepEquiv.skip_assign_id {x} : 157 | Stmt.assign x (fun s ↦ s x) ~ Stmt.skip := 158 | sorry 159 | 160 | theorem BigStepEquiv.seq_skip_left {S} : 161 | Stmt.skip; S ~ S := 162 | sorry 163 | 164 | theorem BigStepEquiv.seq_skip_right {S} : 165 | S; Stmt.skip ~ S := 166 | sorry 167 | 168 | theorem BigStepEquiv.if_seq_while_skip {B S} : 169 | Stmt.ifThenElse B (S; Stmt.whileDo B S) Stmt.skip ~ Stmt.whileDo B S := 170 | sorry 171 | 172 | /- 2.2 (**optional**). Program equivalence can be used to replace subprograms 173 | by other subprograms with the same semantics. Prove the following so-called 174 | congruence rules that facilitate such replacement: -/ 175 | 176 | theorem BigStepEquiv.seq_congr {S₁ S₂ T₁ T₂} (hS : S₁ ~ S₂) 177 | (hT : T₁ ~ T₂) : 178 | S₁; T₁ ~ S₂; T₂ := 179 | sorry 180 | 181 | theorem BigStepEquiv.if_congr {B S₁ S₂ T₁ T₂} (hS : S₁ ~ S₂) (hT : T₁ ~ T₂) : 182 | Stmt.ifThenElse B S₁ T₁ ~ Stmt.ifThenElse B S₂ T₂ := 183 | sorry 184 | 185 | end LoVe 186 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe09_OperationalSemantics_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe02_ProgramsAndTheorems_Demo 5 | 6 | 7 | /- # LoVe Homework 9 (10 points + 1 bonus point): Operational Semantics 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | 20 | /- ## Question 1 (5 points): Arithmetic Expressions 21 | 22 | Recall the type of arithmetic expressions from lecture 1 and its evaluation 23 | function: -/ 24 | 25 | #check AExp 26 | #check eval 27 | 28 | /- Let us introduce the following abbreviation for an environment that maps 29 | variable names to values: -/ 30 | 31 | def Envir : Type := 32 | String → ℤ 33 | 34 | /- 1.1 (2 points). Complete the following Lean definition of a big-step-style 35 | semantics for arithmetic expressions. The predicate `BigStep` (`⟹`) relates 36 | an arithmetic expression, an environment, and the value to which the expression 37 | evaluates in the given environment: -/ 38 | 39 | inductive BigStep : AExp × Envir → ℤ → Prop 40 | | num (i env) : BigStep (AExp.num i, env) i 41 | 42 | infix:60 " ⟹ " => BigStep 43 | 44 | /- 1.2 (1 point). Prove the following theorem to validate your definition 45 | above. 46 | 47 | Hint: It may help to first prove 48 | `(AExp.add (AExp.num 2) (AExp.num 2), env) ⟹ 2 + 2`. -/ 49 | 50 | theorem BigStep_add_two_two (env : Envir) : 51 | (AExp.add (AExp.num 2) (AExp.num 2), env) ⟹ 4 := 52 | sorry 53 | 54 | /- 1.3 (2 points). Prove that the big-step semantics is sound with respect to 55 | the `eval` function: -/ 56 | 57 | theorem BigStep_sound (aenv : AExp × Envir) (i : ℤ) (hstep : aenv ⟹ i) : 58 | eval (Prod.snd aenv) (Prod.fst aenv) = i := 59 | sorry 60 | 61 | 62 | /- ## Question 2 (5 points + 1 bonus point): Semantics of Regular Expressions 63 | 64 | Regular expressions are a very popular tool for software development. Often, 65 | when textual input needs to be analyzed it is matched against a regular 66 | expression. 
In this question, we define the syntax of regular expressions and 67 | what it means for a regular expression to match a string. 68 | 69 | We define `Regex` to represent the following grammar: 70 | 71 | R ::= ∅ -- `nothing`: matches nothing 72 | | ε -- `empty`: matches the empty string 73 | | a -- `atom`: matches the atom `a` 74 | | R ⬝ R -- `concat`: matches the concatenation of two regexes 75 | | R + R -- `alt`: matches either of two regexes 76 | | R* -- `star`: matches arbitrary many repetitions of a Regex 77 | 78 | Notice the rough correspondence with a WHILE language: 79 | 80 | `empty` ~ `skip` 81 | `atom` ~ assignment 82 | `concat` ~ sequential composition 83 | `alt` ~ conditional statement 84 | `star` ~ while loop -/ 85 | 86 | inductive Regex (α : Type) : Type 87 | | nothing : Regex α 88 | | empty : Regex α 89 | | atom : α → Regex α 90 | | concat : Regex α → Regex α → Regex α 91 | | alt : Regex α → Regex α → Regex α 92 | | star : Regex α → Regex α 93 | 94 | /- The `Matches r s` predicate indicates that the regular expression `r` matches 95 | the string `s` (where the string is a sequence of atoms). -/ 96 | 97 | inductive Matches {α : Type} : Regex α → List α → Prop 98 | | empty : 99 | Matches Regex.empty [] 100 | | atom (a : α) : 101 | Matches (Regex.atom a) [a] 102 | | concat (r₁ r₂ : Regex α) (s₁ s₂ : List α) (h₁ : Matches r₁ s₁) 103 | (h₂ : Matches r₂ s₂) : 104 | Matches (Regex.concat r₁ r₂) (s₁ ++ s₂) 105 | | alt_left (r₁ r₂ : Regex α) (s : List α) (h : Matches r₁ s) : 106 | Matches (Regex.alt r₁ r₂) s 107 | | alt_right (r₁ r₂ : Regex α) (s : List α) (h : Matches r₂ s) : 108 | Matches (Regex.alt r₁ r₂) s 109 | | star_base (r : Regex α) : 110 | Matches (Regex.star r) [] 111 | | star_step (r : Regex α) (s s' : List α) (h₁ : Matches r s) 112 | (h₂ : Matches (Regex.star r) s') : 113 | Matches (Regex.star r) (s ++ s') 114 | 115 | /- The introduction rules correspond to the following cases: 116 | 117 | * match the empty string 118 | * match one atom (e.g., character) 119 | * match two concatenated regexes 120 | * match the left option 121 | * match the right option 122 | * match the empty string (the base case of `R*`) 123 | * match `R` followed again by `R*` (the induction step of `R*`) 124 | 125 | 2.1 (1 point). Explain why there is no rule for `nothing`. -/ 126 | 127 | -- enter your answer here 128 | 129 | /- 2.2 (4 points). Prove the following inversion rules. -/ 130 | 131 | @[simp] theorem Matches_atom {α : Type} {s : List α} {a : α} : 132 | Matches (Regex.atom a) s ↔ s = [a] := 133 | sorry 134 | 135 | @[simp] theorem Matches_nothing {α : Type} {s : List α} : 136 | ¬ Matches Regex.nothing s := 137 | sorry 138 | 139 | @[simp] theorem Matches_empty {α : Type} {s : List α} : 140 | Matches Regex.empty s ↔ s = [] := 141 | sorry 142 | 143 | @[simp] theorem Matches_concat {α : Type} {s : List α} {r₁ r₂ : Regex α} : 144 | Matches (Regex.concat r₁ r₂) s 145 | ↔ (∃s₁ s₂, Matches r₁ s₁ ∧ Matches r₂ s₂ ∧ s = s₁ ++ s₂) := 146 | sorry 147 | 148 | @[simp] theorem Matches_alt {α : Type} {s : List α} {r₁ r₂ : Regex α} : 149 | Matches (Regex.alt r₁ r₂) s ↔ (Matches r₁ s ∨ Matches r₂ s) := 150 | sorry 151 | 152 | /- 2.3 (1 bonus point). Prove the following inversion rule. 
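Note that, unlike the inversion rules of question 2.2, this rule is not tagged
`@[simp]`: its right-hand side mentions `Matches (Regex.star r)` again, so
registering it as a rewrite rule could easily make `simp` loop.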
-/ 153 | 154 | theorem Matches_star {α : Type} {s : List α} {r : Regex α} : 155 | Matches (Regex.star r) s ↔ 156 | (s = [] ∨ (∃s₁ s₂, Matches r s₁ ∧ Matches (Regex.star r) s₂ ∧ s = s₁ ++ s₂)) := 157 | sorry 158 | 159 | end LoVe 160 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe10_HoareLogic_Demo.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe08_Metaprogramming_Demo 5 | import LoVe.LoVe09_OperationalSemantics_Demo 6 | 7 | 8 | /- # LoVe Demo 10: Hoare Logic 9 | 10 | We review a second way to specify the semantics of a programming language: Hoare 11 | logic. If operational semantics corresponds to an idealized interpreter, 12 | __Hoare logic__ (also called __axiomatic semantics__) corresponds to a verifier. 13 | Hoare logic is particularly convenient to reason about concrete programs. -/ 14 | 15 | 16 | set_option autoImplicit false 17 | set_option tactic.hygienic false 18 | 19 | open Lean 20 | open Lean.Meta 21 | open Lean.Elab.Tactic 22 | 23 | namespace LoVe 24 | 25 | 26 | /- ## Hoare Triples 27 | 28 | The basic judgments of Hoare logic are often called __Hoare triples__. They have 29 | the form 30 | 31 | `{P} S {Q}` 32 | 33 | where `S` is a statement, and `P` and `Q` (called __precondition__ and 34 | __postcondition__) are logical formulas over the state variables. 35 | 36 | Intended meaning: 37 | 38 | If `P` holds before `S` is executed and the execution terminates normally, 39 | `Q` holds at termination. 40 | 41 | This is a __partial correctness__ statement: The program is correct if it 42 | terminates normally (i.e., no run-time error, no infinite loop or divergence). 43 | 44 | All of these Hoare triples are valid (with respect to the intended meaning): 45 | 46 | `{True} b := 4 {b = 4}` 47 | `{a = 2} b := 2 * a {a = 2 ∧ b = 4}` 48 | `{b ≥ 5} b := b + 1 {b ≥ 6}` 49 | `{False} skip {b = 100}` 50 | `{True} while i ≠ 100 do i := i + 1 {i = 100}` 51 | 52 | 53 | ## Hoare Rules 54 | 55 | The following is a complete set of rules for reasoning about WHILE programs: 56 | 57 | ———————————— Skip 58 | {P} skip {P} 59 | 60 | ——————————————————— Assign 61 | {Q[a/x]} x := a {Q} 62 | 63 | {P} S {R} {R} S' {Q} 64 | —————————————————————— Seq 65 | {P} S; S' {Q} 66 | 67 | {P ∧ B} S {Q} {P ∧ ¬B} S' {Q} 68 | ——————————————————————————————— If 69 | {P} if B then S else S' {Q} 70 | 71 | {I ∧ B} S {I} 72 | ————————————————————————— While 73 | {I} while B do S {I ∧ ¬B} 74 | 75 | P' → P {P} S {Q} Q → Q' 76 | ——————————————————————————— Conseq 77 | {P'} S {Q'} 78 | 79 | `Q[a/x]` denotes `Q` with `x` replaced by `a`. 80 | 81 | In the `While` rule, `I` is called an __invariant__. 82 | 83 | Except for `Conseq`, the rules are syntax-driven: by looking at a program, we 84 | see immediately which rule to apply. 85 | 86 | Example derivations: 87 | 88 | —————————————————————— Assign —————————————————————— Assign 89 | {a = 2} b := a {b = 2} {b = 2} c := b {c = 2} 90 | —————————————————————————————————————————————————————— Seq 91 | {a = 2} b := a; c := b {c = 2} 92 | 93 | 94 | —————————————————————— Assign 95 | x > 10 → x > 5 {x > 5} y := x {y > 5} y > 5 → y > 0 96 | ——————————————————————————————————————————————————————— Conseq 97 | {x > 10} y := x {y > 0} 98 | 99 | Various __derived rules__ can be proved to be correct in terms of the standard 100 | rules. 
For example, we can derive bidirectional rules for `skip`, `:=`, and 101 | `while`: 102 | 103 | P → Q 104 | ———————————— Skip' 105 | {P} skip {Q} 106 | 107 | P → Q[a/x] 108 | —————————————— Assign' 109 | {P} x := a {Q} 110 | 111 | {P ∧ B} S {P} P ∧ ¬B → Q 112 | —————————————————————————— While' 113 | {P} while B do S {Q} 114 | 115 | 116 | ## A Semantic Approach to Hoare Logic 117 | 118 | We can, and will, define Hoare triples **semantically** in Lean. 119 | 120 | We will use predicates on states (`State → Prop`) to represent pre- and 121 | postconditions, following the shallow embedding style. -/ 122 | 123 | def PartialHoare (P : State → Prop) (S : Stmt) 124 | (Q : State → Prop) : Prop := 125 | ∀s t, P s → (S, s) ⟹ t → Q t 126 | 127 | macro "{*" P:term " *} " "(" S:term ")" " {* " Q:term " *}" : term => 128 | `(PartialHoare $P $S $Q) 129 | 130 | namespace PartialHoare 131 | 132 | theorem skip_intro {P} : 133 | {* P *} (Stmt.skip) {* P *} := 134 | by 135 | intro s t hs hst 136 | cases hst 137 | assumption 138 | 139 | theorem assign_intro (P) {x a} : 140 | {* fun s ↦ P (s[x ↦ a s]) *} (Stmt.assign x a) {* P *} := 141 | by 142 | intro s t P' hst 143 | cases hst with 144 | | assign => assumption 145 | 146 | theorem seq_intro {P Q R S T} (hS : {* P *} (S) {* Q *}) 147 | (hT : {* Q *} (T) {* R *}) : 148 | {* P *} (S; T) {* R *} := 149 | by 150 | intro s t hs hst 151 | cases hst with 152 | | seq _ _ _ u d hS' hT' => 153 | apply hT 154 | { apply hS 155 | { exact hs } 156 | { assumption } } 157 | { assumption } 158 | 159 | theorem if_intro {B P Q S T} 160 | (hS : {* fun s ↦ P s ∧ B s *} (S) {* Q *}) 161 | (hT : {* fun s ↦ P s ∧ ¬ B s *} (T) {* Q *}) : 162 | {* P *} (Stmt.ifThenElse B S T) {* Q *} := 163 | by 164 | intro s t hs hst 165 | cases hst with 166 | | if_true _ _ _ _ _ hB hS' => 167 | apply hS 168 | exact And.intro hs hB 169 | assumption 170 | | if_false _ _ _ _ _ hB hT' => 171 | apply hT 172 | exact And.intro hs hB 173 | assumption 174 | 175 | theorem while_intro (P) {B S} 176 | (h : {* fun s ↦ P s ∧ B s *} (S) {* P *}) : 177 | {* P *} (Stmt.whileDo B S) {* fun s ↦ P s ∧ ¬ B s *} := 178 | by 179 | intro s t hs hst 180 | generalize ws_eq : (Stmt.whileDo B S, s) = Ss 181 | rw [ws_eq] at hst 182 | induction hst generalizing s with 183 | | skip s' => cases ws_eq 184 | | assign x a s' => cases ws_eq 185 | | seq S T s' t' u hS hT ih => cases ws_eq 186 | | if_true B S T s' t' hB hS ih => cases ws_eq 187 | | if_false B S T s' t' hB hT ih => cases ws_eq 188 | | while_true B' S' s' t' u hB' hS hw ih_hS ih_hw => 189 | cases ws_eq 190 | apply ih_hw 191 | { apply h 192 | { apply And.intro <;> 193 | assumption } 194 | { exact hS } } 195 | { rfl } 196 | | while_false B' S' s' hB' => 197 | cases ws_eq 198 | aesop 199 | 200 | theorem consequence {P P' Q Q' S} 201 | (h : {* P *} (S) {* Q *}) (hp : ∀s, P' s → P s) 202 | (hq : ∀s, Q s → Q' s) : 203 | {* P' *} (S) {* Q' *} := 204 | fix s t : State 205 | assume hs : P' s 206 | assume hst : (S, s) ⟹ t 207 | show Q' t from 208 | hq _ (h s t (hp s hs) hst) 209 | 210 | theorem consequence_left (P') {P Q S} 211 | (h : {* P *} (S) {* Q *}) (hp : ∀s, P' s → P s) : 212 | {* P' *} (S) {* Q *} := 213 | consequence h hp (by aesop) 214 | 215 | theorem consequence_right (Q) {Q' P S} 216 | (h : {* P *} (S) {* Q *}) (hq : ∀s, Q s → Q' s) : 217 | {* P *} (S) {* Q' *} := 218 | consequence h (by aesop) hq 219 | 220 | theorem skip_intro' {P Q} (h : ∀s, P s → Q s) : 221 | {* P *} (Stmt.skip) {* Q *} := 222 | consequence skip_intro h (by aesop) 223 | 224 | theorem assign_intro' {P 
Q x a} 225 | (h : ∀s, P s → Q (s[x ↦ a s])): 226 | {* P *} (Stmt.assign x a) {* Q *} := 227 | consequence (assign_intro Q) h (by aesop) 228 | 229 | theorem seq_intro' {P Q R S T} (hT : {* Q *} (T) {* R *}) 230 | (hS : {* P *} (S) {* Q *}) : 231 | {* P *} (S; T) {* R *} := 232 | seq_intro hS hT 233 | 234 | theorem while_intro' {B P Q S} (I) 235 | (hS : {* fun s ↦ I s ∧ B s *} (S) {* I *}) 236 | (hP : ∀s, P s → I s) 237 | (hQ : ∀s, ¬ B s → I s → Q s) : 238 | {* P *} (Stmt.whileDo B S) {* Q *} := 239 | consequence (while_intro I hS) hP (by aesop) 240 | 241 | theorem assign_intro_forward (P) {x a} : 242 | {* P *} 243 | (Stmt.assign x a) 244 | {* fun s ↦ ∃n₀, P (s[x ↦ n₀]) ∧ s x = a (s[x ↦ n₀]) *} := 245 | by 246 | apply assign_intro' 247 | intro s hP 248 | apply Exists.intro (s x) 249 | simp [*] 250 | 251 | theorem assign_intro_backward (Q) {x a} : 252 | {* fun s ↦ ∃n', Q (s[x ↦ n']) ∧ n' = a s *} 253 | (Stmt.assign x a) 254 | {* Q *} := 255 | by 256 | apply assign_intro' 257 | intro s hP 258 | cases hP with 259 | | intro n' hQ => aesop 260 | 261 | end PartialHoare 262 | 263 | 264 | /- ## First Program: Exchanging Two Variables -/ 265 | 266 | def SWAP : Stmt := 267 | Stmt.assign "t" (fun s ↦ s "a"); 268 | Stmt.assign "a" (fun s ↦ s "b"); 269 | Stmt.assign "b" (fun s ↦ s "t") 270 | 271 | theorem SWAP_correct (a₀ b₀ : ℕ) : 272 | {* fun s ↦ s "a" = a₀ ∧ s "b" = b₀ *} 273 | (SWAP) 274 | {* fun s ↦ s "a" = b₀ ∧ s "b" = a₀ *} := 275 | by 276 | apply PartialHoare.seq_intro' 277 | apply PartialHoare.seq_intro' 278 | apply PartialHoare.assign_intro 279 | apply PartialHoare.assign_intro 280 | apply PartialHoare.assign_intro' 281 | aesop 282 | 283 | 284 | /- ## Second Program: Adding Two Numbers -/ 285 | 286 | def ADD : Stmt := 287 | Stmt.whileDo (fun s ↦ s "n" ≠ 0) 288 | (Stmt.assign "n" (fun s ↦ s "n" - 1); 289 | Stmt.assign "m" (fun s ↦ s "m" + 1)) 290 | 291 | theorem ADD_correct (n₀ m₀ : ℕ) : 292 | {* fun s ↦ s "n" = n₀ ∧ s "m" = m₀ *} 293 | (ADD) 294 | {* fun s ↦ s "n" = 0 ∧ s "m" = n₀ + m₀ *} := 295 | PartialHoare.while_intro' (fun s ↦ s "n" + s "m" = n₀ + m₀) 296 | (by 297 | apply PartialHoare.seq_intro' 298 | { apply PartialHoare.assign_intro } 299 | { apply PartialHoare.assign_intro' 300 | aesop }) 301 | (by aesop) 302 | (by aesop) 303 | 304 | /- How did we come up with this invariant? The invariant must 305 | 306 | 1. be true before we enter the loop; 307 | 308 | 2. remain true after each iteration of the loop if it was true before the 309 | iteration; 310 | 311 | 3. be strong enough to imply the desired loop postcondition. 312 | 313 | The invariant `True` meets 1 and 2 but usually not 3. Similarly, `False` meets 314 | 2 and 3 but usually not 1. Suitable invariants are often of the form 315 | 316 | __work done__ + __work remaining__ = __desired result__ 317 | 318 | where `+` is some suitable operator. When we enter the loop, __work done__ will 319 | often be `0`. And when we exit the loop, __work remaining__ should be `0`. 320 | 321 | For the `ADD` loop: 322 | 323 | * __work done__ is `m`; 324 | * __work remaining__ is `n`; 325 | * __desired result__ is `n₀ + m₀`. 326 | 327 | 328 | ## A Verification Condition Generator 329 | 330 | __Verification condition generators__ (VCGs) are programs that apply Hoare rules 331 | automatically, producing __verification conditions__ that must be proved by the 332 | user. The user must usually also provide strong enough loop invariants, as an 333 | annotation in their programs. 334 | 335 | We can use Lean's metaprogramming framework to define a simple VCG. 
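For example, when such a VCG reaches a goal of the form
`{* P *} (Stmt.assign x a) {* Q *}` with a known precondition `P`, it can apply
the backward rule `assign_intro'`, leaving only the purely logical verification
condition `∀s, P s → Q (s[x ↦ a s])` for the user (or a tactic such as `aesop`)
to discharge.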
336 | 337 | Hundreds of program verification tools are based on these principles. 338 | 339 | VCGs typically work backwards from the postcondition, using backward rules 340 | (rules stated to have an arbitrary `Q` as their postcondition). This works well 341 | because `Assign` is backward. -/ 342 | 343 | def Stmt.invWhileDo (I B : State → Prop) (S : Stmt) : Stmt := 344 | Stmt.whileDo B S 345 | 346 | namespace PartialHoare 347 | 348 | theorem invWhile_intro {B I Q S} 349 | (hS : {* fun s ↦ I s ∧ B s *} (S) {* I *}) 350 | (hQ : ∀s, ¬ B s → I s → Q s) : 351 | {* I *} (Stmt.invWhileDo I B S) {* Q *} := 352 | while_intro' I hS (by aesop) hQ 353 | 354 | theorem invWhile_intro' {B I P Q S} 355 | (hS : {* fun s ↦ I s ∧ B s *} (S) {* I *}) 356 | (hP : ∀s, P s → I s) (hQ : ∀s, ¬ B s → I s → Q s) : 357 | {* P *} (Stmt.invWhileDo I B S) {* Q *} := 358 | while_intro' I hS hP hQ 359 | 360 | end PartialHoare 361 | 362 | def matchPartialHoare : Expr → Option (Expr × Expr × Expr) 363 | | (Expr.app (Expr.app (Expr.app 364 | (Expr.const ``PartialHoare _) P) S) Q) => 365 | Option.some (P, S, Q) 366 | | _ => 367 | Option.none 368 | 369 | partial def vcg : TacticM Unit := 370 | do 371 | let goals ← getUnsolvedGoals 372 | if goals.length != 0 then 373 | let target ← getMainTarget 374 | match matchPartialHoare target with 375 | | Option.none => return 376 | | Option.some (P, S, Q) => 377 | if Expr.isAppOfArity S ``Stmt.skip 0 then 378 | if Expr.isMVar P then 379 | applyConstant ``PartialHoare.skip_intro 380 | else 381 | applyConstant ``PartialHoare.skip_intro' 382 | else if Expr.isAppOfArity S ``Stmt.assign 2 then 383 | if Expr.isMVar P then 384 | applyConstant ``PartialHoare.assign_intro 385 | else 386 | applyConstant ``PartialHoare.assign_intro' 387 | else if Expr.isAppOfArity S ``Stmt.seq 2 then 388 | andThenOnSubgoals 389 | (applyConstant ``PartialHoare.seq_intro') vcg 390 | else if Expr.isAppOfArity S ``Stmt.ifThenElse 3 then 391 | andThenOnSubgoals 392 | (applyConstant ``PartialHoare.if_intro) vcg 393 | else if Expr.isAppOfArity S ``Stmt.invWhileDo 3 then 394 | if Expr.isMVar P then 395 | andThenOnSubgoals 396 | (applyConstant ``PartialHoare.invWhile_intro) vcg 397 | else 398 | andThenOnSubgoals 399 | (applyConstant ``PartialHoare.invWhile_intro') 400 | vcg 401 | else 402 | failure 403 | 404 | elab "vcg" : tactic => 405 | vcg 406 | 407 | 408 | /- ## Second Program Revisited: Adding Two Numbers -/ 409 | 410 | theorem ADD_correct_vcg (n₀ m₀ : ℕ) : 411 | {* fun s ↦ s "n" = n₀ ∧ s "m" = m₀ *} 412 | (ADD) 413 | {* fun s ↦ s "n" = 0 ∧ s "m" = n₀ + m₀ *} := 414 | show {* fun s ↦ s "n" = n₀ ∧ s "m" = m₀ *} 415 | (Stmt.invWhileDo (fun s ↦ s "n" + s "m" = n₀ + m₀) 416 | (fun s ↦ s "n" ≠ 0) 417 | (Stmt.assign "n" (fun s ↦ s "n" - 1); 418 | Stmt.assign "m" (fun s ↦ s "m" + 1))) 419 | {* fun s ↦ s "n" = 0 ∧ s "m" = n₀ + m₀ *} from 420 | by 421 | vcg <;> 422 | aesop 423 | 424 | 425 | /- ## Hoare Triples for Total Correctness 426 | 427 | __Total correctness__ asserts that the program not only is partially correct but 428 | also that it always terminates normally. Hoare triples for total correctness 429 | have the form 430 | 431 | [P] S [Q] 432 | 433 | Intended meaning: 434 | 435 | If `P` holds before `S` is executed, the execution terminates normally and 436 | `Q` holds in the final state. 437 | 438 | For deterministic programs, an equivalent formulation is as follows: 439 | 440 | If `P` holds before `S` is executed, there exists a state in which execution 441 | terminates normally and `Q` holds in that state. 
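(The two formulations coincide for deterministic programs because every initial
state admits at most one execution: if some terminating execution satisfying
`Q` exists, it is the only execution there is.)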
442 | 443 | Example: 444 | 445 | `[i ≤ 100] while i ≠ 100 do i := i + 1 [i = 100]` 446 | 447 | In our WHILE language, this only affects while loops, which must now be 448 | annotated by a __variant__ `V` (a natural number that decreases with each 449 | iteration): 450 | 451 | [I ∧ B ∧ V = v₀] S [I ∧ V < v₀] 452 | ——————————————————————————————— While-Var 453 | [I] while B do S [I ∧ ¬B] 454 | 455 | What is a suitable variant for the example above? -/ 456 | 457 | end LoVe 458 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe10_HoareLogic_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe10_HoareLogic_Demo 5 | 6 | 7 | /- # LoVe Exercise 10: Hoare Logic 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: Program Verification 19 | 20 | 1.1. The following WHILE program takes two numbers `a` and `b` and increments 21 | `b` until it reaches `a`: -/ 22 | 23 | def COUNT_UP : Stmt := 24 | Stmt.whileDo (fun s ↦ s "b" ≠ s "a") 25 | (Stmt.assign "b" (fun s ↦ s "b" + 1)) 26 | 27 | /- Prove the following Hoare triple. The main difficulty is to figure out which 28 | invariant to use for the while loop. The invariant should capture both the work 29 | that has been done already (the intermediate result) and the work that remains 30 | to be done. Use a `show` command to annotate the program with a loop invariant. 31 | 32 | Hint: If a variable `x` does not change in a program, it might be useful to 33 | record this in the invariant, by adding a conjunct `s "x" = x₀`. -/ 34 | 35 | theorem COUNT_UP_correct (a₀ : ℕ) : 36 | {* fun s ↦ s "a" = a₀ *} (COUNT_UP) {* fun s ↦ s "a" = a₀ ∧ s "b" = a₀ *} := 37 | sorry 38 | 39 | /- 1.2. What happens if the program is run with `b > a`? How is this captured 40 | by the Hoare triple? -/ 41 | 42 | -- enter your solution here 43 | 44 | /- 1.3. The following WHILE program is intended to compute the Gaussian sum up 45 | to `n`, leaving the result in `r`. -/ 46 | 47 | def GAUSS (N : ℕ) : Stmt := 48 | Stmt.assign "r" (fun s ↦ 0); 49 | Stmt.assign "n" (fun s ↦ 0); 50 | Stmt.whileDo (fun s ↦ s "n" ≠ N) 51 | (Stmt.assign "n" (fun s ↦ s "n" + 1); 52 | Stmt.assign "r" (fun s ↦ s "r" + s "n")) 53 | 54 | /- Here is a functional implementation of the same function: -/ 55 | 56 | def sumUpTo : ℕ → ℕ 57 | | 0 => 0 58 | | n + 1 => n + 1 + sumUpTo n 59 | 60 | /- Invoke `vcg` on `GAUSS` using a suitable loop invariant and prove the 61 | emerging verification conditions. -/ 62 | 63 | theorem GAUSS_correct (N : ℕ) : 64 | {* fun s ↦ True *} (GAUSS N) {* fun s ↦ s "r" = sumUpTo N *} := 65 | sorry 66 | 67 | /- 1.4 (**optional**). The following program `MUL` is intended to compute the 68 | product of `n` and `m`, leaving the result in `r`. Invoke `vcg` on `MUL` using a 69 | suitable loop invariant and prove the emerging verification conditions. 
-/ 70 | 71 | def MUL : Stmt := 72 | Stmt.assign "r" (fun s ↦ 0); 73 | Stmt.whileDo (fun s ↦ s "n" ≠ 0) 74 | (Stmt.assign "r" (fun s ↦ s "r" + s "m"); 75 | Stmt.assign "n" (fun s ↦ s "n" - 1)) 76 | 77 | theorem MUL_correct (n₀ m₀ : ℕ) : 78 | {* fun s ↦ s "n" = n₀ ∧ s "m" = m₀ *} (MUL) {* fun s ↦ s "r" = n₀ * m₀ *} := 79 | sorry 80 | 81 | 82 | /- ## Question 2: Hoare Triples for Total Correctness 83 | 84 | The following definition captures Hoare triples for total correctness for 85 | deterministic languages: -/ 86 | 87 | def TotalHoare (P : State → Prop) (S : Stmt) (Q : State → Prop) : Prop := 88 | ∀s, P s → ∃t, (S, s) ⟹ t ∧ Q t 89 | 90 | macro "[*" P:term " *] " "(" S:term ")" " [* " Q:term " *]" : term => 91 | `(TotalHoare $P $S $Q) 92 | 93 | namespace TotalHoare 94 | 95 | /- 2.1. Prove the consequence rule. -/ 96 | 97 | theorem consequence {P P' Q Q' S} 98 | (hS : [* P *] (S) [* Q *]) (hP : ∀s, P' s → P s) (hQ : ∀s, Q s → Q' s) : 99 | [* P' *] (S) [* Q' *] := 100 | sorry 101 | 102 | /- 2.2. Prove the rule for `skip`. -/ 103 | 104 | theorem skip_intro {P} : 105 | [* P *] (Stmt.skip) [* P *] := 106 | sorry 107 | 108 | /- 2.3. Prove the rule for `assign`. -/ 109 | 110 | theorem assign_intro {P x a} : 111 | [* fun s ↦ P (s[x ↦ a s]) *] (Stmt.assign x a) [* P *] := 112 | sorry 113 | 114 | /- 2.4. Prove the rule for `seq`. -/ 115 | 116 | theorem seq_intro {P Q R S T} (hS : [* P *] (S) [* Q *]) 117 | (hT : [* Q *] (T) [* R *]) : 118 | [* P *] (S; T) [* R *] := 119 | sorry 120 | 121 | /- 2.5. Complete the proof of the rule for `if`–`then`–`else`. 122 | 123 | Hint: The proof requires a case distinction on the truth value of `B s`. -/ 124 | 125 | theorem if_intro {B P Q S T} 126 | (hS : [* fun s ↦ P s ∧ B s *] (S) [* Q *]) 127 | (hT : [* fun s ↦ P s ∧ ¬ B s *] (T) [* Q *]) : 128 | [* P *] (Stmt.ifThenElse B S T) [* Q *] := 129 | sorry 130 | 131 | /- 2.6 (**optional**). Try to prove the rule for `while`. 132 | 133 | The rule is parameterized by a loop invariant `I` and by a variant `V` that 134 | decreases with each iteration of the loop body. 135 | 136 | Before we prove the desired theorem, we introduce an auxiliary theorem. Its 137 | proof requires induction by pattern matching and recursion. When using 138 | `var_while_intro_aux` as induction hypothesis we recommend to do it directly 139 | after proving that the argument is less than `v₀`: 140 | 141 | have ih : ∃u, (stmt.while b S, t) ⟹ u ∧ I u ∧ ¬ b u := 142 | have _ : V t < v₀ := 143 | … 144 | var_while_intro_aux I V h_inv (V t) … 145 | 146 | Similarly to `if`--`then`--`else`, the proof requires a case distinction on the 147 | truth value of `B s`. 
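(In the `have ih` snippet above, `stmt.while b S` and `¬ b u` should be read as `Stmt.whileDo B S` and `¬ B u`, matching the statement of `var_while_intro_aux` below.)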
-/ 148 | 149 | theorem var_while_intro_aux {B} (I : State → Prop) (V : State → ℕ) {S} 150 | (h_inv : ∀v₀, 151 | [* fun s ↦ I s ∧ B s ∧ V s = v₀ *] (S) [* fun s ↦ I s ∧ V s < v₀ *]) : 152 | ∀v₀ s, V s = v₀ → I s → ∃t, (Stmt.whileDo B S, s) ⟹ t ∧ I t ∧ ¬ B t 153 | | v₀, s, V_eq, hs => 154 | sorry 155 | 156 | theorem var_while_intro {B} (I : State → Prop) (V : State → ℕ) {S} 157 | (hinv : ∀v₀, 158 | [* fun s ↦ I s ∧ B s ∧ V s = v₀ *] (S) [* fun s ↦ I s ∧ V s < v₀ *]) : 159 | [* I *] (Stmt.whileDo B S) [* fun s ↦ I s ∧ ¬ B s *] := 160 | sorry 161 | 162 | end TotalHoare 163 | 164 | end LoVe 165 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe10_HoareLogic_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe09_OperationalSemantics_ExerciseSheet 5 | import LoVe.LoVe10_HoareLogic_Demo 6 | 7 | 8 | /- # LoVe Homework 10 (10 points + 1 bonus point): Hoare Logic 9 | 10 | Homework must be done individually. 11 | 12 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 13 | 14 | 15 | set_option autoImplicit false 16 | set_option tactic.hygienic false 17 | 18 | namespace LoVe 19 | 20 | 21 | /- ## Question 1 (5 points): Factorial 22 | 23 | The following WHILE program is intended to compute the factorial of `n₀`, leaving 24 | the result in `r`. -/ 25 | 26 | def FACT : Stmt := 27 | Stmt.assign "i" (fun s ↦ 0); 28 | Stmt.assign "r" (fun s ↦ 1); 29 | Stmt.whileDo (fun s ↦ s "i" ≠ s "n") 30 | (Stmt.assign "i" (fun s ↦ s "i" + 1); 31 | Stmt.assign "r" (fun s ↦ s "r" * s "i")) 32 | 33 | /- Recall the definition of the `fact` function: -/ 34 | 35 | #print fact 36 | 37 | /- Let us register its recursive equations as simplification rules to 38 | strengthen the simplifier and `aesop`, using some new Lean syntax: -/ 39 | 40 | attribute [simp] fact 41 | 42 | /- Prove the correctness of `FACT` using `vcg`. 43 | 44 | Hint: Remember to strengthen the loop invariant with `s "n" = n₀` to 45 | capture the fact that the variable `n` does not change. -/ 46 | 47 | theorem FACT_correct (n₀ : ℕ) : 48 | {* fun s ↦ s "n" = n₀ *} (FACT) {* fun s ↦ s "r" = fact n₀ *} := 49 | sorry 50 | 51 | 52 | /- ## Question 2 (5 points + 1 bonus point): 53 | ## Hoare Logic for the Guarded Command Language 54 | 55 | Recall the definition of GCL from exercise 9: -/ 56 | 57 | namespace GCL 58 | 59 | #check Stmt 60 | #check BigStep 61 | 62 | /- The definition of Hoare triples for partial correctness is unsurprising: -/ 63 | 64 | def PartialHoare (P : State → Prop) (S : Stmt) (Q : State → Prop) : Prop := 65 | ∀s t, P s → (S, s) ⟹ t → Q t 66 | 67 | macro (priority := high) "{*" P:term " *} " "(" S:term ")" " {* " Q:term " *}" : 68 | term => 69 | `(PartialHoare $P $S $Q) 70 | 71 | namespace PartialHoare 72 | 73 | /- 2.1 (5 points). 
Prove the following Hoare rules: -/ 74 | 75 | theorem consequence {P P' Q Q' S} (h : {* P *} (S) {* Q *}) 76 | (hp : ∀s, P' s → P s) (hq : ∀s, Q s → Q' s) : 77 | {* P' *} (S) {* Q' *} := 78 | sorry 79 | 80 | theorem assign_intro {P x a} : 81 | {* fun s ↦ P (s[x ↦ a s]) *} (Stmt.assign x a) {* P *} := 82 | sorry 83 | 84 | theorem assert_intro {P Q : State → Prop} : 85 | {* fun s ↦ Q s → P s *} (Stmt.assert Q) {* P *} := 86 | sorry 87 | 88 | theorem seq_intro {P Q R S T} 89 | (hS : {* P *} (S) {* Q *}) (hT : {* Q *} (T) {* R *}) : 90 | {* P *} (Stmt.seq S T) {* R *} := 91 | sorry 92 | 93 | theorem choice_intro {P Q Ss} 94 | (h : ∀i (hi : i < List.length Ss), {* P *} (Ss[i]'hi) {* Q *}) : 95 | {* P *} (Stmt.choice Ss) {* Q *} := 96 | sorry 97 | 98 | /- 2.2 (1 bonus point). Prove the rule for `loop`. Notice the similarity with 99 | the rule for `while` in the WHILE language. -/ 100 | 101 | theorem loop_intro {P S} (h : {* P *} (S) {* P *}) : 102 | {* P *} (Stmt.loop S) {* P *} := 103 | sorry 104 | 105 | end PartialHoare 106 | 107 | end GCL 108 | 109 | end LoVe 110 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe11_DenotationalSemantics_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe11_DenotationalSemantics_Demo 5 | 6 | 7 | /- # LoVe Exercise 11: Denotational Semantics 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: Monotonicity 19 | 20 | 1.1. Prove the following theorem from the lecture. -/ 21 | 22 | theorem Monotone_restrict {α β : Type} [PartialOrder α] (f : α → Set (β × β)) 23 | (p : β → Prop) (hf : Monotone f) : 24 | Monotone (fun a ↦ f a ⇃ p) := 25 | sorry 26 | 27 | /- 1.2. Prove its cousin. -/ 28 | 29 | theorem Monotone_comp {α β : Type} [PartialOrder α] (f g : α → Set (β × β)) 30 | (hf : Monotone f) (hg : Monotone g) : 31 | Monotone (fun a ↦ f a ◯ g a) := 32 | sorry 33 | 34 | 35 | /- ## Question 2: Regular Expressions 36 | 37 | __Regular expressions__, or __regexes__, are a highly popular tool for software 38 | development, to analyze textual inputs. Regexes are generated by the following 39 | grammar: 40 | 41 | R ::= ∅ 42 | | ε 43 | | a 44 | | R ⬝ R 45 | | R + R 46 | | R* 47 | 48 | Informally, the semantics of regular expressions is as follows: 49 | 50 | * `∅` accepts nothing; 51 | * `ε` accepts the empty string; 52 | * `a` accepts the atom `a`; 53 | * `R ⬝ R` accepts the concatenation of two regexes; 54 | * `R + R` accepts either of two regexes; 55 | * `R*` accepts arbitrary many repetitions of a regex. 56 | 57 | Notice the rough correspondence with a WHILE language: 58 | 59 | `∅` ~ diverging statement (e.g., `while true do skip`) 60 | `ε` ~ `skip` 61 | `a` ~ `:=` 62 | `⬝` ~ `;` 63 | `+` ~ `if then else` 64 | `*` ~ `while` loop -/ 65 | 66 | inductive Regex (α : Type) : Type 67 | | nothing : Regex α 68 | | empty : Regex α 69 | | atom : α → Regex α 70 | | concat : Regex α → Regex α → Regex α 71 | | alt : Regex α → Regex α → Regex α 72 | | star : Regex α → Regex α 73 | 74 | /- In this exercise, we explore an alternative semantics of regular 75 | expressions. 
Namely, we can imagine that the atoms represent binary relations, 76 | instead of letters or symbols. Concatenation corresponds to composition of 77 | relations, and alternation is union. Mathematically, regexes and binary 78 | relations are both instances of Kleene algebras. 79 | 80 | 2.1. Complete the following translation of regular expressions to relations. 81 | 82 | Hint: Exploit the correspondence with the WHILE language. -/ 83 | 84 | def rel_of_Regex {α : Type} : Regex (Set (α × α)) → Set (α × α) 85 | | Regex.nothing => ∅ 86 | | Regex.empty => Id 87 | -- enter the missing cases here 88 | 89 | /- 2.2. Prove the following recursive equation about your definition. -/ 90 | 91 | theorem rel_of_Regex_Star {α : Type} (r : Regex (Set (α × α))) : 92 | rel_of_Regex (Regex.star r) = 93 | rel_of_Regex (Regex.alt (Regex.concat r (Regex.star r)) Regex.empty) := 94 | sorry 95 | 96 | end LoVe 97 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe11_DenotationalSemantics_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe11_DenotationalSemantics_Demo 5 | 6 | 7 | /- # LoVe Homework 11 (10 points + 2 bonus points): Denotational Semantics 8 | 9 | Homework must be done individually. 10 | 11 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 12 | 13 | 14 | set_option autoImplicit false 15 | set_option tactic.hygienic false 16 | 17 | namespace LoVe 18 | 19 | /- The following command enables noncomputable decidability on every `Prop`. 20 | The `0` argument ensures this is used only when necessary; otherwise, it would 21 | make some computable definitions noncomputable for Lean. Depending on how you 22 | solve question 2.2, this command might help you. -/ 23 | 24 | attribute [instance 0] Classical.propDecidable 25 | 26 | /- Denotational semantics are well suited to functional programming. In this 27 | exercise, we will study some representations of functional programs in Lean and 28 | their denotational semantics. 29 | 30 | The `Nondet` type represents functional programs that can perform 31 | nondeterministic computations: A program can choose between many different 32 | computation paths / return values. Returning no results at all is represented 33 | by `fail`, and nondeterministic choice between two options, identified by the 34 | `Bool` values `true` and `false`, is represented by `choice`. -/ 35 | 36 | inductive Nondet (α : Type) : Type 37 | | just : α → Nondet α 38 | | fail : Nondet α 39 | | choice : (Bool → Nondet α) → Nondet α 40 | 41 | namespace Nondet 42 | 43 | 44 | /- ## Question 1 (5 points + 1 bonus point): The `Nondet` Monad 45 | 46 | The `Nondet` inductive type forms a monad. The `pure` operator is `Nondet.just`. 47 | `bind` is as follows: -/ 48 | 49 | def bind {α β : Type} : Nondet α → (α → Nondet β) → Nondet β 50 | | just a, f => f a 51 | | fail, f => fail 52 | | choice k, f => choice (fun b ↦ bind (k b) f) 53 | 54 | instance : Pure Nondet := 55 | { pure := just } 56 | 57 | instance : Bind Nondet := 58 | { bind := bind } 59 | 60 | /- 1.1 (5 points). Prove the three monad laws for `Nondet`. 61 | 62 | Hints: 63 | 64 | * To unfold the definition of `pure` and `>>=`, invoke 65 | `simp [Bind.bind, Pure.pure]`. 66 | 67 | * To reduce `f = g` to `∀x, f x = g x`, use the theorem `funext`. 
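For instance (a small illustration, not part of the homework), `funext` turns an equation between two functions into a pointwise goal:

    example : (fun n : ℕ ↦ n + 0) = id :=
      funext (fun n ↦ by simp)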
-/ 68 | 69 | theorem pure_bind {α β : Type} (a : α) (f : α → Nondet β) : 70 | pure a >>= f = f a := 71 | sorry 72 | 73 | theorem bind_pure {α : Type} : 74 | ∀na : Nondet α, na >>= pure = na := 75 | sorry 76 | 77 | theorem bind_assoc {α β γ : Type} : 78 | ∀(na : Nondet α) (f : α → Nondet β) (g : β → Nondet γ), 79 | ((na >>= f) >>= g) = (na >>= (fun a ↦ f a >>= g)) := 80 | sorry 81 | 82 | /- The function `portmanteau` computes a portmanteau of two lists: A 83 | portmanteau of `xs` and `ys` has `xs` as a prefix and `ys` as a suffix, and they 84 | overlap. We use `startsWith xs ys` to test that `ys` has `xs` as a prefix. -/ 85 | 86 | def startsWith : List ℕ → List ℕ → Bool 87 | | x :: xs, [] => false 88 | | [], ys => true 89 | | x :: xs, y :: ys => x = y && startsWith xs ys 90 | 91 | #eval startsWith [1, 2] [1, 2, 3] 92 | #eval startsWith [1, 2, 3] [1, 2] 93 | 94 | def portmanteau : List ℕ → List ℕ → List (List ℕ) 95 | | [], ys => [] 96 | | x :: xs, ys => 97 | List.map (List.cons x) (portmanteau xs ys) ++ 98 | (if startsWith (x :: xs) ys then [ys] else []) 99 | 100 | /- Here are some examples of portmanteaux: -/ 101 | 102 | #eval portmanteau [0, 1, 2, 3] [2, 3, 4] 103 | #eval portmanteau [0, 1] [2, 3, 4] 104 | #eval portmanteau [0, 1, 2, 1, 2] [1, 2, 1, 2, 3, 4] 105 | 106 | /- 1.2 (1 bonus point). Translate the `portmanteau` program from the `List` 107 | monad to the `Nondet` monad. -/ 108 | 109 | def nondetPortmanteau : List ℕ → List ℕ → Nondet (List ℕ) := 110 | sorry 111 | 112 | 113 | /- ## Question 2 (5 points + 1 bonus point): Nondeterminism, Denotationally 114 | 115 | 2.1 (2 points). Give a denotational semantics for `Nondet`, mapping it into a 116 | `List` of all results. `pure` returns one result, `fail` returns zero, and 117 | `choice` combines the results of either option. -/ 118 | 119 | def listSem {α : Type} : Nondet α → List α := 120 | sorry 121 | 122 | /- Check that the following lines give the same output as for `portmanteau` (if 123 | you have answered question 1.2): -/ 124 | 125 | #reduce listSem (nondetPortmanteau [0, 1, 2, 3] [2, 3, 4]) 126 | #reduce listSem (nondetPortmanteau [0, 1] [2, 3, 4]) 127 | #reduce listSem (nondetPortmanteau [0, 1, 2, 1, 2] [1, 2, 1, 2, 3, 4]) 128 | 129 | /- 2.2 (3 points). Often, we are not interested in getting all outcomes, just 130 | the first successful one. Give a semantics for `Nondet` that produces the first 131 | successful result, if any. Your solution should *not* use `listSem`. -/ 132 | 133 | noncomputable def optionSem {α : Type} : Nondet α → Option α := 134 | sorry 135 | 136 | /- 2.3 (1 bonus point). Prove the theorem `List_Option_compat` below, showing 137 | that the two semantics you defined are compatible. 138 | 139 | `List.head?` returns the head of a list wrapped in an `Option.some`, or 140 | `Option.none` for an empty list. It corresponds to the function we called 141 | `headOpt` in lecture 5. -/ 142 | 143 | theorem List_Option_compat {α : Type} : 144 | ∀na : Nondet α, optionSem na = List.head? (listSem na) := 145 | sorry 146 | 147 | end Nondet 148 | 149 | end LoVe 150 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe12_LogicalFoundationsOfMathematics_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. 
-/ 3 | 4 | import LoVe.LoVe12_LogicalFoundationsOfMathematics_Demo 5 | 6 | 7 | /- # LoVe Exercise 12: Logical Foundations of Mathematics 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: Vectors as Subtypes 19 | 20 | Recall the definition of vectors from the demo: -/ 21 | 22 | #check Vector 23 | 24 | /- The following function adds two lists of integers elementwise. If one 25 | function is longer than the other, the tail of the longer list is ignored. -/ 26 | 27 | def List.add : List ℤ → List ℤ → List ℤ 28 | | [], [] => [] 29 | | x :: xs, y :: ys => (x + y) :: List.add xs ys 30 | | [], y :: ys => [] 31 | | x :: xs, [] => [] 32 | 33 | /- 1.1. Show that if the lists have the same length, the resulting list also 34 | has that length. -/ 35 | 36 | theorem List.length_add : 37 | ∀xs ys, List.length xs = List.length ys → 38 | List.length (List.add xs ys) = List.length xs 39 | | [], [] => 40 | sorry 41 | | x :: xs, y :: ys => 42 | sorry 43 | | [], y :: ys => 44 | sorry 45 | | x :: xs, [] => 46 | sorry 47 | 48 | /- 1.2. Define componentwise addition on vectors using `List.add` and 49 | `List.length_add`. -/ 50 | 51 | def Vector.add {n : ℕ} : Vector ℤ n → Vector ℤ n → Vector ℤ n := 52 | sorry 53 | 54 | /- 1.3. Show that `List.add` and `Vector.add` are commutative. -/ 55 | 56 | theorem List.add.comm : 57 | ∀xs ys, List.add xs ys = List.add ys xs := 58 | sorry 59 | 60 | theorem Vector.add.comm {n : ℕ} (u v : Vector ℤ n) : 61 | Vector.add u v = Vector.add v u := 62 | sorry 63 | 64 | 65 | /- ## Question 2: Integers as Quotients 66 | 67 | Recall the construction of integers from the lecture, not to be confused with 68 | Lean's predefined type `Int` (= `ℤ`): -/ 69 | 70 | #check Int.Setoid 71 | #check Int.Setoid_Iff 72 | #check Int 73 | 74 | /- 2.1. Define negation on these integers. Observe that if `(p, n)` represents 75 | an integer, then `(n, p)` represents its negation. -/ 76 | 77 | def Int.neg : Int → Int := 78 | sorry 79 | 80 | /- 2.2. Prove the following theorems about negation. -/ 81 | 82 | theorem Int.neg_eq (p n : ℕ) : 83 | Int.neg ⟦(p, n)⟧ = ⟦(n, p)⟧ := 84 | sorry 85 | 86 | theorem int.neg_neg (a : Int) : 87 | Int.neg (Int.neg a) = a := 88 | sorry 89 | 90 | end LoVe 91 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe12_LogicalFoundationsOfMathematics_HomeworkSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe06_InductivePredicates_Demo 5 | 6 | 7 | /- # LoVe Homework 12 (10 points + 2 bonus points): 8 | # Logical Foundations of Mathematics 9 | 10 | Homework must be done individually. 11 | 12 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 13 | 14 | 15 | set_option autoImplicit false 16 | set_option tactic.hygienic false 17 | 18 | namespace LoVe 19 | 20 | 21 | /- ## Question 1 (8 points): Even Numbers as a Subtype 22 | 23 | Usually, the most convenient way to represent even natural numbers is to use the 24 | larger type `ℕ`, which also includes the odd natural numbers. If we want to 25 | quantify only over even numbers `n`, we can add an assumption `Even n` to our 26 | theorem statement. 
27 | 28 | An alternative is to encode evenness in the type, using a subtype. We will 29 | explore this approach. 30 | 31 | 1.1 (1 point). Define the type `Eveℕ` of even natural numbers, using the `Even` 32 | predicate introduced in the lecture 5 demo. -/ 33 | 34 | #print Even 35 | 36 | def Eveℕ : Type := 37 | sorry 38 | 39 | /- 1.2 (1 point). Prove the following theorem about the `Even` predicate. You will 40 | need it to answer question 1.3. 41 | 42 | Hint: The theorems `add_assoc` and `add_comm` might be useful. -/ 43 | 44 | theorem Even.add {m n : ℕ} (hm : Even m) (hn : Even n) : 45 | Even (m + n) := 46 | sorry 47 | 48 | /- 1.3 (2 points). Define zero and addition of even numbers by filling in the 49 | `sorry` placeholders. -/ 50 | 51 | def Eveℕ.zero : Eveℕ := 52 | sorry 53 | 54 | def Eveℕ.add (m n : Eveℕ) : Eveℕ := 55 | sorry 56 | 57 | /- 1.4 (4 points). Prove that addition of even numbers is commutative and 58 | associative, and has 0 as an identity element. -/ 59 | 60 | theorem Eveℕ.add_comm (m n : Eveℕ) : 61 | Eveℕ.add m n = Eveℕ.add n m := 62 | sorry 63 | 64 | theorem Eveℕ.add_assoc (l m n : Eveℕ) : 65 | Eveℕ.add (Eveℕ.add l m) n = Eveℕ.add l (Eveℕ.add m n) := 66 | sorry 67 | 68 | theorem Eveℕ.add_iden_left (n : Eveℕ) : 69 | Eveℕ.add Eveℕ.zero n = n := 70 | sorry 71 | 72 | theorem Eveℕ.add_iden_right (n : Eveℕ) : 73 | Eveℕ.add n Eveℕ.zero = n := 74 | sorry 75 | 76 | 77 | /- ## Question 2 (2 points + 2 bonus points): Hilbert Choice 78 | 79 | 2.1 (2 bonus points). Prove the following theorem. 80 | 81 | Hints: 82 | 83 | * A good way to start is to make a case distinction on whether `∃n, f n < x` 84 | is true or false. 85 | 86 | * The theorem `le_of_not_gt` might be useful. -/ 87 | 88 | theorem exists_minimal_arg_helper (f : ℕ → ℕ) : 89 | ∀x m, f m = x → ∃n, ∀i, f n ≤ f i 90 | | x, m, eq => 91 | by 92 | sorry, sorry 93 | 94 | /- Now this interesting theorem falls off: -/ 95 | 96 | theorem exists_minimal_arg (f : ℕ → ℕ) : 97 | ∃n : ℕ, ∀i : ℕ, f n ≤ f i := 98 | exists_minimal_arg_helper f _ 0 (by rfl) 99 | 100 | /- 2.2 (1 point). Use what you learned about Hilbert choice in the lecture to 101 | define the following function, which returns the (or an) index of the minimal 102 | element in `f`'s image. -/ 103 | 104 | noncomputable def minimal_arg (f : ℕ → ℕ) : ℕ := 105 | sorry 106 | 107 | /- 2.3 (1 point). Prove the following characteristic theorem about your 108 | definition. -/ 109 | 110 | theorem minimal_arg_spec (f : ℕ → ℕ) : 111 | ∀i : ℕ, f (minimal_arg f) ≤ f i := 112 | sorry 113 | 114 | end LoVe 115 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe13_BasicMathematicalStructures_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe13_BasicMathematicalStructures_Demo 5 | 6 | 7 | /- # LoVe Exercise 13: Basic Mathematical Structures 8 | 9 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 10 | 11 | 12 | set_option autoImplicit false 13 | set_option tactic.hygienic false 14 | 15 | namespace LoVe 16 | 17 | 18 | /- ## Question 1: Type Classes 19 | 20 | Recall the inductive type `Tree` we introduced in lecture 5: -/ 21 | 22 | #check Tree 23 | 24 | /- The following function takes two trees and attaches copies of the second 25 | tree to each leaf of the first tree. 
-/ 26 | 27 | def Tree.graft {α : Type} : Tree α → Tree α → Tree α 28 | | Tree.nil, u => u 29 | | Tree.node a l r, u => 30 | Tree.node a (Tree.graft l u) (Tree.graft r u) 31 | 32 | #reduce Tree.graft (Tree.node 1 Tree.nil Tree.nil) 33 | (Tree.node 2 Tree.nil Tree.nil) 34 | 35 | /- 1.1. Prove the following two theorems by structural induction on `t`. -/ 36 | 37 | theorem Tree.graft_assoc {α : Type} (t u v : Tree α) : 38 | Tree.graft (Tree.graft t u) v = Tree.graft t (Tree.graft u v) := 39 | sorry 40 | 41 | theorem Tree.graft_nil {α : Type} (t : Tree α) : 42 | Tree.graft t Tree.nil = t := 43 | sorry 44 | 45 | /- 1.2. Declare `Tree` an instance of `AddMonoid` using `graft` as the 46 | addition operator. -/ 47 | 48 | #print AddMonoid 49 | 50 | instance Tree.AddMonoid {α : Type} : AddMonoid (Tree α) := 51 | { add := 52 | sorry 53 | add_assoc := 54 | sorry 55 | zero := 56 | sorry 57 | add_zero := 58 | sorry 59 | zero_add := 60 | sorry 61 | } 62 | 63 | /- 1.3 (**optional**). Explain why `Tree` with `graft` as addition cannot be 64 | declared an instance of `AddGroup`. -/ 65 | 66 | #print AddGroup 67 | 68 | -- enter your explanation here 69 | 70 | /- 1.4 (**optional**). Prove the following theorem illustrating why `Tree` 71 | with `graft` as addition does not constitute an `AddGroup`. -/ 72 | 73 | theorem Tree.add_left_neg_counterexample : 74 | ∃x : Tree ℕ, ∀y : Tree ℕ, Tree.graft y x ≠ Tree.nil := 75 | sorry 76 | 77 | 78 | /- ## Question 2: Multisets and Finsets 79 | 80 | Recall the following definitions from the lecture: -/ 81 | 82 | #check Finset.elems 83 | #check List.elems 84 | 85 | /- 2.1. Prove that the finite set of nodes does not change when mirroring a 86 | tree. -/ 87 | 88 | theorem Finset.elems_mirror (t : Tree ℕ) : 89 | Finset.elems (mirror t) = Finset.elems t := 90 | sorry 91 | 92 | /- 2.2. Show that this does not hold for the list of nodes by providing a 93 | tree `t` for which `List.elems t ≠ List.elems (mirror t)`. 94 | 95 | If you define a suitable counterexample, the proof below will succeed. -/ 96 | 97 | def rottenTree : Tree ℕ := 98 | sorry 99 | 100 | #eval List.elems rottenTree 101 | #eval List.elems (mirror rottenTree) 102 | 103 | theorem List.elems_mirror_counterexample : 104 | ∃t : Tree ℕ, List.elems t ≠ List.elems (mirror t) := 105 | by 106 | apply Exists.intro rottenTree 107 | simp [List.elems] 108 | 109 | end LoVe 110 | -------------------------------------------------------------------------------- /lean/LoVe/LoVe14_RationalAndRealNumbers_ExerciseSheet.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import LoVe.LoVe06_InductivePredicates_Demo 5 | import LoVe.LoVe14_RationalAndRealNumbers_Demo 6 | 7 | 8 | /- # LoVe Exercise 14: Rational and Real Numbers 9 | 10 | Replace the placeholders (e.g., `:= sorry`) with your solutions. -/ 11 | 12 | 13 | set_option autoImplicit false 14 | set_option tactic.hygienic false 15 | 16 | namespace LoVe 17 | 18 | 19 | /- ## Question 1: Rationals 20 | 21 | 1.1. Prove the following theorem. 22 | 23 | Hints: 24 | 25 | * Start with case distinctions on `a` and `b`. 26 | 27 | * When the goal starts getting complicated, use `simp at *` to clean it up. -/ 28 | 29 | theorem Fraction.ext (a b : Fraction) (hnum : Fraction.num a = Fraction.num b) 30 | (hdenom : Fraction.denom a = Fraction.denom b) : 31 | a = b := 32 | sorry 33 | 34 | /- 1.2. 
Extending the `Fraction.Mul` instance from the lecture, declare 35 | `Fraction` as an instance of `Semigroup`. 36 | 37 | Hint: Use the theorem `Fraction.ext` above, and possibly `Fraction.mul_num` and 38 | `Fraction.mul_denom`. -/ 39 | 40 | #check Fraction.ext 41 | #check Fraction.mul_num 42 | #check Fraction.mul_denom 43 | 44 | instance Fraction.Semigroup : Semigroup Fraction := 45 | { Fraction.Mul with 46 | mul_assoc := 47 | sorry 48 | } 49 | 50 | /- 1.3. Extending the `Rat.Mul` instance from the lecture, declare `Rat` as an 51 | instance of `Semigroup`. -/ 52 | 53 | instance Rat.Semigroup : Semigroup Rat := 54 | { Rat.Mul with 55 | mul_assoc := 56 | sorry 57 | } 58 | 59 | end LoVe 60 | -------------------------------------------------------------------------------- /lean/LoVe/LoVelib.lean: -------------------------------------------------------------------------------- 1 | /- Copyright © 2018–2024 Anne Baanen, Alexander Bentkamp, Jasmin Blanchette, 2 | Johannes Hölzl, and Jannis Limperg. See `LICENSE.txt`. -/ 3 | 4 | import Aesop 5 | import Mathlib.Algebra.Field.Defs 6 | import Mathlib.Data.Finset.Basic 7 | import Mathlib.Tactic.LibrarySearch 8 | import Mathlib.Tactic.Linarith 9 | import Mathlib.Tactic.Ring 10 | 11 | 12 | /- # LoVelib: Logical Verification Library -/ 13 | 14 | 15 | set_option autoImplicit false 16 | set_option tactic.hygienic false 17 | 18 | open Lean 19 | open Lean.Parser 20 | open Lean.Parser.Term 21 | open Lean.Meta 22 | open Lean.Elab.Tactic 23 | open Lean.TSyntax 24 | 25 | namespace LoVe 26 | 27 | 28 | /- ## Structured Proofs -/ 29 | 30 | @[term_parser] def «fix» := 31 | leading_parser withPosition ("fix " >> many1 Term.ident >> " : " >> termParser) 32 | >> optSemicolon termParser 33 | 34 | @[term_parser] def «assume» := 35 | leading_parser withPosition ("assume " >> Term.ident >> " : " >> termParser) 36 | >> optSemicolon termParser 37 | 38 | macro_rules 39 | | `(fix $x* : $ty; $y) => `(fun $x* : $ty ↦ $y) 40 | | `(assume $h : $ty; $y) => `(fun $h : $ty ↦ $y) 41 | 42 | 43 | /- ## Natural Numbers -/ 44 | 45 | theorem Nat.two_mul (n : ℕ) : 46 | 2 * n = n + n := 47 | by ring 48 | 49 | @[simp] theorem Nat.sub_one_add (n m : ℕ) (h : ¬ n = 0) : 50 | n - 1 + m = n + m - 1 := 51 | by 52 | induction n with 53 | | zero => aesop 54 | | succ => simp 55 | 56 | @[simp] theorem Nat.le_lt_imp (m n : ℕ) (p : Prop) (hge : m ≥ n) : 57 | (m < n → p) ↔ True := 58 | by 59 | apply Iff.intro 60 | { intro himp 61 | apply True.intro } 62 | { intro htrue 63 | intro hlt 64 | have hle : n ≤ m := 65 | hge 66 | rw [←Nat.not_lt_eq] at hle 67 | aesop } 68 | 69 | @[simp] theorem Nat.lt_succ {m n : ℕ} : 70 | Nat.succ m < Nat.succ n ↔ m < n := 71 | by 72 | apply Iff.intro 73 | { apply Nat.lt_of_succ_lt_succ } 74 | { apply Nat.succ_lt_succ } 75 | 76 | @[simp] theorem Nat.le_succ {m n : ℕ} : 77 | Nat.succ m ≤ Nat.succ n ↔ m ≤ n := 78 | by 79 | apply Iff.intro 80 | { apply Nat.le_of_succ_le_succ } 81 | { apply Nat.succ_le_succ } 82 | 83 | 84 | /- ## Integers -/ 85 | 86 | @[simp] theorem Int.neg_neg : 87 | Int.neg ∘ Int.neg = id := 88 | by 89 | apply funext 90 | intro i 91 | cases i with 92 | | ofNat n => 93 | { cases n <;> 94 | aesop } 95 | | negSucc n => 96 | { aesop } 97 | 98 | 99 | /- ## Rationals -/ 100 | 101 | @[simp] theorem Rat.div_two_add_div_two (x : ℚ) : 102 | x / 2 + x / 2 = x := 103 | by ring_nf 104 | 105 | 106 | /- ## Lists -/ 107 | 108 | @[simp] theorem List.count_nil {α : Type} [BEq α] (x : α) : 109 | List.count x [] = 0 := 110 | by rfl 111 | 112 | @[simp] theorem List.count_cons {α : 
Type} [BEq α] (x a : α) (as : List α) : 113 | List.count x (a :: as) = (bif a == x then 1 else 0) + List.count x as := 114 | by 115 | cases Classical.em (a == x) with 116 | | inl hx => 117 | rw [List.count] 118 | simp [hx] 119 | ac_rfl 120 | | inr hx => 121 | rw [List.count] 122 | simp [hx] 123 | 124 | @[simp] theorem List.count_append {α : Type} [BEq α] (x : α) (as bs : List α) : 125 | List.count x (as ++ bs) = List.count x as + List.count x bs := 126 | by 127 | induction as with 128 | | nil => simp 129 | | cons a as' ih => 130 | simp [ih] 131 | ac_rfl 132 | 133 | 134 | /- ## Sets -/ 135 | 136 | @[aesop norm simp] theorem Set.subseteq_def {α : Type} (A B : Set α) : 137 | A ⊆ B ↔ ∀a, a ∈ A → a ∈ B := 138 | by rfl 139 | 140 | instance Set.PartialOrder {α : Type} : PartialOrder (Set α) := 141 | { le := fun A B ↦ A ⊆ B, 142 | lt := fun A B ↦ A ⊆ B ∧ A ≠ B, 143 | le_refl := 144 | by 145 | intro A a ha 146 | assumption 147 | le_trans := 148 | by 149 | intro A B C hAB hBC a ha 150 | aesop, 151 | lt_iff_le_not_le := 152 | by 153 | intro A B 154 | apply Iff.intro 155 | { intro hAB 156 | simp [LT.lt, LE.le] at * 157 | cases hAB with 158 | | intro hsseq hneq => 159 | apply And.intro 160 | { assumption } 161 | { intro hflip 162 | apply hneq 163 | apply Set.ext 164 | aesop } } 165 | { intro hAB 166 | simp [LT.lt, LE.le] at * 167 | aesop }, 168 | le_antisymm := 169 | by 170 | intro A B hAB hBA 171 | apply Set.ext 172 | aesop } 173 | 174 | @[simp] theorem Set.le_def {α : Type} (A B : Set α) : 175 | A ≤ B ↔ A ⊆ B := 176 | by rfl 177 | 178 | @[simp] theorem Set.lt_def {α : Type} (A B : Set α) : 179 | A < B ↔ A ⊆ B ∧ A ≠ B := 180 | by rfl 181 | 182 | inductive Set.Finite {α : Type} : Set α → Prop where 183 | | empty : Set.Finite {} 184 | | insert (a : α) (A : Set α) : Set.Finite A → Set.Finite (insert a A) 185 | 186 | 187 | /- ## Relations -/ 188 | 189 | def Id {α : Type} : Set (α × α) := 190 | {ab | Prod.snd ab = Prod.fst ab} 191 | 192 | @[simp] theorem mem_Id {α : Type} (a b : α) : 193 | (a, b) ∈ @Id α ↔ b = a := 194 | by rfl 195 | 196 | def comp {α : Type} (r₁ r₂ : Set (α × α)) : Set (α × α) := 197 | {ac | ∃b, (Prod.fst ac, b) ∈ r₁ ∧ (b, Prod.snd ac) ∈ r₂} 198 | 199 | infixl:90 " ◯ " => comp 200 | 201 | @[simp] theorem mem_comp {α : Type} (r₁ r₂ : Set (α × α)) (a b : α) : 202 | (a, b) ∈ r₁ ◯ r₂ ↔ (∃c, (a, c) ∈ r₁ ∧ (c, b) ∈ r₂) := 203 | by rfl 204 | 205 | def restrict {α : Type} (r : Set (α × α)) (p : α → Prop) :Set (α × α) := 206 | {ab | ab ∈ r ∧ p (Prod.fst ab)} 207 | 208 | infixl:90 " ⇃ " => restrict 209 | 210 | @[simp] theorem mem_restrict {α : Type} (r : Set (α × α)) 211 | (P : α → Prop) (a b : α) : 212 | (a, b) ∈ r ⇃ P ↔ (a, b) ∈ r ∧ P a := 213 | by rfl 214 | 215 | 216 | /- ## Reflexive Transitive Closure -/ 217 | 218 | inductive RTC {α : Type} (R : α → α → Prop) (a : α) : α → Prop 219 | | refl : RTC R a a 220 | | tail (b c) (hab : RTC R a b) (hbc : R b c) : RTC R a c 221 | 222 | namespace RTC 223 | 224 | theorem trans {α : Type} {R : α → α → Prop} {a b c : α} (hab : RTC R a b) 225 | (hbc : RTC R b c) : 226 | RTC R a c := 227 | by 228 | induction hbc with 229 | | refl => 230 | assumption 231 | | tail c d hbc hcd hac => 232 | apply tail <;> 233 | assumption 234 | 235 | theorem single {α : Type} {R : α → α → Prop} {a b : α} (hab : R a b) : 236 | RTC R a b := 237 | tail _ _ refl hab 238 | 239 | theorem head {α : Type} {R : α → α → Prop} (a b c : α) (hab : R a b) 240 | (hbc : RTC R b c) : 241 | RTC R a c := 242 | by 243 | induction hbc with 244 | | refl => 245 | exact tail _ _ refl hab 246 | | tail c 
d hbc hcd hac => 247 | apply tail <;> 248 | assumption 249 | 250 | theorem head_induction_on {α : Type} {R : α → α → Prop} {b : α} 251 | {P : ∀a : α, RTC R a b → Prop} {a : α} (h : RTC R a b) 252 | (refl : P b refl) 253 | (head : ∀a c (h' : R a c) (h : RTC R c b), 254 | P c h → P a (RTC.head _ _ _ h' h)) : 255 | P a h := 256 | by 257 | induction h with 258 | | refl => 259 | exact refl 260 | | tail b' c _ hb'c ih => 261 | apply ih (P := fun a hab' ↦ P a (RTC.tail _ _ hab' hb'c)) 262 | { exact head _ _ hb'c _ refl } 263 | { intro x y hxy hyb' hy 264 | exact head _ _ hxy _ hy } 265 | 266 | theorem lift {α β : Type} {R : α → α → Prop} {S : β → β → Prop} {a b : α} 267 | (f : α → β) (hf : ∀a b, R a b → S (f a) (f b)) (hab : RTC R a b) : 268 | RTC S (f a) (f b) := 269 | by 270 | induction hab with 271 | | refl => apply refl 272 | | tail b c hab hbc ih => 273 | apply tail 274 | apply ih 275 | apply hf 276 | exact hbc 277 | 278 | theorem mono {α : Type} {R R' : α → α → Prop} {a b : α} : 279 | (∀a b, R a b → R' a b) → RTC R a b → RTC R' a b := 280 | lift id 281 | 282 | theorem RTC_RTC_eq {α : Type} {R : α → α → Prop} : 283 | RTC (RTC R) = RTC R := 284 | funext 285 | (fix a : α 286 | funext 287 | (fix b : α 288 | propext (Iff.intro 289 | (assume h : RTC (RTC R) a b 290 | by 291 | induction h with 292 | | refl => exact refl 293 | | tail b c hab' hbc ih => 294 | apply trans <;> 295 | assumption) 296 | (mono 297 | (fix a b : α 298 | single))))) 299 | 300 | end RTC 301 | 302 | 303 | /- ## Setoids -/ 304 | 305 | attribute [simp] Setoid.refl 306 | 307 | 308 | /- ## Metaprogramming -/ 309 | 310 | def cases (id : FVarId) : TacticM Unit := 311 | do 312 | liftMetaTactic (fun goal ↦ 313 | do 314 | let subgoals ← MVarId.cases goal id 315 | pure (List.map (fun subgoal ↦ 316 | InductionSubgoal.mvarId (CasesSubgoal.toInductionSubgoal subgoal)) 317 | (Array.toList subgoals))) 318 | 319 | 320 | /- ## States -/ 321 | 322 | def State : Type := 323 | String → ℕ 324 | 325 | def State.update (name : String) (val : ℕ) (s : State) : State := 326 | fun name' ↦ if name' = name then val else s name' 327 | 328 | macro s:term "[" name:term "↦" val:term "]" : term => 329 | `(State.update $name $val $s) 330 | 331 | @[simp] theorem update_apply (name : String) (val : ℕ) (s : State) : 332 | (s[name ↦ val]) name = val := 333 | by 334 | apply if_pos 335 | rfl 336 | 337 | @[simp] theorem update_apply_neq (name name' : String) (val : ℕ) (s : State) 338 | (hneq : name' ≠ name) : 339 | (s[name ↦ val]) name' = s name' := 340 | by 341 | apply if_neg 342 | assumption 343 | 344 | @[simp] theorem update_override (name : String) (val₁ val₂ : ℕ) (s : State) : 345 | s[name ↦ val₂][name ↦ val₁] = s[name ↦ val₁] := 346 | by 347 | apply funext 348 | intro name' 349 | cases Classical.em (name' = name) with 350 | | inl h => simp [h] 351 | | inr h => simp [h] 352 | 353 | theorem update_swap (name₁ name₂ : String) (val₁ val₂ : ℕ) (s : State) 354 | (hneq : name₁ ≠ name₂) : 355 | -- `hneq` should have `by decide` as an auto param, but this confuses `simp`. 
356 | -- See https://github.com/leanprover/lean4/issues/3257 357 | s[name₂ ↦ val₂][name₁ ↦ val₁] = s[name₁ ↦ val₁][name₂ ↦ val₂] := 358 | by 359 | apply funext 360 | intro name' 361 | cases Classical.em (name' = name₁) with 362 | | inl h => simp [*] 363 | | inr h => 364 | cases Classical.em (name' = name₁) with 365 | | inl h => simp [*] 366 | | inr h => simp [State.update, *] 367 | 368 | @[simp] theorem update_id (name : String) (s : State) : 369 | s[name ↦ s name] = s := 370 | by 371 | apply funext 372 | intro name' 373 | simp [State.update] 374 | intro heq 375 | simp [*] 376 | 377 | @[simp] theorem update_same_const (name : String) (val : ℕ) : 378 | (fun _ ↦ val)[name ↦ val] = (fun _ ↦ val) := 379 | by 380 | apply funext 381 | simp [State.update] 382 | 383 | open private preprocessPropToDecide in Lean.Elab.Tactic.evalDecide 384 | 385 | /-- 386 | Attempt to decide the proposition `prop`. If `prop` is decidable, return a proof 387 | of either `prop` or `¬ prop`. 388 | -/ 389 | -- Inspired by Lean.Elab.Tactic.evalDecide 390 | private def decide (prop : Expr) : MetaM (Option (Sum Expr Expr)) := do 391 | let (prop, _) ← 392 | try 393 | preprocessPropToDecide prop |>.run 394 | catch _ => 395 | return none 396 | if let some decInst ← synthInstance? (mkConst ``Decidable |>.app prop) then 397 | let d := mkApp2 (.const ``Decidable.decide []) prop decInst 398 | let r ← whnfD d 399 | if r.isConstOf ``true then 400 | let rflPrf ← mkEqRefl (.const ``true []) 401 | let prf := mkApp3 (Lean.mkConst ``of_decide_eq_true) prop decInst rflPrf 402 | return some $ .inl prf 403 | else if r.isConstOf ``false then 404 | let rflPrf ← mkEqRefl (.const ``false []) 405 | let prf := mkApp3 (Lean.mkConst ``of_decide_eq_false) prop decInst rflPrf 406 | return some $ .inr prf 407 | return none 408 | 409 | 410 | private def isStrLitEq (prop : Expr) : MetaM Bool := 411 | withNewMCtxDepth do 412 | let stringType := .const ``String [] 413 | let lhs ← mkFreshExprMVar (some stringType) 414 | let rhs ← mkFreshExprMVar (some stringType) 415 | let pat ← mkEq lhs rhs 416 | if ← isDefEq prop pat then 417 | let lhs ← whnfD lhs 418 | let rhs ← whnfD rhs 419 | return lhs.isStringLit && rhs.isStringLit 420 | else 421 | return false 422 | 423 | open Lean Lean.Meta in 424 | /-- 425 | Decide equality between string literals. 426 | -/ 427 | simproc decideStrLitEq (@Eq String _ _) := λ prop => do 428 | if ← isStrLitEq prop then 429 | match ← decide prop with 430 | | .none => return .continue 431 | | .some (.inl propPrf) => 432 | let prf := mkApp2 (.const ``eq_true_intro []) prop propPrf 433 | return .visit { expr := .const ``True [], proof? := prf } 434 | | .some (.inr notPropPrf) => 435 | let prf := mkApp2 (.const ``eq_false_intro []) prop notPropPrf 436 | return .visit { expr := .const ``False [], proof? 
:= prf } 437 | return .continue 438 | 439 | example (s : State) : 440 | s["a" ↦ 0]["a" ↦ 2] = s["a" ↦ 2] := 441 | by simp 442 | 443 | example (s : State) : 444 | (s["a" ↦ 0]) "b" = s "b" := 445 | by simp 446 | 447 | example (s : State) : 448 | s["a" ↦ 0]["b" ↦ 2] = s["b" ↦ 2]["a" ↦ 0] := 449 | by simp [update_swap] 450 | 451 | example (s : State) : 452 | s["a" ↦ s "a"]["b" ↦ 0] = s["b" ↦ 0] := 453 | by simp 454 | 455 | example (s : State) : 456 | (s["a" ↦ 0]["b" ↦ 0]) "c" = s "c" := 457 | by simp (config := {decide := true}) 458 | 459 | end LoVe 460 | -------------------------------------------------------------------------------- /lean/lake-manifest.json: -------------------------------------------------------------------------------- 1 | {"version": 7, 2 | "packagesDir": ".lake/packages", 3 | "packages": 4 | [{"url": "https://github.com/leanprover/std4", 5 | "type": "git", 6 | "subDir": null, 7 | "rev": "a7543d1a6934d52086971f510e482d743fe30cf3", 8 | "name": "std", 9 | "manifestFile": "lake-manifest.json", 10 | "inputRev": "v4.6.0", 11 | "inherited": true, 12 | "configFile": "lakefile.lean"}, 13 | {"url": "https://github.com/leanprover-community/quote4", 14 | "type": "git", 15 | "subDir": null, 16 | "rev": "fd760831487e6835944e7eeed505522c9dd47563", 17 | "name": "Qq", 18 | "manifestFile": "lake-manifest.json", 19 | "inputRev": "master", 20 | "inherited": true, 21 | "configFile": "lakefile.lean"}, 22 | {"url": "https://github.com/JLimperg/aesop", 23 | "type": "git", 24 | "subDir": null, 25 | "rev": "c51fa8ea4de8b203f64929cba19d139e555f9f6b", 26 | "name": "aesop", 27 | "manifestFile": "lake-manifest.json", 28 | "inputRev": "stable", 29 | "inherited": false, 30 | "configFile": "lakefile.lean"}, 31 | {"url": "https://github.com/leanprover-community/ProofWidgets4", 32 | "type": "git", 33 | "subDir": null, 34 | "rev": "16cae05860b208925f54e5581ec5fd264823440c", 35 | "name": "proofwidgets", 36 | "manifestFile": "lake-manifest.json", 37 | "inputRev": "v0.0.29", 38 | "inherited": true, 39 | "configFile": "lakefile.lean"}, 40 | {"url": "https://github.com/leanprover/lean4-cli", 41 | "type": "git", 42 | "subDir": null, 43 | "rev": "a751d21d4b68c999accb6fc5d960538af26ad5ec", 44 | "name": "Cli", 45 | "manifestFile": "lake-manifest.json", 46 | "inputRev": "main", 47 | "inherited": true, 48 | "configFile": "lakefile.lean"}, 49 | {"url": "https://github.com/leanprover-community/import-graph.git", 50 | "type": "git", 51 | "subDir": null, 52 | "rev": "64d082eeaad1a8e6bbb7c23b7a16b85a1715a02f", 53 | "name": "importGraph", 54 | "manifestFile": "lake-manifest.json", 55 | "inputRev": "main", 56 | "inherited": true, 57 | "configFile": "lakefile.lean"}, 58 | {"url": "https://github.com/leanprover-community/mathlib4", 59 | "type": "git", 60 | "subDir": null, 61 | "rev": "7ca43cbd6aa34058a1afad8e47190af3ec1f9bdb", 62 | "name": "mathlib", 63 | "manifestFile": "lake-manifest.json", 64 | "inputRev": "stable", 65 | "inherited": false, 66 | "configFile": "lakefile.lean"}], 67 | "name": "love", 68 | "lakeDir": ".lake"} 69 | -------------------------------------------------------------------------------- /lean/lakefile.lean: -------------------------------------------------------------------------------- 1 | import Lake 2 | 3 | open Lake DSL 4 | 5 | package love 6 | 7 | @[default_target] 8 | lean_lib LoVe { 9 | roots := #[`LoVe] 10 | globs := #[Glob.submodules `LoVe] 11 | } 12 | 13 | require mathlib from git "https://github.com/leanprover-community/mathlib4" @ "stable" 14 | 
-------------------------------------------------------------------------------- /lean/lean-toolchain: -------------------------------------------------------------------------------- 1 | leanprover/lean4:v4.6.0 2 | --------------------------------------------------------------------------------