├── .gitignore
├── .travis.yml
├── CONTRIBUTING.md
├── LICENSE
├── Makefile
├── README.md
├── VERSION
├── default.nix
├── docs
│   ├── index.html
│   └── pdf
│       └── sf-idris-2018.pdf
├── nixpkgs-src.json
├── prerequisites_macOS.md
├── software_foundations.ipkg
└── src
    ├── .gitignore
    ├── Basics.lidr
    ├── Imp.lidr
    ├── ImpCEvalFun.lidr
    ├── ImpParser.lidr
    ├── IndPrinciples.lidr
    ├── IndProp.lidr
    ├── Induction.lidr
    ├── Lists.lidr
    ├── Logic.lidr
    ├── Makefile
    ├── Maps.lidr
    ├── Poly.lidr
    ├── Preface.lidr
    ├── ProofObjects.lidr
    ├── Rel.lidr
    ├── Tactics.lidr
    ├── book.tex
    ├── footer.tex
    ├── glossary.tex
    ├── latexmkrc
    └── pandoc-minted.hs
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Idris
2 | *.ibc
3 | *.o
4 |
5 |
6 | ### https://github.com/github/gitignore/blob/master/TeX.gitignore
7 |
8 | ## Core latex/pdflatex auxiliary files:
9 | *.aux
10 | *.lof
11 | *.log
12 | *.lot
13 | *.fls
14 | *.out
15 | *.toc
16 | *.fmt
17 | *.fot
18 | *.cb
19 | *.cb2
20 |
21 | ## Intermediate documents:
22 | *.dvi
23 | *-converted-to.*
24 | # these rules might exclude image files for figures etc.
25 | # *.ps
26 | # *.eps
27 | # *.pdf
28 |
29 | ## Bibliography auxiliary files (bibtex/biblatex/biber):
30 | *.bbl
31 | *.bcf
32 | *.blg
33 | *-blx.aux
34 | *-blx.bib
35 | *.brf
36 | *.run.xml
37 |
38 | ## Build tool auxiliary files:
39 | *.fdb_latexmk
40 | *.synctex
41 | *.synctex.gz
42 | *.synctex.gz(busy)
43 | *.pdfsync
44 |
45 | ## Auxiliary and intermediate files from other packages:
46 | # algorithms
47 | *.alg
48 | *.loa
49 |
50 | # achemso
51 | acs-*.bib
52 |
53 | # amsthm
54 | *.thm
55 |
56 | # beamer
57 | *.nav
58 | *.snm
59 | *.vrb
60 |
61 | # cprotect
62 | *.cpt
63 |
64 | # fixme
65 | *.lox
66 |
67 | #(r)(e)ledmac/(r)(e)ledpar
68 | *.end
69 | *.?end
70 | *.[1-9]
71 | *.[1-9][0-9]
72 | *.[1-9][0-9][0-9]
73 | *.[1-9]R
74 | *.[1-9][0-9]R
75 | *.[1-9][0-9][0-9]R
76 | *.eledsec[1-9]
77 | *.eledsec[1-9]R
78 | *.eledsec[1-9][0-9]
79 | *.eledsec[1-9][0-9]R
80 | *.eledsec[1-9][0-9][0-9]
81 | *.eledsec[1-9][0-9][0-9]R
82 |
83 | # glossaries
84 | *.acn
85 | *.acr
86 | *.glg
87 | *.glo
88 | *.gls
89 | *.glsdefs
90 |
91 | # gnuplottex
92 | *-gnuplottex-*
93 |
94 | # hyperref
95 | *.brf
96 |
97 | # knitr
98 | *-concordance.tex
99 | # TODO Comment the next line if you want to keep your tikz graphics files
100 | *.tikz
101 | *-tikzDictionary
102 |
103 | # listings
104 | *.lol
105 |
106 | # makeidx
107 | *.idx
108 | *.ilg
109 | *.ind
110 | *.ist
111 |
112 | # minitoc
113 | *.maf
114 | *.mlf
115 | *.mlt
116 | *.mtc
117 | *.mtc[0-9]
118 | *.mtc[1-9][0-9]
119 |
120 | # minted
121 | _minted*
122 | *.pyg
123 |
124 | # morewrites
125 | *.mw
126 |
127 | # mylatexformat
128 | *.fmt
129 |
130 | # nomencl
131 | *.nlo
132 |
133 | # sagetex
134 | *.sagetex.sage
135 | *.sagetex.py
136 | *.sagetex.scmd
137 |
138 | # sympy
139 | *.sout
140 | *.sympy
141 | sympy-plots-for-*.tex/
142 |
143 | # pdfcomment
144 | *.upa
145 | *.upb
146 |
147 | # pythontex
148 | *.pytxcode
149 | pythontex-files-*/
150 |
151 | # thmtools
152 | *.loe
153 |
154 | # TikZ & PGF
155 | *.dpth
156 | *.md5
157 | *.auxlock
158 |
159 | # todonotes
160 | *.tdo
161 |
162 | # xindy
163 | *.xdy
164 |
165 | # xypic precompiled matrices
166 | *.xyc
167 |
168 | # endfloat
169 | *.ttt
170 | *.fff
171 |
172 | # Latexian
173 | TSWLatexianTemp*
174 |
175 | ## Editors:
176 | # WinEdt
177 | *.bak
178 | *.sav
179 |
180 | # Texpad
181 | .texpadtmp
182 |
183 | # Kile
184 | *.backup
185 |
186 | # KBibTeX
187 | *~[0-9]*
188 |
189 | ## Generated LaTeX files:
190 | *.tex
191 | src/auto/
192 |
193 |
194 | ## Nix
195 | /result*
196 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: nix
2 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributing
2 |
3 | _TBD_ (see [issue #4] for discussion)
4 |
5 | ### General Workflow
6 |
7 | 1. [Fork the repository](https://github.com/idris-hackers/software-foundations/fork) if you haven't already.
8 | 1. Check the [open pull requests](https://github.com/idris-hackers/software-foundations/pulls) for any related work in progress (WIP).
9 | 1. Check out a new branch based on `develop`.
10 | 1. Push some commits to your fork.
11 | - The general workflow here is as follows:
12 | - Copy/paste the original text, [reformatting](#formatting) as
13 | appropriate.
14 | - Translate Coq code into (idiomatic) Idris.
15 | - Edit, augment and delete text as appropriate.
16 | _N.B. This can be done in subsequent pull requests._
17 | 1. Open a pull request (as soon as possible).
18 | - If it's not ready to be merged, but you want to _claim_ a particular task,
19 | prefix the pull request with `WIP:`.
20 | - Make a comment and remove the `WIP:` when it's ready to be reviewed and
21 | merged. _Remember: formatting the text and taking a first pass at
22 | translating the Coq to Idris is enough for an initial pull request._
 23 | 1. Open subsequent pull requests following a similar pattern for any edits or
 24 |    other updates.
25 |
26 | The `develop` branch is the _working branch_ and `master` is for _releases_,
27 | i.e. rebuilt [PDF]s and [website](https://idris-hackers.github.io/software-foundations) updates.
28 |
29 |
30 | ### Formatting
31 |
32 | When formatting the [Literate Idris] source, we use [bird tracks] for code meant
33 | to be compiled and a combination of [Markdown] and [LaTeX] for commentary and
34 | illustrative examples that aren't meant to be compiled.
35 |
36 | ````markdown
37 | = Example
38 |
39 | This is some commentary with **bold** and _italicized_ text.
40 |
41 | ```idris
42 | -- This is an Idris code block which won't be read when compiling the file.
43 | foo : Nat
44 | foo = 42
45 | ```
46 |
47 |
48 | == Code to Compile
49 |
50 | The following, however, will be compiled:
51 |
52 | > module Example
53 | >
54 | > %access public export
55 | >
56 | > foo : String
57 | > foo = "bar"
58 |
59 |
60 | == Other Notes
61 |
62 | - We can also highlight code inline, e.g. \idr{primes : Inf (List Nat)}.
63 | - To refer to glossary entries, use e.g. \mintinline[]{latex}{\gls{term}}.
64 | ````
65 |
66 | #### Chapters, Sections, et al.
67 |
68 | To denote chapters, sections, and other subdivisions, use `=` as follows:
69 |
70 | ```markdown
71 | = Chapter
72 | == Section
73 | === Subsection
74 | ==== Subsubsection
75 | ```
76 |
77 | #### Bold and Italicized Text
78 |
79 | We use the succinct Markdown syntax...
80 |
81 | ```markdown
82 | ... to format **bold** and _italicized_ text.
83 | ```
84 |
85 | #### Lists
86 |
87 | We prefer the Markdown syntax here too, e.g.
88 |
89 | ```markdown
90 | - foo
91 | - bar
92 | - baz
93 | ```
94 |
95 | #### Code Blocks
96 |
97 | Just as with bold and italicized text, we favor the more succinct Markdown
98 | syntax for (fenced) code blocks:
99 |
100 | ````markdown
101 | ```idris
102 | addTwo : Nat -> Nat
103 | addTwo x = x + 2
104 | ```
105 | ````
106 |
107 | For more information, refer to [the relevant GitHub Help document][gfm code blocks].
108 |
109 | #### Inline Code
110 |
111 | For inline Idris code, use the custom `\mintinline` shortcut `\idr`, e.g.
112 |
113 | ```tex
114 | To print ``Hello World!'' in Idris, write \idr{putStrLn "Hello, World!"}.
115 | ```
116 |
117 | For convenience, we've also defined the shortcut `\el` for inline Emacs Lisp
118 | code, e.g.
119 |
120 | ```latex
121 | Set the value of \el{foo} to \el{42}: \el{(setq foo 42)}.
122 | ```
123 |
124 | Otherwise, use single backticks for inline monospace text, e.g.
125 |
126 | ```
127 | This is some `inline monospaced text`.
128 | ```
129 |
130 | In certain cases, we might want syntax highlighting for a language other than
131 | Idris or Emacs Lisp. For such cases, use the standard `\mintinline` command,
132 | e.g.
133 |
134 | ```tex
135 | To declare a theorem in Coq, use \mintinline[]{coq}{Theorem}.
136 | ```
137 |
138 | #### Glossary
139 |
140 | We use the [glossaries package] for defining terms
141 | (in [`src/glossary.tex`][glossary.tex]) and including a glossary in
142 | the [generated PDF][PDF]. See the package documentation for more information,
143 | but here's a quick example:
144 |
145 | ```tex
146 | What is the \gls{meaning of life}?
147 |
148 |
149 | \newglossaryentry{meaning of life}{
150 | description={42}
151 | }
152 | ```
153 |
154 |
155 | ### Generating the PDF
156 |
157 | To generate the [PDF] we use [Pandoc] and [latexmk]. For more details, check out
158 | the `all.pdf`, `all.tex` and `%.tex` rules in [`src/Makefile`].
159 |
160 |
161 |
162 |
163 | [issue #4]: https://github.com/idris-hackers/software-foundations/issues/4
164 | [Literate Idris]: http://docs.idris-lang.org/en/latest/tutorial/miscellany.html#literate-programming
165 | [bird tracks]: https://wiki.haskell.org/Literate_programming#Bird_Style
166 | [Markdown]: https://daringfireball.net/projects/markdown/
167 | [LaTeX]: http://www.latex-project.org
168 | [gfm code blocks]: https://help.github.com/articles/creating-and-highlighting-code-blocks/
169 | [glossaries package]: https://www.ctan.org/pkg/glossaries
170 | [glossary.tex]: https://github.com/idris-hackers/software-foundations/blob/master/src/glossary.tex
171 | [`src/Makefile`]: https://github.com/idris-hackers/software-foundations/blob/master/src/Makefile
172 | [PDF]: https://idris-hackers.github.io/software-foundations/pdf/sf-idris-2018.pdf
173 | [Pandoc]: http://pandoc.org
174 | [latexmk]: https://www.ctan.org/pkg/latexmk/
175 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | Copyright © 2016-2018 idris-hackers team
3 |
4 | Permission is hereby granted, free of charge, to any person obtaining a copy
5 | of this software and associated documentation files (the “Software”), to deal
6 | in the Software without restriction, including without limitation the rights
7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | copies of the Software, and to permit persons to whom the Software is
9 | furnished to do so, subject to the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be included in
12 | all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 | THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | PKG := software_foundations
2 | PREFIX ?= docs
3 | IDRIS ?= idris
4 | PDF ?= sf-idris-2018.pdf
5 |
6 |
7 | .PHONY: pdf site
8 |
9 |
10 | all: pdf site
11 |
12 |
13 | build:
14 | $(IDRIS) --build $(PKG).ipkg
15 |
16 |
17 | check:
18 | $(IDRIS) --checkpkg $(PKG).ipkg
19 |
20 |
21 | pdf:
22 | $(MAKE) -C src
23 | mkdir -p ${PREFIX}/pdf
24 | mv src/all.pdf ${PREFIX}/pdf/${PDF}
25 |
26 |
27 | clean-all: clean clean-docs
28 |
29 |
30 | clean:
31 | $(IDRIS) --clean $(PKG).ipkg
32 |
33 |
34 | clean-docs:
35 | $(MAKE) -C src clean
36 | @$(RM) ${PREFIX}/index.html >/dev/null
37 |
38 |
39 | site: ${PREFIX}/index.html
40 |
41 |
42 | ${PREFIX}/index.html: README.md CONTRIBUTING.md
43 | pandoc -f gfm -t gfm -A CONTRIBUTING.md $< | \
44 | pandoc -M title='Software Foundations in Idris' \
45 | -f gfm -t html -s -o $@
46 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # _[Software Foundations][SF] in Idris_
2 |
  3 | [![Build Status](https://travis-ci.org/idris-hackers/software-foundations.svg?branch=master)](https://travis-ci.org/idris-hackers/software-foundations)
4 |
5 | :book: [Download the PDF][PDF]
6 |
7 |
8 | ## Building
9 |
10 | To rebuild the PDF, ensure the [prerequisites][prereqs] are installed, then:
11 |
12 | ```fish
13 | make pdf
14 | ```
15 |
16 |
17 | ### Prerequisites
18 |
19 | Others may work, but here are the versions I'm using.
20 |
21 | | Dependency | Version |
22 | |------------------|-----------------------------------------------|
23 | | [(run)ghc][GHC] | 8.4.3 |
24 | | [Idris][] | 1.3.0 |
25 | | [latexmk][] | 4.59 |
26 | | [GNU Make][] | 4.2.1 |
27 | | [minted][] | 2.5 |
28 | | [Iosevka][] | 1.14.3 |
29 | | [Pandoc][] | 2.2.1 |
30 | | [pandoc-types][] | 1.17.5.1 |
31 | | [Python][] | 3.6.6 |
32 | | [Pygments][] | 2.2.0 |
33 | | [XeLaTeX][] | 3.14159265-2.6-0.99999 (Web2C 2018/NixOS.org) |
34 |
35 |
36 | ### Installing prerequisites
37 |
38 | - [macOS](prerequisites_macOS.md)
39 |
40 |
41 |
42 | [SF]: http://www.cis.upenn.edu/%7Ebcpierce/sf/current/index.html
43 | [PDF]: https://idris-hackers.github.io/software-foundations/pdf/sf-idris-2018.pdf
44 | [prereqs]: #prerequisites
45 | [GHC]: https://www.haskell.org/ghc/
46 | [Idris]: https://www.idris-lang.org
47 | [latexmk]: https://www.ctan.org/pkg/latexmk/
 48 | [GNU Make]: https://www.gnu.org/software/make/
49 | [minted]: http://www.ctan.org/pkg/minted
50 | [Iosevka]: https://be5invis.github.io/Iosevka/
51 | [Pandoc]: http://pandoc.org
52 | [pandoc-types]: https://github.com/jgm/pandoc-types
53 | [Python]: https://www.python.org
54 | [Pygments]: http://pygments.org
55 | [XeLaTeX]: http://tug.org/xetex/
56 |
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 0.0.2.1
--------------------------------------------------------------------------------
/default.nix:
--------------------------------------------------------------------------------
1 | let
2 |
3 | fetchTarballFromGitHub =
4 | { owner, repo, rev, sha256, ... }:
5 | builtins.fetchTarball {
6 | url = "https://github.com/${owner}/${repo}/tarball/${rev}";
7 | inherit sha256;
8 | };
9 |
10 | fromJSONFile = f: builtins.fromJSON (builtins.readFile f);
11 |
12 | genMeta = { ghc, stdenv, ...}: with stdenv.lib; {
13 | description = "Software Foundations in Idris";
14 | homepage = https://idris-hackers.github.io/software-foundations;
15 | license = licenses.mit;
16 | maintainers = with maintainers; [ yurrriq ];
17 | inherit (ghc.meta) platforms;
18 | };
19 |
20 | in
21 |
22 | { nixpkgs ? fetchTarballFromGitHub (fromJSONFile ./nixpkgs-src.json) }:
23 |
24 | with import nixpkgs {
25 | overlays = [
26 | (self: super: {
27 | idrisPackages = super.idrisPackages // {
28 | software_foundations = with super.idrisPackages; build-idris-package {
29 | name = "software_foundations";
30 | version = builtins.readFile ./VERSION;
31 | src = ./.;
32 | idrisDeps = [ pruviloj ];
33 | meta = genMeta super;
34 | };
35 | };
36 | })
37 | (self: super: {
38 | idris = with super.idrisPackages; with-packages [
39 | base
40 | prelude
41 | pruviloj
42 | software_foundations
43 | ];
44 |
45 | pandoc = super.haskellPackages.ghcWithPackages (ps: with ps; [
46 | pandoc
47 | ]);
48 |
49 | inherit (super.pythonPackages) pygments;
50 |
51 | xelatex = super.texlive.combine {
52 | inherit (super.texlive) scheme-small
53 | amsmath
54 | datatool
55 | dirtytalk
56 | ebproof
57 | fontspec
58 | framed
59 | fvextra
60 | glossaries
61 | ifplatform
62 | latexmk
63 | lm-math
64 | mfirstuc
65 | minted
66 | newunicodechar
67 | substr
68 | todonotes
69 | xetex
70 | xfor
71 | xindy
72 | xstring;
73 | };
74 | })
75 | ];
76 | };
77 |
78 |
79 | stdenv.mkDerivation rec {
80 | name = "sf-idris-${version}";
81 | version = builtins.readFile ./VERSION;
82 | src = ./.;
83 |
84 | FONTCONFIG_FILE = makeFontsConf { fontDirectories = [ iosevka ]; };
85 |
86 | patchPhase = lib.optionalString (! lib.inNixShell) ''
87 | patchShebangs src/pandoc-minted.hs
88 | '';
89 |
90 | nativeBuildInputs = [
91 | pandoc
92 | pygments
93 | xelatex
94 | which
95 | ];
96 |
97 | buildInputs = [
98 | idris
99 | ];
100 |
101 | makeFlags = [ "PREFIX=$(out)" ];
102 |
103 | dontInstall = true;
104 |
105 | meta = (genMeta pkgs) // {
106 | platforms = lib.platforms.linux;
107 | };
108 | }
109 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
(Generated HTML for the project site, built by the top-level Makefile's `site` target from README.md and CONTRIBUTING.md; the markup was stripped in this dump, so the page body is not reproduced here.)
--------------------------------------------------------------------------------
/docs/pdf/sf-idris-2018.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/idris-hackers/software-foundations/03e178fc616e50019c4168fb488f67fbfb46fafa/docs/pdf/sf-idris-2018.pdf
--------------------------------------------------------------------------------
/nixpkgs-src.json:
--------------------------------------------------------------------------------
1 | {
2 | "owner": "NixOS",
3 | "repo": "nixpkgs-channels",
4 | "branch": "nixos-18.09",
5 | "rev": "09195057114a0a8d112c847a9a8f52957420857d",
6 | "sha256": "0hszcsvgcphjny8j0p5inhl45ja61vjiz0csb0kx0b9lzmrafr7b"
7 | }
8 |
--------------------------------------------------------------------------------
/prerequisites_macOS.md:
--------------------------------------------------------------------------------
1 | # Installing prerequisites on macOS
2 |
3 | We assume that you are using a fresh macOS installation.
4 |
5 | ## Install [Homebrew](http://brew.sh/) package manager
6 |
7 | ```shell
8 | /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
9 | ```
10 |
11 | ## Install major dependencies via Homebrew
12 |
13 | ```shell
14 | brew install idris cabal-install git ghc python wget
15 | ```
16 |
17 | ## Install [MacTeX](http://www.tug.org/mactex/) via Homebrew
18 |
19 | ```shell
20 | brew cask install mactex
21 | ```
22 |
23 | ## Install pandoc and pandoc-types via cabal
24 |
25 | ```shell
26 | cabal update
27 | cabal install pandoc pandoc-types
28 | export PATH=$HOME/.cabal/bin:$PATH
29 | ```
30 |
31 | You might want to add cabal to your `PATH` permanently.
32 |
33 | ## Install pygments
34 |
35 | ```shell
36 | pip install --user Pygments
37 | ```
38 |
39 | ## Install Iosevka font
40 |
 41 | Download the [Iosevka font](https://be5invis.github.io/Iosevka/) and put the `ttf` files into the `/Library/Fonts` directory.
42 | Reboot to let macOS find the font.
43 |
--------------------------------------------------------------------------------
/software_foundations.ipkg:
--------------------------------------------------------------------------------
1 | package software_foundations
2 |
3 | modules = Basics
4 | , Induction
5 | , Lists
6 | , Poly
7 | , Logic
8 | , IndProp
9 | , Maps
10 | , ProofObjects
11 | , Rel
12 | , Imp
13 | , ImpParser
14 | , ImpCEvalFun
15 |
16 | brief = "Software Foundations in Idris"
17 | version = 0.0.2.1
18 | readme = README.md
19 | -- NOTE: For license information see the file LICENSE.
20 |
21 | author = "The Idris community"
22 | maintainer = "Eric Bailey "
23 |
24 | homepage = https://idris-hackers.github.io/software-foundations
25 | sourceloc = git://git@github.com:idris-hackers/software-foundations.git
26 | bugtracker = https://github.com/idris-hackers/software-foundations/issues
27 |
28 | sourcedir = src
29 | opts = "--check --total -X ElabReflection"
30 | pkgs = pruviloj
31 |
--------------------------------------------------------------------------------
/src/.gitignore:
--------------------------------------------------------------------------------
1 | !book.tex
2 | !footer.tex
3 | !glossary.tex
4 |
--------------------------------------------------------------------------------
/src/ImpCEvalFun.lidr:
--------------------------------------------------------------------------------
1 | = ImpCEvalFun : Evaluation Function for Imp
2 |
3 | > module ImpCEvalFun
4 |
5 | We saw in the `Imp` chapter how a naive approach to defining a function
6 | representing evaluation for Imp runs into difficulties. There, we adopted the
7 | solution of changing from a functional to a relational definition of evaluation.
8 | In this optional chapter, we consider strategies for getting the functional
9 | approach to work.
10 |
11 | > import Logic
12 | > import Maps
13 | > import Imp
14 |
15 | > %access public export
16 | > %default total
17 |
18 | == A Broken Evaluator
19 |
20 | Here was our first try at an evaluation function for commands, omitting
21 | \idr{WHILE}.
22 |
23 | > ceval_step1 : (st : State) -> (c : Com) -> State
24 | > ceval_step1 st CSkip = st
25 | > ceval_step1 st (CAss l a1) = t_update l (aeval st a1) st
26 | > ceval_step1 st (CSeq c1 c2) =
27 | > let st' = ceval_step1 st c1
28 | > in ceval_step1 st' c2
29 | > ceval_step1 st (CIf b c1 c2) =
30 | > if beval st b
31 | > then ceval_step1 st c1
32 | > else ceval_step1 st c2
33 | > ceval_step1 st (CWhile b c) = st -- bogus
34 |
35 | As we remarked in chapter `Imp`, in a traditional functional programming
36 | language like ML or Haskell we could write the WHILE case as follows:
37 |
38 | ```idris
39 | ...
40 | ceval_step1 st (CWhile b c) =
41 | if (beval st b)
42 | then ceval_step1 st (CSeq c $ CWhile b c)
43 | else st
44 | ```
45 |
46 | Idris doesn't accept such a definition (\idr{ImpCEvalFun.ceval_step1 is possibly
47 | not total due to recursive path ImpCEvalFun.ceval_step1 -->
48 | ImpCEvalFun.ceval_step1 --> ImpCEvalFun.ceval_step1}) because the function we
49 | want to define is not guaranteed to terminate. Indeed, the changed
50 | \idr{ceval_step1} function applied to the \idr{loop} program from `Imp.lidr`
51 | would never terminate. Since Idris is not just a functional programming
52 | language, but also a consistent logic, any potentially non-terminating function
53 | needs to be rejected. Here is an invalid(!) Idris program showing what would go
54 | wrong if Idris allowed non-terminating recursive functions:
55 |
56 | ```idris
57 | loop_false : (n : Nat) -> Void
58 | loop_false n = loop_false n
59 | ```
60 |
61 | That is, propositions like \idr{Void} would become provable (e.g.,
62 | \idr{loop_false 0} would be a proof of \idr{Void}), which would be a disaster
63 | for Idris's logical consistency.
64 |
65 | Thus, because it doesn't terminate on all inputs, the full version of
66 | \idr{ceval_step1} cannot be written in Idris -- at least not without one
67 | additional trick...
68 |
69 | == A Step-Indexed Evaluator
70 |
71 | The trick we need is to pass an _additional_ parameter to the evaluation
72 | function that tells it how long to run. Informally, we start the evaluator with
73 | a certain amount of "gas" in its tank, and we allow it to run until either it
74 | terminates in the usual way _or_ it runs out of gas, at which point we simply
75 | stop evaluating and say that the final result is the empty memory. (We could
76 | also say that the result is the current state at the point where the evaluator
 77 | runs out of gas -- it doesn't really matter because the result is going to be
78 | wrong in either case!)
79 |
80 | > ceval_step2 : (st : State) -> (c : Com) -> (i : Nat) -> State
81 | > ceval_step2 _ _ Z = empty_state
82 | > ceval_step2 st CSkip (S i') = st
83 | > ceval_step2 st (CAss l a1) (S i') = t_update l (aeval st a1) st
84 | > ceval_step2 st (CSeq c1 c2) (S i') =
85 | > let st' = ceval_step2 st c1 i'
86 | > in ceval_step2 st' c2 i'
87 | > ceval_step2 st (CIf b c1 c2) (S i') =
88 | > if beval st b
89 | > then ceval_step2 st c1 i'
90 | > else ceval_step2 st c2 i'
91 | > ceval_step2 st c@(CWhile b1 c1) (S i') =
92 | > if (beval st b1)
93 | > then let st' = ceval_step2 st c1 i' in
94 | > ceval_step2 st' c i'
95 | > else st
96 |
97 | _Note_: It is tempting to think that the index \idr{i} here is counting the
98 | "number of steps of evaluation." But if you look closely you'll see that this
99 | is not the case: for example, in the rule for sequencing, the same \idr{i} is
100 | passed to both recursive calls. Understanding the exact way that \idr{i} is
101 | treated will be important in the proof of \idr{ceval__ceval_step}, which is
102 | given as an exercise below.
103 |
104 | One thing that is not so nice about this evaluator is that we can't tell, from
105 | its result, whether it stopped because the program terminated normally or
106 | because it ran out of gas. Our next version returns a \idr{Maybe State}
107 | instead of just a \idr{State}, so that we can distinguish between normal and
108 | abnormal termination.
109 |
110 | > ceval_step3 : (st : State) -> (c : Com) -> (i : Nat) -> Maybe State
111 | > ceval_step3 _ _ Z = Nothing
112 | > ceval_step3 st CSkip (S i') = Just st
113 | > ceval_step3 st (CAss l a1) (S i') = Just $ t_update l (aeval st a1) st
114 | > ceval_step3 st (CSeq c1 c2) (S i') =
115 | > case ceval_step3 st c1 i' of
116 | > Just st' => ceval_step3 st' c2 i'
117 | > Nothing => Nothing
118 | > ceval_step3 st (CIf b c1 c2) (S i') =
119 | > if beval st b
120 | > then ceval_step3 st c1 i'
121 | > else ceval_step3 st c2 i'
122 | > ceval_step3 st c@(CWhile b1 c1) (S i') =
123 | > if (beval st b1)
124 | > then case ceval_step3 st c1 i' of
125 | > Just st' => ceval_step3 st' c i'
126 | > Nothing => Nothing
127 | > else Just st
128 |
129 | We can improve the readability of this version by using the fact that \idr{Maybe} forms a
130 | monad to hide the plumbing involved in repeatedly matching against optional states.
131 |
132 | ```idris
133 | Monad Maybe where
134 | Nothing >>= k = Nothing
135 | (Just x) >>= k = k x
136 | ```
137 |
138 | > ceval_step : (st : State) -> (c : Com) -> (i : Nat) -> Maybe State
139 | > ceval_step _ _ Z = Nothing
140 | > ceval_step st CSkip (S i') = Just st
141 | > ceval_step st (CAss l a1) (S i') = Just $ t_update l (aeval st a1) st
142 | > ceval_step st (CSeq c1 c2) (S i') =
143 | > do st' <- ceval_step st c1 i'
144 | > ceval_step st' c2 i'
145 | > ceval_step st (CIf b c1 c2) (S i') =
146 | > if beval st b
147 | > then ceval_step st c1 i'
148 | > else ceval_step st c2 i'
149 | > ceval_step st c@(CWhile b1 c1) (S i') =
150 | > if (beval st b1)
151 | > then do st' <- ceval_step st c1 i'
152 | > ceval_step st' c i'
153 | > else Just st
154 |
155 | > test_ceval : (st : State) -> (c : Com) -> Maybe (Nat, Nat, Nat)
156 | > test_ceval st c = case ceval_step st c 500 of
157 | > Nothing => Nothing
158 | > Just st => Just (st X, st Y, st Z)
159 |
160 | \todo[inline]{Syntax sugar for IF breaks down here}
161 |
162 | ```idris
163 | λΠ> test_ceval Imp.empty_state (CSeq (X ::= ANum 2) (CIf (BLe (AId X) (ANum 1)) (Y ::= ANum 3) (Z ::= ANum 4)))
164 | Just (2, 0, 4) : Maybe (Nat, Nat, Nat)
165 | ```
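
By contrast, a command that never terminates simply exhausts the 500 steps of
gas that \idr{test_ceval} supplies. (This example is an addition to the text;
the result shown is what the step-indexed evaluator produces by construction.)

```idris
λΠ> test_ceval Imp.empty_state (CWhile BTrue CSkip)
Nothing : Maybe (Nat, Nat, Nat)
```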
166 |
167 | ==== Exercise: 2 stars, recommended (pup_to_n)
168 |
169 | Write an Imp program that sums the numbers from \idr{1} to \idr{X} (inclusive:
170 | \idr{1 + 2 + ... + X}) in the variable \idr{Y}. Make sure your solution
171 | satisfies the test that follows.
172 |
173 | > pup_to_n : Com
174 | > pup_to_n = ?pup_to_n_rhs
175 |
176 | > pup_to_n_1 : test_ceval (t_update X 5 $ Imp.empty_state) ImpCEvalFun.pup_to_n = Just (0, 15, 0)
177 | > pup_to_n_1 = ?pup_to_n_1 -- replace with Refl when done
178 |
179 | $\square$
180 |
181 |
182 | ==== Exercise: 2 stars, optional (peven)
183 |
184 | Write a \idr{While} program that sets \idr{Z} to \idr{0} if \idr{X} is even and
185 | sets \idr{Z} to \idr{1} otherwise. Use \idr{test_ceval} to test your program.
186 |
187 | > -- FILL IN HERE
188 |
189 | $\square$
190 |
191 | == Relational vs. Step-Indexed Evaluation
192 |
193 | As for arithmetic and boolean expressions, we'd hope that the two alternative
194 | definitions of evaluation would actually amount to the same thing in the end.
195 | This section shows that this is the case.
196 |
197 | > ceval_step__ceval : (c : Com) -> (st, st' : State) -> (i ** ceval_step st c i = Just st') -> c / st \\ st'
198 | > ceval_step__ceval c st st' (Z ** prf) = absurd prf
199 | > ceval_step__ceval CSkip st st (S i ** Refl) = E_Skip
200 | > ceval_step__ceval (CAss l a) st st' (S i ** prf) =
201 | > rewrite sym $ justInjective prf in
202 | > E_Ass {n=aeval st a} Refl
203 | > ceval_step__ceval (CSeq c1 c2) st st' (S i ** prf) with (ceval_step st c1 i) proof c1prf
204 | > ceval_step__ceval (CSeq c1 c2) st st' (S i ** prf) | Just st1 =
205 | > E_Seq (ceval_step__ceval c1 st st1 (i**sym c1prf))
206 | > (ceval_step__ceval c2 st1 st' (i**prf))
207 | > ceval_step__ceval (CSeq c1 c2) st st' (S i ** prf) | Nothing = absurd prf
208 | > ceval_step__ceval (CIf b c1 c2) st st' (S i ** prf) with (beval st b) proof bprf
209 | > ceval_step__ceval (CIf b c1 c2) st st' (S i ** prf) | True =
210 | > E_IfTrue (sym bprf) (ceval_step__ceval c1 st st' (i**prf))
211 | > ceval_step__ceval (CIf b c1 c2) st st' (S i ** prf) | False =
212 | > E_IfFalse (sym bprf) (ceval_step__ceval c2 st st' (i**prf))
213 | > ceval_step__ceval (CWhile b c) st st' (S i ** prf) with (beval st b) proof bprf
214 | > ceval_step__ceval (CWhile b c) st st' (S i ** prf) | True with (ceval_step st c i) proof cprf
215 | > ceval_step__ceval (CWhile b c) st st' (S i ** prf) | True | Just st1 =
216 | > E_WhileLoop (sym bprf) (ceval_step__ceval c st st1 (i**sym cprf))
217 |
218 | \todo[inline]{Idris can't see sigma is decreasing, use WellFounded here?}
219 |
220 | > (assert_total $ ceval_step__ceval (CWhile b c) st1 st' (i**prf))
221 | > ceval_step__ceval (CWhile b c) st st' (S i ** prf) | True | Nothing = absurd prf
222 | > ceval_step__ceval (CWhile b c) st st (S i ** Refl) | False = E_WhileEnd (sym bprf)
223 |
224 |
225 | ==== Exercise: 4 stars (ceval_step__ceval_inf)
226 |
227 | Write an informal proof of \idr{ceval_step__ceval}, following the usual
228 | template. (The template for case analysis on an inductively defined value should
229 | look the same as for induction, except that there is no induction hypothesis.)
230 | Make your proof communicate the main ideas to a human reader; do not simply
231 | transcribe the steps of the formal proof.
232 |
233 | > -- FILL IN HERE
234 |
235 | $\square$
236 |
237 | > ceval_step_more : (i1, i2 : Nat) -> (st, st' : State) -> (c : Com) -> LTE i1 i2 -> ceval_step st c i1 = Just st'
238 | > -> ceval_step st c i2 = Just st'
239 | > ceval_step_more Z i2 st st' c lte prf = absurd prf
240 | > ceval_step_more (S i1) Z st st' c lte prf = absurd lte
241 | > ceval_step_more (S i1) (S i2) st st' CSkip lte prf = prf
242 | > ceval_step_more (S i1) (S i2) st st' (CAss l a) lte prf = prf
243 | > ceval_step_more (S i1) (S i2) st st' (CSeq c1 c2) lte prf with (ceval_step st c1 i1) proof cprf
244 | > ceval_step_more (S i1) (S i2) st st' (CSeq c1 c2) lte prf | Just st1 =
245 | > rewrite ceval_step_more i1 i2 st st1 c1 (fromLteSucc lte) (sym cprf) in
246 | > ceval_step_more i1 i2 st1 st' c2 (fromLteSucc lte) prf
247 | > ceval_step_more (S i1) (S i2) st st' (CSeq c1 c2) lte prf | Nothing = absurd prf
248 | > ceval_step_more (S i1) (S i2) st st' (CIf b c1 c2) lte prf with (beval st b) proof bprf
249 | > ceval_step_more (S i1) (S i2) st st' (CIf b c1 c2) lte prf | True =
250 | > ceval_step_more i1 i2 st st' c1 (fromLteSucc lte) prf
251 | > ceval_step_more (S i1) (S i2) st st' (CIf b c1 c2) lte prf | False =
252 | > ceval_step_more i1 i2 st st' c2 (fromLteSucc lte) prf
253 | > ceval_step_more (S i1) (S i2) st st' (CWhile b c) lte prf with (beval st b)
254 | > ceval_step_more (S i1) (S i2) st st' (CWhile b c) lte prf | True with (ceval_step st c i1) proof cprf
255 | > ceval_step_more (S i1) (S i2) st st' (CWhile b c) lte prf | True | Just st1 =
256 | > rewrite ceval_step_more i1 i2 st st1 c (fromLteSucc lte) (sym cprf) in
257 | > ceval_step_more i1 i2 st1 st' (CWhile b c) (fromLteSucc lte) prf
258 | > ceval_step_more (S i1) (S i2) st st' (CWhile b c) lte prf | True | Nothing = absurd prf
259 | > ceval_step_more (S i1) (S i2) st st' (CWhile b c) lte prf | False = prf
260 |
261 |
262 | ==== Exercise: 3 stars, recommended (ceval__ceval_step)
263 |
264 | Finish the following proof. You'll need \idr{ceval_step_more} in a few places,
265 | as well as some basic facts about \idr{LTE} and \idr{S}.
266 |
267 | > ceval__ceval_step : (c : Com) -> (st, st' : State) -> (c / st \\ st') -> (i ** ceval_step st c i = Just st')
268 | > ceval__ceval_step c st st' prf = ?ceval__ceval_step_rhs
269 |
270 | $\square$
271 |
272 | > ceval_and_ceval_step_coincide : (c : Com) -> (st, st' : State) -> (c / st \\ st') <-> (i ** ceval_step st c i = Just st')
273 | > ceval_and_ceval_step_coincide c st st' = (ceval__ceval_step c st st', ceval_step__ceval c st st')
274 |
275 |
276 | == Determinism of Evaluation Again
277 |
278 | Using the fact that the relational and step-indexed definition of evaluation are
279 | the same, we can give a slicker proof that the evaluation _relation_ is
280 | deterministic.
281 |
282 | > ceval_deterministic' : (c : Com) -> (st, st1, st2 : State) -> (c / st \\ st1) -> (c / st \\ st2) -> st1 = st2
283 | > ceval_deterministic' c st st1 st2 prf1 prf2 =
284 | > let
285 | > (i1**e1) = ceval__ceval_step c st st1 prf1
286 | > (i2**e2) = ceval__ceval_step c st st2 prf2
287 | > plus1 = ceval_step_more i1 (i1+i2) st st1 c (lteAddRight i1) e1
288 | > plus2 = ceval_step_more i2 (i1+i2) st st2 c (rewrite plusCommutative i1 i2 in lteAddRight i2) e2
289 | > in
290 | > justInjective $ trans (sym plus1) plus2
291 |
--------------------------------------------------------------------------------
/src/ImpParser.lidr:
--------------------------------------------------------------------------------
1 | = ImpParser: Lexing and Parsing in Idris
2 |
3 | > module ImpParser
4 | >
5 |
6 | The development of the Imp language in `Imp.lidr` completely ignores issues of
7 | concrete syntax -- how an ASCII string that a programmer might write gets
8 | translated into abstract syntax trees defined by the datatypes \idr{AExp},
9 | \idr{BExp}, and \idr{Com}. In this chapter, we illustrate how the rest of the
10 | story can be filled in by building a simple lexical analyzer and parser using
11 | Idris's functional programming facilities.
12 |
13 | It is not important to understand all the details here (and accordingly, the
14 | explanations are fairly terse and there are no exercises). The main point is
15 | simply to demonstrate that it can be done. You are invited to look through the
16 | code -- most of it is not very complicated, though the parser relies on some
17 | "monadic" programming idioms that may require a little work to make out -- but
18 | most readers will probably want to just skim down to the `Examples` section at
19 | the very end to get the punchline.
20 |
21 | > import Maps
22 | > import Imp
23 | >
24 |
25 | == Internals
26 |
27 | === Lexical Analysis
28 |
29 | > data Chartype = White | Alpha | Digit | Other
30 | >
31 | > classifyChar : (c : Char) -> Chartype
32 | > classifyChar c =
33 | > if isSpace c then
34 | > White
35 | > else if isAlpha c then
36 | > Alpha
37 | > else if isDigit c then
38 | > Digit
39 | > else
40 | > Other
41 | >
42 | > Token : Type
43 | > Token = String
44 | >
45 | > tokenizeHelper : (cls : Chartype) -> (acc, xs : List Char) -> List (List Char)
46 | > tokenizeHelper cls acc xs =
47 | > case xs of
48 | > [] => tk
49 | > (x::xs') =>
50 | > case (cls, classifyChar x, x) of
51 | > (_, _, '(') =>
52 | > tk ++ ['('] :: (tokenizeHelper Other [] xs')
53 | > (_, _, ')') =>
54 | > tk ++ [')'] :: (tokenizeHelper Other [] xs')
55 | > (_, White, _) =>
56 | > tk ++ (tokenizeHelper White [] xs')
57 | > (Alpha, Alpha, x) =>
58 | > tokenizeHelper Alpha (x::acc) xs'
59 | > (Digit, Digit, x) =>
60 | > tokenizeHelper Digit (x::acc) xs'
61 | > (Other, Other, x) =>
62 | > tokenizeHelper Other (x::acc) xs'
63 | > (_, tp, x) =>
64 | > tk ++ (tokenizeHelper tp [x] xs')
65 | > where
66 | > tk : List (List Char)
67 | > tk = case acc of
68 | > [] => []
69 | > (_::_) => [reverse acc]
70 | >
71 | > tokenize : (s : String) -> List String
72 | > tokenize s = map pack (tokenizeHelper White [] (unpack s))
73 | >
74 | > tokenizeEx1 : tokenize "abc12==3 223*(3+(a+c))" = ["abc","12","==","3","223","*","(","3","+","(","a","+","c",")",")"]
75 | > tokenizeEx1 = Refl
76 | >
77 |
78 | === Parsing
79 |
80 | ==== Options With Errors
81 |
82 | An \idr{Option} type with error messages:
83 |
84 | > data OptionE : (x : Type) -> Type where
85 | > SomeE : x -> OptionE x
86 | > NoneE : String -> OptionE x
87 | >
88 |
89 | Some interface instances to make writing nested match-expressions on
90 | \idr{OptionE} more convenient.
91 |
92 | \todo[inline]{Explain these/link to Haskell etc?}
93 |
94 | > Functor OptionE where
95 | > map f (SomeE x) = SomeE (f x)
96 | > map _ (NoneE err) = NoneE err
97 | >
98 | > Applicative OptionE where
99 | > pure = SomeE
100 | > (SomeE f) <*> (SomeE x) = SomeE (f x)
101 | > (SomeE _) <*> (NoneE e) = NoneE e
102 | > (NoneE e) <*> _ = NoneE e
103 | >
104 | > Alternative OptionE where
105 | > empty = NoneE ""
106 | > (SomeE x) <|> _ = SomeE x
107 | > (NoneE _) <|> v = v
108 | >
109 | > Monad OptionE where
110 | > (NoneE e) >>= _ = NoneE e
111 | > (SomeE x) >>= k = k x
112 | >
113 |
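With the \idr{Monad} implementation in hand, \idr{do}-notation can replace nested
match-expressions on \idr{OptionE}. The following comparison is an added sketch
(the names \idr{addPair} and \idr{addPair'} are illustrative, not part of the
original development):

```idris
-- Without the Monad instance: match on each intermediate result by hand.
addPair : OptionE Nat -> OptionE Nat -> OptionE Nat
addPair ox oy = case ox of
                  NoneE e => NoneE e
                  SomeE x => case oy of
                               NoneE e => NoneE e
                               SomeE y => SomeE (x + y)

-- With the Monad instance: the same plumbing, expressed with do-notation.
addPair' : OptionE Nat -> OptionE Nat -> OptionE Nat
addPair' ox oy = do x <- ox
                    y <- oy
                    pure (x + y)
```
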
114 | ==== Generic Combinators for Building Parsers
115 |
116 | > Parser : (t : Type) -> Type
117 | > Parser t = List Token -> OptionE (t, List Token)
118 | >
119 | > manyHelper : (p : Parser t) -> (acc : List t) -> (steps : Nat) -> Parser (List t)
120 | > manyHelper p acc Z _ = NoneE "Too many recursive calls"
121 | > manyHelper p acc (S steps') xs with (p xs)
122 | > | NoneE _ = SomeE (reverse acc, xs)
123 | > | SomeE (t', xs') = manyHelper p (t'::acc) steps' xs'
124 | >
125 |
126 | A (step-indexed) parser that expects zero or more \idr{p}s:
127 |
128 | > many : (p : Parser t) -> (steps : Nat) -> Parser (List t)
129 | > many p steps = manyHelper p [] steps
130 | >
131 |
132 | A parser that expects a given token, followed by \idr{p}:
133 |
134 | > firstExpect : (a : Token) -> (p : Parser t) -> Parser t
135 | > firstExpect a p (x::xs) = if x == a then p xs else NoneE ("Expected '" ++ a ++ "'")
136 | > firstExpect a _ [] = NoneE ("Expected '" ++ a ++ "'")
137 | >
138 |
139 | A parser that expects a particular token:
140 |
141 | > expect : (t : Token) -> Parser ()
142 | > expect t = firstExpect t (\xs => SomeE ((), xs))
143 | >
144 |
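For instance, applied to a token list, \idr{expect} either consumes the matching
token or reports the one it wanted. (These two queries and the results shown are
an added illustration, not from the original text.)

```idris
λΠ> expect "(" ["(", "x", ")"]
SomeE ((), ["x", ")"]) : OptionE ((), List String)

λΠ> expect ")" ["(", "x", ")"]
NoneE "Expected ')'" : OptionE ((), List String)
```
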
145 | ==== A Recursive-Descent Parser for Imp
146 |
147 | Identifiers:
148 |
149 | > parseIdentifier : Parser Id
150 | > parseIdentifier [] = NoneE "Expected identifier"
151 | > parseIdentifier (x::xs) =
152 | > if all isLower (unpack x)
153 | > then SomeE (MkId x, xs)
154 | > else NoneE ("Illegal identifier:'" ++ x ++ "'")
155 | >
156 |
157 | Numbers:
158 |
159 | > parseNumber : Parser Nat
160 | > parseNumber [] = NoneE "Expected number"
161 | > parseNumber (x::xs) =
162 | > if all isDigit (unpack x)
163 | > then SomeE (foldl (\n, d => 10 * n + (cast (ord d - ord '0'))) 0 (unpack x), xs)
164 | > else NoneE "Expected number"
165 | >
166 |
167 | Parse arithmetic expressions
168 |
169 | > mutual
170 | > parsePrimaryExp : (steps : Nat) -> Parser AExp
171 | > parsePrimaryExp Z _ = NoneE "Too many recursive calls"
172 | > parsePrimaryExp (S steps') xs =
173 | > (do (i, rest) <- parseIdentifier xs
174 | > pure (AId i, rest))
175 | > <|>
176 | > (do (n, rest) <- parseNumber xs
177 | > pure (ANum n, rest))
178 | > <|>
179 | > (do (e, rest) <- firstExpect "(" (parseSumExp steps') xs
180 | > (u, rest') <- expect ")" rest
181 | > pure (e, rest'))
182 | >
183 | > parseProductExp : (steps : Nat) -> Parser AExp
184 | > parseProductExp Z _ = NoneE "Too many recursive calls"
185 | > parseProductExp (S steps') xs =
186 | > do (e, rest) <- parsePrimaryExp steps' xs
187 | > (es, rest') <- many (firstExpect "*" (parsePrimaryExp steps')) steps' rest
188 | > pure (foldl AMult e es, rest')
189 | >
190 | > parseSumExp : (steps : Nat) -> Parser AExp
191 | > parseSumExp Z _ = NoneE "Too many recursive calls"
192 | > parseSumExp (S steps') xs =
193 | > do (e, rest) <- parseProductExp steps' xs
194 | > (es, rest') <- many psum steps' rest
195 | > pure (foldl (\e0, term =>
196 | > case term of
197 | > (True, e) => APlus e0 e
198 | > (False, e) => AMinus e0 e
199 | > ) e es, rest')
200 | > where
201 | > psum : Parser (Bool, AExp)
202 | > psum xs =
203 | > let p = parseProductExp steps' in
204 | > (do (e, r) <- firstExpect "+" p xs
205 | > pure ((True, e), r))
206 | > <|>
207 | > (do (e, r) <- firstExpect "-" p xs
208 | > pure ((False, e), r))
209 | >
210 | > parseAExp : (steps : Nat) -> Parser AExp
211 | > parseAExp = parseSumExp
212 | >
213 |
214 | Parsing boolean expressions:
215 |
216 | > mutual
217 | > parseAtomicExp : (steps : Nat) -> Parser BExp
218 | > parseAtomicExp Z _ = NoneE "Too many recursive calls"
219 | > parseAtomicExp (S steps') xs =
220 | > (do (_, rest) <- expect "true" xs
221 | > pure (BTrue, rest))
222 | > <|>
223 | > (do (_, rest) <- expect "false" xs
224 | > pure (BFalse, rest))
225 | > <|>
226 | > (do (e, rest) <- firstExpect "not" (parseAtomicExp steps') xs
227 | > pure (BNot e, rest))
228 | > <|>
229 | > (do (e, rest) <- firstExpect "(" (parseConjunctionExp steps') xs
230 | > (_, rest') <- expect ")" rest
231 | > pure (e, rest'))
232 | > <|>
233 | > (do (e, rest) <- parseProductExp steps' xs
234 | > ((do (e', rest') <- firstExpect "==" (parseAExp steps') rest
235 | > pure (BEq e e', rest'))
236 | > <|>
237 | > (do (e', rest') <- firstExpect "<=" (parseAExp steps') rest
238 | > pure (BLe e e', rest'))
239 | > <|>
240 | > (NoneE "Expected '==' or '<=' after arithmetic expression")))
241 | >
242 | > parseConjunctionExp : (steps : Nat) -> Parser BExp
243 | > parseConjunctionExp Z _ = NoneE "Too many recursive calls"
244 | > parseConjunctionExp (S steps') xs =
245 | > do (e, rest) <- parseAtomicExp steps' xs
246 | > (es, rest') <- many (firstExpect "&&" (parseAtomicExp steps')) steps' rest
247 | > pure (foldl BAnd e es, rest')
248 | >
249 | > parseBExp : (steps : Nat) -> Parser BExp
250 | > parseBExp = parseConjunctionExp
251 | >
252 | > testParsing : (p : Nat -> Parser t) -> (s : String) -> OptionE (t, List Token)
253 | > testParsing p s = p 100 (tokenize s)
254 | >
255 |
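As a quick sanity check (an added example; the result shown is what the grammar
above should produce), multiplication binds tighter than addition and sums
associate to the left:

```idris
λΠ> testParsing parseAExp "1+2*3"
SomeE (APlus (ANum 1) (AMult (ANum 2) (ANum 3)), []) : OptionE (AExp, List String)
```
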
256 | \todo[inline]{The second one seems designed to fail}
257 |
258 | ```idris
259 | λΠ> testParsing parseProductExp "x*y*(x*x)*x"
260 |
261 | λΠ> testParsing parseConjunctionExp "not((x==x||x*x<=(x*x)*x)&&x==x"
262 | ```
263 |
264 | Parsing commands:
265 |
266 | > mutual
267 | > parseSimpleCommand : (steps : Nat) -> Parser Com
268 | > parseSimpleCommand Z _ = NoneE "Too many recursive calls"
269 | > parseSimpleCommand (S steps') xs =
270 | > (do (_, rest) <- expect "SKIP" xs
271 | > pure (SKIP, rest))
272 | > <|>
273 | > (do (e, rest) <- firstExpect "IF" (parseBExp steps') xs
274 | > (c, rest') <- firstExpect "THEN" (parseSequencedCommand steps') rest
275 | > (c', rest'') <- firstExpect "ELSE" (parseSequencedCommand steps') rest'
276 | > (_, rest''') <- expect "END" rest''
277 | > pure (IFB e THEN c ELSE c' FI, rest'''))
278 | > <|>
279 | > (do (e, rest) <- firstExpect "WHILE" (parseBExp steps') xs
280 | > (c, rest') <- firstExpect "DO" (parseSequencedCommand steps') rest
281 | > (_, rest'') <- expect "END" rest'
282 | > pure (WHILE e c, rest''))
283 | > <|>
284 | > (do (i, rest) <- parseIdentifier xs;
285 | > (e, rest') <- firstExpect ":=" (parseAExp steps') rest
286 | > pure (i ::= e, rest'))
287 | >
288 | > parseSequencedCommand : (steps : Nat) -> Parser Com
289 | > parseSequencedCommand Z _ = NoneE "Too many recursive calls"
290 | > parseSequencedCommand (S steps') xs =
291 | > do (c, rest) <- parseSimpleCommand steps' xs
292 | > ((do (c', rest') <- firstExpect ";;" (parseSequencedCommand steps') rest
293 | > pure ((do c; c'), rest'))
294 | > <|>
295 | > (pure (c, rest)))
296 | >
297 | > bignumber : Nat
298 | > bignumber = 1000
299 | >
300 | > parse : (str : String) -> OptionE (Com, List Token)
301 | > parse str = parseSequencedCommand bignumber (tokenize str)
302 | >
303 |
304 | == Examples
305 |
306 | ```idris
307 | λΠ> parse "IF x == y + 1 + 2 - y * 6 + 3 THEN x := x * 1;; y := 0 ELSE SKIP END"
308 | SomeE (CIf (BEq (AId (MkId "x")) (APlus (AMinus (APlus (APlus (AId (MkId "y")) (ANum 1)) (ANum 2)) (AMult (AId (MkId "y")) (ANum 6))) (ANum 3)))
309 | (CSeq (CAss (MkId "x") (AMult (AId (MkId "x")) (ANum 1))) (CAss (MkId "y") (ANum 0)))
310 | CSkip,
311 | []) : OptionE (Com, List String)
312 |
313 | λΠ> parse "SKIP;; z:=x*y*(x*x);; WHILE x==x DO IF z <= z*z && not x == 2 THEN x := z;; y := z ELSE SKIP END;; SKIP END;; x:=z"
314 | ```
315 |
316 | \todo[inline]{This one is repeated twice in the book for some reason}
317 |
318 | ```idris
319 | λΠ> parse "SKIP;; z:=x*y*(x*x);; WHILE x==x DO IF z <= z*z && not x == 2 THEN x := z;; y := z ELSE SKIP END;; SKIP END;; x:=z"
320 | SomeE (CSeq CSkip
321 | (CSeq (CAss (MkId "z") (AMult (AMult (AId (MkId "x")) (AId (MkId "y"))) (AMult (AId (MkId "x")) (AId (MkId "x")))))
322 | (CSeq (CWhile (BEq (AId (MkId "x")) (AId (MkId "x")))
323 | (CSeq (CIf (BAnd (BLe (AId (MkId "z")) (AMult (AId (MkId "z")) (AId (MkId "z")))) (BNot (BEq (AId (MkId "x")) (ANum 2))))
324 | (CSeq (CAss (MkId "x") (AId (MkId "z"))) (CAss (MkId "y") (AId (MkId "z"))))
325 | CSkip)
326 | CSkip))
327 | (CAss (MkId "x") (AId (MkId "z"))))),
328 | []) : OptionE (Com, List String)
329 | ```
--------------------------------------------------------------------------------
/src/IndPrinciples.lidr:
--------------------------------------------------------------------------------
1 | = IndPrinciples : Induction Principles
2 |
3 | > module IndPrinciples
4 | >
5 | > import IndProp
6 | >
7 | > %access public export
8 | > %default total
9 | >
10 |
11 | With the Curry-Howard correspondence and its realization in Idris in mind, we
12 | can now take a deeper look at induction principles.
13 |
14 | \ \todo[inline]{We've written all induction principles by hand throughout the
15 | chapter, Idris doesn't generate them. Edit the text to reflect this}
16 |
17 |
18 | == Basics
19 |
20 | Every time we declare a new \idr{data} type, Idris automatically generates an
21 | _induction principle_ for this type. This induction principle is a theorem like
22 | any other: If \idr{t} is defined inductively, the corresponding induction
23 | principle is called \idr{t_ind}. Here is the one for natural numbers:
24 |
25 | > nat_ind : {P : Nat -> Type} -> P Z -> ((n : Nat) -> P n -> P (S n)) ->
26 | > ((n : Nat) -> P n)
27 | > nat_ind pz _ Z = pz
28 | > nat_ind pz f (S k) = f k (nat_ind pz f k)
29 |
30 | \todo[inline]{Mention it's no coincidence it's similar to fold}
31 |
32 | The `induction` tactic is a straightforward wrapper that, at its core, simply
33 | performs `apply t_ind`. To see this more clearly, let's experiment with directly
34 | using `apply nat_ind`, instead of the `induction` tactic, to carry out some
35 | proofs. Here, for example, is an alternate proof of a theorem that we saw in the
36 | `Induction` chapter.
37 |
38 | > mult_0_r' : (n : Nat) -> n * Z = Z
39 | > mult_0_r' = nat_ind {P=\x => x*Z = Z}
40 | > Refl -- n = Z
41 | > (\k => id) -- n = S k
42 |
43 | This proof is basically the same as the earlier one, but a few minor differences
44 | are worth noting.
45 |
46 | First, in the induction step of the proof (the "\idr{S}" case), we have to do a
47 | little bookkeeping manually (the `intros`) that `induction` does automatically.
48 |
49 | Second, we do not introduce \idr{n} into the context before applying
50 | \idr{nat_ind} — the conclusion of \idr{nat_ind} is a quantified formula, and
51 | `apply` needs this conclusion to exactly match the shape of the goal state,
52 | including the quantifier. By contrast, the `induction` tactic works either with
53 | a variable in the context or a quantified variable in the goal.
54 |
55 | These conveniences make `induction` nicer to use in practice than applying
56 | induction principles like \idr{nat_ind} directly. But it is important to realize
57 | that, modulo these bits of bookkeeping, applying \idr{nat_ind} is what we are
58 | really doing.
59 |
60 |
61 | ==== Exercise: 2 stars, optional (plus_one_r')
62 |
63 | Complete this proof without using the `induction` tactic.
64 |
65 | > plus_one_r' : (n : Nat) -> n + 1 = S n
66 | > plus_one_r' n = ?plus_one_r__rhs
67 |
68 |
69 | $\square$
70 |
71 | Idris generates induction principles for every datatype defined with \idr{data},
72 | including those that aren't recursive. Although of course we don't need
73 | induction to prove properties of non-recursive datatypes, the idea of an
74 | induction principle still makes sense for them: it gives a way to prove that a
75 | property holds for all values of the type.
76 |
77 | These generated principles follow a similar pattern. If we define a type \idr{t}
78 | with constructors \idr{c1} ... \idr{cn}, Idris generates a theorem with this
79 | shape:
80 |
81 | ```idris
82 | t_ind : {P : t -> Type},
83 | ... case for c1 ... ->
84 | ... case for c2 ... -> ...
85 | ... case for cn ... ->
86 | (n : t) -> P n
87 | ```
88 |
89 | The specific shape of each case depends on the arguments to the corresponding
90 | constructor. Before trying to write down a general rule, let's look at some more
91 | examples. First, an example where the constructors take no arguments:
92 |
93 | > data YesNo = Yes' | No'
94 |
95 | > yesno_ind : {P : YesNo -> Type} -> P Yes' -> P No' ->
96 | > ((y : YesNo) -> P y)
97 | > yesno_ind px _ Yes' = px
98 | > yesno_ind _ py No' = py
99 |
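Such a principle is just case analysis: it lets us prove a property of an
arbitrary \idr{YesNo} by covering the two constructors. (The following use of
\idr{yesno_ind} is an added sketch; the name \idr{yesno_cases} is illustrative.)

```idris
yesno_cases : (y : YesNo) -> Either (y = Yes') (y = No')
yesno_cases = yesno_ind {P = \y => Either (y = Yes') (y = No')}
                (Left Refl)   -- y = Yes'
                (Right Refl)  -- y = No'
```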
100 |
101 | ==== Exercise: 1 star, optional (rgb)
102 |
103 | Write out the induction principle that Idris will generate for the following
104 | datatype. Write down your answer on paper or type it into a comment, and then
105 | compare it with what Idris prints.
106 |
107 | > data RGB = Red | Green | Blue
108 |
109 | > rgb_ind : ?rgb_ind
110 |
111 | $\square$
112 |
113 | Here's another example, this time with one of the constructors taking some
114 | arguments.
115 |
116 | > data NatList : Type where
117 | > NNil : NatList
118 | > NCons : Nat -> NatList -> NatList
119 |
120 | > natlist_ind : {P : NatList -> Type} -> P NNil ->
121 | > ((n : Nat) -> (l : NatList) -> P l -> P (NCons n l)) ->
122 | > ((l : NatList) -> P l)
123 | > natlist_ind pn _ NNil = pn
124 | > natlist_ind pn f (NCons k x) = f k x (natlist_ind pn f x)
125 |
126 |
127 | ==== Exercise: 1 star, optional (natlist1)
128 |
129 | Suppose we had written the above definition a little differently:
130 |
131 | > data NatList1 : Type where
132 | > NNil1 : NatList1
133 | > NCons1 : NatList1 -> Nat -> NatList1
134 |
135 | Now what will the induction principle look like? $\square$
136 |
137 | From these examples, we can extract this general rule:
138 |
139 | - The type declaration gives several constructors; each corresponds to one
140 | clause of the induction principle.
141 |
142 | - Each constructor \idr{c} takes argument types \idr{a1} ... \idr{an}.
143 |
144 | - Each \idr{ai} can be either \idr{t} (the datatype we are defining) or some
145 | other type \idr{s}.
146 |
147 | - The corresponding case of the induction principle says:
148 |
149 | - "For all values \idr{x1}...\idr{xn} of types \idr{a1}...\idr{an}, if
150 | \idr{P} holds for each of the inductive arguments (each \idr{xi} of type
151 | \idr{t}), then \idr{P} holds for \idr{c x1 ... xn}".
152 |
153 |
154 | ==== Exercise: 1 star, optional (byntree_ind)
155 |
156 | Write out the induction principle that Idris will generate for the following
157 | datatype. (Again, write down your answer on paper or type it into a comment, and
158 | then compare it with what Idris prints.)
159 |
160 | > data Byntree : Type where
161 | > Bempty : Byntree
162 | > Bleaf : YesNo -> Byntree
163 | > Nbranch : YesNo -> Byntree -> Byntree -> Byntree
164 |
165 | $\square$
166 |
167 |
168 | ==== Exercise: 1 star, optional (ex_set)
169 |
170 | Here is an induction principle for an inductively defined set.
171 |
172 | ```idris
173 | ExSet_ind : {P : ExSet -> Type} -> ((b : Bool) -> P (Con1 b)) ->
174 | ((n : Nat) -> (e : ExSet) -> P e -> P (Con2 n e)) ->
175 | ((e : ExSet) -> P e)
176 | ```
177 |
178 | Give an \idr{data} definition of \idr{ExSet}:
179 |
180 | > -- data ExSet : Type where
181 | > -- FILL IN HERE
182 |
183 | $\square$
184 |
185 |
186 | == Polymorphism
187 |
188 | Next, what about polymorphic datatypes?
189 |
190 | The inductive definition of polymorphic lists
191 |
192 | ```idris
193 | data List : (x : Type) -> Type where
194 | Nil : List x
195 | Cons : x -> List x -> List x
196 | ```
197 |
198 | is very similar to that of \idr{NatList}. The main difference is that, here, the
199 | whole definition is _parameterized_ on a set \idr{x}: that is, we are defining a
200 | family of inductive types \idr{List x}, one for each \idr{x}. (Note that,
201 | wherever \idr{List} appears in the body of the declaration, it is always applied
202 | to the parameter \idr{x}.) The induction principle is likewise parameterized on
203 | \idr{x}:
204 |
205 | > list_ind : {x : Type} -> {P : List x -> Type} -> P [] ->
206 | > ((a : x) -> (l : List x) -> P l -> P (a :: l)) ->
207 | > ((l : List x) -> P l)
208 | > list_ind pn _ [] = pn
209 | > list_ind pn f (h::t) = f h t (list_ind pn f t)
210 |
211 | Note that the whole induction principle is parameterized on \idr{x}. That is,
212 | \idr{list_ind} can be thought of as a polymorphic function that, when applied to
213 | a type \idr{x}, gives us back an induction principle specialized to the type
214 | \idr{List x}.
215 |
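As a quick illustration (a sketch; the lemma and the name \idr{length_app_nil}
are ours and are not defined elsewhere in the book), we can instantiate
\idr{list_ind} directly instead of writing the recursion by hand:

```idris
length_app_nil : (l : List x) -> length (l ++ []) = length l
length_app_nil = list_ind {P = \l => length (l ++ []) = length l}
  Refl                         -- l = []:     both sides reduce to Z
  (\a, l, ih => cong {f=S} ih) -- l = a :: l: both sides are S of the IH's sides
```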
216 |
217 | ==== Exercise: 1 star, optional (tree)
218 |
219 | Write out the induction principle that Idris will generate for the following
220 | datatype. Compare your answer with what Idris prints.
221 |
222 | > data Tree : (x : Type) -> Type where
223 | > Leaf : x -> Tree x
224 | > Node : Tree x -> Tree x -> Tree x
225 |
226 | > tree_ind : ?tree_ind
227 |
228 | $\square$
229 |
230 |
231 | ==== Exercise: 1 star, optional (mytype)
232 |
233 | Find an inductive definition that gives rise to the following induction
234 | principle:
235 |
236 | ```idris
237 | mytype_ind : {x : Type} -> {P : MyType x -> Type} ->
238 | ((a : x) -> P (Constr1 a)) ->
239 | ((n : Nat) -> P (Constr2 n)) ->
240 | ((m : MyType x) -> P m -> (n : Nat) -> P (Constr3 m n)) ->
241 | ((m : MyType x) -> P m)
242 | ```
243 |
244 | $\square$
245 |
246 |
247 | ==== Exercise: 1 star, optional (foo)
248 |
249 | Find an inductive definition that gives rise to the following induction
250 | principle:
251 |
252 | ```idris
253 | foo_ind : {x, y : Type} -> {P : Foo x y -> Type} ->
254 | ((a : x) -> P (Bar a)) ->
255 | ((b : y) -> P (Baz b)) ->
256 | ((f1 : Nat -> Foo x y) -> ((n : Nat) -> P (f1 n)) -> P (Quux f1)) ->
257 | ((f2 : Foo x y) -> P f2)
258 | ```
259 |
260 | $\square$
261 |
262 |
263 | ==== Exercise: 1 star, optional (foo')
264 |
265 | Consider the following inductive definition:
266 |
267 | > data Foo' : (x : Type) -> Type where
268 | > C1 : List x -> Foo' x -> Foo' x
269 | > C2 : Foo' x
270 |
271 | What induction principle will Idris generate for \idr{Foo'}? (Fill in the blanks,
272 | then check your answer with Idris.)
273 |
274 | ```idris
275 | foo'_ind : {x : Type} -> {P : Foo' x -> Type} ->
276 | ((l : List x) -> (f : Foo' x) -> ?hole1 -> ?hole2) ->
277 | ?hole3 ->
278 |            ((f : Foo' x) -> ?hole4)
279 | ```
280 |
281 | $\square$
282 |
283 |
284 | == Induction Hypotheses
285 |
286 | Where does the phrase "induction hypothesis" fit into this story?
287 |
288 | The induction principle for numbers
289 |
290 | ```idris
291 | nat_ind : {P : Nat -> Type} -> P Z -> ((n : Nat) -> P n -> P (S n)) ->
292 | ((n : Nat) -> P n)
293 | ```
294 |
295 | is a generic statement that holds for all propositions \idr{P} (or rather,
296 | strictly speaking, for all families of propositions \idr{P} indexed by a number
297 | \idr{n}). Each time we use this principle, we are choosing \idr{P} to be a
298 | particular expression of type \idr{Nat -> Type}.
299 |
300 | We can make proofs by induction more explicit by giving this expression a name.
301 | For example, instead of stating the theorem \idr{mult_0_r} as
302 | "\idr{(n : Nat) -> n * 0 = 0}," we can write it as "\idr{(n : Nat) -> P_m0r n}",
303 | where \idr{P_m0r} is defined as...
304 |
305 | > P_m0r : (n : Nat) -> Type
306 | > P_m0r n = n * Z = Z
307 |
308 | ... or equivalently:
309 |
310 | > P_m0r' : Nat -> Type
311 | > P_m0r' = \n => n * Z = Z
312 |
313 | Now it is easier to see where \idr{P_m0r} appears in the proof.
314 |
315 | > mult_0_r'' : (n: Nat) -> P_m0r n
316 | > mult_0_r'' = nat_ind {P=P_m0r}
317 | > Refl -- n = Z
318 | > (\n => id) -- n = S k
319 |
320 | This extra naming step isn't something that we do in normal proofs, but it is
321 | useful to do it explicitly for an example or two, because it allows us to see
322 | exactly what the induction hypothesis is. If we prove \idr{(n : Nat) -> P_m0r n}
323 | by induction on \idr{n} (using either `induction` or `apply nat_ind`), we see
324 | that the first subgoal requires us to prove \idr{P_m0r 0} ("\idr{P} holds for
325 | zero"), while the second subgoal requires us to prove
326 | \idr{(n' : Nat) -> P_m0r n' -> P_m0r (S n')} (that is "\idr{P} holds of
327 | \idr{S n'} if it holds of \idr{n'}" or, more elegantly, "\idr{P} is preserved by
328 | \idr{S}"). The _induction hypothesis_ is the premise of this latter implication
329 | — the assumption that \idr{P} holds of \idr{n'}, which we are allowed to use in
330 | proving that \idr{P} holds for \idr{S n'}.
331 |
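Spelled out as two standalone definitions (the names \idr{P_m0r_base} and
\idr{P_m0r_step} are ours, introduced only for illustration), the two arguments
we just passed to \idr{nat_ind} have exactly the types of these subgoals:

```idris
P_m0r_base : P_m0r Z
P_m0r_base = Refl

-- The premise of type `P_m0r n'` is the induction hypothesis.  Here it is
-- already exactly what we need, since `S n' * 0` reduces to `n' * 0`.
P_m0r_step : (n' : Nat) -> P_m0r n' -> P_m0r (S n')
P_m0r_step n' ih = ih

-- With these names, the proof above could equally be written as
--   mult_0_r''' : (n : Nat) -> P_m0r n
--   mult_0_r''' = nat_ind {P=P_m0r} P_m0r_base P_m0r_step
```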
332 |
333 | == More on the `induction` Tactic
334 |
335 | The `induction` tactic actually does even more low-level bookkeeping for us than
336 | we discussed above.
337 |
338 | Recall the informal statement of the induction principle for natural numbers:
339 |
340 | - If \idr{P n} is some proposition involving a natural number \idr{n}, and we
341 | want to show that \idr{P} holds for all numbers \idr{n}, we can reason like
342 | this:
343 |
344 | - show that \idr{P Z} holds
345 |
346 | - show that, if \idr{P n'} holds, then so does \idr{P (S n')}
347 |
348 | - conclude that \idr{P n} holds for all \idr{n}.
349 |
350 | So, when we begin a proof with `intros n` and then `induction n`, we are first
351 | telling Idris to consider a _particular_ `n` (by introducing it into the
352 | context) and then telling it to prove something about _all_ numbers (by using
353 | induction).
354 |
355 | What Idris actually does in this situation, internally, is to "re-generalize"
356 | the variable we perform induction on. For example, in our original proof that
357 | \idr{plus} is associative...
358 |
359 | ```coq
360 | Theorem plus_assoc' : forall n m p : nat,
361 | n + (m + p) = (n + m) + p.
362 | Proof.
363 | (* ...we first introduce all 3 variables into the context,
364 | which amounts to saying "Consider an arbitrary n, m, and
365 | p..." *)
366 | intros n m p.
367 | (* ...We now use the induction tactic to prove P n (that
368 | is, n + (m + p) = (n + m) + p) for _all_ n,
369 | and hence also for the particular n that is in the context
370 | at the moment. *)
371 | induction n as [| n'].
372 |   - (* n = O *) reflexivity.
373 | - (* n = S n' *)
374 | (* In the second subgoal generated by induction -- the
375 | "inductive step" -- we must prove that P n' implies
376 | P (S n') for all n'. The induction tactic
377 | automatically introduces n' and P n' into the context
378 | for us, leaving just P (S n') as the goal. *)
379 |     simpl. rewrite -> IHn'. reflexivity. Qed.
380 | ```
381 |
382 | It also works to apply `induction` to a variable that is quantified in the goal.
383 |
384 | ```coq
385 | Theorem plus_comm' : forall n m : nat,
386 | n + m = m + n.
387 | Proof.
388 | induction n as [| n'].
389 |   - (* n = O *) intros m. rewrite <- plus_n_O. reflexivity.
390 | - (* n = S n' *) intros m. simpl. rewrite -> IHn'.
391 |     rewrite <- plus_n_Sm. reflexivity. Qed.
392 | ```
393 |
394 | Note that `induction n` leaves `m` still bound in the goal — i.e., what we are
395 | proving inductively is a statement beginning with `forall m`.
396 |
397 | If we do `induction` on a variable that is quantified in the goal _after_ some
398 | other quantifiers, the `induction` tactic will automatically introduce the
399 | variables bound by these quantifiers into the context.
400 |
401 | ```coq
402 | Theorem plus_comm'' : forall n m : nat,
403 | n + m = m + n.
404 | Proof.
405 | (* Let's do induction on m this time, instead of n... *)
406 | induction m as [| m'].
407 |   - (* m = O *) simpl. rewrite <- plus_n_O. reflexivity.
408 | - (* m = S m' *) simpl. rewrite <- IHm'.
409 |     rewrite <- plus_n_Sm. reflexivity. Qed.
410 | ```
411 |
412 |
413 | ==== Exercise: 1 star, optional (plus_explicit_prop)
414 |
415 | Rewrite both \idr{plus_assoc'} and \idr{plus_comm'} and their proofs in the same
416 | style as \idr{mult_0_r''} above — that is, for each theorem, give an explicit
417 | definition of the proposition being proved by induction, and state the theorem
418 | and proof in terms of this defined proposition.
419 |
420 | > -- FILL IN HERE
421 |
422 | $\square$
423 |
424 |
425 | == Induction Principles in \idr{Type}
426 |
427 | Earlier, we looked in detail at the induction principles that Idris generates
428 | for inductively defined _sets_. The induction principles for inductively defined
429 | _propositions_ like \idr{Ev} are a tiny bit more complicated. As with all
430 | induction principles, we want to use the induction principle on \idr{Ev} to
431 | prove things by inductively considering the possible shapes that something in
432 | \idr{Ev} can have. Intuitively speaking, however, what we want to prove are not
433 | statements about evidence but statements about _numbers_: accordingly, we want
434 | an induction principle that lets us prove properties of numbers by induction on
435 | evidence.
436 |
437 | For example, from what we've said so far, you might expect the inductive
438 | definition of \idr{Ev}...
439 |
440 | ```idris
441 | data Ev : Nat -> Type where
442 | Ev_0 : Ev Z
443 | Ev_SS : Ev n -> Ev (S (S n))
444 | ```
445 |
446 | ...to give rise to an induction principle that looks like this...
447 |
448 | > ev_ind_max : {P : {n : Nat} -> Ev n -> Type} ->
449 | > P {n=Z} Ev_0 ->
450 | > ((m : Nat) -> (e : Ev m) -> P {n=m} e -> P {n=S (S m)} (Ev_SS e)) ->
451 | > ((n : Nat) -> (e : Ev n) -> P {n} e)
452 |
453 | ... because:
454 |
455 | - Since \idr{Ev} is indexed by a number \idr{n} (every \idr{Ev} object \idr{e}
456 | is a piece of evidence that some particular number \idr{n} is even), the
457 | proposition \idr{P} is parameterized by both \idr{n} and \idr{e} — that is,
458 | the induction principle can be used to prove assertions involving both an
459 | even number and the evidence that it is even.
460 |
461 | - Since there are two ways of giving evidence of evenness (\idr{Ev} has two
462 | constructors), applying the induction principle generates two subgoals:
463 |
464 | - We must prove that \idr{P} holds for \idr{Z} and \idr{Ev_0}.
465 |
466 |     - We must prove that, whenever \idr{n} is an even number and \idr{e} is
467 |       evidence of its evenness, if \idr{P} holds of \idr{n} and \idr{e}, then it
468 |       also holds of \idr{S (S n)} and \idr{Ev_SS e}.
469 |
470 | - If these subgoals can be proved, then the induction principle tells us that
471 | \idr{P} is true for all even numbers \idr{n} and evidence \idr{e} of their
472 | evenness.
473 |
474 | This is more flexibility than we normally need or want: it is giving us a way to
475 | prove logical assertions where the assertion involves properties of some piece
476 | of _evidence_ of evenness, while all we really care about is proving properties
477 | of _numbers_ that are even — we are interested in assertions about numbers, not
478 | about evidence. It would therefore be more convenient to have an induction
479 | principle for proving propositions \idr{P} that are parameterized just by
480 | \idr{n} and whose conclusion establishes \idr{P} for all even numbers \idr{n}:
481 |
482 | ```idris
483 | {P : Nat -> Type} ->
484 | ... ->
485 | (n : Nat) ->
486 | Ev n -> P n
487 | ```
488 |
489 | For this reason, Idris actually generates the following simplified induction
490 | principle for \idr{Ev}:
491 |
492 | > ev_ind : {P : Nat -> Type} -> P Z ->
493 | > ((n : Nat) -> Ev n -> P n -> P (S (S n))) ->
494 | > ((n : Nat) -> Ev n -> P n)
495 | > ev_ind pz _ Z Ev_0 = pz
496 | > ev_ind pz f (S (S k)) (Ev_SS ev) = f k ev (ev_ind pz f k ev)
497 |
498 | In particular, Idris has dropped the evidence term \idr{e} as a parameter of
499 | the proposition \idr{P}.
500 |
501 | In English, \idr{ev_ind} says:
502 |
503 | - Suppose \idr{P} is a property of natural numbers (that is, \idr{P n} is a
504 | \idr{Type} for every \idr{n}). To show that \idr{P n} holds whenever \idr{n}
505 | is even, it suffices to show:
506 |
507 | - \idr{P} holds for \idr{Z},
508 |
509 | - for any \idr{n}, if \idr{n} is even and \idr{P} holds for \idr{n}, then
510 | \idr{P} holds for \idr{S (S n)}.
511 |
512 | As expected, we can apply \idr{ev_ind} directly instead of using `induction`. For
513 | example, we can use it to show that \idr{Ev'} (the slightly awkward alternate
514 | definition of evenness that we saw in an exercise in the `IndProp` chapter)
515 | is equivalent to the cleaner inductive definition \idr{Ev}:
516 |
517 | > ev_ev' : Ev n -> Ev' n
518 | > ev_ev' {n} = ev_ind {P=Ev'}
519 | > Ev'_0
520 | > (\_, _, ev' => Ev'_sum Ev'_2 ev')
521 | > n
522 |
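For reference, \idr{Ev'} was defined in that exercise roughly as follows (the
constructor names are the ones used above; see the `IndProp` chapter for the
exact definition):

```idris
data Ev' : Nat -> Type where
  Ev'_0   : Ev' Z
  Ev'_2   : Ev' 2
  Ev'_sum : Ev' n -> Ev' m -> Ev' (n + m)
```
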
523 | The precise form of a \idr{data} definition can affect the induction principle
524 | Idris generates.
525 |
526 | For example, in chapter `IndProp`, we defined \idr{Le} as:
527 |
528 | ```idris
529 | data Le : Nat -> Nat -> Type where
530 | Le_n : Le n n
531 | Le_S : Le n m -> Le n (S m)
532 | ```
533 |
534 | This definition can be streamlined a little by observing that the left-hand
535 | argument \idr{n} is the same everywhere in the definition, so we can actually
536 | make it a "general parameter" to the whole definition, rather than an argument
537 | to each constructor.
538 |
539 | \todo[inline]{This doesn't actually seem to change anything in our case}
540 |
541 | ```idris
542 | data Le : (n:Nat) -> Nat -> Type where
543 | Le_n : Le n n
544 | Le_S : Le n m -> Le n (S m)
545 | ```
546 |
547 | The second one is better, even though it looks less symmetric. Why? Because it
548 | gives us a simpler induction principle.
549 |
550 | > le_ind : {n : Nat} -> {P : Nat -> Type} -> P n ->
551 | > ((m : Nat) -> (n <=' m) -> P m -> P (S m)) ->
552 | > ((n0 : Nat) -> (n <=' n0) -> P n0)
553 | > le_ind {n=n0} pn _ n0 Le_n = pn
554 | > le_ind pn f (S m) (Le_S le) = f m le (le_ind pn f m le)
555 |
556 |
557 | == Formal vs. Informal Proofs by Induction
558 |
559 | Question: What is the relation between a formal proof of a proposition \idr{P}
560 | and an informal proof of the same proposition \idr{P}?
561 |
562 | Answer: The latter should _teach_ the reader how to produce the former.
563 |
564 | Question: How much detail is needed??
565 |
566 | Unfortunately, there is no single right answer; rather, there is a range of
567 | choices.
568 |
569 | At one end of the spectrum, we can essentially give the reader the whole formal
570 | proof (i.e., the "informal" proof will amount to just transcribing the formal
571 | one into words). This may give the reader the ability to reproduce the formal
572 | one for themselves, but it probably doesn't _teach_ them anything much.
573 |
574 | At the other end of the spectrum, we can say "The theorem is true and you can
575 | figure out why for yourself if you think about it hard enough." This is also not
576 | a good teaching strategy, because often writing the proof requires one or more
577 | significant insights into the thing we're proving, and most readers will give up
578 | before they rediscover all the same insights as we did.
579 |
580 | In the middle is the golden mean — a proof that includes all of the essential
581 | insights (saving the reader the hard work that we went through to find the proof
582 | in the first place) plus high-level suggestions for the more routine parts to
583 | save the reader from spending too much time reconstructing these (e.g., what the
584 | \idr{ih} says and what must be shown in each case of an inductive proof), but
585 | not so much detail that the main ideas are obscured.
586 |
587 | Since we've spent much of this chapter looking "under the hood" at formal proofs
588 | by induction, now is a good moment to talk a little about _informal_ proofs by
589 | induction.
590 |
591 | \todo[inline]{Do we need 2 templates here?}
592 |
593 | In the real world of mathematical communication, written proofs range from
594 | extremely longwinded and pedantic to extremely brief and telegraphic. Although
595 | the ideal is somewhere in between, while one is getting used to the style it is
596 | better to start out at the pedantic end. Also, during the learning phase, it is
597 | probably helpful to have a clear standard to compare against. With this in mind,
598 | we offer two templates — one for proofs by induction over _data_ (i.e., where
599 | the thing we're doing induction on lives in \idr{Type}) and one for proofs by
600 | induction over _evidence_ (i.e., where the inductively defined thing is
601 | evidence for a proposition, which in Idris also lives in \idr{Type}).
602 |
603 |
604 | === Induction Over an Inductively Defined Set
605 |
606 | _Template_:
607 |
608 | - _Theorem_: <Universally quantified proposition of the form "For all
609 |   \idr{n : s}, \idr{P n}," where \idr{s} is some inductively defined set.>
610 |
611 | _Proof_: By induction on \idr{n}.
612 |
613 |   <one case for each constructor \idr{c} of \idr{s}...>
614 |
615 | - Suppose \idr{n = c a1 ... ak}, where <...and here we state the \idr{IH}
616 | for each of the \idr{a}'s that has type \idr{s}, if any>. We must show
617 | <...and here we restate \idr{P(c a1 ... ak)}>.
618 |
619 |   <other cases similarly...>
620 |
621 | - $\square$
622 |
623 | _Example_:
624 |
625 | - _Theorem_: For all sets \idr{x}, lists \idr{l : List x}, and numbers
626 | \idr{n}, if \idr{length l = n} then \idr{index (S n) l = None}.
627 |
628 | _Proof_: By induction on \idr{l}.
629 |
630 | - Suppose \idr{l = []}. We must show, for all numbers \idr{n}, that, if
631 | \idr{length [] = n}, then \idr{index (S n) [] = None}.
632 |
633 | This follows immediately from the definition of \idr{index}.
634 |
635 | - Suppose \idr{l = x :: l'} for some \idr{x} and \idr{l'}, where
636 | \idr{length l' = n'} implies \idr{index (S n') l' = None}, for any number
637 | \idr{n'}. We must show, for all \idr{n}, that, if \idr{length (x::l') = n}
638 | then \idr{index (S n) (x::l') = None}.
639 |
640 | Let \idr{n} be a number with \idr{length l = n}. Since
641 | \idr{length l = length (x::l') = S (length l')},
642 | it suffices to show that
643 | \idr{index (S (length l')) l' = None}.
644 | But this follows directly from the induction hypothesis, picking \idr{n'}
645 | to be \idr{length l'}. $\square$
646 |
647 |
648 | === Induction Over an Inductively Defined Proposition
649 |
650 | Since inductively defined proof objects are often called "derivation trees,"
651 | this form of proof is also known as _induction on derivations_.
652 |
653 | _Template_:
654 |
655 | - _Theorem_: <Proposition of the form "\idr{Q -> P}," where \idr{Q} is some
656 |   inductively defined proposition (more generally, "For all \idr{x} \idr{y}
657 |   \idr{z}, \idr{Q x y z -> P x y z}")>
658 |
659 |   _Proof_: By induction on a derivation of \idr{Q}. <Or, more generally,
660 |   "Suppose we are given \idr{x}, \idr{y}, and \idr{z}. We show, by induction
661 |   on a derivation of \idr{Q x y z}, that \idr{P x y z}"...>
662 |
663 |   <one case for each constructor \idr{c} of \idr{Q}...>
664 |
665 |
666 | - Suppose the final rule used to show \idr{Q} is \idr{c}. Then <...and
667 | here we state the types of all of the \idr{a}'s together with any
668 | equalities that follow from the definition of the constructor and the IH
669 | for each of the \idr{a}'s that has type \idr{Q}, if there are any>. We
670 | must show <...and here we restate \idr{P}>.
671 |
672 |   <other cases similarly...>
673 |
674 | $\square$
675 |
676 | _Example_:
677 |
678 | - _Theorem_: The \idr{<=} relation is transitive — i.e., for all numbers
679 | \idr{n}, \idr{m}, and \idr{o}, if \idr{n <= m} and \idr{m <= o}, then
680 | \idr{n <= o}.
681 |
682 | _Proof_: By induction on a derivation of \idr{m <= o}.
683 |
684 | - Suppose the final rule used to show \idr{m <= o} is \idr{Le_n}. Then
685 | \idr{m = o} and we must show that \idr{n <= m}, which is immediate by
686 | hypothesis.
687 |
688 | - Suppose the final rule used to show \idr{m <= o} is \idr{Le_S}. Then
689 | \idr{o = S o'} for some \idr{o'} with \idr{m <= o'}. We must show that
690 | \idr{n <= S o'}. By induction hypothesis, \idr{n <= o'}.
691 |
692 | But then, by \idr{Le_S}, \idr{n <= S o'}. $\square$
693 |
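For comparison, here is a sketch of the same argument carried out formally by
applying \idr{le_ind} from earlier in this chapter (the name \idr{le_trans'} is
ours; the two cases of the informal proof become the two arguments):

```idris
le_trans' : Le n m -> Le m o -> Le n o
le_trans' {n} {m} {o} lenm lemo =
  le_ind {n=m} {P=Le n}
    lenm                     -- final rule Le_n: then o = m, and Le n m is a hypothesis
    (\o', _, ih => Le_S ih)  -- final rule Le_S: o = S o', and the IH gives Le n o'
    o lemo
```
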
--------------------------------------------------------------------------------
/src/Induction.lidr:
--------------------------------------------------------------------------------
1 | = Induction : Proof by Induction
2 |
3 | > module Induction
4 |
5 | First, we import all of our definitions from the previous chapter.
6 |
7 | > import Basics
8 |
9 | Next, we import the following \idr{Prelude} modules, since we'll be dealing with
10 | natural numbers.
11 |
12 | > import Prelude.Interfaces
13 | > import Prelude.Nat
14 |
15 | For \idr{import Basics} to work, you first need to use `idris` to compile
16 | `Basics.lidr` into `Basics.ibc`. This is like making a .class file from a .java
17 | file, or a .o file from a .c file. There are at least two ways to do it:
18 |
19 | - In your editor with an Idris plugin, e.g. [Emacs][idris-mode]:
20 |
21 | Open `Basics.lidr`. Evaluate \idr{idris-load-file}.
22 |
23 | There exists similar support for [Vim][idris-vim], [Sublime
24 | Text][idris-sublime] and [Visual Studio Code][idris-vscode] as well.
25 |
26 | [idris-mode]: https://github.com/idris-hackers/idris-mode
27 | [idris-vim]: https://github.com/idris-hackers/idris-vim
28 | [idris-sublime]: https://github.com/idris-hackers/idris-sublime
29 | [idris-vscode]: https://github.com/zjhmale/vscode-idris
30 |
31 | - From the command line:
32 |
33 | Run \mintinline[]{sh}{idris --check --total --noprelude src/Basics.lidr}.
34 |
35 | Refer to the Idris man page (or \mintinline[]{sh}{idris --help}) for
36 | descriptions of the flags.
37 |
38 | > %access public export
39 |
40 | > %default total
41 |
42 |
43 | == Proof by Induction
44 |
45 | We proved in the last chapter that \idr{0} is a neutral element for \idr{+} on
46 | the left using an easy argument based on simplification. The fact that it is
47 | also a neutral element on the _right_...
48 |
49 | ```coq
50 | Theorem plus_n_O_firsttry : forall n:nat,
51 | n = n + 0.
52 | ```
53 |
54 | ... cannot be proved in the same simple way in Coq, but as we saw in `Basics`,
55 | Idris's \idr{Refl} just works.
56 |
57 | To prove interesting facts about numbers, lists, and other inductively defined
58 | sets, we usually need a more powerful reasoning principle: _induction_.
59 |
60 | Recall (from high school, a discrete math course, etc.) the principle of
61 | induction over natural numbers: If \idr{p n} is some proposition involving a
62 | natural number \idr{n} and we want to show that \idr{p} holds for _all_ numbers
63 | \idr{n}, we can reason like this:
64 |
65 | - show that \idr{p Z} holds;
66 | - show that, for any \idr{k}, if \idr{p k} holds, then so does \idr{p (S k)};
67 | - conclude that \idr{p n} holds for all \idr{n}.
68 |
69 | In Idris, the steps are the same and can often be written as function clauses by
70 | case splitting. Here's how this works for the theorem at hand.
71 |
72 | > plus_n_Z : (n : Nat) -> n = n + 0
73 | > plus_n_Z Z = Refl
74 | > plus_n_Z (S k) =
75 | > let inductiveHypothesis = plus_n_Z k in
76 | > rewrite inductiveHypothesis in Refl
77 |
78 | In the first clause, \idr{n} is replaced by \idr{Z} and the goal becomes \idr{0
79 | = 0}, which follows by \idr{Refl}exivity. In the second, \idr{n} is replaced by
80 | \idr{S k} and the goal becomes \idr{S k = S (plus k 0)}. Then we define the
81 | inductive hypothesis, \idr{k = k + 0}, which can be written as \idr{plus_n_Z k},
82 | and the goal follows from it.
83 |
84 | > minus_diag : (n : Nat) -> minus n n = 0
85 | > minus_diag Z = Refl
86 | > minus_diag (S k) = minus_diag k
87 |
88 |
89 | ==== Exercise: 2 stars, recommended (basic_induction)
90 |
91 | Prove the following using induction. You might need previously proven results.
92 |
93 | > mult_0_r : (n : Nat) -> n * 0 = 0
94 | > mult_0_r n = ?mult_0_r_rhs
95 |
96 | > plus_n_Sm : (n, m : Nat) -> S (n + m) = n + (S m)
97 | > plus_n_Sm n m = ?plus_n_Sm_rhs
98 |
99 | > plus_comm : (n, m : Nat) -> n + m = m + n
100 | > plus_comm n m = ?plus_comm_rhs
101 |
102 | > plus_assoc : (n, m, p : Nat) -> n + (m + p) = (n + m) + p
103 | > plus_assoc n m p = ?plus_assoc_rhs
104 |
105 | $\square$
106 |
107 |
108 | ==== Exercise: 2 stars (double_plus)
109 |
110 | Consider the following function, which doubles its argument:
111 |
112 | > double : (n : Nat) -> Nat
113 | > double Z = Z
114 | > double (S k) = S (S (double k))
115 |
116 | Use induction to prove this simple fact about \idr{double}:
117 |
118 | > double_plus : (n : Nat) -> double n = n + n
119 | > double_plus n = ?double_plus_rhs
120 |
121 | $\square$
122 |
123 |
124 | ==== Exercise: 2 stars, optional (evenb_S)
125 |
126 | One inconvenient aspect of our definition of \idr{evenb n} is that it may need
127 | to perform a recursive call on \idr{n - 2}. This makes proofs about \idr{evenb
128 | n} harder when done by induction on \idr{n}, since we may need an induction
129 | hypothesis about \idr{n - 2}. The following lemma gives a better
130 | characterization of \idr{evenb (S n)}:
131 |
132 | > evenb_S : (n : Nat) -> evenb (S n) = not (evenb n)
133 | > evenb_S n = ?evenb_S_rhs
134 |
135 | $\square$
136 |
137 |
138 | == Proofs Within Proofs
139 |
140 | \ \todo[inline]{Edit the section}
141 |
142 | In Coq, as in informal mathematics, large proofs are often broken into a
143 | sequence of theorems, with later proofs referring to earlier theorems. But
144 | sometimes a proof will require some miscellaneous fact that is too trivial and
145 | of too little general interest to bother giving it its own top-level name. In
146 | such cases, it is convenient to be able to simply state and prove the needed
147 | "sub-theorem" right at the point where it is used. The `assert` tactic allows us
148 | to do this. For example, our earlier proof of the \idr{mult_0_plus} theorem
149 | referred to a previous theorem named \idr{plus_Z_n}. We could instead use
150 | `assert` to state and prove \idr{plus_Z_n} in-line:
151 |
152 | > mult_0_plus' : (n, m : Nat) -> (0 + n) * m = n * m
153 | > mult_0_plus' n m = Refl
154 |
155 | The `assert` tactic introduces two sub-goals. The first is the assertion itself;
156 | by prefixing it with `H:` we name the assertion `H`. (We can also name the
157 | assertion with `as` just as we did above with `destruct` and `induction`, i.e.,
158 | `assert (0 + n = n) as H`.) Note that we surround the proof of this assertion
159 | with curly braces `{ ... }`, both for readability and so that, when using Coq
160 | interactively, we can see more easily when we have finished this sub-proof. The
161 | second goal is the same as the one at the point where we invoke `assert` except
162 | that, in the context, we now have the assumption `H` that `0 + n = n`. That is,
163 | `assert` generates one subgoal where we must prove the asserted fact and a
164 | second subgoal where we can use the asserted fact to make progress on whatever
165 | we were trying to prove in the first place.
166 |
167 | The `assert` tactic is handy in many sorts of situations. For example, suppose
168 | we want to prove that `(n + m) + (p + q) = (m + n) + (p + q)`. The only
169 | difference between the two sides of the `=` is that the arguments `m` and `n` to
170 | the first inner `+` are swapped, so it seems we should be able to use the
171 | commutativity of addition (`plus_comm`) to rewrite one into the other. However,
172 | the `rewrite` tactic is a little stupid about _where_ it applies the rewrite.
173 | There are three uses of `+` here, and it turns out that doing `rewrite ->
174 | plus_comm` will affect only the _outer_ one...
175 |
176 | ```idris
177 | plus_rearrange_firsttry : (n, m, p, q : Nat) ->
178 | (n + m) + (p + q) = (m + n) + (p + q)
179 | plus_rearrange_firsttry n m p q = rewrite plus_comm in Refl
180 | ```
181 | ```
182 | When checking right hand side of plus_rearrange_firsttry with expected type
183 | n + m + (p + q) = m + n + (p + q)
184 |
185 | _ does not have an equality type ((n1 : Nat) ->
186 | (n1 : Nat) -> plus n1 m1 = plus m1 n1)
187 | ```
188 |
189 | To get \idr{plus_comm} to apply at the point where we want it to, we can
190 | introduce a local lemma using the \idr{where} keyword stating that \idr{n + m =
191 | m + n} (for the particular \idr{m} and \idr{n} that we are talking about here),
192 | prove this lemma using \idr{plus_comm}, and then use it to do the desired
193 | rewrite.
194 |
195 | > plus_rearrange : (n, m, p, q : Nat) ->
196 | > (n + m) + (p + q) = (m + n) + (p + q)
197 | > plus_rearrange n m p q = rewrite plus_rearrange_lemma n m in Refl
198 | > where
199 | > plus_rearrange_lemma : (n, m : Nat) -> n + m = m + n
200 | > plus_rearrange_lemma = plus_comm
201 |
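Since the lemma is just \idr{plus_comm} specialized to our particular \idr{n}
and \idr{m}, a \idr{let} binding works equally well here. The following variant
(the name \idr{plus_rearrange'} is ours) is a sketch of the same idea:

```idris
plus_rearrange' : (n, m, p, q : Nat) ->
                  (n + m) + (p + q) = (m + n) + (p + q)
plus_rearrange' n m p q =
  let lemma = plus_comm n m in  -- lemma : n + m = m + n
  rewrite lemma in Refl
```
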
202 | == More Exercises
203 |
204 |
205 | ==== Exercise: 3 stars, recommended (mult_comm)
206 |
207 | Use \idr{rewrite} to help prove this theorem. You shouldn't need to use
208 | induction on \idr{plus_swap}.
209 |
210 | > plus_swap : (n, m, p : Nat) -> n + (m + p) = m + (n + p)
211 | > plus_swap n m p = ?plus_swap_rhs
212 |
213 | Now prove commutativity of multiplication. (You will probably need to define and
214 | prove a separate subsidiary theorem to be used in the proof of this one. You may
215 | find that \idr{plus_swap} comes in handy.)
216 |
217 | > mult_comm : (m, n : Nat) -> m * n = n * m
218 | > mult_comm m n = ?mult_comm_rhs
219 |
220 | $\square$
221 |
222 |
223 | ==== Exercise: 3 stars, optional (more_exercises)
224 |
225 | \ \todo[inline]{Edit}
226 |
227 | Take a piece of paper. For each of the following theorems, first _think_ about
228 | whether (a) it can be proved using only simplification and rewriting, (b) it
229 | also requires case analysis (`destruct`), or (c) it also requires induction.
230 | Write down your prediction. Then fill in the proof. (There is no need to turn in
231 | your piece of paper; this is just to encourage you to reflect before you hack!)
232 |
233 | > lte_refl : (n : Nat) -> True = lte n n
234 | > lte_refl n = ?lte_refl_rhs
235 |
236 | > zero_nbeq_S : (n : Nat) -> 0 == (S n) = False
237 | > zero_nbeq_S n = ?zero_nbeq_S_rhs
238 |
239 | > andb_false_r : (b : Bool) -> b && False = False
240 | > andb_false_r b = ?andb_false_r_rhs
241 |
242 | > plus_ble_compat_l : (n, m, p : Nat) ->
243 | > lte n m = True -> lte (p + n) (p + m) = True
244 | > plus_ble_compat_l n m p prf = ?plus_ble_compat_l_rhs
245 |
246 | > S_nbeq_0 : (n : Nat) -> (S n) == 0 = False
247 | > S_nbeq_0 n = ?S_nbeq_0_rhs
248 |
249 | > mult_1_l : (n : Nat) -> 1 * n = n
250 | > mult_1_l n = ?mult_1_l_rhs
251 |
252 | > all3_spec : (b, c : Bool) ->
253 | > (b && c) || ((not b) || (not c)) = True
254 | > all3_spec b c = ?all3_spec_rhs
255 |
256 | > mult_plus_distr_r : (n, m, p : Nat) -> (n + m) * p = (n * p) + (m * p)
257 | > mult_plus_distr_r n m p = ?mult_plus_distr_r_rhs
258 |
259 | > mult_assoc : (n, m, p : Nat) -> n * (m * p) = (n * m) * p
260 | > mult_assoc n m p = ?mult_assoc_rhs
261 |
262 | $\square$
263 |
264 |
265 | ==== Exercise: 2 stars, optional (beq_nat_refl)
266 |
267 | \ \todo[inline]{Edit}
268 |
269 | Prove the following theorem. (Putting the \idr{True} on the left-hand side of
270 | the equality may look odd, but this is how the theorem is stated in the Coq
271 | standard library, so we follow suit. Rewriting works equally well in either
272 | direction, so we will have no problem using the theorem no matter which way we
273 | state it.)
274 |
275 | > beq_nat_refl : (n : Nat) -> True = n == n
276 | > beq_nat_refl n = ?beq_nat_refl_rhs
277 |
278 | $\square$
279 |
280 |
281 | ==== Exercise: 2 stars, optional (plus_swap')
282 |
283 | \ \todo[inline]{Edit}
284 |
285 | The `replace` tactic allows you to specify a particular subterm to rewrite and
286 | what you want it rewritten to: `replace (t) with (u)` replaces (all copies of)
287 | expression `t` in the goal by expression `u`, and generates `t = u` as an
288 | additional subgoal. This is often useful when a plain \idr{rewrite} acts on the
289 | wrong part of the goal.
290 |
291 | Use the `replace` tactic to do a proof of \idr{plus_swap'}, just like
292 | \idr{plus_swap} but without needing `assert (n + m = m + n)`.
293 |
294 | > plus_swap' : (n, m, p : Nat) -> n + (m + p) = m + (n + p)
295 | > plus_swap' n m p = ?plus_swap__rhs
296 |
297 | $\square$
298 |
299 |
300 | ==== Exercise: 3 stars, recommended (binary_commute)
301 |
302 | Recall the \idr{incr} and \idr{bin_to_nat} functions that you wrote for the
303 | \idr{binary} exercise in the `Basics` chapter. Prove that the following diagram
304 | commutes:
305 |
306 | bin --------- incr -------> bin
307 | | |
308 | bin_to_nat bin_to_nat
309 | | |
310 | v v
311 | nat ---------- S ---------> nat
312 |
313 | That is, incrementing a binary number and then converting it to a (unary)
314 | natural number yields the same result as first converting it to a natural number
315 | and then incrementing. Name your theorem \idr{bin_to_nat_pres_incr} ("pres" for
316 | "preserves").
317 |
318 | Before you start working on this exercise, please copy the definitions from your
319 | solution to the \idr{binary} exercise here so that this file can be graded on
320 | its own. If you find yourself wanting to change your original definitions to
321 | make the property easier to prove, feel free to do so!
322 |
323 | $\square$
324 |
325 |
326 | ==== Exercise: 5 stars, advanced (binary_inverse)
327 |
328 | This exercise is a continuation of the previous exercise about binary numbers.
329 | You will need your definitions and theorems from there to complete this one.
330 |
331 | (a) First, write a function to convert natural numbers to binary numbers. Then
332 | prove that starting with any natural number, converting to binary, then
333 | converting back yields the same natural number you started with.
334 |
335 | (b) You might naturally think that we should also prove the opposite direction:
336 | that starting with a binary number, converting to a natural, and then back
337 | to binary yields the same number we started with. However, this is not true!
338 | Explain what the problem is.
339 |
340 | (c) Define a "direct" normalization function -- i.e., a function \idr{normalize}
341 | from binary numbers to binary numbers such that, for any binary number b,
342 | converting to a natural and then back to binary yields \idr{(normalize b)}.
343 | Prove it. (Warning: This part is tricky!)
344 |
345 | Again, feel free to change your earlier definitions if this helps here.
346 |
347 | $\square$
348 |
--------------------------------------------------------------------------------
/src/Lists.lidr:
--------------------------------------------------------------------------------
1 | = Lists : Working with Structured Data
2 |
3 | > module Lists
4 |
5 | > import Basics
6 |
7 | > %hide Prelude.Basics.fst
8 | > %hide Prelude.Basics.snd
9 | > %hide Prelude.Nat.pred
10 | > %hide Prelude.List.(++)
11 |
12 | > %access public export
13 | > %default total
14 |
15 |
16 | == Pairs of Numbers
17 |
18 | In an inductive type definition, each constructor can take any number of
19 | arguments -- none (as with \idr{True} and \idr{Z}), one (as with \idr{S}), or
20 | more than one, as here:
21 |
22 | > data NatProd : Type where
23 | > Pair : Nat -> Nat -> NatProd
24 |
25 | This declaration can be read: "There is just one way to construct a pair of
26 | numbers: by applying the constructor \idr{Pair} to two arguments of type
27 | \idr{Nat}."
28 |
29 | ```idris
30 | λΠ> :t Pair 3 5
31 | ```
32 |
33 | Here are two simple functions for extracting the first and second components of
34 | a pair. The definitions also illustrate how to do pattern matching on
35 | two-argument constructors.
36 |
37 | > fst : (p : NatProd) -> Nat
38 | > fst (Pair x y) = x
39 |
40 | > snd : (p : NatProd) -> Nat
41 | > snd (Pair x y) = y
42 |
43 | ```idris
44 | λΠ> fst (Pair 3 5)
45 | 3 : Nat
46 | ```
47 |
48 | Since pairs are used quite a bit, it is nice to be able to write them with the
49 | standard mathematical notation \idr{(x,y)} instead of \idr{Pair x y}. We can
50 | tell Idris to allow this with a \idr{syntax} declaration.
51 |
52 | > syntax "(" [x] "," [y] ")" = Pair x y
53 |
54 | The new pair notation can be used both in expressions and in pattern matches
55 | (indeed, we've actually seen this already in the previous chapter, in the
56 | definition of the \idr{minus} function -- this works because the pair notation
57 | is also provided as part of the standard library):
58 |
59 | ```idris
60 | λΠ> fst (3,5)
61 | 3 : Nat
62 | ```
63 |
64 | > fst' : (p : NatProd) -> Nat
65 | > fst' (x,y) = x
66 |
67 | > snd' : (p : NatProd) -> Nat
68 | > snd' (x,y) = y
69 |
70 | > swap_pair : (p : NatProd) -> NatProd
71 | > swap_pair (x,y) = (y,x)
72 |
73 | Let's try to prove a few simple facts about pairs.
74 |
75 | If we state things in a particular (and slightly peculiar) way, we can complete
76 | proofs with just reflexivity (and its built-in simplification):
77 |
78 | > surjective_pairing' : (n,m : Nat) -> (n,m) = (fst (n,m), snd (n,m))
79 | > surjective_pairing' n m = Refl
80 |
81 | But \idr{Refl} is not enough if we state the lemma in a more natural way:
82 |
83 | ```idris
84 | surjective_pairing_stuck : (p : NatProd) -> p = (fst p, snd p)
85 | surjective_pairing_stuck p = Refl
86 | ```
87 | ```
88 | When checking right hand side of
89 | surjective_pairing_stuck with expected type p = Pair (fst p) (snd p)
90 | ...
91 | Type mismatch between p and Pair (fst p) (snd p)
92 | ```
93 |
94 | We have to expose the structure of \idr{p} so that Idris can perform the pattern
95 | match in \idr{fst} and \idr{snd}. We can do this with \idr{case}.
96 |
97 | > surjective_pairing : (p : NatProd) -> p = (fst p, snd p)
98 | > surjective_pairing p = case p of (n,m) => Refl
99 |
100 | Notice that \idr{case} matches just one pattern here. That's because
101 | \idr{NatProd}s can only be constructed in one way.
102 |
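Equivalently (a sketch; the name \idr{surjective_pairing2} is ours), we can
expose the structure of \idr{p} by pattern matching on the pair directly in the
clause:

```idris
surjective_pairing2 : (p : NatProd) -> p = (fst p, snd p)
surjective_pairing2 (n,m) = Refl
```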
103 |
104 | === Exercise: 1 star (snd_fst_is_swap)
105 |
106 | > snd_fst_is_swap : (p : NatProd) -> (snd p, fst p) = swap_pair p
107 | > snd_fst_is_swap p = ?snd_fst_is_swap_rhs
108 |
109 | $\square$
110 |
111 |
112 | === Exercise: 1 star, optional (fst_swap_is_snd)
113 |
114 | > fst_swap_is_snd : (p : NatProd) -> fst (swap_pair p) = snd p
115 | > fst_swap_is_snd p = ?fst_swap_is_snd_rhs
116 |
117 | $\square$
118 |
119 |
120 | == Lists of Numbers
121 |
122 | Generalizing the definition of pairs, we can describe the type of _lists_ of
123 | numbers like this: "A list is either the empty list or else a pair of a number
124 | and another list."
125 |
126 | > data NatList : Type where
127 | > Nil : NatList
128 | > (::) : Nat -> NatList -> NatList
129 |
130 | For example, here is a three-element list:
131 |
132 | > mylist : NatList
133 | > mylist = (::) 1 ((::) 2 ((::) 3 Nil))
134 |
135 | \todo[inline]{Edit the section - Idris's list sugar automatically works for
136 | anything with constructors \idr{Nil} and \idr{(::)}}
137 |
138 | As with pairs, it is more convenient to write lists in familiar programming
139 | notation. The following declarations allow us to use \idr{::} as an infix Cons
140 | operator and square brackets as an "outfix" notation for constructing lists.
141 |
142 | It is not necessary to understand the details of these declarations, but in case
143 | you are interested, here is roughly what's going on. The right associativity
144 | annotation tells Coq how to parenthesize expressions involving several uses of
145 | :: so that, for example, the next three declarations mean exactly the same
146 | thing:
147 |
148 | > mylist1 : NatList
149 | > mylist1 = 1 :: (2 :: (3 :: Nil))
150 |
151 | > mylist2 : NatList
152 | > mylist2 = 1::2::3::[]
153 |
154 | > mylist3 : NatList
155 | > mylist3 = [1,2,3]
156 |
157 | The at level 60 part tells Coq how to parenthesize expressions that involve both
158 | :: and some other infix operator. For example, since we defined + as infix
159 | notation for the plus function at level 50,
160 |
161 | ```coq
162 | Notation "x + y" := ( plus x y )
163 | ( at level 50, left associativity ).
164 | ```
165 |
166 | the `+` operator will bind tighter than `::` , so `1 + 2 :: [3]` will be parsed,
167 | as we'd expect, as `(1 + 2) :: [3]` rather than `1 + (2 :: [3])`.
168 |
169 | (Expressions like "`1 + 2 :: [3]`" can be a little confusing when you read them
170 | in a `.v` file. The inner brackets, around `3`, indicate a list, but the outer
171 | brackets, which are invisible in the HTML rendering, are there to instruct the
172 | "coqdoc" tool that the bracketed part should be displayed as Coq code rather
173 | than running text.)
174 |
175 | The second and third `Notation` declarations above introduce the standard
176 | square-bracket notation for lists; the right-hand side of the third one
177 | illustrates Coq's syntax for declaring n-ary notations and translating them to
178 | nested sequences of binary constructors.
179 |
180 |
181 | === Repeat
182 |
183 | A number of functions are useful for manipulating lists. For example, the
184 | \idr{repeat} function takes a number \idr{n} and a \idr{count} and returns a
185 | list of length \idr{count} where every element is \idr{n}.
186 |
187 | > repeat : (n, count : Nat) -> NatList
188 | > repeat n Z = []
189 | > repeat n (S k) = n :: repeat n k
190 |
191 |
192 | === Length
193 |
194 | The \idr{length} function calculates the length of a list.
195 |
196 | > length : (l : NatList) -> Nat
197 | > length [] = Z
198 | > length (h :: t) = S (length t)
199 |
200 |
201 | === Append
202 |
203 | The \idr{app} function concatenates (appends) two lists.
204 |
205 | > app : (l1, l2 : NatList) -> NatList
206 | > app [] l2 = l2
207 | > app (h :: t) l2 = h :: app t l2
208 |
209 | Actually, \idr{app} will be used a lot in some parts of what follows, so it is
210 | convenient to have an infix operator for it.
211 |
212 | > infixr 7 ++
213 |
214 | > (++) : (x, y : NatList) -> NatList
215 | > (++) = app
216 |
217 | > test_app1 : [1,2,3] ++ [4,5,6] = [1,2,3,4,5,6]
218 | > test_app1 = Refl
219 |
220 | > test_app2 : [] ++ [4,5] = [4,5]
221 | > test_app2 = Refl
222 |
223 | > test_app3 : [1,2,3] ++ [] = [1,2,3]
224 | > test_app3 = Refl
225 |
226 |
227 | === Head (with default) and Tail
228 |
229 | Here are two smaller examples of programming with lists. The \idr{hd} function
230 | returns the first element (the "head") of the list, while \idr{tl} returns
231 | everything but the first element (the "tail"). Of course, the empty list has no
232 | first element, so we must pass a default value to be returned in that case.
233 |
234 | > hd : (default : Nat) -> (l : NatList) -> Nat
235 | > hd default [] = default
236 | > hd default (h :: t) = h
237 |
238 | > tl : (l : NatList) -> NatList
239 | > tl [] = []
240 | > tl (h :: t) = t
241 |
242 | > test_hd1 : hd 0 [1,2,3] = 1
243 | > test_hd1 = Refl
244 |
245 | > test_hd2 : hd 0 [] = 0
246 | > test_hd2 = Refl
247 |
248 | > test_tl : tl [1,2,3] = [2,3]
249 | > test_tl = Refl
250 |
251 |
252 | === Exercises
253 |
254 |
255 | ==== Exercise: 2 stars, recommended (list_funs)
256 |
257 | Complete the definitions of \idr{nonzeros}, \idr{oddmembers} and
258 | \idr{countoddmembers} below. Have a look at the tests to understand what these
259 | functions should do.
260 |
261 | > nonzeros : (l : NatList) -> NatList
262 | > nonzeros l = ?nonzeros_rhs
263 |
264 | > test_nonzeros : nonzeros [0,1,0,2,3,0,0] = [1,2,3]
265 | > test_nonzeros = ?test_nonzeros_rhs
266 |
267 | > oddmembers : (l : NatList) -> NatList
268 | > oddmembers l = ?oddmembers_rhs
269 |
270 | > test_oddmembers : oddmembers [0,1,0,2,3,0,0] = [1,3]
271 | > test_oddmembers = ?test_oddmembers_rhs
272 |
273 | > countoddmembers : (l : NatList) -> Nat
274 | > countoddmembers l = ?countoddmembers_rhs
275 |
276 | > test_countoddmembers1 : countoddmembers [1,0,3,1,4,5] = 4
277 | > test_countoddmembers1 = ?test_countoddmembers1_rhs
278 |
279 | $\square$
280 |
281 |
282 | ==== Exercise: 3 stars, advanced (alternate)
283 |
284 | Complete the definition of \idr{alternate}, which "zips up" two lists into one,
285 | alternating between elements taken from the first list and elements from the
286 | second. See the tests below for more specific examples.
287 |
288 | Note: one natural and elegant way of writing \idr{alternate} will fail to
289 | satisfy Idris's requirement that all function definitions be "obviously
290 | terminating." If you find yourself in this rut, look for a slightly more verbose
291 | solution that considers elements of both lists at the same time. (One possible
292 | solution requires defining a new kind of pairs, but this is not the only way.)
293 |
294 | > alternate : (l1, l2 : NatList) -> NatList
295 | > alternate l1 l2 = ?alternate_rhs
296 |
297 | > test_alternate1 : alternate [1,2,3] [4,5,6] =
298 | > [1,4,2,5,3,6]
299 | > test_alternate1 = ?test_alternate1_rhs
300 |
301 | > test_alternate2 : alternate [1] [4,5,6] = [1,4,5,6]
302 | > test_alternate2 = ?test_alternate2_rhs
303 |
304 | > test_alternate3 : alternate [1,2,3] [4] = [1,4,2,3]
305 | > test_alternate3 = ?test_alternate3_rhs
306 |
307 | > test_alternate4 : alternate [] [20,30] = [20,30]
308 | > test_alternate4 = ?test_alternate4_rhs
309 |
310 | $\square$
311 |
312 |
313 | === Bags via Lists
314 |
315 | A \idr{Bag} (or \idr{Multiset}) is like a set, except that each element can
316 | appear multiple times rather than just once. One possible implementation is to
317 | represent a bag of numbers as a list.
318 |
319 | > Bag : Type
320 | > Bag = NatList
321 |
322 |
323 | ==== Exercise: 3 stars, recommended (bag_functions)
324 |
325 | Complete the following definitions for the functions \idr{count}, \idr{sum},
326 | \idr{add}, and \idr{member} for bags.
327 |
328 | > count : (v : Nat) -> (s : Bag) -> Nat
329 | > count v s = ?count_rhs
330 |
331 | All these proofs can be done just by \idr{Refl}.
332 |
333 | > test_count1 : count 1 [1,2,3,1,4,1] = 3
334 | > test_count1 = ?test_count1_rhs
335 |
336 | > test_count2 : count 6 [1,2,3,1,4,1] = 0
337 | > test_count2 = ?test_count2_rhs
338 |
339 | Multiset \idr{sum} is similar to set \idr{union}: \idr{sum a b} contains all the
340 | elements of \idr{a} and of \idr{b}. (Mathematicians usually define union on
341 | multisets a little bit differently, which is why we don't use that name for this
342 | operation.)
343 |
344 | \todo[inline]{How to forbid recursion here? Edit}
345 |
346 | For \idr{sum} we're giving you a header that does not give explicit names to the
347 | arguments. Moreover, it uses the keyword Definition instead of Fixpoint, so
348 | even if you had names for the arguments, you wouldn't be able to process them
349 | recursively. The point of stating the question this way is to encourage you to
350 | think about whether sum can be implemented in another way -- perhaps by using
351 | functions that have already been defined.
352 |
353 | > sum : Bag -> Bag -> Bag
354 | > sum x y = ?sum_rhs
355 |
356 | > test_sum1 : count 1 (sum [1,2,3] [1,4,1]) = 3
357 | > test_sum1 = ?test_sum1_rhs
358 |
359 | > add : (v : Nat) -> (s : Bag) -> Bag
360 | > add v s = ?add_rhs
361 |
362 | > test_add1 : count 1 (add 1 [1,4,1]) = 3
363 | > test_add1 = ?test_add1_rhs
364 |
365 | > test_add2 : count 5 (add 1 [1,4,1]) = 0
366 | > test_add2 = ?test_add2_rhs
367 |
368 | > member : (v : Nat) -> (s : Bag) -> Bool
369 | > member v s = ?member_rhs
370 |
371 | > test_member1 : member 1 [1,4,1] = True
372 | > test_member1 = ?test_member1_rhs
373 |
374 | > test_member2 : member 2 [1,4,1] = False
375 | > test_member2 = ?test_member2_rhs
376 |
377 | $\square$
378 |
379 |
380 | ==== Exercise: 3 stars, optional (bag_more_functions)
381 |
382 | Here are some more bag functions for you to practice with.
383 |
384 | When \idr{remove_one} is applied to a bag without the number to remove, it
385 | should return the same bag unchanged.
386 |
387 | > remove_one : (v : Nat) -> (s : Bag) -> Bag
388 | > remove_one v s = ?remove_one_rhs
389 |
390 | > test_remove_one1 : count 5 (remove_one 5 [2,1,5,4,1]) = 0
391 | > test_remove_one1 = ?test_remove_one1_rhs
392 |
393 | > test_remove_one2 : count 5 (remove_one 5 [2,1,4,1]) = 0
394 | > test_remove_one2 = ?test_remove_one2_rhs
395 |
396 | > test_remove_one3 : count 4 (remove_one 5 [2,1,5,4,1,4]) = 2
397 | > test_remove_one3 = ?test_remove_one3_rhs
398 |
399 | > test_remove_one4 : count 5 (remove_one 5 [2,1,5,4,5,1,4]) = 1
400 | > test_remove_one4 = ?test_remove_one4_rhs
401 |
402 | > remove_all : (v : Nat) -> (s : Bag) -> Bag
403 | > remove_all v s = ?remove_all_rhs
404 |
405 | > test_remove_all1 : count 5 (remove_all 5 [2,1,5,4,1]) = 0
406 | > test_remove_all1 = ?test_remove_all1_rhs
407 |
408 | > test_remove_all2 : count 5 (remove_all 5 [2,1,4,1]) = 0
409 | > test_remove_all2 = ?test_remove_all2_rhs
410 |
411 | > test_remove_all3 : count 4 (remove_all 5 [2,1,5,4,1,4]) = 2
412 | > test_remove_all3 = ?test_remove_all3_rhs
413 |
414 | > test_remove_all4 : count 5
415 | > (remove_all 5 [2,1,5,4,5,1,4,5,1,4]) = 0
416 | > test_remove_all4 = ?test_remove_all4_rhs
417 |
418 | > subset : (s1 : Bag) -> (s2 : Bag) -> Bool
419 | > subset s1 s2 = ?subset_rhs
420 |
421 | > test_subset1 : subset [1,2] [2,1,4,1] = True
422 | > test_subset1 = ?test_subset1_rhs
423 |
424 | > test_subset2 : subset [1,2,2] [2,1,4,1] = False
425 | > test_subset2 = ?test_subset2_rhs
426 |
427 | $\square$
428 |
429 |
430 | ==== Exercise: 3 stars, recommended (bag_theorem)
431 |
432 | Write down an interesting theorem \idr{bag_theorem} about bags involving the
433 | functions \idr{count} and \idr{add}, and prove it. Note that, since this problem
434 | is somewhat open-ended, it's possible that you may come up with a theorem which
435 | is true, but whose proof requires techniques you haven't learned yet. Feel free
436 | to ask for help if you get stuck!
437 |
438 | > bag_theorem : ?bag_theorem
439 |
440 | $\square$
441 |
442 |
443 | == Reasoning About Lists
444 |
445 | As with numbers, simple facts about list-processing functions can sometimes be
446 | proved entirely by simplification. For example, the simplification performed by
447 | \idr{Refl} is enough for this theorem...
448 |
449 | > nil_app : (l : NatList) -> ([] ++ l) = l
450 | > nil_app l = Refl
451 |
452 | ... because the \idr{[]} is substituted into the "scrutinee" (the value being
453 | "scrutinized" by the match) in the definition of \idr{app}, allowing the match
454 | itself to be simplified.
455 |
456 | Also, as with numbers, it is sometimes helpful to perform case analysis on the
457 | possible shapes (empty or non-empty) of an unknown list.
458 |
459 | > tl_length_pred : (l : NatList) -> pred (length l) = length (tl l)
460 | > tl_length_pred [] = Refl
461 | > tl_length_pred (n::l') = Refl
462 |
463 | Here, the \idr{Nil} case works because we've chosen to define \idr{tl Nil =
464 | Nil}. Notice that the case for \idr{Cons} introduces two names, \idr{n} and
465 | \idr{l'}, corresponding to the fact that the \idr{Cons} constructor for lists
466 | takes two arguments (the head and tail of the list it is constructing).
467 |
468 | Usually, though, interesting theorems about lists require induction for their
469 | proofs.
470 |
471 |
472 | ==== Micro-Sermon
473 |
474 | Simply reading example proof scripts will not get you very far! It is important
475 | to work through the details of each one, using Idris and thinking about what
476 | each step achieves. Otherwise it is more or less guaranteed that the exercises
477 | will make no sense when you get to them. 'Nuff said.
478 |
479 |
480 | === Induction on Lists
481 |
482 | Proofs by induction over datatypes like \idr{NatList} are a little less familiar
483 | than standard natural number induction, but the idea is equally simple. Each
484 | \idr{data} declaration defines a set of data values that can be built up using
485 | the declared constructors: a boolean can be either \idr{True} or \idr{False}; a
486 | number can be either \idr{Z} or \idr{S} applied to another number; a list can be
487 | either \idr{Nil} or \idr{Cons} applied to a number and a list.
488 |
489 | Moreover, applications of the declared constructors to one another are the
490 | _only_ possible shapes that elements of an inductively defined set can have, and
491 | this fact directly gives rise to a way of reasoning about inductively defined
492 | sets: a number is either \idr{Z} or else it is \idr{S} applied to some _smaller_
493 | number; a list is either \idr{Nil} or else it is \idr{Cons} applied to some
494 | number and some _smaller_ list; etc. So, if we have in mind some proposition
495 | \idr{p} that mentions a list \idr{l} and we want to argue that \idr{p} holds for
496 | _all_ lists, we can reason as follows:
497 |
498 | - First, show that \idr{p} is true of \idr{l} when \idr{l} is \idr{Nil}.
499 |
500 | - Then show that \idr{P} is true of \idr{l} when \idr{l} is \idr{Cons n l'}
501 | for some number \idr{n} and some smaller list \idr{l'}, assuming that
502 | \idr{p} is true for \idr{l'}.
503 |
504 | Since larger lists can only be built up from smaller ones, eventually reaching
505 | \idr{Nil}, these two arguments together establish the truth of \idr{p} for all
506 | lists \idr{l}. Here's a concrete example:
507 |
508 | > app_assoc : (l1, l2, l3 : NatList) -> ((l1 ++ l2) ++ l3) = (l1 ++ (l2 ++ l3))
509 | > app_assoc [] l2 l3 = Refl
510 | > app_assoc (n::l1') l2 l3 =
511 | > let inductiveHypothesis = app_assoc l1' l2 l3 in
512 | > rewrite inductiveHypothesis in Refl
513 |
514 | \todo[inline]{Edit}
515 |
516 | Notice that, as when doing induction on natural numbers, the as ... clause
517 | provided to the induction tactic gives a name to the induction hypothesis
518 | corresponding to the smaller list l1' in the cons case. Once again, this Coq
519 | proof is not especially illuminating as a static written document -- it is easy
520 | to see what's going on if you are reading the proof in an interactive Coq
521 | session and you can see the current goal and context at each point, but this
522 | state is not visible in the written-down parts of the Coq proof. So a
523 | natural-language proof -- one written for human readers -- will need to include
524 | more explicit signposts; in particular, it will help the reader stay oriented if
525 | we remind them exactly what the induction hypothesis is in the second case.
526 |
527 | For comparison, here is an informal proof of the same theorem.
528 |
529 | _Theorem_: For all lists \idr{l1}, \idr{l2}, and \idr{l3},
530 |
531 | \idr{(l1 ++ l2) ++ l3 = l1 ++ (l2 ++l3)}.
532 |
533 | _Proof_: By induction on \idr{l1}.
534 |
535 | - First, suppose \idr{l1 = []}. We must show
536 |
537 | \idr{([] ++ l2) ++ l3 = [] ++ (l2 ++ l3)},
538 |
539 | which follows directly from the definition of \idr{++}.
540 |
541 | - Next, suppose \idr{l1 = n :: l1'}, with
542 |
543 | \idr{(l1' ++ l2) ++ l3 = l1' ++ (l2 ++ l3)}
544 |
545 | (the induction hypothesis). We must show
546 |
547 | \idr{((n :: l1') ++ l2) ++ l3 = (n :: l1') ++ (l2 ++ l3)}.
548 |
549 | By the definition of \idr{++}, this follows from
550 |
551 |   \idr{n :: ((l1' ++ l2) ++ l3) = n :: (l1' ++ (l2 ++ l3))},
552 |
553 | which is immediate from the induction hypothesis. $\square$
554 |
555 |
556 | ==== Reversing a List
557 |
558 | For a slightly more involved example of inductive proof over lists, suppose we
559 | use \idr{app} to define a list-reversing function \idr{rev}:
560 |
561 | > rev : (l : NatList) -> NatList
562 | > rev Nil = Nil
563 | > rev (h :: t) = (rev t) ++ [h]
564 |
565 | > test_rev1 : rev [1,2,3] = [3,2,1]
566 | > test_rev1 = Refl
567 |
568 | > test_rev2 : rev Nil = Nil
569 | > test_rev2 = Refl
570 |
571 |
572 | ==== Properties of rev
573 |
574 | Now let's prove some theorems about our newly defined \idr{rev}. For something a
575 | bit more challenging than what we've seen, let's prove that reversing a list
576 | does not change its length. Our first attempt gets stuck in the successor
577 | case...
578 |
579 | ```idris
580 | rev_length_firsttry : (l : NatList) -> length (rev l) = length l
581 | rev_length_firsttry Nil = Refl
582 | rev_length_firsttry (n :: l') =
583 | -- Now we seem to be stuck: the goal is an equality involving `++`, but we don't
584 | -- have any useful equations in either the immediate context or in the global
585 | -- environment! We can make a little progress by using the IH to rewrite the
586 | -- goal...
587 | let inductiveHypothesis = rev_length_firsttry l' in
588 | rewrite inductiveHypothesis in
589 | -- ... but now we can't go any further.
590 | Refl
591 | ```
592 |
593 | So let's take the equation relating \idr{++} and \idr{length} that would have
594 | enabled us to make progress and prove it as a separate lemma.
595 |
596 | > app_length : (l1, l2 : NatList) ->
597 | > length (l1 ++ l2) = (length l1) + (length l2)
598 | > app_length Nil l2 = Refl
599 | > app_length (n :: l1') l2 =
600 | > let inductiveHypothesis = app_length l1' l2 in
601 | > rewrite inductiveHypothesis in
602 | > Refl
603 |
604 | Note that, to make the lemma as general as possible, we quantify over _all_
605 | \idr{NatList}s, not just those that result from an application of \idr{rev}.
606 | This should seem natural, because the truth of the goal clearly doesn't depend
607 | on the list having been reversed. Moreover, it is easier to prove the more
608 | general property.
609 |
610 | Now we can complete the original proof.
611 |
612 | > rev_length : (l : NatList) -> length (rev l) = length l
613 | > rev_length Nil = Refl
614 | > rev_length (n :: l') =
615 | > rewrite app_length (rev l') [n] in
616 | > -- Prelude's version of `Induction.plus_comm`
617 | > rewrite plusCommutative (length (rev l')) 1 in
618 | > let inductiveHypothesis = rev_length l' in
619 | > rewrite inductiveHypothesis in Refl
620 |
621 | For comparison, here are informal proofs of these two theorems:
622 |
623 | _Theorem_: For all lists \idr{l1} and \idr{l2},
624 |
625 | \idr{length (l1 ++ l2) = length l1 + length l2}.
626 |
627 | _Proof_: By induction on \idr{l1}.
628 |
629 | - First, suppose \idr{l1 = []}. We must show
630 |
631 | \idr{length ([] ++ l2) = length [] + length l2},
632 |
633 | which follows directly from the definitions of \idr{length} and \idr{++}.
634 |
635 | - Next, suppose \idr{l1 = n :: l1'}, with
636 |
637 | \idr{length (l1' ++ l2) = length l1' + length l2}.
638 |
639 | We must show
640 |
641 |   \idr{length ((n :: l1') ++ l2) = length (n :: l1') + length l2}.
642 |
643 | This follows directly from the definitions of \idr{length} and \idr{++}
644 | together with the induction hypothesis. $\square$
645 |
646 | _Theorem_: For all lists \idr{l}, \idr{length (rev l) = length l}.
647 |
648 | _Proof_: By induction on \idr{l}.
649 |
650 | - First, suppose \idr{l = []}. We must show
651 |
652 | \idr{length (rev []) = length []},
653 |
654 | which follows directly from the definitions of \idr{length} and \idr{rev}.
655 |
656 | - Next, suppose \idr{l = n :: l'}, with
657 |
658 | \idr{length (rev l') = length l'}.
659 |
660 | We must show
661 |
662 | \idr{length (rev (n :: l')) = length (n :: l')}.
663 |
664 | By the definition of \idr{rev}, this follows from
665 |
666 | \idr{length ((rev l') ++ [n]) = S (length l')}
667 |
668 | which, by the previous lemma, is the same as
669 |
670 | \idr{length (rev l') + length [n] = S (length l')}.
671 |
672 | This follows directly from the induction hypothesis and the definition of
673 | \idr{length}. $\square$
674 |
675 | The style of these proofs is rather longwinded and pedantic. After the first
676 | few, we might find it easier to follow proofs that give fewer details (which we
677 | can easily work out in our own minds or on scratch paper if necessary) and just
678 | highlight the non-obvious steps. In this more compressed style, the above proof
679 | might look like this:
680 |
681 | _Theorem_: For all lists \idr{l}, \idr{length (rev l) = length l}.
682 |
683 | _Proof_: First, observe that \idr{length (l ++ [n]) = S (length l)} for any
684 | \idr{l} (this follows by a straightforward induction on \idr{l}). The main
685 | property again follows by induction on \idr{l}, using the observation together
686 | with the induction hypothesis in the case where \idr{l = n' :: l'}. $\square$
687 |
688 | Which style is preferable in a given situation depends on the sophistication of
689 | the expected audience and how similar the proof at hand is to ones that the
690 | audience will already be familiar with. The more pedantic style is a good
691 | default for our present purposes.
692 |
693 |
694 | === Search
695 |
696 | \ \todo[inline]{Edit, mention \idr{:s} and \idr{:apropos}?}
697 |
698 | We've seen that proofs can make use of other theorems we've already proved,
699 | e.g., using \idr{rewrite}. But in order to refer to a theorem, we need to know
700 | its name! Indeed, it is often hard even to remember what theorems have been
701 | proven, much less what they are called.
702 |
703 | Coq's `Search` command is quite helpful with this. Typing `Search foo` will
704 | cause Coq to display a list of all theorems involving `foo`. For example, try
705 | uncommenting the following line to see a list of theorems that we have proved
706 | about `rev`:
707 |
708 | ```coq
709 | (* Search rev. *)
710 | ```
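
Idris's REPL offers rough analogues: \idr{:search} looks for definitions
matching a given type, and \idr{:apropos} searches names and documentation for a
string. A sketch of how they might be used here (the exact results depend on
your Idris version and on which modules are loaded):

```idris
λΠ> :search NatList -> NatList
λΠ> :apropos rev
```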
711 |
712 | Keep `Search` in mind as you do the following exercises and throughout the rest
713 | of the book; it can save you a lot of time!
714 |
715 | If you are using ProofGeneral, you can run `Search` with `C-c C-a C-a`. Pasting
716 | its response into your buffer can be accomplished with `C-c C-;`.
717 |
718 |
719 | === List Exercises, Part 1
720 |
721 |
722 | ==== Exercise: 3 stars (list_exercises)
723 |
724 | More practice with lists:
725 |
726 | > app_nil_r : (l : NatList) -> (l ++ []) = l
727 | > app_nil_r l = ?app_nil_r_rhs
728 |
729 | > rev_app_distr : (l1, l2 : NatList) -> rev (l1 ++ l2) = (rev l2) ++ (rev l1)
730 | > rev_app_distr l1 l2 = ?rev_app_distr_rhs
731 |
732 | > rev_involutive : (l : NatList) -> rev (rev l) = l
733 | > rev_involutive l = ?rev_involutive_rhs
734 |
735 | There is a short solution to the next one. If you find yourself getting tangled
736 | up, step back and try to look for a simpler way.
737 |
738 | > app_assoc4 : (l1, l2, l3, l4 : NatList) ->
739 | > (l1 ++ (l2 ++ (l3 ++ l4))) = ((l1 ++ l2) ++ l3) ++ l4
740 | > app_assoc4 l1 l2 l3 l4 = ?app_assoc4_rhs
741 |
742 | An exercise about your implementation of \idr{nonzeros}:
743 |
744 | > nonzeros_app : (l1, l2 : NatList) ->
745 | > nonzeros (l1 ++ l2) = (nonzeros l1) ++ (nonzeros l2)
746 | > nonzeros_app l1 l2 = ?nonzeros_app_rhs
747 |
748 | $\square$
749 |
750 |
751 | ==== Exercise: 2 stars (beq_NatList)
752 |
753 | Fill in the definition of \idr{beq_NatList}, which compares lists of numbers for
754 | equality. Prove that \idr{beq_NatList l l} yields \idr{True} for every list
755 | \idr{l}.
756 |
757 | > beq_NatList : (l1, l2 : NatList) -> Bool
758 | > beq_NatList l1 l2 = ?beq_NatList_rhs
759 |
760 | > test_beq_NatList1 : beq_NatList Nil Nil = True
761 | > test_beq_NatList1 = ?test_beq_NatList1_rhs
762 |
763 | > test_beq_NatList2 : beq_NatList [1,2,3] [1,2,3] = True
764 | > test_beq_NatList2 = ?test_beq_NatList2_rhs
765 |
766 | > test_beq_NatList3 : beq_NatList [1,2,3] [1,2,4] = False
767 | > test_beq_NatList3 = ?test_beq_NatList3_rhs
768 |
769 | > beq_NatList_refl : (l : NatList) -> True = beq_NatList l l
770 | > beq_NatList_refl l = ?beq_NatList_refl_rhs
771 |
772 | $\square$
773 |
774 |
775 | === List Exercises, Part 2
776 |
777 |
778 | ==== Exercise: 3 stars, advanced (bag_proofs)
779 |
780 | Here are a couple of little theorems to prove about your definitions of bags
781 | from above.
782 |
783 | > count_member_nonzero : (s : Bag) -> lte 1 (count 1 (1 :: s)) = True
784 | > count_member_nonzero s = ?count_member_nonzero_rhs
785 |
786 | The following lemma about \idr{lte} might help you in the next proof.
787 |
788 | > ble_n_Sn : (n : Nat) -> lte n (S n) = True
789 | > ble_n_Sn Z = Refl
790 | > ble_n_Sn (S k) =
791 | > let inductiveHypothesis = ble_n_Sn k in
792 | > rewrite inductiveHypothesis in Refl
793 |
794 | > remove_decreases_count : (s : Bag) ->
795 | > lte (count 0 (remove_one 0 s)) (count 0 s) = True
796 | > remove_decreases_count s = ?remove_decreases_count_rhs
797 |
798 | $\square$
799 |
800 |
801 | ==== Exercise: 3 stars, optional (bag_count_sum)
802 |
803 | Write down an interesting theorem \idr{bag_count_sum} about bags involving the
804 | functions \idr{count} and \idr{sum}, and prove it. (You may find that the
805 | difficulty of the proof depends on how you defined \idr{count}!)
806 |
807 | > bag_count_sum : ?bag_count_sum
808 |
809 | $\square$
810 |
811 |
812 | ==== Exercise: 4 stars, advanced (rev_injective)
813 |
814 | Prove that the \idr{rev} function is injective -- that is,
815 |
816 | > rev_injective : (l1, l2 : NatList) -> rev l1 = rev l2 -> l1 = l2
817 | > rev_injective l1 l2 prf = ?rev_injective_rhs
818 |
819 | (There is a hard way and an easy way to do this.)
820 |
821 | $\square$
822 |
823 |
824 | == Options
825 |
826 | Suppose we want to write a function that returns the \idr{n}th element of some
827 | list. If we give it type \idr{Nat -> NatList -> Nat}, then we'll have to choose
828 | some number to return when the list is too short...
829 |
830 | > nth_bad : (l : NatList) -> (n : Nat) -> Nat
831 | > nth_bad Nil n = 42 -- arbitrary!
832 | > nth_bad (a :: l') n = case n == 0 of
833 | > True => a
834 | > False => nth_bad l' (pred n)
835 |
836 | This solution is not so good: If \idr{nth_bad} returns \idr{42}, we can't tell
837 | whether that value actually appears in the input without further processing. A
838 | better alternative is to change the return type of \idr{nth_bad} to include an
839 | error value as a possible outcome. We call this type \idr{NatOption}.
840 |
841 | > data NatOption : Type where
842 | > Some : Nat -> NatOption
843 | > None : NatOption
844 |
845 | We can then change the above definition of \idr{nth_bad} to return \idr{None}
846 | when the list is too short and \idr{Some a} when the list has enough members and
847 | \idr{a} appears at position \idr{n}. We call this new function \idr{nth_error}
848 | to indicate that it may result in an error.
849 |
850 | > nth_error : (l : NatList) -> (n : Nat) -> NatOption
851 | > nth_error Nil n = None
852 | > nth_error (a :: l') n = case n == 0 of
853 | > True => Some a
854 | > False => nth_error l' (pred n)
855 |
856 | > test_nth_error1 : nth_error [4,5,6,7] 0 = Some 4
857 | > test_nth_error1 = Refl
858 |
859 | > test_nth_error2 : nth_error [4,5,6,7] 3 = Some 7
860 | > test_nth_error2 = Refl
861 |
862 | > test_nth_error3 : nth_error [4,5,6,7] 9 = None
863 | > test_nth_error3 = Refl
864 |
865 | This example is also an opportunity to introduce one more small feature of the
866 | Idris programming language: conditional expressions...
867 |
868 | > nth_error' : (l : NatList) -> (n : Nat) -> NatOption
869 | > nth_error' Nil n = None
870 | > nth_error' (a :: l') n = if n == 0
871 | > then Some a
872 | > else nth_error' l' (pred n)
873 |
874 | \todo[inline]{Edit or remove this paragraph, doesn't seem to hold in Idris}
875 |
876 | Coq's conditionals are exactly like those found in any other language, with one
877 | small generalization. Since the boolean type is not built in, Coq actually
878 | supports conditional expressions over any inductively defined type with exactly
879 | two constructors. The guard is considered true if it evaluates to the first
880 | constructor in the Inductive definition and false if it evaluates to the second.
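
Idris's \idr{if ... then ... else ...}, by contrast, expects the condition to be
a \idr{Bool}; roughly speaking, it is sugar for an ordinary function. A sketch of
that function's shape (the actual Prelude version differs in details, e.g. it
makes the branches \idr{Lazy}):

```idris
ifThenElse : Bool -> a -> a -> a
ifThenElse True  t _ = t
ifThenElse False _ e = e
```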
881 |
882 | The function below pulls the \idr{Nat} out of a \idr{NatOption}, returning a
883 | supplied default in the \idr{None} case.
884 |
885 | > option_elim : (d : Nat) -> (o : NatOption) -> Nat
886 | > option_elim d (Some k) = k
887 | > option_elim d None = d
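
For instance, we might exercise it at the REPL like this (output shown
approximately):

```idris
λΠ> option_elim 0 (Some 3)
3 : Nat
λΠ> option_elim 0 None
0 : Nat
```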
888 |
889 |
890 | ==== Exercise: 2 stars (hd_error)
891 |
892 | Using the same idea, fix the \idr{hd} function from earlier so we don't have to
893 | pass a default element for the \idr{Nil} case.
894 |
895 | > hd_error : (l : NatList) -> NatOption
896 | > hd_error l = ?hd_error_rhs
897 |
898 | > test_hd_error1 : hd_error [] = None
899 | > test_hd_error1 = ?test_hd_error1_rhs
900 |
901 | > test_hd_error2 : hd_error [1] = Some 1
902 | > test_hd_error2 = ?test_hd_error2_rhs
903 |
904 | > test_hd_error3 : hd_error [5,6] = Some 5
905 | > test_hd_error3 = ?test_hd_error3_rhs
906 |
907 | $\square$
908 |
909 |
910 | ==== Exercise: 1 star, optional (option_elim_hd)
911 |
912 | This exercise relates your new \idr{hd_error} to the old \idr{hd}.
913 |
914 | > option_elim_hd : (l : NatList) -> (default : Nat) ->
915 | > hd default l = option_elim default (hd_error l)
916 | > option_elim_hd l default = ?option_elim_hd_rhs
917 |
918 | $\square$
919 |
920 |
921 | == Partial Maps
922 |
923 | As a final illustration of how data structures can be defined in Idris, here is
924 | a simple _partial map_ data type, analogous to the map or dictionary data
925 | structures found in most programming languages.
926 |
927 | First, we define a new inductive datatype \idr{Id} to serve as the "keys" of our
928 | partial maps.
929 |
930 | > data Id : Type where
931 | > MkId : Nat -> Id
932 |
933 | Internally, an \idr{Id} is just a number. Introducing a separate type by
934 | wrapping each \idr{Nat} with the tag \idr{MkId} makes definitions more readable
935 | and gives us the flexibility to change representations later if we wish.
936 |
937 | We'll also need an equality test for \idr{Id}s:
938 |
939 | > beq_id : (x1, x2 : Id) -> Bool
940 | > beq_id (MkId n1) (MkId n2) = n1 == n2
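
A quick sanity check at the REPL (output shown approximately):

```idris
λΠ> beq_id (MkId 3) (MkId 3)
True : Bool
λΠ> beq_id (MkId 3) (MkId 4)
False : Bool
```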
941 |
942 |
943 | ==== Exercise: 1 star (beq_id_refl)
944 |
945 | > beq_id_refl : (x : Id) -> True = beq_id x x
946 | > beq_id_refl x = ?beq_id_refl_rhs
947 |
948 | $\square$
949 |
950 | Now we define the type of partial maps:
951 |
952 | > namespace PartialMap
953 |
954 | > data PartialMap : Type where
955 | > Empty : PartialMap
956 | > Record : Id -> Nat -> PartialMap -> PartialMap
957 |
958 | This declaration can be read: "There are two ways to construct a
959 | \idr{PartialMap}: either using the constructor \idr{Empty} to represent an empty
960 | partial map, or by applying the constructor \idr{Record} to a key, a value, and
961 | an existing \idr{PartialMap} to construct a \idr{PartialMap} with an additional
962 | key-to-value mapping."
963 |
964 | The \idr{update} function overrides the entry for a given key in a partial map
965 | (or adds a new entry if the given key is not already present).
966 |
967 | > update : (d : PartialMap) -> (x : Id) -> (value : Nat) -> PartialMap
968 | > update d x value = Record x value d
969 |
970 | Last, the \idr{find} function searches a \idr{PartialMap} for a given key. It
971 | returns \idr{None} if the key was not found and \idr{Some val} if the key was
972 | associated with \idr{val}. If the same key is mapped to multiple values,
973 | \idr{find} will return the first one it encounters.
974 |
975 | > find : (x : Id) -> (d : PartialMap) -> NatOption
976 | > find x Empty = None
977 | > find x (Record y v d') = if beq_id x y
978 | > then Some v
979 | > else find x d'
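
Putting the pieces together, here is a small map built with \idr{update} and
queried with \idr{find}; the particular keys and values are chosen arbitrarily,
and the REPL output is shown approximately:

```idris
λΠ> find (MkId 1) (update (update Empty (MkId 2) 4) (MkId 1) 7)
Some 7 : NatOption
λΠ> find (MkId 3) (update (update Empty (MkId 2) 4) (MkId 1) 7)
None : NatOption
```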
980 |
981 |
982 | ==== Exercise: 1 star (update_eq)
983 |
984 | > update_eq : (d : PartialMap) -> (x : Id) -> (v : Nat) ->
985 | > find x (update d x v) = Some v
986 | > update_eq d x v = ?update_eq_rhs
987 |
988 | $\square$
989 |
990 |
991 | ==== Exercise: 1 star (update_neq)
992 |
993 | > update_neq : (d : PartialMap) -> (x, y : Id) -> (o : Nat) ->
994 | > beq_id x y = False ->
995 | > find x (update d y o) = find x d
996 | > update_neq d x y o prf = ?update_neq_rhs
997 |
998 | $\square$
999 |
1000 |
1001 | ==== Exercise: 2 stars (baz_num_elts)
1002 |
1003 | Consider the following inductive definition:
1004 |
1005 | > data Baz : Type where
1006 | > Baz1 : Baz -> Baz
1007 | > Baz2 : Baz -> Bool -> Baz
1008 |
1009 | How _many_ elements does the type \idr{Baz} have? (Answer in English or the
1010 | natural language of your choice.)
1011 |
1012 | $\square$
1013 |
--------------------------------------------------------------------------------
/src/Makefile:
--------------------------------------------------------------------------------
1 | .DEFAULT_GOAL := all.pdf
2 |
3 |
4 | PANDOC ?= pandoc
5 |
6 |
7 | PANDOC_FLAGS := \
8 | --filter=pandoc-minted.hs \
9 | --pdf-engine=xelatex \
10 | --top-level-division=chapter \
11 | -f markdown+lhs+tex_math_single_backslash \
12 | -t latex+lhs
13 |
14 |
15 | LIDR_FILES := Preface.lidr \
16 | Basics.lidr \
17 | Induction.lidr \
18 | Lists.lidr \
19 | Poly.lidr \
20 | Logic.lidr \
21 | IndProp.lidr \
22 | Maps.lidr \
23 | ProofObjects.lidr \
24 | Rel.lidr \
25 | Imp.lidr \
26 | ImpParser.lidr \
27 | ImpCEvalFun.lidr
28 | # TODO: Add more chapters, in order, here.
29 |
30 |
31 | TEX_FILES := $(LIDR_FILES:.lidr=.tex)
32 |
33 |
34 | .PHONY: all clean clean-tex
35 |
36 |
37 | clean: clean-tex
38 | rm -rf all.* auto _minted-all $(TEX_FILES)
39 |
40 |
41 | clean-tex: all.tex
42 | latexmk -pdf -CA all.tex
43 |
44 |
45 | all.pdf: all.tex
46 | latexmk -gg -pdf $<
47 |
48 |
49 | all.tex: book.tex $(TEX_FILES)
50 | $(PANDOC) $(PANDOC_FLAGS) -N --toc -o $@ \
51 | $(foreach texfile,$(TEX_FILES),-A $(texfile)) -A footer.tex $<
52 |
53 |
54 | %.tex: %.lidr
55 | $(PANDOC) $(PANDOC_FLAGS) -o $@ $<
56 |
--------------------------------------------------------------------------------
/src/Maps.lidr:
--------------------------------------------------------------------------------
1 | = Maps: Total and Partial Maps
2 |
3 | > module Maps
4 | >
5 | > import Logic
6 | > import IndProp
7 | >
8 | > %access public export
9 | >
10 |
11 | Maps (or dictionaries) are ubiquitous data structures both generally and in the
12 | theory of programming languages in particular; we're going to need them in many
13 | places in the coming chapters. They also make a nice case study using ideas
14 | we've seen in previous chapters, including building data structures out of
15 | higher-order functions (from `Basics` and `Poly`) and the use of reflection to
16 | streamline proofs (from `IndProp`).
17 |
18 | We'll define two flavors of maps: _total_ maps, which include a "default"
19 | element to be returned when a key being looked up doesn't exist, and _partial_
20 | maps, which return a \idr{Maybe} to indicate success or failure. The latter is
21 | defined in terms of the former, using \idr{Nothing} as the default element.
22 |
23 |
24 | == The Idris Standard Library
25 |
26 | \todo[inline]{Edit}
27 |
28 | One small digression before we get to maps.
29 |
30 | Unlike the chapters we have seen so far, this one does not `Require Import` the
31 | chapter before it (and, transitively, all the earlier chapters). Instead, in
32 | this chapter and from now on, we're going to import the definitions and theorems
33 | we need directly from Idris's standard library. You should not notice much
34 | difference, though, because we've been careful to name our own definitions and
35 | theorems the same as their counterparts in the standard library, wherever they
36 | overlap.
37 |
38 | ```coq
39 | Require Import Coq.Arith.Arith.
40 | Require Import Coq.Bool.Bool.
41 | Require Import Coq.Strings.String.
42 | Require Import Coq.Logic.FunctionalExtensionality.
43 | ```
44 |
45 | Documentation for the standard library can be found at
46 | \url{https://www.idris-lang.org/docs/current/}.
47 |
48 | The \idr{:search} command is a good way to look for theorems involving objects
49 | of specific types. Take a minute now to experiment with it.
50 |
51 |
52 | == Identifiers
53 |
54 | First, we need a type for the keys that we use to index into our maps. For this
55 | purpose, we again use the type \idr{Id} from the `Lists` chapter. To make this
56 | chapter self contained, we repeat its definition here, together with the
57 | equality comparison function for \idr{Id} and its fundamental property.
58 |
59 | > data Id : Type where
60 | > MkId : String -> Id
61 | >
62 | > beq_id : (x1, x2 : Id) -> Bool
63 | > beq_id (MkId n1) (MkId n2) = decAsBool $ decEq n1 n2
64 | >
65 |
66 | \todo[inline]{Edit}
67 |
68 | (The function \idr{decEq} comes from Idris's prelude. If you check its
69 | result type, you'll see that it does not actually return a \idr{Bool}, but
70 | rather a type that looks like \idr{Either (x = y) (Not (x = y))}, called
71 | \idr{Dec}, which can be thought of as an "evidence-carrying boolean." Formally, an
72 | element of \idr{Dec (x=y)} is either a proof that two things are equal or a
73 | proof that they are unequal, together with a tag indicating which. But for
74 | present purposes you can think of it as just a fancy \idr{Bool}.)
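
You can ask the REPL for its type; shown approximately, it is:

```idris
λΠ> :t decEq
decEq : DecEq t => (x1 : t) -> (x2 : t) -> Dec (x1 = x2)
```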
75 |
76 | > beq_id_refl : (x : Id) -> True = beq_id x x
77 | > beq_id_refl (MkId n) with (decEq n n)
78 | > beq_id_refl _ | Yes _ = Refl
79 | > beq_id_refl _ | No contra = absurd $ contra Refl
80 | >
81 |
82 | The following useful property of \idr{beq_id} follows from an analogous lemma
83 | about strings:
84 |
85 | > beq_id_true_iff : (beq_id x y = True) <-> x = y
86 | > beq_id_true_iff = (bto, bfro)
87 | > where
88 | > bto : (beq_id x y = True) -> x = y
89 | > bto {x=MkId n1} {y=MkId n2} prf with (decEq n1 n2)
90 | > bto Refl | Yes eq = cong {f=MkId} eq
91 | > bto Refl | No _ impossible
92 | >
93 | > idInj : MkId x = MkId y -> x = y
94 | > idInj Refl = Refl
95 | >
96 | > bfro : (x = y) -> beq_id x y = True
97 | > bfro {x=MkId n1} {y=MkId n2} prf with (decEq n1 n2)
98 | > bfro _ | Yes _ = Refl
99 | > bfro prf | No contra = absurd $ contra $ idInj prf
100 | >
101 |
102 | Similarly:
103 |
104 | > beq_id_false_iff : (beq_id x y = False) <-> Not (x = y)
105 | > beq_id_false_iff = (to, fro)
106 | > where
107 | > to : (beq_id x y = False) -> Not (x = y)
108 | > to beqf = (snd not_true_iff_false) beqf . (snd beq_id_true_iff)
109 | >
110 | > fro : (Not (x = y)) -> beq_id x y = False
111 | > fro noteq = (fst not_true_iff_false) $ noteq . (fst beq_id_true_iff)
112 | >
113 |
114 |
115 | == Total Maps
116 |
117 | Our main job in this chapter will be to build a definition of partial maps that
118 | is similar in behavior to the one we saw in the `Lists` chapter, plus
119 | accompanying lemmas about its behavior.
120 |
121 | This time around, though, we're going to use _functions_, rather than lists of
122 | key-value pairs, to build maps. The advantage of this representation is that it
123 | offers a more _extensional_ view of maps, where two maps that respond to queries
124 | in the same way will be represented as literally the same thing (the very same
125 | function), rather than just "equivalent" data structures. This, in turn,
126 | simplifies proofs that use maps.
127 |
128 | We build partial maps in two steps. First, we define a type of _total maps_ that
129 | return a default value when we look up a key that is not present in the map.
130 |
131 | > TotalMap : Type -> Type
132 | > TotalMap a = Id -> a
133 | >
134 |
135 | Intuitively, a total map over an element type \idr{a} is just a function that
136 | can be used to look up \idr{Id}s, yielding \idr{a}s.
137 |
138 | The function \idr{t_empty} yields an empty total map, given a default element;
139 | this map always returns the default element when applied to any id.
140 |
141 | > t_empty : (v : a) -> TotalMap a
142 | > t_empty v = \_ => v
143 | >
144 |
145 | We can also write this as:
146 |
147 | ```idris
148 | t_empty = const
149 | ```
150 |
151 | More interesting is the \idr{update} function, which (as before) takes a map
152 | \idr{m}, a key \idr{x}, and a value \idr{v} and returns a new map that takes
153 | \idr{x} to \idr{v} and takes every other key to whatever \idr{m} does.
154 |
155 | > t_update : (x : Id) -> (v : a) -> (m : TotalMap a) -> TotalMap a
156 | > t_update x v m = \x' => if beq_id x x' then v else m x'
157 | >
158 |
159 | This definition is a nice example of higher-order programming: \idr{t_update}
160 | takes a _function_ \idr{m} and yields a new function \idr{\x' => ...} that
161 | behaves like the desired map.
162 |
163 | For example, we can build a map taking \idr{Id}s to \idr{Bool}s, where \idr{Id
164 | 3} is mapped to \idr{True} and every other key is mapped to \idr{False}, like
165 | this:
166 |
167 | \todo[inline]{Seems like a wrong description in the book here}
168 |
169 | > examplemap : TotalMap Bool
170 | > examplemap = t_update (MkId "foo") False $
171 | > t_update (MkId "bar") True $
172 | > t_empty False
173 | >
174 |
175 | This completes the definition of total maps. Note that we don't need to define a
176 | \idr{find} operation because it is just function application!
177 |
178 | > update_example1 : examplemap (MkId "baz") = False
179 | > update_example1 = Refl
180 | >
181 | > update_example2 : examplemap (MkId "foo") = False
182 | > update_example2 = Refl
183 | >
184 | > update_example3 : examplemap (MkId "quux") = False
185 | > update_example3 = Refl
186 | >
187 | > update_example4 : examplemap (MkId "bar") = True
188 | > update_example4 = Refl
189 | >
190 |
191 | To use maps in later chapters, we'll need several fundamental facts about how
192 | they behave. Even if you don't work the following exercises, make sure you
193 | thoroughly understand the statements of the lemmas! (Some of the proofs require
194 | the functional extensionality axiom, which is discussed in the `Logic` chapter.)
195 |
196 |
197 | ==== Exercise: 1 star, optional (t_apply_empty)
198 |
199 | First, the empty map returns its default element for all keys:
200 |
201 | > t_apply_empty : t_empty v x = v
202 | > t_apply_empty = ?t_apply_empty_rhs
203 | >
204 |
205 | $\square$
206 |
207 |
208 | ==== Exercise: 2 stars, optional (t_update_eq)
209 |
210 | Next, if we update a map \idr{m} at a key \idr{x} with a new value \idr{v} and
211 | then look up \idr{x} in the map resulting from the \idr{update}, we get back
212 | \idr{v}:
213 |
214 | > t_update_eq : (t_update x v m) x = v
215 | > t_update_eq = ?t_update_eq_rhs
216 | >
217 |
218 | $\square$
219 |
220 |
221 | ==== Exercise: 2 stars, optional (t_update_neq)
222 |
223 | On the other hand, if we update a map \idr{m} at a key \idr{x1} and then look up
224 | a _different_ key \idr{x2} in the resulting map, we get the same result that
225 | \idr{m} would have given:
226 |
227 | > t_update_neq : Not (x1 = x2) -> (t_update x1 v m) x2 = m x2
228 | > t_update_neq neq = ?t_update_neq_rhs
229 | >
230 |
231 | $\square$
232 |
233 |
234 | ==== Exercise: 2 stars, optional (t_update_shadow)
235 |
236 | If we update a map \idr{m} at a key \idr{x} with a value \idr{v1} and then
237 | update again with the same key \idr{x} and another value \idr{v2}, the resulting
238 | map behaves the same (gives the same result when applied to any key) as the
239 | simpler map obtained by performing just the second \idr{update} on \idr{m}:
240 |
241 | > t_update_shadow : t_update x v2 $ t_update x v1 m = t_update x v2 m
242 | > t_update_shadow = ?t_update_shadow_rhs
243 | >
244 |
245 | $\square$
246 |
247 | For the final two lemmas about total maps, it's convenient to use the reflection
248 | idioms introduced in chapter `IndProp`. We begin by proving a fundamental
249 | _reflection lemma_ relating the equality proposition on \idr{Id}s with the
250 | boolean function \idr{beq_id}.
251 |
252 |
253 | ==== Exercise: 2 stars, optional (beq_idP)
254 |
255 | Use the proof of \idr{beq_natP} in chapter `IndProp` as a template to prove the
256 | following:
257 |
258 | > beq_idP : {x, y : Id} -> Reflect (x = y) (beq_id x y)
259 | > beq_idP = ?beq_idP_rhs
260 | >
261 |
262 | $\square$
263 |
264 | Now, given \idr{Id}s \idr{x1} and \idr{x2}, we can use \idr{with (beq_idP x1
265 | x2)} to simultaneously perform case analysis on the result of \idr{beq_id x1 x2}
266 | and generate hypotheses about the equality (in the sense of \idr{=}) of \idr{x1}
267 | and \idr{x2}.
268 |
269 |
270 | ==== Exercise: 2 stars (t_update_same)
271 |
272 | With the example in chapter `IndProp` as a template, use \idr{beq_idP} to prove
273 | the following theorem, which states that if we update a map to assign key
274 | \idr{x} the same value as it already has in \idr{m}, then the result is equal to
275 | \idr{m}:
276 |
277 | > t_update_same : t_update x (m x) m = m
278 | > t_update_same = ?t_update_same_rhs
279 | >
280 |
281 | $\square$
282 |
283 |
284 | ==== Exercise: 3 stars, recommended (t_update_permute)
285 |
286 | Use \idr{beq_idP} to prove one final property of the \idr{update} function: If
287 | we update a map \idr{m} at two distinct keys, it doesn't matter in which order
288 | we do the updates.
289 |
290 | > t_update_permute : Not (x2 = x1) -> t_update x1 v1 $ t_update x2 v2 m
291 | > = t_update x2 v2 $ t_update x1 v1 m
292 | > t_update_permute neq = ?t_update_permute_rhs
293 | >
294 |
295 | $\square$
296 |
297 |
298 | == Partial maps
299 |
300 | Finally, we define _partial maps_ on top of total maps. A partial map with
301 | elements of type \idr{a} is simply a total map with elements of type \idr{Maybe
302 | a} and default element \idr{Nothing}.
303 |
304 | > PartialMap : Type -> Type
305 | > PartialMap a = TotalMap (Maybe a)
306 | >
307 | > empty : PartialMap a
308 | > empty = t_empty Nothing
309 | >
310 | > update : (x : Id) -> (v : a) -> (m : PartialMap a) -> PartialMap a
311 | > update x v m = t_update x (Just v) m
312 | >
313 |
314 | We now straightforwardly lift all of the basic lemmas about total maps to
315 | partial maps.
316 |
317 | > apply_empty : empty {a} x = Nothing {a}
318 | > apply_empty = Refl
319 | >
320 | > update_eq : (update x v m) x = Just v
321 | > update_eq {v} = t_update_eq {v=Just v}
322 | >
323 | > update_neq : Not (x2 = x1) -> (update x2 v m) x1 = m x1
324 | > update_neq {x1} {x2} {v} = t_update_neq {x1=x2} {x2=x1} {v=Just v}
325 | >
326 | > update_shadow : update x v2 $ update x v1 m = update x v2 m
327 | > update_shadow {v1} {v2} = t_update_shadow {v1=Just v1} {v2=Just v2}
328 | >
329 | > update_same : m x = Just v -> update x v m = m
330 | > update_same prf = rewrite sym prf in t_update_same
331 | >
332 | > update_permute : Not (x2 = x1) -> update x1 v1 $ update x2 v2 m
333 | > = update x2 v2 $ update x1 v1 m
334 | > update_permute {v1} {v2} = t_update_permute {v1=Just v1} {v2=Just v2}
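
As a small illustration of the lifted definitions, here is a sketch of a partial
map and a couple of lookups; the key names are chosen arbitrarily:

```idris
examplepmap : PartialMap Nat
examplepmap = update (MkId "three") 3 $ update (MkId "five") 5 empty

-- Lookup is again just function application:
--   examplepmap (MkId "three")  evaluates to  Just 3
--   examplepmap (MkId "seven")  evaluates to  Nothing
```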
335 |
--------------------------------------------------------------------------------
/src/Preface.lidr:
--------------------------------------------------------------------------------
1 | = Preface
2 |
3 | == Welcome
4 |
5 | This electronic book is a course on _Software Foundations_, the mathematical
6 | underpinnings of reliable software. Topics include basic concepts of logic,
7 | computer-assisted theorem proving, the Idris programming language, functional
8 | programming, operational semantics, Hoare logic, and static type systems. The
9 | exposition is intended for a broad range of readers, from advanced
10 | undergraduates to PhD students and researchers. No specific background in logic
11 | or programming languages is assumed, though a degree of mathematical maturity
12 | will be helpful.
13 |
14 | The principal novelty of the course is that it is one hundred percent formalized
15 | and machine-checked: the entire text is Literate Idris. It is intended to be
16 | read alongside an interactive session with Idris. All the details in the text
17 | are fully formalized in Idris, and the exercises are designed to be worked using
18 | Idris.
19 |
20 | The files are organized into a sequence of core chapters, covering about one
21 | semester's worth of material and organized into a coherent linear narrative,
22 | plus a number of "appendices" covering additional topics. All the core chapters
23 | are suitable for both upper-level undergraduate and graduate students.
24 |
25 |
26 | == Overview
27 |
28 | Building reliable software is hard. The scale and complexity of modern systems,
29 | the number of people involved in building them, and the range of demands placed
30 | on them render it extremely difficult to build software that is even
31 | more-or-less correct, much less 100% correct. At the same time, the increasing
32 | degree to which information processing is woven into every aspect of society
33 | continually amplifies the cost of bugs and insecurities.
34 |
35 | Computer scientists and software engineers have responded to these challenges by
36 | developing a whole host of techniques for improving software reliability,
37 | ranging from recommendations about managing software projects and organizing
38 | programming teams (e.g., extreme programming) to design philosophies for
39 | libraries (e.g., model-view-controller, publish-subscribe, etc.) and programming
40 | languages (e.g., object-oriented programming, aspect-oriented programming,
41 | functional programming, ...) to mathematical techniques for specifying and
42 | reasoning about properties of software and tools for helping validate these
43 | properties.
44 |
45 | The present course is focused on this last set of techniques. The text weaves
46 | together five conceptual threads:
47 |
48 | 1. basic tools from _logic_ for making and justifying precise claims about
49 | programs;
50 | 2. the use of _proof assistants_ to construct rigorous logical arguments;
51 |
52 | 3. the idea of _functional programming_, both as a method of programming that
53 | simplifies reasoning about programs and as a bridge between programming and
54 | logic;
55 |
56 | 4. formal techniques for _reasoning about the properties of specific programs_
57 | (e.g., the fact that a sorting function or a compiler obeys some formal
58 | specification); and
59 |
60 | 5. the use of _type systems_ for establishing well-behavedness guarantees for
61 | _all_ programs in a given programming language (e.g., the fact that
62 | well-typed Java programs cannot be subverted at runtime).
63 |
64 | Each of these topics is easily rich enough to fill a whole course in its own
65 | right, so tackling all of them together naturally means that much will be left
66 | unsaid. Nevertheless, we hope readers will find that the themes illuminate and
67 | amplify each other and that bringing them together creates a foundation from
68 | which it will be easy to dig into any of them more deeply. Some suggestions for
69 | further reading can be found in the [Postscript] chapter. Bibliographic
70 | information for all cited works can be found in the [Bib] chapter.
71 |
72 |
73 | === Logic
74 |
75 | Logic is the field of study whose subject matter is _proofs_ -- unassailable
76 | arguments for the truth of particular propositions. Volumes have been written
77 | about the central role of logic in computer science. Manna and Waldinger called
78 | it "the calculus of computer science," while Halpern et al.'s paper _On the
79 | Unusual Effectiveness of Logic in Computer Science_ catalogs scores of ways in
80 | which logic offers critical tools and insights. Indeed, they observe that "As a
81 | matter of fact, logic has turned out to be significantly more effective in
82 | computer science than it has been in mathematics. This is quite remarkable,
83 | especially since much of the impetus for the development of logic during the
84 | past one hundred years came from mathematics."
85 |
86 | In particular, the fundamental notion of inductive proofs is ubiquitous in all
87 | of computer science. You have surely seen them before, in contexts from discrete
88 | math to analysis of algorithms, but in this course we will examine them much
89 | more deeply than you have probably done so far.
90 |
91 |
92 | === Proof Assistants
93 |
94 | The flow of ideas between logic and computer science has not been in just one
95 | direction: CS has also made important contributions to logic. One of these has
96 | been the development of software tools for helping construct proofs of logical
97 | propositions. These tools fall into two broad categories:
98 |
99 | - _Automated theorem provers_ provide "push-button" operation: you give them a
100 | proposition and they return either _true_, _false_, or _ran out of time_.
101 | Although their capabilities are limited to fairly specific sorts of
102 | reasoning, they have matured tremendously in recent years and are used now
103 | in a huge variety of settings. Examples of such tools include SAT solvers,
104 | SMT solvers, and model checkers.
105 |
106 | - _Proof assistants_ are hybrid tools that automate the more routine aspects
107 | of building proofs while depending on human guidance for more difficult
108 | aspects. Widely used proof assistants include Isabelle, Agda, Twelf, ACL2,
109 | PVS, Coq, and Idris among many others.
110 |
111 | This course is based around Coq, a proof assistant that has been under
112 | development, mostly in France, since 1983 and that in recent years has attracted
113 | a large community of users in both research and industry. Coq provides a rich
114 | environment for interactive development of machine-checked formal reasoning. The
115 | kernel of the Coq system is a simple proof-checker, which guarantees that only
116 | correct deduction steps are performed. On top of this kernel, the Coq
117 | environment provides high-level facilities for proof development, including
118 | powerful tactics for constructing complex proofs semi-automatically, and a large
119 | library of common definitions and lemmas.
120 |
121 | Coq has been a critical enabler for a huge variety of work across computer
122 | science and mathematics:
123 |
124 | - As a _platform for modeling programming languages_, it has become a standard
125 | tool for researchers who need to describe and reason about complex language
126 | definitions. It has been used, for example, to check the security of the
127 | JavaCard platform, obtaining the highest level of common criteria
128 | certification, and for formal specifications of the x86 and LLVM instruction
129 | sets and programming languages such as C.
130 |
131 | - As an _environment for developing formally certified software_, Coq has been
132 | used, for example, to build CompCert, a fully-verified optimizing compiler
133 | for C, for proving the correctness of subtle algorithms involving floating
134 | point numbers, and as the basis for CertiCrypt, an environment for reasoning
135 | about the security of cryptographic algorithms.
136 |
137 | - As a _realistic environment for functional programming with dependent
138 | types_, it has inspired numerous innovations. For example, the Ynot project
139 | at Harvard embedded "relational Hoare reasoning" (an extension of the _Hoare
140 | Logic_ we will see later in this course) in Coq.
141 |
142 | - As a _proof assistant for higher-order logic_, it has been used to validate
143 | a number of important results in mathematics. For example, its ability to
144 | include complex computations inside proofs made it possible to develop the
145 | first formally verified proof of the 4-color theorem. This proof had
146 | previously been controversial among mathematicians because part of it
147 | included checking a large number of configurations using a program. In the
148 | Coq formalization, everything is checked, including the correctness of the
149 | computational part. More recently, an even more massive effort led to a Coq
150 | formalization of the Feit-Thompson Theorem -- the first major step in the
151 | classification of finite simple groups.
152 |
153 | By the way, in case you're wondering about the name, here's what the official
154 | Coq web site says: "Some French computer scientists have a tradition of naming
155 | their software as animal species: Caml, Elan, Foc or Phox are examples of this
156 | tacit convention. In French, 'coq' means rooster, and it sounds like the
157 | initials of the Calculus of Constructions (CoC) on which it is based." The
158 | rooster is also the national symbol of France, and C-o-q are the first three
159 | letters of the name of Thierry Coquand, one of Coq's early developers.
160 |
161 |
162 | === Functional Programming
163 |
164 | The term _functional programming_ refers both to a collection of programming
165 | idioms that can be used in almost any programming language and to a family of
166 | programming languages designed to emphasize these idioms, including Haskell,
167 | OCaml, Standard ML, F#, Scala, Scheme, Racket, Common Lisp, Clojure, Erlang,
168 | and Coq.
169 |
170 | Functional programming has been developed over many decades -- indeed, its roots
171 | go back to Church's lambda-calculus, which was invented in the 1930s, before
172 | there were even any computers! But since the early '90s it has enjoyed a surge
173 | of interest among industrial engineers and language designers, playing a key
174 | role in high-value systems at companies like Jane St. Capital, Microsoft,
175 | Facebook, and Ericsson.
176 |
177 | The most basic tenet of functional programming is that, as much as possible,
178 | computation should be _pure_, in the sense that the only effect of execution
179 | should be to produce a result: the computation should be free from _side
180 | effects_ such as I/O, assignments to mutable variables, redirecting pointers,
181 | etc. For example, whereas an _imperative_ sorting function might take a list of
182 | numbers and rearrange its pointers to put the list in order, a pure sorting
183 | function would take the original list and return a _new_ list containing the
184 | same numbers in sorted order.
185 |
186 | One significant benefit of this style of programming is that it makes programs
187 | easier to understand and reason about. If every operation on a data structure
188 | yields a new data structure, leaving the old one intact, then there is no need
189 | to worry about how that structure is being shared and whether a change by one
190 | part of the program might break an invariant that another part of the program
191 | relies on. These considerations are particularly critical in concurrent
192 | programs, where every piece of mutable state that is shared between threads is a
193 | potential source of pernicious bugs. Indeed, a large part of the recent interest
194 | in functional programming in industry is due to its simpler behavior in the
195 | presence of concurrency.
196 |
197 | Another reason for the current excitement about functional programming is
198 | related to the first: functional programs are often much easier to parallelize
199 | than their imperative counterparts. If running a computation has no effect other
200 | than producing a result, then it does not matter _where_ it is run. Similarly,
201 | if a data structure is never modified destructively, then it can be copied
202 | freely, across cores or across the network. Indeed, the "Map-Reduce" idiom,
203 | which lies at the heart of massively distributed query processors like Hadoop
204 | and is used by Google to index the entire web, is a classic example of functional
205 | programming.
206 |
207 | For this course, functional programming has yet another significant attraction:
208 | it serves as a bridge between logic and computer science. Indeed, Coq itself can
209 | be viewed as a combination of a small but extremely expressive functional
210 | programming language plus a set of tools for stating and proving logical
211 | assertions. Moreover, when we come to look more closely, we find that these two
212 | sides of Coq are actually aspects of the very same underlying machinery -- i.e.,
213 | _proofs are programs_.
214 |
215 |
216 | === Program Verification
217 |
218 | Approximately the first third of the book is devoted to developing the
219 | conceptual framework of logic and functional programming and gaining enough
220 | fluency with Coq to use it for modeling and reasoning about nontrivial
221 | artifacts. From this point on, we increasingly turn our attention to two broad
222 | topics of critical importance to the enterprise of building reliable software
223 | (and hardware): techniques for proving specific properties of particular
224 | _programs_ and for proving general properties of whole programming _languages_.
225 |
226 | For both of these, the first thing we need is a way of representing programs as
227 | mathematical objects, so we can talk about them precisely, together with ways of
228 | describing their behavior in terms of mathematical functions or relations. Our
229 | tools for these tasks are _abstract syntax_ and _operational semantics_, a
230 | method of specifying programming languages by writing abstract interpreters. At
231 | the beginning, we work with operational semantics in the so-called "big-step"
232 | style, which leads to somewhat simpler and more readable definitions when it is
233 | applicable. Later on, we switch to a more detailed "small-step" style, which
234 | helps make some useful distinctions between different sorts of "nonterminating"
235 | program behaviors and is applicable to a broader range of language features,
236 | including concurrency.
237 |
238 | The first programming language we consider in detail is _Imp_, a tiny toy
239 | language capturing the core features of conventional imperative programming:
240 | variables, assignment, conditionals, and loops. We study two different ways of
241 | reasoning about the properties of Imp programs.
242 |
243 | First, we consider what it means to say that two Imp programs are _equivalent_
244 | in the intuitive sense that they yield the same behavior when started in any
245 | initial memory state. This notion of equivalence then becomes a criterion for
246 | judging the correctness of _metaprograms_ -- programs that manipulate other
247 | programs, such as compilers and optimizers. We build a simple optimizer for Imp
248 | and prove that it is correct.
249 |
250 | Second, we develop a methodology for proving that particular Imp programs
251 | satisfy formal specifications of their behavior. We introduce the notion of
252 | _Hoare triples_ -- Imp programs annotated with pre- and post-conditions
253 | describing what should be true about the memory in which they are started and
254 | what they promise to make true about the memory in which they terminate -- and
255 | the reasoning principles of _Hoare Logic_, a "domain-specific logic" specialized
256 | for convenient compositional reasoning about imperative programs, with concepts
257 | like "loop invariant" built in.
258 |
259 | This part of the course is intended to give readers a taste of the key ideas and
260 | mathematical tools used in a wide variety of real-world software and hardware
261 | verification tasks.
262 |
263 |
264 | === Type Systems
265 |
266 | Our final major topic, covering approximately the last third of the course, is
267 | _type systems_, a powerful set of tools for establishing properties of _all_
268 | programs in a given language.
269 |
270 | Type systems are the best established and most popular example of a highly
271 | successful class of formal verification techniques known as _lightweight formal
272 | methods_. These are reasoning techniques of modest power -- modest enough that
273 | automatic checkers can be built into compilers, linkers, or program analyzers
274 | and thus be applied even by programmers unfamiliar with the underlying theories.
275 | Other examples of lightweight formal methods include hardware and software model
276 | checkers, contract checkers, and run-time property monitoring techniques for
277 | detecting when some component of a system is not behaving according to
278 | specification.
279 |
280 | This topic brings us full circle: the language whose properties we study in this
281 | part, the _simply typed lambda-calculus_, is essentially a simplified model of
282 | the core of Coq itself!
283 |
284 |
285 | === Further Reading
286 |
287 | This text is intended to be self contained, but readers looking for a deeper
288 | treatment of a particular topic will find suggestions for further reading in the
289 | [Postscript] chapter.
290 |
291 |
292 | == Practicalities
293 |
294 | === Chapter Dependencies
295 |
296 | A diagram of the dependencies between chapters and some suggested
297 | paths through the material can be found in the file [deps.html].
298 |
299 | === System Requirements
300 |
301 | Coq runs on Windows, Linux, and OS X. You will need:
302 |
303 | - A current installation of Coq, available from the Coq home page. Everything
304 | should work with version 8.4. (Version 8.5 will _not_ work, due to a few
305 | incompatible changes in Coq between 8.4 and 8.5.)
306 |
307 | - An IDE for interacting with Coq. Currently, there are two choices:
308 |
309 | - Proof General is an Emacs-based IDE. It tends to be preferred by users who
310 | are already comfortable with Emacs. It requires a separate installation
311 | (google "Proof General").
312 |
313 | - CoqIDE is a simpler stand-alone IDE. It is distributed with Coq, so it
314 | should "just work" once you have Coq installed. It can also be compiled
315 | from scratch, but on some platforms this may involve installing additional
316 | packages for GUI libraries and such.
317 |
318 | === Exercises
319 |
320 | Each chapter includes numerous exercises. Each is marked with a "star rating,"
321 | which can be interpreted as follows:
322 |
323 | - One star: easy exercises that underscore points in the text and that, for
324 | most readers, should take only a minute or two. Get in the habit of working
325 | these as you reach them.
326 |
327 | - Two stars: straightforward exercises (five or ten minutes).
328 |
329 | - Three stars: exercises requiring a bit of thought (ten minutes to half an
330 | hour).
331 |
332 | - Four and five stars: more difficult exercises (half an hour and up).
333 |
334 | Also, some exercises are marked "advanced", and some are marked "optional."
335 | Doing just the non-optional, non-advanced exercises should provide good coverage
336 | of the core material. Optional exercises provide a bit of extra practice with
337 | key concepts and introduce secondary themes that may be of interest to some
338 | readers. Advanced exercises are for readers who want an extra challenge (and, in
339 | return, a deeper contact with the material).
340 |
341 | _Please do not post solutions to the exercises in any public place_: Software
342 | Foundations is widely used both for self-study and for university courses.
343 | Having solutions easily available makes it much less useful for courses, which
344 | typically have graded homework assignments. The authors especially request that
345 | readers not post solutions to the exercises anyplace where they can be found by
346 | search engines.
347 |
348 |
349 | === Downloading the Coq Files
350 |
351 | A tar file containing the full sources for the "release version" of these notes
352 | (as a collection of Coq scripts and HTML files) is available here:
353 |
354 | http://www.cis.upenn.edu/~bcpierce/sf
355 |
356 | If you are using the notes as part of a class, you may be given access to a
357 | locally extended version of the files, which you should use instead of the
358 | release version.
359 |
360 |
361 | == Translations
362 |
363 | Thanks to the efforts of a team of volunteer translators, _Software Foundations_
364 | can now be enjoyed in Japanese at [http://proofcafe.org/sf]. A Chinese
365 | translation is underway.
366 |
--------------------------------------------------------------------------------
/src/ProofObjects.lidr:
--------------------------------------------------------------------------------
1 | = ProofObjects : The Curry-Howard Correspondence
2 |
3 | > module ProofObjects
4 | >
5 |
6 | \say{\textit{Algorithms are the computational content of proofs.}}
7 | -- Robert Harper
8 |
9 | > import Logic
10 | > import IndProp
11 | >
12 |
13 | We have seen that Idris has mechanisms both for _programming_, using inductive
14 | data types like \idr{Nat} or \idr{List} and functions over these types, and for
15 | _proving_ properties of these programs, using inductive propositions (like
16 | \idr{Ev}), implication, universal quantification, and the like. So far, we have
17 | mostly treated these mechanisms as if they were quite separate, and for many
18 | purposes this is a good way to think. But we have also seen hints that Idris's
19 | programming and proving facilities are closely related. For example, the keyword
20 | \idr{data} is used to declare both data types and propositions, and \idr{->} is
21 | used both to describe the type of functions on data and logical implication.
22 | This is not just a syntactic accident! In fact, programs and proofs in Idris are
23 | almost the same thing. In this chapter we will study how this works.
24 |
25 | We have already seen the fundamental idea: provability in Idris is represented
26 | by concrete _evidence_. When we construct the proof of a basic proposition, we
27 | are actually building a tree of evidence, which can be thought of as a data
28 | structure.
29 |
30 | If the proposition is an implication like \idr{A -> B}, then its proof will be
31 | an evidence _transformer_: a recipe for converting evidence for \idr{A} into
32 | evidence for \idr{B}. So at a fundamental level, proofs are simply programs that
33 | manipulate evidence.
34 |
35 | Question: If evidence is data, what are propositions themselves?
36 |
37 | Answer: They are types!
38 |
39 | Look again at the formal definition of the \idr{Ev} property.
40 |
41 | ```idris
42 | data Ev : Nat -> Type where
43 | Ev_0 : Ev Z
44 | Ev_SS : {n : Nat} -> Ev n -> Ev (S (S n))
45 | ```
46 |
47 | Suppose we introduce an alternative pronunciation of "\idr{:}". Instead of "has
48 | type," we can say "is a proof of." For example, the second line in the
49 | definition of \idr{Ev} declares that \idr{Ev_0 : Ev 0}. Instead of "\idr{Ev_0}
50 | has type \idr{Ev 0}," we can say that "\idr{Ev_0} is a proof of \idr{Ev 0}."
51 |
52 | This pun between types and propositions — between \idr{:} as "has type" and
53 | \idr{:} as "is a proof of" or "is evidence for" — is called the Curry-Howard
54 | correspondence. It proposes a deep connection between the world of logic and the
55 | world of computation:
56 |
57 | propositions ~ types
58 | proofs ~ data values
59 |
60 | \todo[inline]{Add http://dl.acm.org/citation.cfm?id=2699407 as a link}
61 |
62 | See [Wadler 2015] for a brief history and an up-to-date exposition.
63 |
64 | Many useful insights follow from this connection. To begin with, it gives us a
65 | natural interpretation of the type of the \idr{Ev_SS} constructor:
66 |
67 | ```idris
68 | λΠ> :t Ev_SS
69 | Ev_SS : Ev n -> Ev (S (S n))
70 | ```
71 |
72 | This can be read "\idr{Ev_SS} is a constructor that takes two arguments — a
73 | number \idr{n} and evidence for the proposition \idr{Ev n} — and yields evidence
74 | for the proposition \idr{Ev (S (S n))}."
75 |
76 | Now let's look again at a previous proof involving \idr{Ev}.
77 |
78 | > ev_4 : Ev 4
79 | > ev_4 = Ev_SS {n=2} $ Ev_SS {n=0} Ev_0
80 |
81 | As with ordinary data values and functions, we can use the \idr{:printdef}
82 | command to see the proof object that results from this proof script.
83 |
84 | ```idris
85 | λΠ> :printdef ev_4
86 | ev_4 : Ev 4
87 | ev_4 = Ev_SS (Ev_SS Ev_0)
88 | ```
89 |
90 | As a matter of fact, we can also write down this proof object directly, without
91 | the need for a separate proof script:
92 |
93 | ```idris
94 | λΠ> Ev_SS $ Ev_SS Ev_0
95 | Ev_SS (Ev_SS Ev_0) : Ev 4
96 | ```
97 |
98 | The expression \idr{Ev_SS {n=2} $ Ev_SS {n=0} Ev_0} can be thought of as
99 | instantiating the parameterized constructor \idr{Ev_SS} with the specific
100 | arguments \idr{2} and \idr{0} plus the corresponding proof objects for its
101 | premises \idr{Ev 2} and \idr{Ev 0}. Alternatively, we can think of \idr{Ev_SS}
102 | as a primitive "evidence constructor" that, when applied to a particular number,
103 | wants to be further applied to evidence that that number is even; its type,
104 |
105 | ```idris
106 | {n : Nat} -> Ev n -> Ev (S (S n))
107 | ```
108 |
109 | expresses this functionality, in the same way that the polymorphic type
110 | \idr{{x : Type} -> List x} expresses the fact that the constructor \idr{Nil} can
111 | be thought of as a function from types to empty lists with elements of that
112 | type.
113 |
114 | \todo[inline]{Edit or remove}
115 |
116 | We saw in the `Logic` chapter that we can use function application syntax to
117 | instantiate universally quantified variables in lemmas, as well as to supply
118 | evidence for assumptions that these lemmas impose. For instance:
119 |
120 | ```coq
121 | Theorem ev_4': ev 4.
122 | Proof.
123 | apply (ev_SS 2 (ev_SS 0 ev_0)).
124 | Qed.
125 | ```
126 |
127 | We can now see that this feature is a trivial consequence of the status that
128 | Idris grants to proofs and propositions: Lemmas and hypotheses can be combined
129 | in expressions (i.e., proof objects) according to the same basic rules used for
130 | programs in the language.
131 |
132 |
133 | == Proof Scripts
134 |
135 | \ \todo[inline]{Rewrite, keep explanation about holes? Seems a bit late for
136 | that}
137 |
138 | The _proof objects_ we've been discussing lie at the core of how Idris operates.
139 | When Idris is following a proof script, what is happening internally is that it
140 | is gradually constructing a proof object — a term whose type is the proposition
141 | being proved. The expression on the right-hand side of \idr{=} tells it how to
142 | build up a term of the required type. To see this process in action, let's use
143 | the `Show Proof` command to display the current state of the proof tree at
144 | various points in the following tactic proof.
145 |
146 | ```coq
147 | Theorem ev_4'' : ev 4.
148 | Proof.
149 | Show Proof.
150 | apply ev_SS.
151 | Show Proof.
152 | apply ev_SS.
153 | Show Proof.
154 | apply ev_0.
155 | Show Proof.
156 | Qed.
157 | ```
158 |
159 | At any given moment, Idris has constructed a term with a "hole" (indicated by
160 | `?Goal` here, and so on), and it knows what type of evidence is needed to fill
161 | this hole.
162 |
163 | Each hole corresponds to a subgoal, and the proof is finished when there are no
164 | more subgoals. At this point, the evidence we've built is stored in the global
165 | context under the name given in the type definition.
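
In Idris we can watch the same process by leaving a hole on the right-hand side
and asking the REPL about it. A sketch (the hole name is arbitrary, and the
REPL's exact display may differ):

```idris
ev_4'' : Ev 4
ev_4'' = Ev_SS $ Ev_SS ?rest

-- λΠ> :t rest
-- rest : Ev 0
```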
166 |
167 | Tactic proofs are useful and convenient, but they are not essential: in
168 | principle, we can always construct the required evidence by hand, as shown
169 | above. Then we can use `Definition` (rather than `Theorem`) to give a global
170 | name directly to a piece of evidence.
171 |
172 | ```coq
173 | Definition ev_4''' : ev 4 :=
174 | ev_SS 2 (ev_SS 0 ev_0).
175 | ```
176 |
177 | All these different ways of building the proof lead to exactly the same evidence
178 | being saved in the global environment.
179 |
180 | ```coq
181 | Print ev_4.
182 | (* ===> ev_4 = ev_SS 2 (ev_SS 0 ev_0) : ev 4 *)
183 | Print ev_4'.
184 | (* ===> ev_4' = ev_SS 2 (ev_SS 0 ev_0) : ev 4 *)
185 | Print ev_4''.
186 | (* ===> ev_4'' = ev_SS 2 (ev_SS 0 ev_0) : ev 4 *)
187 | Print ev_4'''.
188 | (* ===> ev_4''' = ev_SS 2 (ev_SS 0 ev_0) : ev 4 *)
189 | ```
190 |
191 | ==== Exercise: 1 star (eight_is_even)
192 |
193 | \ \todo[inline]{Remove?}
194 |
195 | Give a tactic proof and a proof object showing that \idr{Ev 8}.
196 |
197 | > ev_8 : Ev 8
198 | > ev_8 = ?ev_8_rhs
199 |
200 | $\square$
201 |
202 |
203 | ==== Quantifiers, Implications, Functions
204 |
205 | \ \todo[inline]{Edit the section}
206 |
207 | In Idris's computational universe (where data structures and programs live),
208 | there are two sorts of values with arrows in their types: _constructors_
209 | introduced by \idr{data} definitions, and _functions_.
210 |
211 | Similarly, in Idris's logical universe (where we carry out proofs), there are
212 | two ways of giving evidence for an implication: constructors introduced by
213 | \idr{data}-defined propositions, and... functions!
214 |
215 | For example, consider this statement:
216 |
217 | > ev_plus4 : Ev n -> Ev (4 + n)
218 | > ev_plus4 x = Ev_SS $ Ev_SS x
219 |
220 | What is the proof object corresponding to `ev_plus4`?
221 |
222 | We're looking for an expression whose type is
223 | \idr{{n : Nat} -> Ev n -> Ev (4 + n)} — that is, a function that takes two
224 | arguments (one number and a piece of evidence) and returns a piece of evidence!
225 | Here it is:
226 |
227 | ```coq
228 | Definition ev_plus4' : forall n, ev n -> ev (4 + n) :=
229 | fun (n : Nat) => fun (H : ev n) =>
230 | ev_SS (S (S n)) (ev_SS n H).
231 | ```
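
A possible Idris rendering of this definition (a sketch of our own, with the number
made explicit and the implicit arguments of \idr{Ev_SS} spelled out) looks like
this:

```idris
ev_plus4' : (n : Nat) -> Ev n -> Ev (4 + n)
ev_plus4' = \n => \h => Ev_SS {n=S (S n)} (Ev_SS {n=n} h)
```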
232 |
233 | Recall that \idr{\n => blah} means "the function that, given \idr{n}, yields
234 | \idr{blah}," and that Idris treats \idr{4 + n} and \idr{S (S (S (S n)))} as
235 | synonyms. Another equivalent way to write this definition is:
236 |
237 | ```coq
238 | Definition ev_plus4'' (n : Nat) (H : ev n) : ev (4 + n) :=
239 | ev_SS (S (S n)) (ev_SS n H).
240 |
241 | Check ev_plus4''.
242 | (* ===> ev_plus4'' : forall n : Nat, ev n -> ev (4 + n) *)
243 | ```
244 |
245 | When we view the proposition being proved by \idr{ev_plus4} as a function type,
246 | one aspect of it may seem a little unusual. The second argument's type,
247 | \idr{Ev n}, mentions the _value_ of the first argument, \idr{n}. While such
248 | _dependent types_ are not found in conventional programming languages, they are
249 | of course central to programming in Idris, as we have seen throughout this
250 | book.
251 |
252 | \todo[inline]{Reword?}
253 |
254 | Notice that both implication (\idr{->}) and quantification
255 | (\idr{(x : t) -> f x}) correspond to functions on evidence. In fact, they are
256 | really the same thing: \idr{->} is just a shorthand for a degenerate use of
257 | quantification where there is no dependency, i.e., no need to give a name to the
258 | type on the left-hand side of the arrow.
259 |
260 | For example, consider this proposition:
261 |
262 | > ev_plus2 : Type
263 | > ev_plus2 = (n : Nat) -> (e : Ev n) -> Ev (n + 2)
264 |
265 | A proof term inhabiting this proposition would be a function with two arguments:
266 | a number \idr{n} and some evidence \idr{e} that \idr{n} is even. But the name
267 | \idr{e} for this evidence is not used in the rest of the statement of
268 | \idr{ev_plus2}, so it's a bit silly to bother making up a name for it. We could
269 | write it like this instead:
270 |
271 | > ev_plus2' : Type
272 | > ev_plus2' = (n : Nat) -> Ev n -> Ev (n + 2)
273 |
274 | In general, "\idr{p -> q}" is just syntactic sugar for "\idr{(_ : p) -> q}".
275 |
276 |
277 | == Programming with Tactics
278 |
279 | \ \todo[inline]{Edit and move to an appendix about ElabReflection/Pruviloj?}
280 |
281 | If we can build proofs by giving explicit terms rather than executing tactic
282 | scripts, you may be wondering whether we can build _programs_ using _tactics_
283 | rather than explicit terms. Naturally, the answer is yes!
284 |
285 | ```coq
286 | Definition add1 : Nat -> Nat.
287 | intro n.
288 | Show Proof.
289 | apply S.
290 | Show Proof.
291 | apply n. Defined.
292 |
293 | Print add1.
294 | (* ==>
295 | add1 = fun n : Nat => S n
296 | : Nat -> Nat
297 | *)
298 |
299 | Compute add1 2.
300 | (* ==> 3 : Nat *)
301 | ```
302 |
303 | Notice that we terminate the `Definition` with a `.` rather than with `:=`
304 | followed by a term. This tells Idris to enter _proof scripting mode_ to build an
305 | object of type \idr{Nat -> Nat}. Also, we terminate the proof with `Defined`
306 | rather than `Qed`; this makes the definition _transparent_ so that it can be
307 | used in computation like a normally-defined function. (`Qed`-defined objects are
308 | opaque during computation.)
309 |
310 | This feature is mainly useful for writing functions with dependent types, which
311 | we won't explore much further in this book. But it does illustrate the
312 | uniformity and orthogonality of the basic ideas in Idris.
313 |
314 |
315 | == Logical Connectives as Inductive Types
316 |
317 | Inductive definitions are powerful enough to express most of the connectives and
318 | quantifiers we have seen so far. Indeed, only universal quantification (and thus
319 | implication) is built into Idris; all the others are defined inductively. We'll
320 | see these definitions in this section.
321 |
322 |
323 | === Conjunction
324 |
325 | \ \todo[inline]{Edit}
326 |
327 | To prove that \idr{(p,q)} holds, we must present evidence for both \idr{p} and
328 | \idr{q}. Thus, it makes sense to define a proof object for \idr{(p,q)} as
329 | consisting of a pair of two proofs: one for \idr{p} and another one for \idr{q}.
330 | This leads to the following definition.
331 |
332 | > data And : (p, q : Type) -> Type where
333 | > Conj : p -> q -> And p q
334 |
335 | Notice the similarity with the definition of the \idr{Prod} type, given in
336 | chapter `Poly`; the only difference is that \idr{Prod} is meant to package data,
337 | whereas \idr{And} packages evidence (in Idris, both take \idr{Type} arguments).
338 |
339 | ```idris
340 | data Prod : (x, y : Type) -> Type where
341 | PPair : x -> y -> Prod x y
342 | ```
343 |
344 | This should clarify why pattern matching can be used on a conjunctive
345 | hypothesis. Case analysis allows us to consider all possible ways in which
346 | \idr{(p,q)} was proved — here just one (the \idr{Conj} constructor). Similarly,
347 | the `split` tactic actually works for any inductively defined proposition with
348 | only one constructor. In particular, it works for \idr{And}:
349 |
350 | > and_comm : (And p q) <-> (And q p)
351 | > and_comm = (\(Conj x y) => Conj y x,
352 | > \(Conj y x) => Conj x y)
353 |
354 | This shows why the inductive definition of `and` can be manipulated by tactics
355 | as we've been doing. We can also use it to build proofs directly, using
356 | pattern-matching. For instance:
357 |
358 | > and_comm'_aux : And p q -> And q p
359 | > and_comm'_aux (Conj x y) = Conj y x
360 |
361 | > and_comm' : (And p q) <-> (And q p)
362 | > and_comm' {p} {q} = (and_comm'_aux {p} {q}, and_comm'_aux {p=q} {q=p})
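
Here is another direct proof object in the same style (a small sketch of our own,
reusing the \idr{ev_4} proof from earlier in this chapter):

```idris
-- hypothetical name; not part of the original development
and_example : And (Ev 0) (Ev 4)
and_example = Conj Ev_0 ev_4
```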
363 |
364 |
365 | ==== Exercise: 2 stars, optional (conj_fact)
366 |
367 | Construct a proof object demonstrating the following proposition.
368 |
369 | > conj_fact : And p q -> And q r -> And p r
370 | > conj_fact pq qr = ?conj_fact_rhs
371 |
372 | $\square$
373 |
374 |
375 | === Disjunction
376 |
377 | The inductive definition of disjunction uses two constructors, one for each
378 | disjunct:
379 |
380 | > data Or : (p, q : Type) -> Type where
381 | > IntroL : p -> Or p q
382 | > IntroR : q -> Or p q
383 |
384 | This declaration explains the behavior of pattern matching on a disjunctive
385 | hypothesis, since the generated subgoals match the shape of the \idr{IntroL} and
386 | \idr{IntroR} constructors.
387 |
388 | Once again, we can also directly write proof objects for theorems involving
389 | \idr{Or}, without resorting to tactics.
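
For instance (a small sketch of our own; the name is hypothetical), injecting
evidence for the left disjunct is just an application of \idr{IntroL}:

```idris
or_intro_left : p -> Or p q
or_intro_left = IntroL
```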
390 |
391 |
392 | ==== Exercise: 2 stars, optional (or_comm)
393 |
394 | \ \todo[inline]{Edit}
395 |
396 | Try to write down an explicit proof object for \idr{or_comm} (without using
397 | `Print` to peek at the ones we already defined!).
398 |
399 | > or_comm : Or p q -> Or q p
400 | > or_comm pq = ?or_comm_rhs
401 |
402 | $\square$
403 |
404 |
405 | === Existential Quantification
406 |
407 | To give evidence for an existential quantifier, we package a witness \idr{x}
408 | together with a proof that \idr{x} satisfies the property \idr{p}:
409 |
410 | > data Ex : (p : a -> Type) -> Type where
411 | > ExIntro : (x : a) -> p x -> Ex p
412 |
413 | This may benefit from a little unpacking. The core definition is for a type
414 | former \idr{Ex} that can be used to build propositions of the form \idr{Ex p},
415 | where \idr{p} itself is a function from witness values in the type \idr{a} to
416 | propositions. The \idr{ExIntro} constructor then offers a way of constructing
417 | evidence for \idr{Ex p}, given a witness \idr{x} and a proof of \idr{p x}.
418 |
419 | The more familiar form \idr{(x ** p x)} desugars to an expression involving
420 | \idr{Ex}:
421 |
422 | \todo[inline]{Edit}
423 |
424 | ```coq
425 | Check ex (fun n => ev n).
426 | (* ===> exists n : Nat, ev n
427 | : Prop *)
428 | ```
429 |
430 | Here's how to define an explicit proof object involving \idr{Ex}:
431 |
432 | > some_nat_is_even : Ex (\n => Ev n)
433 | > some_nat_is_even = ExIntro 4 (Ev_SS $ Ev_SS Ev_0)
434 |
435 |
436 | ==== Exercise: 2 stars, optional (ex_ev_Sn)
437 |
438 | Complete the definition of the following proof object:
439 |
440 | > ex_ev_Sn : Ex (\n => Ev (S n))
441 | > ex_ev_Sn = ?ex_ev_Sn_rhs
442 |
443 | $\square$
444 |
445 |
446 | \subsection{\idr{Unit} and \idr{Void}}
447 |
448 | The inductive definition of the \idr{Unit} proposition is simple:
449 |
450 | ```idris
451 | data Unit : Type where
452 | () : Unit
453 | ```
454 |
455 | It has one constructor (so every proof of \idr{Unit} is the same, and being
456 | given a proof of \idr{Unit} is therefore not informative).
457 |
458 | \idr{Void} is equally simple — indeed, so simple it may look syntactically wrong
459 | at first glance!
460 |
461 | \todo[inline]{Edit, this actually is wrong, stdlib uses \idr{runElab} to define
462 | it}
463 |
464 | ```idris
465 | data Void : Type where
466 | ```
467 |
468 | That is, \idr{Void} is an inductive type with _no_ constructors — i.e., no way
469 | to build evidence for it.
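
As a consequence (a sketch of our own, with a hypothetical name), evidence for
\idr{Void} lets us conclude anything at all; here we use the prelude function
\idr{absurd}, which eliminates any uninhabited type:

```idris
-- hypothetical name; a sketch of "ex falso quodlibet" via the prelude's absurd
ex_falso : Void -> p
ex_falso v = absurd v
```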
470 |
471 |
472 | == Equality
473 |
474 | \ \todo[inline]{Edit, it actually is built in}
475 |
476 | Even Idris's equality relation is not built in. It has the following inductive
477 | definition. (Actually, the definition in the standard library is a small variant
478 | of this, which gives an induction principle that is slightly easier to use.)
479 |
480 | > data PropEq : {t : Type} -> t -> t -> Type where
481 | > EqRefl : PropEq x x
482 |
483 | > syntax [x] "='" [y] = PropEq x y
484 |
485 | The way to think about this definition is that, given a set \idr{t}, it defines
486 | a _family_ of propositions "\idr{x} is equal to \idr{y}," indexed by pairs of
487 | values (\idr{x} and \idr{y}) from \idr{t}. There is just one way of constructing
488 | evidence for each member of this family: applying the constructor \idr{EqRefl}
489 | to a type \idr{t} and a value \idr{x : t} yields evidence that \idr{x} is equal
490 | to \idr{x}.
491 |
492 | \todo[inline]{Edit}
493 |
494 | We can use \idr{EqRefl} to construct evidence that, for example, \idr{2 = 2}.
495 | Can we also use it to construct evidence that \idr{1 + 1 = 2}? Yes, we can.
496 | Indeed, it is the very same piece of evidence! The reason is that Idris treats
497 | as "the same" any two terms that are _convertible_ according to a simple set of
498 | computation rules. These rules, which are similar to those used by `Compute`,
499 | include evaluation of function application, inlining of definitions, and
500 | simplification of `match`es.
501 |
502 | > four : (2 + 2) =' (1 + 3)
503 | > four = EqRefl
504 |
505 | The \idr{Refl} that we have used to prove equalities up to now is essentially
506 | just an application of an equality constructor.
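
For instance (a sketch of our own, stated with the built-in equality this time),
the proof of \idr{2 + 2 = 1 + 3} is literally just \idr{Refl}:

```idris
-- hypothetical name; the built-in analogue of `four` above
four'' : 2 + 2 = 1 + 3
four'' = Refl
```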
507 |
508 | \todo[inline]{Edit}
509 |
510 | In tactic-based proofs of equality, the conversion rules are normally hidden in
511 | uses of `simpl` (either explicit or implicit in other tactics such as
512 | `reflexivity`). But you can see them directly at work in the following explicit
513 | proof objects:
514 |
515 | ```coq
516 | Definition four' : 2 + 2 = 1 + 3 :=
517 | eq_refl 4.
518 | ```
519 |
520 | > singleton : ([]++[x]) =' (x::[])
521 | > singleton = EqRefl
522 |
523 | > quiz6 : Ex (\x => (x + 3) =' 4)
524 | > quiz6 = ExIntro 1 EqRefl
525 |
526 |
527 | ==== Exercise: 2 stars (equality__leibniz_equality)
528 |
529 | The inductive definition of equality corresponds to _Leibniz equality_: what we
530 | mean when we say "\idr{x} and \idr{y} are equal" is that every property \idr{p}
531 | that is true of \idr{x} is also true of \idr{y}.
532 |
533 | > equality__leibniz_equality : (x =' y) -> ((p : t -> Type) -> p x -> p y)
534 | > equality__leibniz_equality eq p px = ?equality__leibniz_equality_rhs
535 |
536 | $\square$
537 |
538 |
539 | ==== Exercise: 5 stars, optional (leibniz_equality__equality)
540 |
541 | Show that, in fact, the inductive definition of equality is _equivalent_ to
542 | Leibniz equality:
543 |
544 | > leibniz_equality__equality : ((p : t -> Type) -> p x -> p y) -> (x =' y)
545 | > leibniz_equality__equality pxy = ?leibniz_equality__equality_rhs
546 |
547 | $\square$
548 |
549 |
550 | === Inversion, Again
551 |
552 | \ \todo[inline]{Edit/remove}
553 |
554 | We've seen `inversion` used with both equality hypotheses and hypotheses about
555 | inductively defined propositions. Now that we've seen that these are actually
556 | the same thing, we're in a position to take a closer look at how `inversion`
557 | behaves.
558 |
559 | In general, the `inversion` tactic...
560 |
561 | - takes a hypothesis `H` whose type `P` is inductively defined, and
562 |
563 | - for each constructor `C` in `P`'s definition,
564 |
565 | - generates a new subgoal in which we assume `H` was built with `C`,
566 |
567 | - adds the arguments (premises) of `C` to the context of the subgoal as
568 | extra hypotheses,
569 |
570 | - matches the conclusion (result type) of `C` against the current goal and
571 | calculates a set of equalities that must hold in order for `C` to be
572 | applicable,
573 |
574 | - adds these equalities to the context (and, for convenience, rewrites them
575 | in the goal), and
576 |
577 | - if the equalities are not satisfiable (e.g., they involve things like
578 | \idr{S n = Z}), immediately solves the subgoal.
579 |
580 | _Example_: If we invert a hypothesis built with \idr{Or}, there are two
581 | constructors, so two subgoals get generated. The conclusion (result type) of
582 | each constructor (\idr{Or p q}) doesn't place any restrictions on the form of \idr{p}
583 | or \idr{q}, so we don't get any extra equalities in the context of the subgoal.
584 |
585 | _Example_: If we invert a hypothesis built with \idr{And}, there is only one
586 | constructor, so only one subgoal gets generated. Again, the conclusion (result
587 | type) of the constructor (\idr{And p q}) doesn't place any restrictions on the
588 | form of \idr{p} or \idr{q}, so we don't get any extra equalities in the context
589 | of the subgoal. The constructor does have two arguments, though, and these can
590 | be seen in the context in the subgoal.
591 |
592 | _Example_: If we invert a hypothesis built with \idr{PropEq}, there is again
593 | only one constructor, so only one subgoal gets generated. Now, though, the form
594 | of the \idr{EqRefl} constructor does give us some extra information: it tells us
595 | that the two arguments to \idr{PropEq} must be the same! The `inversion` tactic
596 | adds this fact to the context.
597 |
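In Idris, the same information falls out of ordinary pattern matching: matching on
\idr{EqRefl} forces the two indices to coincide. A small sketch (hypothetical name,
not part of the original development):

```idris
cong_S : (n =' m) -> ((S n) =' (S m))
cong_S EqRefl = EqRefl
```
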
--------------------------------------------------------------------------------
/src/Rel.lidr:
--------------------------------------------------------------------------------
1 | = Rel : Properties of Relations
2 |
3 | > module Rel
4 | >
5 |
6 | \todo[inline]{Add hyperlinks}
7 |
8 | This short (and optional) chapter develops some basic definitions and a few
9 | theorems about binary relations in Idris. The key definitions are repeated where
10 | they are actually used (in the `Smallstep` chapter), so readers who are already
11 | comfortable with these ideas can safely skim or skip this chapter. However,
12 | relations are also a good source of exercises for developing facility with
13 | Idris's basic reasoning facilities, so it may be useful to look at this material
14 | just after the `IndProp` chapter.
15 |
16 | > import Logic
17 | > import IndProp
18 | >
19 |
20 | A binary _relation_ on a set \idr{t} is a family of propositions parameterized
21 | by two elements of \idr{t} — i.e., a proposition about pairs of elements of
22 | \idr{t}.
23 |
24 | > Relation : Type -> Type
25 | > Relation t = t -> t -> Type
26 |
27 | \todo[inline]{Edit, there's n-relation \idr{Data.Rel} in \idr{contrib}, but no
28 | \idr{Relation}}
29 |
30 | Confusingly, the Idris standard library hijacks the generic term "relation" for
31 | this specific instance of the idea. To maintain consistency with the library, we
32 | will do the same. So, henceforth the Idris identifier \idr{Relation} will always
33 | refer to a binary relation between some set and itself, whereas the English word
34 | "relation" can refer either to the specific Idris concept or the more general
35 | concept of a relation between any number of possibly different sets. The context
36 | of the discussion should always make clear which is meant.
37 |
38 | \todo[inline]{There's a similar concept called \idr{LTE} in \idr{Prelude.Nat},
39 | but it's defined by induction from zero}
40 |
41 | An example relation on \idr{Nat} is \idr{Le}, the less-than-or-equal-to
42 | relation, which we usually write \idr{n1 <=' n2}.
43 |
44 | ```idris
45 | λΠ> the (Relation Nat) Le
46 | Le : Nat -> Nat -> Type
47 | ```
48 |
49 | \todo[inline]{Edit to show it (probably) doesn't matter in Idris}
50 |
51 | (Why did we write it this way instead of starting with \idr{data Le : Relation
52 | Nat ...}? Because we wanted to put the first \idr{Nat} to the left of the
53 | \idr{:}, which makes Idris generate a somewhat nicer induction principle for
54 | reasoning about \idr{<='}.)
55 |
56 |
57 | == Basic Properties
58 |
59 | As anyone who has taken an undergraduate discrete math course knows, there is a
60 | lot to be said about relations in general, including ways of classifying
61 | relations (as reflexive, transitive, etc.), theorems that can be proved
62 | generically about certain sorts of relations, constructions that build one
63 | relation from another, etc. For example...
64 |
65 |
66 | === Partial Functions
67 |
68 | A relation \idr{r} on a set \idr{t} is a _partial function_ if, for every
69 | \idr{x}, there is at most one \idr{y} such that \idr{r x y} — i.e., \idr{r x y1}
70 | and \idr{r x y2} together imply \idr{y1 = y2}.
71 |
72 | > Partial_function : (r : Relation t) -> Type
73 | > Partial_function {t} r = (x, y1, y2 : t) -> r x y1 -> r x y2 -> y1 = y2
74 |
75 | \todo[inline]{"Earlier" = in \idr{IndProp}, add hyperlink?}
76 |
77 | For example, the \idr{Next_nat} relation defined earlier is a partial function.
78 |
79 | ```idris
80 | λΠ> the (Relation Nat) Next_nat
81 | Next_nat : Nat -> Nat -> Type
82 | ```
83 |
84 | > next_nat_partial_function : Partial_function Next_nat
85 | > next_nat_partial_function x (S x) (S x) Nn Nn = Refl
86 |
87 | However, the \idr{<='} relation on numbers is not a partial function. (Assume,
88 | for a contradiction, that \idr{<='} is a partial function. But then, since
89 | \idr{0 <=' 0} and \idr{0 <=' 1}, it follows that \idr{0 = 1}. This is nonsense,
90 | so our assumption was contradictory.)
91 |
92 | > le_not_a_partial_function : Not (Partial_function Le)
93 | > le_not_a_partial_function f = absurd $ f 0 0 1 Le_n (Le_S Le_n)
94 |
95 |
96 | ==== Exercise: 2 stars, optional
97 |
98 | \ \todo[inline]{Again, "earlier" = \idr{IndProp}}
99 |
100 | Show that the \idr{Total_relation} defined earlier is not a partial function.
101 |
102 | > -- FILL IN HERE
103 |
104 | $\square$
105 |
106 |
107 | ==== Exercise: 2 stars, optional
108 |
109 | Show that the \idr{Empty_relation} that we defined earlier is a partial
110 | function.
111 |
112 | > --FILL IN HERE
113 |
114 | $\square$
115 |
116 |
117 | === Reflexive Relations
118 |
119 | A _reflexive_ relation on a set \idr{t} is one for which every element of
120 | \idr{t} is related to itself.
121 |
122 | > Reflexive : (r : Relation t) -> Type
123 | > Reflexive {t} r = (a : t) -> r a a
124 |
125 | > le_reflexive : Reflexive Le
126 | > le_reflexive n = Le_n {n}
127 |
128 |
129 | === Transitive Relations
130 |
131 | A relation \idr{r} is _transitive_ if \idr{r a c} holds whenever \idr{r a b} and
132 | \idr{r b c} do.
133 |
134 | > Transitive : (r : Relation t) -> Type
135 | > Transitive {t} r = (a, b, c : t) -> r a b -> r b c -> r a c
136 |
137 | > le_trans : Transitive Le
138 | > le_trans _ _ _ lab Le_n = lab
139 | > le_trans a b (S c) lab (Le_S lbc) = Le_S $ le_trans a b c lab lbc
140 |
141 | > lt_trans : Transitive Lt
142 | > lt_trans a b c lab lbc = le_trans (S a) (S b) c (Le_S lab) lbc
143 |
144 |
145 | ==== Exercise: 2 stars, optional
146 |
147 | We can also prove \idr{lt_trans} more laboriously by induction, without using
148 | \idr{le_trans}. Do this.
149 |
150 | > lt_trans' : Transitive Lt
151 | > -- Prove this by induction on the evidence that b is less than c.
152 | > lt_trans' a b c lab lbc = ?lt_trans__rhs
153 |
154 | $\square$
155 |
156 |
157 | ==== Exercise: 2 stars, optional
158 |
159 | \ \todo[inline]{Not sure how is this different from \idr{lt_trans'}?}
160 |
161 | Prove the same thing again by induction on \idr{c}.
162 |
163 | > lt_trans'' : Transitive Lt
164 | > lt_trans'' a b c lab lbc = ?lt_trans___rhs
165 |
166 | $\square$
167 |
168 | The transitivity of \idr{Le}, in turn, can be used to prove some facts that will
169 | be useful later (e.g., for the proof of antisymmetry below)...
170 |
171 | > le_Sn_le : ((S n) <=' m) -> (n <=' m)
172 | > le_Sn_le {n} {m} = le_trans n (S n) m (Le_S Le_n)
173 |
174 |
175 | ==== Exercise: 1 star, optional
176 |
177 | > le_S_n : ((S n) <=' (S m)) -> (n <=' m)
178 | > le_S_n less = ?le_S_n_rhs
179 |
180 | $\square$
181 |
182 |
183 | ==== Exercise: 2 stars, optional (le_Sn_n_inf)
184 |
185 | Provide an informal proof of the following theorem:
186 |
187 | Theorem: For every \idr{n}, \idr{Not ((S n) <=' n)}
188 |
189 | A formal proof of this is an optional exercise below, but try writing an
190 | informal proof without doing the formal proof first.
191 |
192 | Proof:
193 |
194 | > -- FILL IN HERE
195 |
196 | $\square$
197 |
198 |
199 | ==== Exercise: 1 star, optional
200 |
201 | > le_Sn_n : Not ((S n) <=' n)
202 | > le_Sn_n = ?le_Sn_n_rhs
203 |
204 | $\square$
205 |
206 | Reflexivity and transitivity are the main concepts we'll need for later
207 | chapters, but, for a bit of additional practice working with relations in Idris,
208 | let's look at a few other common ones...
209 |
210 |
211 | === Symmetric and Antisymmetric Relations
212 |
213 | A relation \idr{r} is _symmetric_ if \idr{r a b} implies \idr{r b a}.
214 |
215 | > Symmetric : (r : Relation t) -> Type
216 | > Symmetric {t} r = (a, b : t) -> r a b -> r b a
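
For example (a sketch of our own; the name is hypothetical), the built-in equality
on any type is symmetric:

```idris
eq_symmetric : Symmetric {t} (\a, b => a = b)
eq_symmetric _ _ Refl = Refl
```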
217 |
218 |
219 | ==== Exercise: 2 stars, optional
220 |
221 | > le_not_symmetric : Not (Symmetric Le)
222 | > le_not_symmetric = ?le_not_symmetric_rhs
223 |
224 | $\square$
225 |
226 | A relation \idr{r} is _antisymmetric_ if \idr{r a b} and \idr{r b a} together
227 | imply \idr{a = b} — that is, if the only "cycles" in \idr{r} are trivial ones.
228 |
229 | > Antisymmetric : (r : Relation t) -> Type
230 | > Antisymmetric {t} r = (a, b : t) -> r a b -> r b a -> a = b
231 |
232 |
233 | ==== Exercise: 2 stars, optional
234 |
235 | > le_antisymmetric : Antisymmetric Le
236 | > le_antisymmetric = ?le_antisymmetric_rhs
237 |
238 | $\square$
239 |
240 |
241 | ==== Exercise: 2 stars, optional
242 |
243 | > le_step : (n <' m) -> (m <=' (S p)) -> (n <=' p)
244 | > le_step ltnm lemsp = ?le_step_rhs
245 |
246 | $\square$
247 |
248 |
249 | === Equivalence Relations
250 |
251 | A relation is an _equivalence_ if it's reflexive, symmetric, and transitive.
252 |
253 | > Equivalence : (r : Relation t) -> Type
254 | > Equivalence r = (Reflexive r, Symmetric r, Transitive r)
255 |
256 |
257 | === Partial Orders and Preorders
258 |
259 | \ \todo[inline]{Edit}
260 |
261 | A relation is a _partial order_ when it's reflexive, _anti_-symmetric, and
262 | transitive. In the Idris standard library it's called just "order" for short.
263 |
264 | > Order : (r : Relation t) -> Type
265 | > Order r = (Reflexive r, Antisymmetric r, Transitive r)
266 |
267 | A preorder is almost like a partial order, but doesn't have to be antisymmetric.
268 |
269 | > Preorder : (r : Relation t) -> Type
270 | > Preorder r = (Reflexive r, Transitive r)
271 |
272 | > le_order : Order Le
273 | > le_order = (le_reflexive, le_antisymmetric, le_trans)
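
Since every partial order is in particular a preorder, we can also record (a small
sketch of our own, with a hypothetical name) that \idr{Le} is a preorder by reusing
the same components:

```idris
-- hypothetical name; just repackages le_reflexive and le_trans
le_preorder : Preorder Le
le_preorder = (le_reflexive, le_trans)
```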
274 |
275 |
276 | == Reflexive, Transitive Closure
277 |
278 | \ \todo[inline]{Edit}
279 |
280 | The _reflexive, transitive closure_ of a relation \idr{r} is the smallest
281 | relation that contains \idr{r} and that is both reflexive and transitive.
282 | Formally, it is defined like this in the Relations module of the Idris standard
283 | library:
284 |
285 | > data Clos_refl_trans : (r : Relation t) -> Relation t where
286 | > Rt_step : r x y -> Clos_refl_trans r x y
287 | > Rt_refl : Clos_refl_trans r x x
288 | > Rt_trans : Clos_refl_trans r x y -> Clos_refl_trans r y z ->
289 | > Clos_refl_trans r x z
290 |
291 | For example, the reflexive and transitive closure of the \idr{Next_nat} relation
292 | coincides with the \idr{Le} relation.
293 |
294 | > next_nat_closure_is_le : (n <=' m) <-> (Clos_refl_trans Next_nat n m)
295 | > next_nat_closure_is_le = (to, fro)
296 | > where
297 | > to : Le n m -> Clos_refl_trans Next_nat n m
298 | > to Le_n = Rt_refl
299 | > to (Le_S {m} le) = Rt_trans {y=m} (to le) (Rt_step Nn)
300 | > fro : Clos_refl_trans Next_nat n m -> Le n m
301 | > fro (Rt_step Nn) = Le_S Le_n
302 | > fro Rt_refl = Le_n
303 | > fro (Rt_trans {x=n} {y} {z=m} ny ym) =
304 | > le_trans n y m (fro ny) (fro ym)
305 |
306 | The above definition of reflexive, transitive closure is natural: it says,
307 | explicitly, that the reflexive and transitive closure of \idr{r} is the least
308 | relation that includes \idr{r} and that is closed under rules of reflexivity and
309 | transitivity. But it turns out that this definition is not very convenient for
310 | doing proofs, since the "nondeterminism" of the \idr{Rt_trans} rule can
311 | sometimes lead to tricky inductions. Here is a more useful definition:
312 |
313 | > data Clos_refl_trans_1n : (r : Relation t) -> (x : t) -> t -> Type where
314 | > Rt1n_refl : Clos_refl_trans_1n r x x
315 | > Rt1n_trans : r x y -> Clos_refl_trans_1n r y z -> Clos_refl_trans_1n r x z
316 |
317 | \todo[inline]{Edit}
318 |
319 | Our new definition of reflexive, transitive closure "bundles" the \idr{Rt_step}
320 | and \idr{Rt_trans} rules into the single rule \idr{Rt1n_trans}. The left-hand
321 | premise of this rule is a single use of \idr{r}, leading to a much simpler
322 | induction principle.
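
For instance (a sketch of our own, with a hypothetical name), here is \idr{0}
related to \idr{2} in the new closure of \idr{Next_nat}, built from two uses of
\idr{Rt1n_trans} followed by \idr{Rt1n_refl}:

```idris
next_nat_0_2 : Clos_refl_trans_1n Next_nat 0 2
next_nat_0_2 = Rt1n_trans Nn (Rt1n_trans Nn Rt1n_refl)
```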
323 |
324 | Before we go on, we should check that the two definitions do indeed define the
325 | same relation...
326 |
327 | First, we prove two lemmas showing that \idr{Clos_refl_trans_1n} mimics the
328 | behavior of the two "missing" \idr{Clos_refl_trans} constructors.
329 |
330 | > rsc_R : r x y -> Clos_refl_trans_1n r x y
331 | > rsc_R rxy = Rt1n_trans rxy Rt1n_refl
332 |
333 |
334 | ==== Exercise: 2 stars, optional (rsc_trans)
335 |
336 | > rsc_trans : Clos_refl_trans_1n r x y -> Clos_refl_trans_1n r y z ->
337 | > Clos_refl_trans_1n r x z
338 | > rsc_trans crxy cryz = ?rsc_trans_rhs
339 |
340 | $\square$
341 |
342 | Then we use these facts to prove that the two definitions of reflexive,
343 | transitive closure do indeed define the same relation.
344 |
345 |
346 | ==== Exercise: 3 stars, optional (rtc_rsc_coincide)
347 |
348 | > rtc_rsc_coincide : (Clos_refl_trans r x y) <-> (Clos_refl_trans_1n r x y)
349 | > rtc_rsc_coincide = ?rtc_rsc_coincide_rhs
350 |
351 | $\square$
352 |
--------------------------------------------------------------------------------
/src/book.tex:
--------------------------------------------------------------------------------
1 | ---
2 | title: Software Foundations
3 | subtitle: Idris Translation
4 | author:
5 | - Benjamin C. Pierce
6 | - Arthur Azevedo de Amorim
7 | - Chris Casinghino
8 | - Marco Gaboardi
9 | - Michael Greenberg
10 | - Cătălin Hriţcu
11 | - Vilhelm Sjöberg
12 | - Brent Yorgey
13 | date: Summer 2016
14 | thanks: >
15 | with Loris D'Antoni, Andrew W. Appel, Arthur Chargueraud, Anthony Cowley,
16 | Jeffrey Foster, Dmitri Garbuzov, Michael Hicks, Ranjit Jhala, Greg Morrisett,
17 | Jennifer Paykin, Mukund Raghothaman, Chung-chieh Shan, Leonid Spesivtsev,
18 | Andrew Tolmach, Stephanie Weirich and Steve Zdancewic.\newline\newline
19 | Idris translation by Eric Bailey, Alex Gryzlov and Erlend Hamberg.
20 | toc: true
21 | documentclass: amsbook
22 | classoption: twoside
23 | papersize: b5
24 | geometry: margin=1in
25 | header-includes:
26 | - \setmonofont{Iosevka}
27 | -
28 | - \usepackage[utf8]{inputenc}
29 | - \usepackage{newunicodechar}
30 | - \newunicodechar{∀}{$\forall$}
31 |
32 | -
33 | - \usepackage{minted}
34 | - \usemintedstyle{lovelace}
35 | - \newmintinline[el]{elisp}{}
36 | - \newmintinline[idr]{idris}{}
37 | -
38 | - \usepackage[xindy]{glossaries}
39 | - \makeglossaries
40 | - \input{glossary}
41 | - \renewcommand*{\glstextformat}[1]{\textsl{#1}}
42 | -
43 | - \usepackage{todonotes}
44 | -
45 | - \usepackage{ebproof}
46 | - \usepackage{dirtytalk}
47 | include-before: \frontmatter
48 | ---
49 |
50 | \mainmatter
51 |
--------------------------------------------------------------------------------
/src/footer.tex:
--------------------------------------------------------------------------------
1 | \printglossaries
2 |
--------------------------------------------------------------------------------
/src/glossary.tex:
--------------------------------------------------------------------------------
1 | \newglossaryentry{algebraic data type}
2 | {
3 | name={algebraic data type},
4 | description={\todo{define}}
5 | }
6 |
7 | \newglossaryentry{computation rule}
8 | {
9 | name={computation rule},
10 | description={\todo{define}}
11 | }
12 |
13 | \newglossaryentry{expression}
14 | {
15 | name={expression},
16 | description={\todo{define}}
17 | }
18 |
19 | \newglossaryentry{first-class}
20 | {
21 | name={first-class},
22 | description={\todo{define}}
23 | }
24 |
25 | \newglossaryentry{fully certified}
26 | {
27 | name={fully certified},
28 | description={\todo{define}}
29 | }
30 |
31 | \newglossaryentry{function type}
32 | {
33 | name={function type},
34 | description={\todo{define}}
35 | }
36 |
37 | \newglossaryentry{functional programming}
38 | {
39 | name={functional programming},
40 | description={\todo{define}}
41 | }
42 |
43 |
44 | \longnewglossaryentry{idris-add-clause}{
45 | name=\el{idris-add-clause},
46 | sort={idris-add-clause}
47 | }
48 | {
49 | \el{(idris-add-clause PROOF)}
50 |
51 | Add clauses to the declaration at point.
52 | }
53 |
54 | \longnewglossaryentry{idris-case-split}{
55 | name=\el{idris-case-split},
56 | sort={idris-case-split}
57 | }
58 | {
59 | \el{(idris-case-split)}
60 |
61 | Case split the pattern variable at point.
62 | }
63 |
64 | \longnewglossaryentry{idris-load-file}{
65 | name=\el{idris-load-file},
66 | sort={idris-load-file}
67 | }
68 | {
69 | \el{(idris-load-file &optional SET-LINE)}
70 |
71 | Pass the current buffer’s file to the inferior Idris process.
72 |
73 | A prefix argument restricts loading to the current line.
74 | }
75 |
76 | \longnewglossaryentry{idris-proof-search}{
77 | name=\el{idris-proof-search},
78 | sort={idris-proof-search}
79 | }
80 | {
81 | \el{(idris-proof-search &optional ARG)}
82 |
83 | Invoke the proof search. A plain prefix argument causes the
84 | command to prompt for hints and recursion depth, while a numeric
85 | prefix argument sets the recursion depth directly.
86 | }
87 |
88 | \newglossaryentry{induction}
89 | {
90 | name={induction},
91 | description={\todo{define}}
92 | }
93 |
94 | \newglossaryentry{inductive rule}
95 | {
96 | name={inductive rule},
97 | description={\todo{define}}
98 | }
99 |
100 | \newglossaryentry{module system}
101 | {
102 | name={module system},
103 | description={\todo{define}}
104 | }
105 |
106 | \newglossaryentry{pattern matching}
107 | {
108 | name={pattern matching},
109 | description={\todo{define}}
110 | }
111 |
112 | \newglossaryentry{polymorphic type system}
113 | {
114 | name={polymorphic type system},
115 | description={\todo{define}}
116 | }
117 |
118 | \newglossaryentry{proof assistant}
119 | {
120 | name={proof assistant},
121 | description={\todo{define}}
122 | }
123 |
124 | \newglossaryentry{structural recursion}
125 | {
126 | name={structural recursion},
127 | description={\todo{define}}
128 | }
129 |
130 | \newglossaryentry{syntax}
131 | {
132 | name=\idr{syntax},
133 | description={\todo{define}},
134 | sort={syntax}
135 | }
136 |
137 | \newglossaryentry{tactic}
138 | {
139 | name={tactic},
140 | description={\todo{define}}
141 | }
142 |
143 | \newglossaryentry{type}
144 | {
145 | name={type},
146 | description={\todo{define}}
147 | }
148 |
149 | \newglossaryentry{type system}
150 | {
151 | name={type system},
152 | description={\todo{define}}
153 | }
154 |
155 | \newglossaryentry{wildcard pattern}
156 | {
157 | name={wildcard pattern},
158 | description={\todo{define}}
159 | }
160 |
--------------------------------------------------------------------------------
/src/latexmkrc:
--------------------------------------------------------------------------------
1 | $clean_ext .= ' %R.ist %R.xdy';
2 | $pdflatex = q/xelatex %O --shell-escape %S/;
3 |
4 |
5 | add_cus_dep('glo', 'gls', 0, 'run_makeglossaries');
6 | add_cus_dep('acn', 'acr', 0, 'run_makeglossaries');
7 |
8 |
9 | sub run_makeglossaries {
10 | if ( $silent ) {
11 | system "makeglossaries -q '$_[0]'";
12 | }
13 | else {
14 | system "makeglossaries '$_[0]'";
15 | };
16 | }
17 |
18 |
19 | push @generated_exts, 'glo', 'gls', 'glg';
20 | push @generated_exts, 'acn', 'acr', 'alg';
21 |
--------------------------------------------------------------------------------
/src/pandoc-minted.hs:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env runghc
2 |
3 |
4 | -- Project-specific Haskell port of Nick Ulle's pandoc-minted.py:
5 | -- https://github.com/nick-ulle/pandoc-minted
6 |
7 |
8 | import Data.List (intercalate)
9 | import Text.Pandoc.Generic (topDown)
10 | import Text.Pandoc.JSON
11 |
12 |
13 | data Minted = MintedInline (String, String) String
14 | | MintedBlock (String, String) String
15 |
16 |
17 | instance Show Minted where
18 | show (MintedInline (attrs, language) contents) =
19 | "\\mintinline[" ++ attrs ++ "]{" ++ language ++ "}{" ++ contents ++ "}"
20 | show (MintedBlock (attrs, language) contents) =
21 | unlines [ "\\begin{minted}[" ++ attrs ++ "]{" ++ language ++ "}"
22 | , contents
23 | , "\\end{minted}"
24 | ]
25 |
26 |
27 | main :: IO ()
28 | main = toJSONFilter minted
29 |
30 |
31 | minted :: Pandoc -> Pandoc
32 | minted = topDown (concatMap mintinline) .
33 | topDown (concatMap mintedBlock)
34 |
35 |
36 | mintinline :: Inline -> [Inline]
37 | mintinline (Code attr contents) =
38 | let
39 | latex = show $ MintedInline (unpackCode attr "text") contents
40 | in
41 | [ RawInline (Format "latex") latex ]
42 | mintinline x = [x]
43 |
44 |
45 | mintedBlock :: Block -> [Block]
46 | mintedBlock (CodeBlock attr contents) =
47 | let
48 | latex = show $ MintedBlock (unpackCode attr "text") contents
49 | in
50 | [ RawBlock (Format "latex") latex ]
51 | mintedBlock x = [x]
52 |
53 |
54 | unpackCode :: Attr -> String -> (String, String)
55 | unpackCode (_, [], kvs) defaultLanguage =
56 | (unpackAttrs kvs, defaultLanguage)
57 | unpackCode (identifier, "sourceCode" : _, kvs) defaultLanguage =
58 | unpackCode (identifier, ["idris"], kvs) defaultLanguage
59 | unpackCode (_, language : _, kvs) _ =
60 | (unpackAttrs kvs, language)
61 |
62 |
63 | unpackAttrs :: [(String, String)] -> String
64 | unpackAttrs kvs = intercalate ", " [ k ++ "=" ++ v | (k, v) <- kvs ]
65 |
--------------------------------------------------------------------------------