├── .gitignore ├── AUTHORS.md ├── README.md ├── files ├── introduction │ └── introduction.pdf ├── lecture_1 │ └── lecture_1.pdf ├── revision_1 │ ├── images │ │ ├── list.png │ │ ├── logical_and.png │ │ ├── logical_or.png │ │ ├── memory.png │ │ └── zip.png │ ├── revision_1.pdf │ └── revision_1.tex └── revision_2 │ ├── revision_2.pdf │ └── revision_2.tex ├── notebook ├── Lecture-1-Numerical-methods-1-Solutions.ipynb ├── Lecture-1-Numerical-methods-1.ipynb ├── Lecture-2-Numerical-methods-1-Solutions.ipynb ├── Lecture-2-Numerical-methods-1-extra-exercises-Solutions.ipynb ├── Lecture-2-Numerical-methods-1-extra-exercises.ipynb ├── Lecture-2-Numerical-methods-1.ipynb ├── Lecture-3-Numerical-methods-1-Solutions.ipynb ├── Lecture-3-Numerical-methods-1.ipynb ├── Lecture-4-Numerical-methods-1-Solutions.ipynb ├── Lecture-4-Numerical-methods-1.ipynb ├── Lecture-5-Numerical-methods-1-Solutions.ipynb ├── Lecture-5-Numerical-methods-1.ipynb ├── Lecture-6-Numerical-methods-1-Solutions.ipynb ├── Lecture-6-Numerical-methods-1.ipynb ├── Lecture-7-Numerical-methods-1-Solutions.ipynb ├── Lecture-7-Numerical-methods-1-extra-exercises-Solutions.ipynb ├── Lecture-7-Numerical-methods-1-extra-exercises.ipynb ├── Lecture-7-Numerical-methods-1.ipynb ├── auxilliary_plots.py ├── data │ ├── Length-Width.dat │ └── shot.txt ├── exam-2017-solutions.ipynb ├── exam-2017.ipynb ├── exam_2016.ipynb ├── exam_2016_solutions.ipynb ├── images │ ├── Width-Length.png │ ├── central_diff.png │ ├── circuit.png │ ├── circuit.tex │ ├── euler_vs_heun.png │ ├── fd_cd_convergence.png │ └── forward_diff.png ├── revision_exercises.ipynb └── revision_exercises_solutions.ipynb └── past_exams ├── assessed-coursework-2016.ipynb ├── exam-2.18-2012.pdf └── exam-3.08-2012.pdf /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | -------------------------------------------------------------------------------- /AUTHORS.md: 
-------------------------------------------------------------------------------- 1 | # Authors 2 | 3 | * Gerard Gorman 4 | * Matthew Piggott 5 | * Christian Jacobs 6 | * Stephan Kramer 7 | * Philipp Lang 8 | 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Numerical Methods 1 2 | =================== 3 | 4 | This course makes use of the text book ["Numerical Methods in Engineering with Python 3" by Jaan Kiusalaas](http://www.cambridge.org/us/academic/subjects/engineering/engineering-mathematics-and-programming/numerical-methods-engineering-python-3-3rd-edition) as well as [course notes from an earlier version of this course in 2012 by David Ham](https://bitbucket.org/David_Ham/numerical_methods_1). 5 | 6 | An updated version of this course can always be found on the [GitHub page](http://ggorman.github.io/Numerical-methods-1/). 7 | 8 | -------------------------------------------------------------------------------- /files/introduction/introduction.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/introduction/introduction.pdf -------------------------------------------------------------------------------- /files/lecture_1/lecture_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/lecture_1/lecture_1.pdf -------------------------------------------------------------------------------- /files/revision_1/images/list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/revision_1/images/list.png
-------------------------------------------------------------------------------- /files/revision_1/images/logical_and.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/revision_1/images/logical_and.png -------------------------------------------------------------------------------- /files/revision_1/images/logical_or.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/revision_1/images/logical_or.png -------------------------------------------------------------------------------- /files/revision_1/images/memory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/revision_1/images/memory.png -------------------------------------------------------------------------------- /files/revision_1/images/zip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/revision_1/images/zip.png -------------------------------------------------------------------------------- /files/revision_1/revision_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/revision_1/revision_1.pdf -------------------------------------------------------------------------------- /files/revision_1/revision_1.tex: -------------------------------------------------------------------------------- 1 | \documentclass[t,11pt,british,english, top=1.0in]{beamer} 2 | %\usetheme{amcg} 3 | %\usetheme{iclpt} 4 | 
\usepackage{url} 5 | \usepackage{hyperref} 6 | \usepackage{graphics} 7 | \usepackage{amsmath} 8 | \usepackage{amssymb} 9 | \usepackage{tikz} 10 | \usetikzlibrary{calc,decorations.pathreplacing} 11 | \usepackage{natbib} 12 | %You can use the package \textbf{pgfpages} 13 | %to arrange your slides for printing. This is also explained 14 | %in the \textbf{beamer} documentation. 15 | 16 | \newcommand{\tensor}[1]{\overline{\overline{#1}}} 17 | \newcommand{\tautens}{\tensor{\tau}} 18 | 19 | \setbeamerfont{framesubtitle}{size=\normalsize} 20 | \setbeamerfont{framesubtitle}{size=\normalsize} 21 | %\setbeamertemplate{frametitle}[default][center] 22 | %\setbeamersize{text margin left=6mm} 23 | 24 | \usetheme{Madrid} 25 | \usecolortheme{orchid} 26 | 27 | %gets rid of bottom navigation bars 28 | \setbeamertemplate{footline}[page number]{} 29 | \setbeamertemplate{headline}{} 30 | 31 | %gets rid of navigation symbols 32 | \setbeamertemplate{navigation symbols}{} 33 | 34 | \begin{document} 35 | \title{Introduction to programming for Geoscientists\\\vspace*{5mm}Revision Lecture 1} 36 | \author{} 37 | \date{} 38 | 39 | \frame{\titlepage} 40 | 41 | \frame{ 42 | \frametitle{Variables} 43 | \framesubtitle{Definition} 44 | \begin{itemize} 45 | \item {\color{red}Variable}: a place in the computer's memory which holds a value. 46 | \vspace*{2mm} 47 | \begin{itemize} 48 | \item Memory {\color{red}address} + symbolic {\color{red}name}\vspace*{1mm} 49 | \item You define the symbolic name in your Python program.\vspace*{1mm} 50 | \item e.g. If a variable called \texttt{a} does not already exist, the statement \texttt{a = 5} stores the value 5 in an un-used block of memory.\vspace*{1mm} 51 | \item The value can then be {\color{red}referenced} (i.e. accessed) using the symbolic name, e.g. \texttt{print a}. 
52 | \end{itemize} 53 | \end{itemize} 54 | \vspace*{5mm} 55 | A simplified view of variables: 56 | \begin{figure}[H] 57 | \centering\includegraphics[width=0.9\columnwidth]{images/memory.png} 58 | \end{figure} 59 | } 60 | 61 | \frame{ 62 | \frametitle{Variables} 63 | \framesubtitle{Key points} 64 | \begin{itemize} 65 | \item Always make sure variables are defined \textbf{before trying to use them}! The following block of code will not work: 66 | \begin{minipage}[t]{.5\textwidth} 67 | \texttt{b = 5\\ c = a*b\\ a = 10} 68 | \end{minipage}\vspace*{5mm} 69 | \item Variable names:\vspace*{1mm} 70 | \begin{itemize} 71 | \item are {\color{red}case sensitive}.\vspace*{1mm} 72 | \item cannot start with a digit.\vspace*{1mm} 73 | \item cannot be a Python keyword: \texttt{and, as, assert, break, class, continue, def, del, elif, else, except, exec, finally, for, from, global, if, import, in, is, lambda, not, or, pass, print, raise, return, try, with, while, yield}.\vspace*{1mm} 74 | \end{itemize} 75 | \end{itemize} 76 | } 77 | 78 | \frame{ 79 | \frametitle{Printing} 80 | \framesubtitle{} 81 | \begin{itemize} 82 | \item Data held in variables can be printed to the screen using 83 | \begin{minipage}[t]{.5\textwidth} 84 | \texttt{b = 5.67560\\ print b} 85 | \end{minipage}\vspace*{2mm} 86 | \item Or, to present data in a nicer way, use {\color{red}printf style formatting}: 87 | \begin{minipage}[t]{\textwidth} 88 | \texttt{print "The data held in variable b is: \%.2f" \% (b)} 89 | \end{minipage}\vspace*{2mm} 90 | \item The {\color{red}format specifier} \texttt{\%.2f} acts like a {\color{red}placeholder}. When printing to the screen, Python substitutes this for the data in \texttt{b} and formats it accordingly: 91 | \begin{itemize} 92 | \item \texttt{\%.2f} prints out the data in \texttt{b} to 2 decimal places (i.e. \texttt{5.67}). 93 | \item \texttt{\%d} prints out the data in \texttt{b} as an integer (i.e. \texttt{5}). 
94 | \item \texttt{\%g} prints out the data in \texttt{b} to the minimum number of significant figures (i.e. \texttt{5.6756}). 95 | \end{itemize} 96 | \item If you see numbers like \texttt{5e-2}, this is just Python's way of writing $5 \times 10^{-2}$. It has nothing to do with the mathematical constant $e \approx 2.71828$. \vspace*{2mm} 97 | \end{itemize} 98 | } 99 | 100 | \frame{ 101 | \frametitle{Integer division} 102 | \framesubtitle{} 103 | \begin{itemize} 104 | \item In Python, dividing an integer by another integer will result in {\color{red}another integer}.\vspace*{2mm} 105 | \item Python computes the result, and {\color{red}drops the decimal point and everything after it}. e.g. 9/5 = 1.8 will evaluate to 1\vspace*{2mm} 106 | \item This is a common error made in Python programs, so watch out for it.\vspace*{2mm} 107 | \item If in doubt, just make the numerator or denominator (or both) floating-point numbers. e.g. 3 $\rightarrow$ 3.0\vspace*{2mm} 108 | \end{itemize} 109 | } 110 | 111 | \frame{ 112 | \frametitle{Variable type conversion} 113 | \framesubtitle{} 114 | \begin{itemize} 115 | \item Converting a variable's data from one type to another.\vspace*{2mm} 116 | \begin{itemize} 117 | \item \texttt{int(5.0)} $\rightarrow$ \texttt{5} 118 | \item \texttt{float(7)} $\rightarrow$ \texttt{7.0} 119 | \item \texttt{str(8.15)} $\rightarrow$ ``\texttt{8.15}'' 120 | \item \texttt{int("5")} $\rightarrow$ \texttt{5} 121 | \end{itemize} 122 | \item Also known as {\color{red}type casting}.\vspace*{2mm} 123 | 124 | \item You will most likely use casting: 125 | \begin{itemize} 126 | \item to avoid integer division problems, e.g. \texttt{float(3)/5} 127 | \item when you want to use numerical data read in from the keyboard using \texttt{raw\_input}, e.g. 
128 | \begin{minipage}[t]{\textwidth} 129 | \texttt{a = 5\\ b = raw\_input("Please enter a number.")\\ c = a*float(b)} 130 | \end{minipage}\vspace*{2mm} 131 | \end{itemize} 132 | 133 | \end{itemize} 134 | } 135 | 136 | \frame{ 137 | \frametitle{Operator precedence} 138 | \framesubtitle{} 139 | \begin{itemize} 140 | \item Expressions like \texttt{2.0 + 3.0/5.0} are evaluated in a particular order, determined by {\color{red}operator precedence}.\vspace*{2mm} 141 | \item Division has a {\color{red}higher precedence} than addition, so \texttt{3.0/5.0} is evaluated {\color{red}first}, and \texttt{2.0} is then added on afterwards.\vspace*{2mm} 142 | \item If we wanted \texttt{2.0 + 3.0} to be evaluated first, then we need to use {\color{red}parentheses}: \texttt{(2.0 + 3.0)/5.0}.\vspace*{2mm} 143 | \item {\color{red}BODMAS}: {\color{red}B}rackets, {\color{red}O}rder, {\color{red}D}ivision, {\color{red}M}ultiplication, {\color{red}A}ddition, {\color{red}S}ubtraction. 144 | \newline 145 | \item Note: Python groups certain operators together such that they have the same precedence, and then evaluates expressions from left to right. See \texttt{http://docs.python.org/2/reference/expressions.html}. 146 | \end{itemize} 147 | } 148 | 149 | \frame{ 150 | \frametitle{Importing modules} 151 | \framesubtitle{} 152 | \begin{itemize} 153 | \item Python {\color{red}modules} are useful when you want to split your code up to make it more manageable, or to make a piece of code available for use in other programs.\vspace*{2mm} 154 | \item Mathematical functions like $\sin(x)$, $\cos(x)$, $\log(x)$ are available in the \texttt{math} module.\vspace*{2mm} 155 | \item There are two ways of importing functions from modules:\vspace*{2mm} 156 | \begin{itemize} 157 | \item \texttt{import math}: Python will import all the functions in the math module, but will keep the functions in their own separate {\color{red}namespace}. 
That is, you must {\color{red}prepend \texttt{math.} to the function's name} to use it, e.g. \texttt{x = 0.5; y = math.sin(x)}\vspace*{2mm} 158 | \item \texttt{from math import *}: Python will import all the functions in the math module into the current namespace. That is, you can simply do \texttt{x = 0.5; y = sin(x)}. But: be careful that you do not have another function named \texttt{sin} in your program! 159 | \end{itemize} 160 | \end{itemize} 161 | } 162 | 163 | \frame{ 164 | \frametitle{If statement} 165 | \framesubtitle{Definition} 166 | \begin{itemize} 167 | \item The {\color{red}if statement} is a programming construct that executes different blocks of code depending on whether a boolean {\color{red}condition} evaluates to {\color{red}\texttt{True}} or {\color{red}\texttt{False}}.\vspace*{2mm} 168 | 169 | \begin{minipage}[t]{\textwidth} 170 | \texttt{if(boolean condition):} 171 | 172 | \ \ \ \ \ \indent\texttt{print "The condition is True"} 173 | 174 | \texttt{else:} 175 | 176 | \ \ \ \ \ \indent\texttt{print "The condition is False"} 177 | \end{minipage}\vspace*{2mm} 178 | \vspace*{2mm} 179 | \item Condition: it is raining (True or False) 180 | \item Possible actions: take an umbrella, don't take an umbrella.
181 | \begin{minipage}[t]{\textwidth} 182 | \texttt{if(it is raining):} 183 | 184 | \ \ \ \ \ \indent\texttt{Take an umbrella.} 185 | 186 | \texttt{else:} 187 | 188 | \ \ \ \ \ \indent\texttt{Don't take an umbrella.} 189 | \end{minipage}\vspace*{2mm} 190 | \end{itemize} 191 | 192 | } 193 | 194 | \frame{ 195 | \frametitle{If statements} 196 | \framesubtitle{Examples of conditions} 197 | \begin{itemize} 198 | \item \texttt{b = 40}\vspace*{2mm} 199 | \item {\color{red}Equality} condition: \texttt{b == 40} $\rightarrow$ \texttt{True}\vspace*{2mm} 200 | \item {\color{red}Negation} (also known as the logical complement):\\\texttt{b != 40} $\rightarrow$ \texttt{False}\\ 201 | \texttt{not(b == 40)} $\rightarrow$ \texttt{False}\vspace*{2mm}\\ 202 | \item {\color{red}Or} condition: \texttt{b >= 40} $\rightarrow$ \texttt{True}\\ 203 | \texttt{b == 40 or b > 40} $\rightarrow$ \texttt{True}\vspace*{2mm}\\ 204 | \item {\color{red}And} condition: \texttt{b > 30 and b < 70} $\rightarrow$ \texttt{True}\\ 205 | \texttt{b > 30 and b < 35} $\rightarrow$ \texttt{False} 206 | \end{itemize} 207 | } 208 | 209 | \frame{ 210 | \frametitle{Logicals} 211 | \framesubtitle{And} 212 | \begin{figure}[H] 213 | \centering\includegraphics[width=0.9\columnwidth]{images/logical_and.png} 214 | \end{figure} 215 | } 216 | 217 | \frame{ 218 | \frametitle{Logicals} 219 | \framesubtitle{Or} 220 | \begin{figure}[H] 221 | \centering\includegraphics[width=0.9\columnwidth]{images/logical_or.png} 222 | \end{figure} 223 | } 224 | 225 | \frame{ 226 | \frametitle{Lists} 227 | \framesubtitle{Definition} 228 | \begin{itemize} 229 | \item {\color{red}List}: a Python data structure that can hold a {\color{red}sequence} of {\color{red}elements}/items/values. Elements can be added to, or removed from, a list.\vspace*{2mm} 230 | \item A list can be defined by enclosing the elements (separated by {\color{red}commas}) in {\color{red}square brackets}, e.g. 
\texttt{L = [4, 6, 2, -1]}\vspace*{2mm} 231 | \item {\color{red}Append} an element to the end of a list by using the general form: \texttt{list\_name.append(value)}\vspace*{2mm} 232 | \item Get the length of a list using the {\color{red}\texttt{len} function}: \texttt{len(L)} returns a value of 4. 233 | \end{itemize} 234 | } 235 | 236 | \frame{ 237 | \frametitle{Lists} 238 | \framesubtitle{Referencing elements} 239 | \begin{itemize} 240 | \item Each element of the list is assigned an {\color{red}index}, with the first element's index being zero.\vspace*{2mm} 241 | \item To reference/access an element of the list, follow the general form: {\color{red}\texttt{list\_name[element\_index]}} 242 | \begin{itemize} 243 | \item \texttt{L[0]} $\rightarrow$ \texttt{4} 244 | \item \texttt{L[-1]} $\rightarrow$ \texttt{-1} 245 | \item \texttt{L[len(L)-1]} $\rightarrow$ \texttt{-1} 246 | \end{itemize} 247 | \end{itemize} 248 | \begin{figure}[H] 249 | \centering\includegraphics[width=0.9\columnwidth]{images/list.png} 250 | \end{figure} 251 | } 252 | 253 | 254 | \frame{ 255 | \frametitle{Lists} 256 | \framesubtitle{Slicing} 257 | \begin{itemize} 258 | \item Sub-lists can also be extracted from a list. This is known as {\color{red}slicing}.\vspace*{2mm} 259 | \item General usage: \texttt{list\_name[start\_index:end\_index]}. By default, \texttt{start\_index} is implicitly set to 0 if not provided by the user. Similarly, \texttt{end\_index} is implicitly set to \texttt{len(list\_name)} if not provided.\vspace*{2mm} 260 | \item \texttt{L[0:len(L)]} $\equiv$ \texttt{L[:]} $\equiv$ \texttt{L}\vspace*{2mm} 261 | \item Example: \texttt{L = [2, 5, 8, 0, 5, 1]}.\\ \texttt{A = L[:4]} $\rightarrow$ \texttt{A = [2, 5, 8, 0]}. 262 | \end{itemize} 263 | } 264 | 265 | \frame{ 266 | \frametitle{Lists} 267 | \framesubtitle{Zip two lists} 268 | \begin{itemize} 269 | \item Elements from two lists can be combined using the {\color{red}zip} function to form a new list: a {\color{red}list of tuples}. 
270 | \end{itemize} 271 | \begin{figure}[H] 272 | \centering\includegraphics[width=0.9\columnwidth]{images/zip.png} 273 | \end{figure} 274 | } 275 | 276 | \frame{ 277 | \frametitle{Loops} 278 | \framesubtitle{Definition} 279 | \begin{itemize} 280 | \item {\color{red}Loop}: a programming construct that allows a block of code to be executed multiple times.\vspace*{2mm} 281 | \item Two types of loop: {\color{red}\texttt{while}} and {\color{red}\texttt{for}}. 282 | \end{itemize} 283 | } 284 | 285 | \frame{ 286 | \frametitle{Loops} 287 | \framesubtitle{While loop} 288 | \begin{itemize} 289 | \item Iterate indefinitely while some boolean condition is \texttt{True}. This condition is called the {\color{red}loop invariant}.\vspace*{2mm} 290 | \item The invariant is evaluated before the start of each iteration. If it evaluates to True, Python executes all the statements in the indented code block.\vspace*{2mm} 291 | \item General form: 292 | \begin{minipage}[t]{\textwidth} 293 | \texttt{while(boolean condition is True):} 294 | 295 | \ \ \ \ \ \indent\texttt{Statements to be executed} 296 | 297 | \ \ \ \ \ \indent\texttt{within a single loop iteration}. 298 | \end{minipage}\vspace*{2mm} 299 | \item Remember to update any variables that the boolean condition depends on within the loop, e.g. if the condition is \texttt{i < 100}, you might do \texttt{i = i + 1}. Otherwise, \texttt{i} will never increase, the boolean condition will always be True, and the loop will never end. 300 | \end{itemize} 301 | } 302 | 303 | \frame{ 304 | \frametitle{Loops} 305 | \framesubtitle{For loop} 306 | \begin{itemize} 307 | \item For loops must have {\color{red}something to iterate over}. This is usually a list or an array. 
308 | \item General form of a \texttt{for} loop: 309 | \begin{minipage}[t]{\textwidth} 310 | \texttt{for {\color{red}iterator} in {\color{red}iterable\_object}:} 311 | 312 | \ \ \ \ \ \indent\texttt{Do some cool stuff, possibly involving the iterator.} 313 | \end{minipage}\vspace*{2mm} 314 | \item Example: 315 | \begin{itemize} 316 | \item Iterator: \texttt{i} 317 | \item Iterable object: \texttt{range(0, 3)} $\rightarrow$ \texttt{[0, 1, 2]} 318 | \begin{minipage}[t]{\textwidth} 319 | \texttt{for i in range(0, 3):} 320 | 321 | \ \ \ \ \ \indent\texttt{print i*2} 322 | 323 | \texttt{print "Out of the loop!"} 324 | \end{minipage}\vspace*{2mm} 325 | \item Iteration 1: \texttt{i} = 0, Python prints out 0 to the screen.$\curvearrowright$ 326 | \item Iteration 2: \texttt{i} = 1, Python prints out 2 to the screen.$\curvearrowright$ 327 | \item Iteration 3: \texttt{i} = 2, Python prints out 4 to the screen.$\curvearrowright$ 328 | \item No more elements to iterate over, so the loop ends. 329 | \item Python prints \texttt{"Out of the loop!"} 330 | \end{itemize} 331 | \end{itemize} 332 | } 333 | 334 | \frame{ 335 | \frametitle{Functions} 336 | \framesubtitle{Definition} 337 | \begin{itemize} 338 | \item {\color{red}Function}: a programming construct that expects zero or more {\color{red}inputs}, and returns zero or more {\color{red}outputs}.\vspace*{2mm} 339 | \item General form:\vspace*{2mm} 340 | \begin{minipage}[t]{\textwidth} 341 | \texttt{def function\_name(input1, input2):} 342 | 343 | \ \ \ \ \ \indent\texttt{The function's {\color{red}body}. Compute any output values here.} 344 | 345 | \ \ \ \ \ \indent\texttt{return output1, output2, output3} 346 | \end{minipage}\vspace*{2mm} 347 | \item The inputs are known as {\color{red}arguments}.\vspace*{2mm} 348 | \item Example: the function \texttt{len} takes in 1 argument (a list/tuple/string/...) and returns 1 value (the length of that list/tuple/string). 
349 | \end{itemize} 350 | } 351 | 352 | \frame{ 353 | \frametitle{User input} 354 | \framesubtitle{} 355 | \begin{itemize} 356 | \item User input can be read from the keyboard using the {\color{red}\texttt{raw\_input}} function. This takes 1 argument (a message that you want to show to the user, e.g. ``Enter a number between 1 and 10''), and gives 1 output (the user's input).\vspace*{2mm} 357 | \item This return/output value of the \texttt{raw\_input} function is always a {\color{red}string}. \vspace*{2mm} 358 | \item Remember: numerical data in string form needs to be {\color{red}converted}, or {\color{red}casted}, to a float or integer.\vspace*{2mm} 359 | \end{itemize} 360 | } 361 | 362 | 363 | \frame{ 364 | \frametitle{Exception handling} 365 | \framesubtitle{Definition} 366 | \begin{itemize} 367 | \item {\color{red}Exceptions}: errors that occur when Python cannot properly execute a line of code at run-time.\vspace*{2mm} 368 | \item Common examples include: 369 | \begin{itemize} 370 | \item Trying to reference an element in a list that doesn't exist. e.g. \texttt{L = [1, 2]; print L[2]} 371 | \item Trying to divide by zero. 372 | \end{itemize}\vspace*{2mm} 373 | \item It is important that we handle these errors {\color{red}gracefully}, because: 374 | \begin{itemize} 375 | \item The standard exception error message will probably confuse an average user. 376 | \item The program might not be able to continue executing properly. 377 | \end{itemize}\vspace*{2mm} 378 | \end{itemize} 379 | } 380 | 381 | \frame{ 382 | \frametitle{Exception handling} 383 | \framesubtitle{try-except blocks} 384 | \begin{itemize} 385 | \item {\color{red}try-except blocks} are used to handle exceptions.\vspace*{2mm} 386 | \item Identify lines of your code where an exception may occur, and wrap them in a \texttt{try} block.\vspace*{2mm} 387 | \item In the \texttt{except} block, we decide how to handle the error. e.g. 
388 | \begin{minipage}[t]{\textwidth} 389 | \texttt{try:} 390 | 391 | \ \ \ \ \ \indent\texttt{number = float(raw\_input("Enter a number:")} 392 | 393 | \texttt{except ValueError:} 394 | 395 | \ \ \ \ \ \indent\texttt{print "Error: you didn't enter a number."} 396 | \end{minipage}\vspace*{2mm} 397 | \end{itemize} 398 | } 399 | 400 | \end{document} 401 | -------------------------------------------------------------------------------- /files/revision_2/revision_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/files/revision_2/revision_2.pdf -------------------------------------------------------------------------------- /files/revision_2/revision_2.tex: -------------------------------------------------------------------------------- 1 | \documentclass[t,11pt,british,english, top=1.0in]{beamer} 2 | %\usetheme{amcg} 3 | %\usetheme{iclpt} 4 | \usepackage{url} 5 | \usepackage{hyperref} 6 | \usepackage{graphics} 7 | \usepackage{amsmath} 8 | \usepackage{amssymb} 9 | \usepackage{tikz} 10 | \usetikzlibrary{calc,decorations.pathreplacing} 11 | \usepackage{natbib} 12 | %You can use the package \textbf{pgfpages} 13 | %to arrange your slides for printing. This is also explained 14 | %in the \textbf{beamer} documentation. 
15 | 16 | \newcommand{\tensor}[1]{\overline{\overline{#1}}} 17 | \newcommand{\tautens}{\tensor{\tau}} 18 | 19 | \setbeamerfont{framesubtitle}{size=\normalsize} 20 | \setbeamerfont{framesubtitle}{size=\normalsize} 21 | %\setbeamertemplate{frametitle}[default][center] 22 | %\setbeamersize{text margin left=6mm} 23 | 24 | \usetheme{Madrid} 25 | \usecolortheme{orchid} 26 | 27 | %gets rid of bottom navigation bars 28 | \setbeamertemplate{footline}[page number]{} 29 | \setbeamertemplate{headline}{} 30 | 31 | %gets rid of navigation symbols 32 | \setbeamertemplate{navigation symbols}{} 33 | 34 | \begin{document} 35 | \title{Introduction to programming for Geoscientists\\\vspace*{5mm}Revision Lecture 2} 36 | \author{} 37 | \date{} 38 | 39 | \frame{\titlepage} 40 | 41 | \frame{ 42 | \frametitle{Classes and Objects} 43 | \framesubtitle{Definitions} 44 | \begin{itemize} 45 | \item {\color{red}Class}: a programming construct which models/describes a set of objects with common properties as a generic package that encapsulates the objects' data (attributes) and functions (behaviour).\vspace*{2mm} 46 | \begin{itemize} 47 | \item They make programs more manageable. 48 | \item They allow information hiding.
49 | \end{itemize}\vspace*{2mm} 50 | \item {\color{red}Object}: a specific {\color{red}instance} of a class.\vspace*{2mm} 51 | \item {\color{red}Instantiation}: the process of creating an object from a class.\vspace*{15mm} 52 | 53 | \item A {\color{red}class} is like a {\color{red}blueprint/template} from which {\color{red}objects} can be {\color{red}created/instantiated}.\vspace*{2mm} 54 | \end{itemize} 55 | } 56 | 57 | \frame{ 58 | \frametitle{Classes and Objects} 59 | \framesubtitle{Examples} 60 | \begin{itemize} 61 | \item A human ({\color{red}object}) is born ({\color{red}instantiated}) with physical characteristics defined by a genetic makeup ({\color{red}class}):\vspace*{2mm} 62 | \begin{itemize} 63 | \item data/attributes: eye colour, hair colour 64 | \item functions/behaviour: sleep, drink, eat 65 | \end{itemize}\vspace*{5mm} 66 | \item A cake ({\color{red}object}) is baked in the oven ({\color{red}instantiated}) once prepared from a generic cake recipe ({\color{red}class}):\vspace*{2mm} 67 | \begin{itemize} 68 | \item data/attributes: flavour, number of slices 69 | \item functions/behaviour: remove slice, expire 70 | \end{itemize}\vspace*{5mm} 71 | \end{itemize} 72 | } 73 | 74 | 75 | \frame{ 76 | \frametitle{Classes and Objects} 77 | \framesubtitle{Motivation} 78 | \begin{minipage}[t]{\textwidth} 79 | \texttt{def print\_data(id, name, course):} 80 | 81 | \ \ \ \ \ \indent\texttt{print "Student id: \%d, name: \%s, course: \%s" \% \\(id, name, course)} 82 | 83 | \texttt{ } 84 | 85 | \texttt{student1\_id = 1} 86 | 87 | \texttt{student1\_name = "Bob"} 88 | 89 | \texttt{student1\_course = "Geology"} 90 | 91 | \texttt{student2\_id = 2} 92 | 93 | \texttt{student2\_name = "Alice"} 94 | 95 | \texttt{student2\_course = "Computer science"} 96 | 97 | \texttt{print\_data(student2\_id, student2\_name, student2\_course)} 98 | \end{minipage}\vspace*{2mm} 99 | } 100 | 101 | \frame{ 102 | \frametitle{Classes and Objects} 103 | \framesubtitle{Motivation} 104 | 
\begin{minipage}[t]{\textwidth} 105 | \texttt{class Student:} 106 | 107 | \ \ \ \ \ \indent\texttt{def \_\_init\_\_(self, id, name, course):} 108 | 109 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{self.id = id} 110 | 111 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{self.name = name} 112 | 113 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{self.course = course} 114 | 115 | \ \ \ \ \ \indent\texttt{def print\_data(self):} 116 | 117 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{print "Student id: \%d, name: \%s, course: \%s" \% \\(self.id, self.name, self.course)} 118 | 119 | \texttt{ } 120 | 121 | \texttt{student1 = Student(1, "Bob", "Geology")} 122 | 123 | \texttt{student2 = Student(2, "Alice", "Computer science")} 124 | 125 | \texttt{student2.print\_data()} 126 | \end{minipage}\vspace*{2mm} 127 | } 128 | 129 | \frame{ 130 | \frametitle{Classes and Objects} 131 | \framesubtitle{Terminology} 132 | \begin{itemize} 133 | \item Variables belonging to a class are known as {\color{red}attributes}.\vspace*{2mm} 134 | \item Functions belonging to a class are known as {\color{red}methods}.\vspace*{2mm} 135 | \item It is good practice to change attributes via {\color{red}get/set} methods, not directly. 136 | \end{itemize} 137 | } 138 | 139 | \frame{ 140 | \frametitle{Classes and Objects} 141 | \framesubtitle{Example} 142 | \begin{itemize} 143 | \item General example of a class definition in Python:\vspace*{2mm} 144 | \begin{minipage}[t]{\textwidth} 145 | \texttt{class ClassName:} 146 | 147 | \ \ \ \ \ \indent\texttt{def \_\_init\_\_(self, input1, input2):} 148 | 149 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{self.a = input1} 150 | 151 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{self.b = input2} 152 | 153 | \ \ \ \ \ \indent\texttt{def method1(self, input1):} 154 | 155 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{print "Hello \%s!" 
\% input1} 156 | 157 | \ \ \ \ \ \indent\texttt{def method2(self):} 158 | 159 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{print "a = \%d, b = \%d" \% (self.a, self.b)} 160 | 161 | \texttt{A = ClassName(5, 10)} 162 | 163 | \texttt{A.method1("world")} $\rightarrow$ ``Hello world!'' 164 | 165 | \texttt{A.method2()} $\rightarrow$ ``a = 5, b = 10'' 166 | \end{minipage}\vspace*{2mm} 167 | \item \texttt{self} can be thought of as the object that is calling one of the methods. 168 | \item \texttt{\_\_init\_\_} is a special method used to initialise/setup objects. 169 | \end{itemize} 170 | } 171 | 172 | \frame{ 173 | \frametitle{Classes and Objects} 174 | \framesubtitle{Cake} 175 | \begin{itemize} 176 | \item The following example describes cake: 177 | \begin{minipage}[t]{\textwidth} 178 | \texttt{class Cake:} 179 | 180 | \ \ \ \ \ \indent\texttt{def \_\_init\_\_(self, cake\_type):} 181 | 182 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{self.type = cake\_type} 183 | 184 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{self.number\_of\_slices = 10} 185 | 186 | \ \ \ \ \ \indent\texttt{def eat\_slice(self):} 187 | 188 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{self.number\_of\_slices = self.number\_of\_slices-1} 189 | 190 | \ \ \ \ \ \ \ \ \ \ \indent\texttt{print "\%d slices remaining." 
\% self.number\_of\_slices} 191 | 192 | \ \ \ \ \ \ \ \ \ \ 193 | 194 | \texttt{A = Cake("Chocolate")} 195 | 196 | \texttt{B = Cake("Lemon")} 197 | 198 | \ \ \ \ \ \ \ \ \ \ 199 | 200 | \indent\texttt{print A.type} $\rightarrow$ ``Chocolate'' 201 | 202 | \indent\texttt{print B.type} $\rightarrow$ ``Lemon'' 203 | 204 | \indent\texttt{B.eat\_slice()} $\rightarrow$ ``9 slices remaining.'' 205 | \end{minipage}\vspace*{2mm} 206 | 207 | \end{itemize} 208 | } 209 | 210 | \frame{ 211 | \frametitle{NumPy Arrays} 212 | \framesubtitle{Definition} 213 | \begin{itemize} 214 | \item {\color{red}Arrays}: data structures which contain a sequence of {\color{red}elements}/items/values.\vspace*{2mm} 215 | \item Similar to lists (or lists of lists), but are of a {\color{red}fixed size} and can only contain {\color{red}one data type}.\vspace*{2mm} 216 | \item Arrays are generally {\color{red}faster than lists} because array elements are stored in {\color{red}contiguous} areas of memory. 217 | \end{itemize} 218 | } 219 | 220 | \frame{ 221 | \frametitle{NumPy Arrays} 222 | \framesubtitle{linspace and zeros} 223 | \begin{itemize} 224 | \item Two useful functions for creating arrays:\vspace*{2mm} 225 | \begin{itemize} 226 | \item {\color{red}linspace(start, end, n)}: creates an array of $n$ uniformly distributed points in the interval [start, end].\vspace*{2mm} 227 | \item {\color{red}zeros(n)}: creates an array of $n$ elements that are all initialised to zero. 
228 | \end{itemize}\vspace*{2mm} 229 | \item Or...simply define as a list of lists (in 2D) and cast/convert to an array:\vspace*{2mm} 230 | \begin{minipage}[t]{0.4\textwidth} 231 | $$ 232 | \left\lbrack\begin{array}{ccc} 233 | 0 & 12 & -1\cr 234 | -1 & -1 & -1\cr 235 | 11 & 5 & 5 236 | \end{array}\right\rbrack 237 | $$ 238 | \end{minipage} \vspace*{2mm} 239 | \begin{minipage}[t]{0.4\textwidth} 240 | \texttt{a = [ [0, 12, -1],} 241 | 242 | \texttt{[-1, -1, -1],} 243 | 244 | \texttt{[11, 5, 5] ]} 245 | 246 | \texttt{a = array(a)} 247 | 248 | \end{minipage}\vspace*{2mm} 249 | 250 | \item Remember to include \texttt{from numpy import *} in your program. 251 | \end{itemize} 252 | } 253 | 254 | \frame{ 255 | \frametitle{NumPy Arrays} 256 | \framesubtitle{Referencing/accessing elements} 257 | \begin{itemize} 258 | \item Referencing/accessing elements in an array is the same as referencing list elements.\vspace*{2mm} 259 | \item \texttt{a[i][j]} accesses the element at row \texttt{i} and column \texttt{j}.\vspace*{2mm} 260 | \item {\color{red}R}ow fi{\color{red}R}st, {\color{red}C}olumn se{\color{red}C}ond.\vspace*{2mm} 261 | \end{itemize} 262 | } 263 | 264 | \frame{ 265 | \frametitle{NumPy Arrays} 266 | \framesubtitle{Vectorised functions} 267 | \begin{itemize} 268 | \item A {\color{red}vectorised} function can accept an array as its input...\vspace*{2mm} 269 | \item ...and for each element of that array, compute the result...\vspace*{2mm} 270 | \item ...and output all results in a new array. 
271 | \begin{minipage}[t]{\textwidth} 272 | \texttt{from numpy import *} 273 | 274 | \texttt{a = linspace(0, 1, 10)} 275 | 276 | \texttt{result = sin(a) \# Result is an array here.} 277 | \end{minipage}\vspace*{2mm} 278 | \item This is like doing: 279 | \begin{minipage}[t]{\textwidth} 280 | \texttt{from numpy import *} 281 | 282 | \texttt{a = linspace(0, 1, 10)} 283 | 284 | \texttt{result = zeros(10)} 285 | 286 | \texttt{for i in range(0, 10):} 287 | 288 | \ \ \ \ \ \indent\texttt{result[i] = sin(a[i])} 289 | \end{minipage}\vspace*{2mm} 290 | \item But with vectorised functions, this \texttt{for} loop is implicit. 291 | \end{itemize} 292 | } 293 | 294 | \frame{ 295 | \frametitle{Strings} 296 | \framesubtitle{Definition} 297 | \begin{itemize} 298 | \item {\color{red}String}: a {\color{red}sequence of characters}, terminated by an {\color{red}end-of-line marker}.\vspace*{2mm} 299 | \item Each character in a string can be accessed in the same way as elements of a list or array: 300 | \begin{minipage}[t]{\textwidth} 301 | \texttt{s = "hello"} 302 | 303 | \texttt{print s[0]} $\rightarrow$ ``h'' 304 | 305 | \texttt{print s[2]} $\rightarrow$ ``l'' 306 | \end{minipage}\vspace*{2mm} 307 | \item \texttt{split} breaks up strings wherever a user-defined {\color{red}delimiter} is encountered.\vspace*{2mm} 308 | \item For example, if the delimiter is a comma: 309 | \begin{minipage}[t]{\textwidth} 310 | \texttt{s = "hello world, Python is really awesome."} 311 | 312 | \texttt{print s.split(",")} $\rightarrow$ [``hello world'', `` Python is really awesome''] 313 | \end{minipage}\vspace*{2mm} 314 | \item Remember: Strings are {\color{red}immutable}/{\color{red}constant} data structures. They cannot be modified once defined. 
315 | \end{itemize} 316 | } 317 | 318 | \frame{ 319 | \frametitle{Files} 320 | \framesubtitle{Reading} 321 | \begin{itemize} 322 | \item Open a file (for reading) using \texttt{f = open("file\_name\_here.txt", "r")}.\vspace*{2mm} 323 | \item A file can be thought of as a {\color{red}list of strings}, with each string being a single line of the file.\vspace*{2mm} 324 | \item We can select one line at a time using \texttt{f.readline()}, ...\vspace*{2mm} 325 | \item ...or select all the lines in the file using \texttt{f.readlines()}.\vspace*{2mm} 326 | \item It is good practice to {\color{red}close} the file (once it is no longer needed) with \texttt{f.close()} 327 | \end{itemize} 328 | } 329 | 330 | \frame{ 331 | \frametitle{Files} 332 | \framesubtitle{Writing} 333 | \begin{itemize} 334 | \item Open a file (for writing) using \texttt{f = open("file\_name\_here.txt", "w")}.\vspace*{2mm} 335 | \item Write a string to the file using \texttt{f.write(string\_to\_write\_here)}.\vspace*{2mm} 336 | \item Once again, remember to close the file after use. 
337 | \end{itemize} 338 | } 339 | 340 | \frame{ 341 | \frametitle{Dictionaries} 342 | \framesubtitle{Definition} 343 | \begin{itemize} 344 | \item {\color{red}Dictionary}: a data structure whose elements are {\color{red}key-value pairs}.\vspace*{2mm} 345 | \item The key does not have to be an integer.\vspace*{2mm} 346 | \item Example: \texttt{d = \{"Barcelona":11.0, "Lleida":6.0, "Tarragona":8.0 \}}\vspace*{2mm} 347 | \item \texttt{d.keys()} $\rightarrow$ [``Barcelona'', ``Lleida'', ``Tarragona'']\vspace*{2mm} 348 | \item \texttt{d.values()} $\rightarrow$ [11.0, 6.0, 8.0]\vspace*{2mm} 349 | \item Items can be added using \texttt{d[new\_key\_here] = value\_here}.\vspace*{2mm} 350 | \item ...or existing items can be accessed using the key: \texttt{print d["Barcelona"]}.\vspace*{2mm} 351 | \end{itemize} 352 | } 353 | 354 | \end{document} 355 | -------------------------------------------------------------------------------- /notebook/Lecture-2-Numerical-methods-1-extra-exercises.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Numerical Methods 1\n", 8 | "### [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman), [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott), [Nicolas Barral](http://www.imperial.ac.uk/people/n.barral)" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Lecture 2: Numerical Differentiation. Extra exercises\n", 16 | "\n", 17 | "## Learning objectives:\n", 18 | "\n", 19 | "* Learn about finite difference approximations to derivatives.\n", 20 | "* Be able to implement forward and central difference methods.\n", 21 | "* Calculate higher-order derivatives.\n", 22 | "* Solve simple ODEs using the finite difference method."
23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## Exercise: Write Taylor expansions\n", 30 | "\n", 31 | "Write the Taylor series expansion of the following functions to the order 5:\n", 32 | " - $f(x) = e^x$ in $x_0 = 0$\n", 33 | " - $f(x) = \\frac{1}{1-x}$ in $x_0 = 0$\n", 34 | " - $f(x) = sin(x)$ in $x_0 = \\pi$" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": {}, 40 | "source": [ 41 | "## Exercise: Write a finite difference scheme of $f'''$\n", 42 | "\n", 43 | "The third derivative of function f is noted $f'''(x) = f^{(3)}(x) = \\frac{\\mathrm{d}^3 f}{\\mathrm{d}x^3}$.\n", 44 | "Notice that $f'''(x) = g'(x)$ where $g(x)=f''(x)$. Use this remark to write a finite difference approximation of $f'''$ in $x_0$. Use central differencing for both first and second derivative approximations." 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "## Exercise: Accuracy of forward Euler and Heun's methods\n", 52 | "\n", 53 | "Consider equation \n", 54 | "$$u'(t)=u(t),\\quad u(0)=1\\,.$$\n", 55 | "\n", 56 | "Use the functions from Exercises 2.5 and 2.6 to plot the forward Euler and Heun's approximate solutions of this equation for different values of $h$.\n", 57 | "Determine visually when both methods start to produce similar result.\n", 58 | "\n", 59 | "Write a function *approx_error(f, u0, t0, t, h)* that returns the approximation error at a given time $t$ for both methods.\n", 60 | "\n", 61 | "Using a while loop, compute the error at $t=3$ for $h$ from $1$ to $10^{-6}$, and plot it against h in logarithmic scale.\n", 62 | "\n", 63 | "When does the difference between both approximate solutions at time $t=3$ start to be smaller than $10^{-3}$." 
64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": { 70 | "collapsed": true 71 | }, 72 | "outputs": [], 73 | "source": [] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "## Exercise: Study a function and its derivative [Langtangen B.3]\n", 80 | "\n", 81 | "Consider the function:\n", 82 | "$$ u(x) = \\sin\\left(\\frac{1}{x+\\epsilon}\\right)$$ \n", 83 | "for x ranging from 0 to 1, and the derivative\n", 84 | "$$u'(x) = \\frac{-\\cos({\\frac{1}{x+\\epsilon}})}{(x+\\epsilon)^2} $$\n", 85 | "Here, $\\epsilon$ is a given input parameter.\n", 86 | "\n", 87 | "\n", 88 | "Make sure you can find the expression of $u'(x)$ by pen and paper.\n", 89 | "\n", 90 | "Let us subdivide interval $[0,1]$ into n points $x_0=0, ..., x_{n-1}=1$.\n", 91 | "\n", 92 | "Write a function *compute_discrete_u(epsilon, n)* that returns two numpy arrays:\n", 93 | " - *x_array* contains the coordinates of the $n$ points\n", 94 | " - *u_array* contains the discrete values of $u$ at these points.\n", 95 | "\n", 96 | "Write a function *compute_derivatives(x_array, u_array)* that returns two numpy arrays:\n", 97 | " - *derivative_exact_array* contains the exact derivative given by $u' = u'(x)$ above\n", 98 | " - *derivative_fd_array* contains an approximate derivative obtained with a finite difference method. Use a central differencing method for inner points, and a forward (resp. backward) differencing for the first (resp. last) points.\n", 99 | "\n", 100 | "Test the function using $n = 10$ and $\\epsilon= 1/5$ and plot the two derivatives against x.\n", 101 | "\n", 102 | "How large do you have to choose $n$ in order for the relative difference between\n", 103 | "these two functions to be less than 0.1? 
\n", 104 | "For the difference, you can use the formula:\n", 105 | "$$ D = \\max_{i=0...n} \\left|\\frac{u'_{exact}(x_i)-u'_{approx}(x_i)}{u'_{exact}(x_i)}\\right|$$\n", 106 | "\n", 107 | "Let $\\epsilon = 1/10$ and $1/20$, and repeat previous question.\n", 108 | "\n", 109 | "Try to determine experimentally how large $n$ needs to be for a given\n", 110 | "value of $\\epsilon$ such that increasing n further does not change the plot\n", 111 | "so much that you can view it on the screen. Note, that there\n", 112 | "is no exact solution to this problem." 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": { 119 | "collapsed": true 120 | }, 121 | "outputs": [], 122 | "source": [] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "metadata": {}, 127 | "source": [ 128 | "## Exercise: Experimental data [Kiusalaas A.15]\n", 129 | "\n", 130 | "The relationship between the stress $\\sigma$ and the strain $\\epsilon$ of some biological materials in uniaxial tension is\n", 131 | "\n", 132 | "$$\n", 133 | "\\frac{\\mathrm{d}\\sigma}{\\mathrm{d}\\epsilon} = a + b\\sigma\n", 134 | "$$\n", 135 | "\n", 136 | "where a and b are constants. \n", 137 | "\n", 138 | "The following table gives experimental data for the stress and strain of such a material: \n", 139 | "\n", 140 | " \n", 141 | "\n", 142 | "\n", 143 | "\n", 144 | "\n", 145 | "\n", 146 | "\n", 147 | "\n", 148 | "\n", 149 | "\n", 150 | "\n", 151 | "\n", 152 | "
Strain $\\epsilon$ Stress $\\sigma$ (MPa)
0 0
0.05 0.252
0.10 0.531
0.15 0.840
0.20 1.184
0.25 1.558
0.30 1.975
0.35 2.444
0.40 2.943
0.45 3.500
0.50 4.115
\n", 153 | "\n", 154 | "Write a program that computes and plots the tangent modulus $\\mathrm{d}\\sigma/\\mathrm{d}\\epsilon$ versus $\\sigma$.\n", 155 | "\n", 156 | "Using [Lecture 1](http://nbviewer.jupyter.org/github/ggorman/Numerical-methods-1/blob/master/notebook/interpolation.ipynb) compute the parameters $a$ and $b$ by linear regression (*i.e.* fit a straight line, (a polynomial of degree ...) to the data).\n" 157 | ] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "execution_count": null, 162 | "metadata": { 163 | "collapsed": true 164 | }, 165 | "outputs": [], 166 | "source": [] 167 | } 168 | ], 169 | "metadata": { 170 | "kernelspec": { 171 | "display_name": "Python 3", 172 | "language": "python", 173 | "name": "python3" 174 | }, 175 | "language_info": { 176 | "codemirror_mode": { 177 | "name": "ipython", 178 | "version": 3 179 | }, 180 | "file_extension": ".py", 181 | "mimetype": "text/x-python", 182 | "name": "python", 183 | "nbconvert_exporter": "python", 184 | "pygments_lexer": "ipython3", 185 | "version": "3.6.2" 186 | } 187 | }, 188 | "nbformat": 4, 189 | "nbformat_minor": 1 190 | } 191 | -------------------------------------------------------------------------------- /notebook/Lecture-2-Numerical-methods-1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "slideshow": { 7 | "slide_type": "slide" 8 | } 9 | }, 10 | "source": [ 11 | "# Numerical Methods 1\n", 12 | "### [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman), [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott), [Nicolas Barral](http://www.imperial.ac.uk/people/n.barral)\n", 13 | "\n", 14 | "# Lecture 2: Numerical Differentiation" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": { 20 | "slideshow": { 21 | "slide_type": "slide" 22 | } 23 | }, 24 | "source": [ 25 | "## Learning objectives:\n", 26 | "\n", 27 | "* Learn about finite 
difference approximations to derivatives.\n", 28 | "* Be able to implement forward and central difference methods.\n", 29 | "* Calculate higher-order derivatives.\n", 30 | "* Solve simple ODEs using the finite difference method." 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": { 36 | "slideshow": { 37 | "slide_type": "slide" 38 | } 39 | }, 40 | "source": [ 41 | "## Finite differences -- the forward difference\n", 42 | "\n", 43 | "Finite differences are a class of approximation methods for estimating/computing derivatives of functions.\n", 44 | "\n", 45 | "Approximations to the derivatives of a function can be computed by using weighted sums of function evaluations at a number of points. The elementary definition of the derivative of a function $f$ at a point $x_0$ is given by:\n", 46 | "\n", 47 | " $$ f'(x_0)=\\lim_{h\\rightarrow 0} \\frac{f(x_0+h)-f(x_0)}{h} $$" 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "metadata": { 53 | "slideshow": { 54 | "slide_type": "subslide" 55 | } 56 | }, 57 | "source": [ 58 | "We can turn this into an approximation rule for $f'(x)$ by replacing the limit as $h$ approaches $0$ with a small but finite $h$:\n", 59 | "\n", 60 | " $$ f'(x_0)\\approx \\frac{f(x_0+h)-f(x_0)}{h},\\qquad h>0 $$\n", 61 | "\n", 62 | "The figure below illustrates this approximation. Because the approximate gradient is calculated using values of $x$ greater than $x_0$, this algorithm is known as the **forward difference method**. In the figure the derivative is approximated by the slope of the red line, while the true derivative is the slope of the blue line -- if the second (and/or higher) derivative of the function is large then this approximation might not be very good unless you make $h$ very small." 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": { 68 | "slideshow": { 69 | "slide_type": "subslide" 70 | } 71 | }, 72 | "source": [ 73 | "![Forward difference method for approximating $f'(x_0)$. 
The derivative is approximated by the slope of the red line, while the true derivative is the slope of the blue line.](images/forward_diff.png)" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": { 79 | "slideshow": { 80 | "slide_type": "slide" 81 | } 82 | }, 83 | "source": [ 84 | "## Taylor series to estimate accuracy\n", 85 | "We can use a [Taylor series expansion](http://mathworld.wolfram.com/TaylorSeries.html) to estimate the accuracy of the method. Recall that Taylor series in one dimension tells us that we can expand an increment to the evaluation point of a function as follows:\n", 86 | "\n", 87 | "\\begin{align*}\n", 88 | "f(x_0+h)&=f(x_0)+hf'(x_0)+ \\frac{h^2}{2!}f''(x_0) + \\frac{h^3}{3!}f'''(x_0) + \\ldots\\\\ & =f(x_0)+hf'(x_0)+O(h^2)\n", 89 | "\\end{align*}\n", 90 | " \n", 91 | "where $O(h^2)$ represents the collection of terms that are second-order in $h$ or higher." 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": { 97 | "slideshow": { 98 | "slide_type": "subslide" 99 | } 100 | }, 101 | "source": [ 102 | "If we rearrange this expression to isolate the gradient term $f'(x_0)$ on the left hand side, we find:\n", 103 | "\n", 104 | " $$ hf'(x_0)=f(x_0+h)-f(x_0) +O(h^2) $$\n", 105 | " \n", 106 | "and therefore, by dividing through by $h$,\n", 107 | " \n", 108 | " $$ f'(x_0)=\\frac{f(x_0+h)-f(x_0)}{h}+O(h) $$\n", 109 | "\n", 110 | "As we are left with $O(h)$ at the end, we know that the forward difference method is first-order (i.e. $h^1$) -- as we make the spacing $h$ smaller we expect the error in our derivative to fall linearly." 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": { 116 | "slideshow": { 117 | "slide_type": "subslide" 118 | } 119 | }, 120 | "source": [ 121 | "For general numerical methods we generally strive for something better than this -- if we halve our $h$ (and so are doing twice as much (or more) work potentially) we would like our error to drop super-linearly: i.e. 
by a factor of 4 (second-order method) or 8 (third-order method) or more." 122 | ] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "metadata": { 127 | "slideshow": { 128 | "slide_type": "slide" 129 | } 130 | }, 131 | "source": [ 132 | "## Exercise 2.1: Compute first derivative using forward differencing\n", 133 | "\n", 134 | "Use the forward difference scheme to compute an approximation to $f'(2.36)$ from the following data:\n", 135 | "\n", 136 | "$f(2.36) = 0.85866$\n", 137 | "\n", 138 | "$f(2.37) = 0.86289$\n", 139 | "\n", 140 | "You should get an answer of 0.423." 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "metadata": { 146 | "slideshow": { 147 | "slide_type": "slide" 148 | } 149 | }, 150 | "source": [ 151 | "## Central differencing\n", 152 | "\n", 153 | "In an attempt to derive a more accurate method, we use two Taylor series expansions; one in the positive $x$ direction from $x_0$, and one in the negative direction. Because we hope to achieve better than first order, we include an extra term in the series:\n", 154 | "\n", 155 | "$$ f(x_0+h)=f(x_0)+hf'(x_0)+\\frac{h^2}{2}f''(x_0) + O(h^3),$$\n", 156 | "\n", 157 | "$$ f(x_0-h)=f(x_0)-hf'(x_0)+\\frac{(-h)^2}{2}f''(x_0) + O((-h)^3).$$" 158 | ] 159 | }, 160 | { 161 | "cell_type": "markdown", 162 | "metadata": { 163 | "slideshow": { 164 | "slide_type": "subslide" 165 | } 166 | }, 167 | "source": [ 168 | "Using the fact that $(-h)^2=h^2$ and the absolute value signs from the definition of $O$, this is equivalent to:\n", 169 | "\n", 170 | "$$ f(x_0+h)=f(x_0)+hf'(x_0)+\\frac{h^2}{2}f''(x_0) + O(h^3),$$\n", 171 | " \n", 172 | "$$ f(x_0-h)=f(x_0)-hf'(x_0)+\\frac{h^2}{2}f''(x_0) + O(h^3).$$" 173 | ] 174 | }, 175 | { 176 | "cell_type": "markdown", 177 | "metadata": { 178 | "slideshow": { 179 | "slide_type": "subslide" 180 | } 181 | }, 182 | "source": [ 183 | "Remember that we are looking for an expression for $f'(x_0)$. 
Noticing the sign change between the derivative terms in the two equations, we subtract the bottom equation from the top equation to give:\n", 184 | "\n", 185 | "$$ f(x_0+h)-f(x_0-h)=2hf'(x_0) + O(h^3).$$\n", 186 | "\n", 187 | "Finally, rearrange to get an expression for $f'(x_0)$:\n", 188 | "\n", 189 | "$$ f'(x_0)=\\frac{f(x_0+h)-f(x_0-h)}{2h} + O(h^2).$$" 190 | ] 191 | }, 192 | { 193 | "cell_type": "markdown", 194 | "metadata": { 195 | "slideshow": { 196 | "slide_type": "subslide" 197 | } 198 | }, 199 | "source": [ 200 | "We can see that by taking an interval symmetric about $x_0$, we have created a second-order approximation for the derivative of $f$. This symmetry gives the scheme its name: the central difference method. The figure below illustrates this scheme. The derivative is approximated by the slope of the red line, while the true derivative is the slope of the blue line. \n", 201 | "\n", 202 | "Even without the analysis above it's hopefully clear visually why this should in general give a lower error than the forward difference approach. However the analysis of the two methods does tell us that as we halve $h$ the error should drop by a factor 4 rather than the 2 we get for the first-order forward differencing." 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "metadata": { 208 | "slideshow": { 209 | "slide_type": "subslide" 210 | } 211 | }, 212 | "source": [ 213 | "![\"Central difference method for approximating $f'(x_0)$. 
The derivative is approximated by the slope of the red line, while the true derivative is the slope of the blue line.\"](images/central_diff.png)" 214 | ] 215 | }, 216 | { 217 | "cell_type": "markdown", 218 | "metadata": { 219 | "slideshow": { 220 | "slide_type": "slide" 221 | } 222 | }, 223 | "source": [ 224 | "## Exercise 2.2: Compute first derivative using central differencing\n", 225 | "\n", 226 | "Use the data below to compute $f'(0.2)$ using central differencing:\n", 227 | "\n", 228 | "$$f(0.1) = 0.078348$$\n", 229 | "$$f(0.2) = 0.138910$$\n", 230 | "$$f(0.3) = 0.192916$$\n", 231 | "\n", 232 | "You should get 0.57284" 233 | ] 234 | }, 235 | { 236 | "cell_type": "markdown", 237 | "metadata": { 238 | "slideshow": { 239 | "slide_type": "slide" 240 | } 241 | }, 242 | "source": [ 243 | "## Example: Write a function to perform numerical differentiation\n", 244 | "\n", 245 | "As covered above, the formula\n", 246 | "\n", 247 | "$$f^\\prime(x) \\approx \\frac{f(x+h) - f(x-h)}{2h}$$\n", 248 | "\n", 249 | "can be used to find an approximate derivative of a mathematical function $f(x)$ if $h$ is small. \n", 250 | "\n", 251 | "Write a function *diff*( *f*, *x*, *h*=1E-6) that returns the approximation of the derivative of a mathematical function represented by a Python function f(x)." 
252 | ] 253 | }, 254 | { 255 | "cell_type": "code", 256 | "execution_count": 1, 257 | "metadata": { 258 | "collapsed": true, 259 | "slideshow": { 260 | "slide_type": "subslide" 261 | } 262 | }, 263 | "outputs": [], 264 | "source": [ 265 | "# Write a function for numerical differentiation\n", 266 | "\n", 267 | "from math import exp, cos, log, pi\n", 268 | "\n", 269 | "def diff(f, x, h = 1E-6):\n", 270 | " numerator = f(x + h) - f(x - h)\n", 271 | " derivative = numerator/(2.0*h)\n", 272 | " return derivative" 273 | ] 274 | }, 275 | { 276 | "cell_type": "markdown", 277 | "metadata": { 278 | "slideshow": { 279 | "slide_type": "subslide" 280 | } 281 | }, 282 | "source": [ 283 | "Apply the above formula to differentiate $f(x) = e^x$ at x = 0, $f(x) = e^{−2x}$ at\n", 284 | "x = 0, $f(x) = \\cos(x)$ at x = 2$\\pi$ , and $f(x) = \\ln(x)$ at x = 1. \n", 285 | "\n", 286 | "Use $h = 0.01$.\n", 287 | "\n", 288 | "In each case, write out the error, i.e., the difference between the exact derivative and the result of the formula above." 289 | ] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "execution_count": 2, 294 | "metadata": { 295 | "slideshow": { 296 | "slide_type": "subslide" 297 | } 298 | }, 299 | "outputs": [ 300 | { 301 | "name": "stdout", 302 | "output_type": "stream", 303 | "text": [ 304 | "The approximate derivative of exp(x) at x = 0 is: 1.000017. The error is 0.000017.\n" 305 | ] 306 | } 307 | ], 308 | "source": [ 309 | "h = 0.01 # The step size\n", 310 | "\n", 311 | "x = 0\n", 312 | "f = exp\n", 313 | "derivative = diff(f, x, h)\n", 314 | "print(\"The approximate derivative of exp(x) at x = 0 is: %f. The error is %f.\" % (derivative, abs(derivative - 1))) \n", 315 | "# The 'abs' function returns the absolute value." 
316 | ] 317 | }, 318 | { 319 | "cell_type": "code", 320 | "execution_count": 3, 321 | "metadata": { 322 | "slideshow": { 323 | "slide_type": "subslide" 324 | } 325 | }, 326 | "outputs": [ 327 | { 328 | "name": "stdout", 329 | "output_type": "stream", 330 | "text": [ 331 | "The approximate derivative of exp(-2*x) at x = 0 is: -2.000133. The error is 0.000133.\n" 332 | ] 333 | } 334 | ], 335 | "source": [ 336 | "x = 0\n", 337 | "# Here it is not possible to simply pass in the math module's exp function,\n", 338 | "# so we need to define our own function instead.\n", 339 | "def g(x):\n", 340 | " return exp(-2*x)\n", 341 | "\n", 342 | "f = g\n", 343 | "derivative = diff(f, x, h)\n", 344 | "print(\"The approximate derivative of exp(-2*x) at x = 0 is: %f. The error is %f.\" % (derivative, abs(derivative - (-2.0))))" 345 | ] 346 | }, 347 | { 348 | "cell_type": "code", 349 | "execution_count": 4, 350 | "metadata": { 351 | "slideshow": { 352 | "slide_type": "subslide" 353 | } 354 | }, 355 | "outputs": [ 356 | { 357 | "name": "stdout", 358 | "output_type": "stream", 359 | "text": [ 360 | "The approximate derivative of cos(x) at x = 2*pi is: 0.000000. The error is 0.000000.\n" 361 | ] 362 | } 363 | ], 364 | "source": [ 365 | "x = 2*pi\n", 366 | "f = cos\n", 367 | "derivative = diff(f, x, h)\n", 368 | "print(\"The approximate derivative of cos(x) at x = 2*pi is: %f. The error is %f.\" % (derivative, abs(derivative - 0)))" 369 | ] 370 | }, 371 | { 372 | "cell_type": "code", 373 | "execution_count": 5, 374 | "metadata": { 375 | "slideshow": { 376 | "slide_type": "subslide" 377 | } 378 | }, 379 | "outputs": [ 380 | { 381 | "name": "stdout", 382 | "output_type": "stream", 383 | "text": [ 384 | "The approximate derivative of ln(x) at x = 0 is: 1.000033. The error is 0.000033.\n" 385 | ] 386 | } 387 | ], 388 | "source": [ 389 | "x = 1\n", 390 | "f = log # By default, log(x) is the natural log (i.e. 
the log to the base 'e')\n", 391 | "derivative = diff(f, x, h)\n", 392 | "print(\"The approximate derivative of ln(x) at x = 0 is: %f. The error is %f.\" % (derivative, abs(derivative - 1)))" 393 | ] 394 | }, 395 | { 396 | "cell_type": "markdown", 397 | "metadata": { 398 | "slideshow": { 399 | "slide_type": "slide" 400 | } 401 | }, 402 | "source": [ 403 | "## Exercise 2.3: Compute the derivative of $\\sin(x)$\n", 404 | "\n", 405 | "Compute \n", 406 | "\n", 407 | "$$\\frac{d(\\sin x)}{dx}\\qquad\\textrm{at}\\qquad x = 0.8$$\n", 408 | "\n", 409 | "using (a) forward differencing and (b) central differencing. \n", 410 | "\n", 411 | "Write some code that evaluates these derivatives for decreasing values of $h$ (start with $h=1.0$ and keep halving) and compare the values against the exact solution.\n", 412 | "\n", 413 | "Plot the convergence of your two methods." 414 | ] 415 | }, 416 | { 417 | "cell_type": "markdown", 418 | "metadata": { 419 | "slideshow": { 420 | "slide_type": "subslide" 421 | } 422 | }, 423 | "source": [ 424 | "You should get something that looks like this\n", 425 | "\n", 426 | "![\"Convergence plot\"](images/fd_cd_convergence.png)" 427 | ] 428 | }, 429 | { 430 | "cell_type": "markdown", 431 | "metadata": { 432 | "slideshow": { 433 | "slide_type": "slide" 434 | } 435 | }, 436 | "source": [ 437 | "## Calculating second derivatives\n", 438 | "\n", 439 | "Numerical differentiation may be extended to the second derivative by noting that the second derivative is the derivative of the first derivative. That is, if we define a new function $g$ for a second, where:\n", 440 | "\n", 441 | "$$ g(x)=f'(x) $$\n", 442 | "\n", 443 | "then\n", 444 | "\n", 445 | "$$ f''(x)=g'(x) $$\n", 446 | "\n", 447 | "and so we can just apply our differencing formula twice in order to achieve a second derivative (and so on for even higher derivatives)." 
448 | ] 449 | }, 450 | { 451 | "cell_type": "markdown", 452 | "metadata": { 453 | "slideshow": { 454 | "slide_type": "subslide" 455 | } 456 | }, 457 | "source": [ 458 | "We have noted above that the central difference method, being second-order accurate, is superior to the forward difference method so we will choose to extend that.\n", 459 | "\n", 460 | "In order to calculate $f''(x_0)$ using a central difference method, we first calculate $f'(x)$ for each of two half intervals, one to the left of $x_0$ and one to the right:\n", 461 | "\n", 462 | "$$ f'\\left(x_0+\\frac{h}{2}\\right)\\approx\\frac{f(x_0+h)-f(x_0)}{h},$$\n", 463 | "$$ f'\\left(x_0-\\frac{h}{2}\\right)\\approx\\frac{f(x_0)-f(x_0-h)}{h}.$$" 464 | ] 465 | }, 466 | { 467 | "cell_type": "markdown", 468 | "metadata": { 469 | "slideshow": { 470 | "slide_type": "subslide" 471 | } 472 | }, 473 | "source": [ 474 | "Of course the things on the RHS are first-order forward and backward differences if we were to consider the LHS at $x_0$. However, by considering the LHS at $x_0\\pm h/2$ they are in this case clearly second-order *central* differences where the denominator of the RHS is $2\\times (h/2)$.\n", 475 | "\n", 476 | "We can now calculate the second derivative using these two values. Note that we know $f'(x)$ at the points $x_0\\pm{h}/{2}$, which are only $h$ rather than $2h$ apart. 
Hence:\n", 477 | "\n", 478 | "$$\n", 479 | "\\begin{align}\n", 480 | " f''(x_0)&\\approx\\frac{f'(x_0+\\frac{h}{2})-f'(x_0-\\frac{h}{2})}{h}\\\\\n", 481 | " &\\approx\\frac{\\frac{f(x_0+h)-f(x_0)}{h}-\\frac{f(x_0)-f(x_0-h)}{h}}{h}\\\\\n", 482 | " &\\approx\\frac{f(x_0+h)-2f(x_0)+f(x_0-h)}{h^2}\n", 483 | "\\end{align}$$" 484 | ] 485 | }, 486 | { 487 | "cell_type": "markdown", 488 | "metadata": { 489 | "slideshow": { 490 | "slide_type": "slide" 491 | } 492 | }, 493 | "source": [ 494 | "## Exercise 2.4: Compute second derivative\n", 495 | "\n", 496 | "Calculate the second derivative $f''$ at $x = 1$ using the data below:\n", 497 | "\n", 498 | "$f(0.84) = 0.431711$\n", 499 | "\n", 500 | "$f(0.92) = 0.398519$\n", 501 | "\n", 502 | "$f(1.00) = 0.367879$\n", 503 | "\n", 504 | "$f(1.08) = 0.339596$\n", 505 | "\n", 506 | "$f(1.16) = 0.313486$\n", 507 | "\n", 508 | "You should get 0.36828" 509 | ] 510 | }, 511 | { 512 | "cell_type": "markdown", 513 | "metadata": { 514 | "slideshow": { 515 | "slide_type": "slide" 516 | } 517 | }, 518 | "source": [ 519 | "## Aside: Non-central differencing and differentiation by polynomial fit\n", 520 | "\n", 521 | "In this particular case we were given more data than we actually used. An alternative approach would be to use *non-centred differencing*, e.g. the following is also a valid approximation to the second derivative\n", 522 | "\n", 523 | "$$\n", 524 | "\\begin{align}\n", 525 | " f''(x_0)\\approx\\frac{f(x_0+2h)-2f(x_0+h)+f(x_0)}{h^2}\n", 526 | "\\end{align}$$\n", 527 | "\n", 528 | "This can come in handy if we need to approximate the value of derivatives at or near to a boundary where we don't have data beyond that boundary.\n", 529 | "\n", 530 | "If we wanted to use all of this data, an alternative would be to fit a polynomial to this data, and then differentiate this analytical expression exactly to approximate the derivative at any point between 0.84 and 1.16 (recalling that extrapolation is dangerous)." 
531 | ] 532 | }, 533 | { 534 | "cell_type": "markdown", 535 | "metadata": { 536 | "slideshow": { 537 | "slide_type": "slide" 538 | } 539 | }, 540 | "source": [ 541 | "## Numerical methods for ODEs\n", 542 | "\n", 543 | "One of the most important applications of numerical mathematics in the sciences is the numerical solution of ordinary differential equations (ODEs). This is a vast topic which rapidly becomes somewhat advanced, so we will restrict ourselves here to a very brief introduction to the solution of first order ODEs. A much more comprehensive treatment of this subject is to be found in the Numerical Methods 2 module.\n", 544 | "\n", 545 | "Suppose we have the general first-order ODE:\n", 546 | "\n", 547 | "\\begin{align}\n", 548 | "u'(t)&=f(u(t),t) \\\\\n", 549 | "u(t_0)&=u_0\n", 550 | "\\end{align}\n", 551 | "\n", 552 | "[Notation: For $u=u(t)$, $\\frac{du}{dt}=u'=\\dot{u}$.]" 553 | ] 554 | }, 555 | { 556 | "cell_type": "markdown", 557 | "metadata": { 558 | "slideshow": { 559 | "slide_type": "slide" 560 | } 561 | }, 562 | "source": [ 563 | "That is, the derivative of $u$ with respect to $t$ is some known function of $u$ and $t$, and we also know the initial condition of $u$ at some initial time $t_0$.\n", 564 | "\n", 565 | "If we manage to solve this equation analytically, the solution will be a function $u(t)$ which is defined for every $t>t_0$. In common with all of the numerical methods we will encounter in this module, our objective is to find an approximate solution to the ODE at a finite set of points. In this case, we will attempt to find approximate solutions at $t=t_0,t_0+h,t_0+2h,t_0+3h,\\ldots$.\n", 566 | "\n", 567 | "It is frequently useful to think of the independent variable, $t$, as representing time. A numerical method steps forward in time units of $h$, attempting to calculate $u(t+h)$ in using the previously calculated value $u(t)$. 
" 568 | ] 569 | }, 570 | { 571 | "cell_type": "markdown", 572 | "metadata": { 573 | "slideshow": { 574 | "slide_type": "slide" 575 | } 576 | }, 577 | "source": [ 578 | "### Euler's method\n", 579 | "\n", 580 | "To derive a numerical method, we can first turn once again to the Taylor\n", 581 | "series. In this case, we could write:\n", 582 | "\n", 583 | "$$ u(t+h)=u(t)+h u'(t) + O(h^2) $$\n", 584 | "\n", 585 | "Using the definition of our ODE above, we can substitute in for $u'(t)$:\n", 586 | "\n", 587 | "$$ u(t+h)=u(t)+h f(u(t),t)+ O(h^2).$$\n", 588 | "\n", 589 | "Notice that the value of $u$ used in the evaluation of $f$ is that at time $t$. This simple scheme is named **Euler's method** after the 18th century Swiss mathematician, Leonhard Euler." 590 | ] 591 | }, 592 | { 593 | "cell_type": "markdown", 594 | "metadata": { 595 | "slideshow": { 596 | "slide_type": "subslide" 597 | } 598 | }, 599 | "source": [ 600 | "This is what is known as an explicit method, because the function $f$ in this relation is evaluated at the old time level $t$\n", 601 | "-- i.e. we have all the information required at time $t$ to explicitly compute the right-hand-side,\n", 602 | "and hence easily find the new value for $u(t+h)$.\n", 603 | "\n", 604 | "This form of the method is therefore more correctly called either Explicit Euler or Forward Euler. We could also evaluate the RHS at some time between $t$ and $t+h$ (in the case of $t+h$ this method is called Implicit or Backward Euler); this is more complex to solve for the new $u(t+h)$ but can have advantageous accuracy and stability properties." 605 | ] 606 | }, 607 | { 608 | "cell_type": "markdown", 609 | "metadata": { 610 | "slideshow": { 611 | "slide_type": "subslide" 612 | } 613 | }, 614 | "source": [ 615 | "The formula given is used to calculate the value of $u(t)$ one time step forward from the last known value. The error is therefore the local truncation error. 
If we actually wish to know the value at some fixed time $T$ then we will have to calculate $(T-t_0)/h$ steps of the method. This sum over $O(1/h)$ steps results in a global truncation error for Euler's method of $O(h)$.\n", 616 | "\n", 617 | "In other words, Euler's method is only first-order accurate -- if we halve $h$ we will need to do double the amount of work and the error should correspondingly halve; if we had a second-order method we would expect the error to reduce by a factor of 4 for every doubling in effort!" 618 | ] 619 | }, 620 | { 621 | "cell_type": "markdown", 622 | "metadata": { 623 | "slideshow": { 624 | "slide_type": "subslide" 625 | } 626 | }, 627 | "source": [ 628 | "To illustrate Euler's method, and convey the fundamental idea of all time stepping methods, we'll use Euler's method to solve one of the simplest of all ODEs:\n", 629 | "\n", 630 | "$$ u'(t)=u(t),$$\n", 631 | "$$ u(0)=1.$$\n", 632 | "\n", 633 | "We know, of course, that the solution to this equation is $u(t)=e^t$, but let's ignore that for one moment and evaluate $u(0.1)$ using Euler's method with steps of $0.05$. The first step is:\n", 634 | "\n", 635 | "$$\\begin{align}\n", 636 | " u(0.05)&\\approx u(0)+0.05u'(0)\\\\\n", 637 | " &\\approx1+.05\\times1\\\\\n", 638 | " &\\approx 1.05\n", 639 | "\\end{align}$$" 640 | ] 641 | }, 642 | { 643 | "cell_type": "markdown", 644 | "metadata": { 645 | "slideshow": { 646 | "slide_type": "subslide" 647 | } 648 | }, 649 | "source": [ 650 | "Now that we know $u(0.05)$, we can calculate the second step:\n", 651 | "\n", 652 | "$$\n", 653 | "\\begin{align}\n", 654 | " u(0.1)&\\approx u(0.05)+0.05u'(0.05)\\\\\n", 655 | " &\\approx 1.05+.05\\times1.05\\\\\n", 656 | " &\\approx 1.1025\n", 657 | "\\end{align}$$\n", 658 | "\n", 659 | "Now the actual value of $e^{0.1}$ is around $1.1051$ so we're a couple of percent off even over a very short time interval and only a couple of steps of the algorithm." 
660 | ] 661 | }, 662 | { 663 | "cell_type": "markdown", 664 | "metadata": { 665 | "slideshow": { 666 | "slide_type": "slide" 667 | } 668 | }, 669 | "source": [ 670 | "## Exercise 2.5: Implementing Forward Euler's method\n", 671 | "\n", 672 | "Write a function *euler*( *f*, *u0*, *t0*, *t_max*, *h*) that takes as arguments the function $f(u,t)$ on the RHS of our ODE,\n", 673 | "an initial value for $u$, the start and end time of the integration, and the time step.\n", 674 | "\n", 675 | "Use it to integrate the following ODE problems up to time $t=10$\n", 676 | "\n", 677 | "$$u'(t)=u(t),\\quad u(0)=1$$\n", 678 | "\n", 679 | "and \n", 680 | "\n", 681 | "$$u'(t)=\\cos(t),\\quad u(0)=0$$\n", 682 | "\n", 683 | "and plot the results. A template to get you started is below." 684 | ] 685 | }, 686 | { 687 | "cell_type": "code", 688 | "execution_count": null, 689 | "metadata": { 690 | "slideshow": { 691 | "slide_type": "subslide" 692 | } 693 | }, 694 | "outputs": [], 695 | "source": [ 696 | "%pylab inline\n", 697 | "\n", 698 | "def euler(f,u0,t0,t_max,h):\n", 699 | " u=u0; t=t0\n", 700 | " # these lists will store all solution values \n", 701 | " # and associated time levels for later plotting\n", 702 | " u_all=[u0]; t_all=[t0]\n", 703 | " \n", 704 | " \n", 705 | " while ... 
add your code here\n", 706 | " \n", 707 | " \n", 708 | " \n", 709 | " \n", 710 | " \n", 711 | " \n", 712 | " return(u_all,t_all)\n", 713 | "\n", 714 | "\n", 715 | "def f(u,t):\n", 716 | " val = u\n", 717 | " return val\n", 718 | "\n", 719 | "(u_all,t_all) = euler(f,1.0,0.0,10.0,0.1)\n", 720 | "\n", 721 | "plot(t_all, u_all)\n", 722 | "xlabel('t');ylabel('u(t)');grid(True)\n", 723 | "show()" 724 | ] 725 | }, 726 | { 727 | "cell_type": "markdown", 728 | "metadata": { 729 | "slideshow": { 730 | "slide_type": "slide" 731 | } 732 | }, 733 | "source": [ 734 | "### Heun's method\n", 735 | "\n", 736 | "Euler's method is first-order accurate because it calculates the derivative using only the information available at the beginning of the time step. As we observed previously, higher-order convergence can be obtained if we also employ information from other points in the interval. Heun's method may be derived by attempting to use derivative information at both the start and the end of the interval:\n", 737 | "\n", 738 | "$$\n", 739 | "\\begin{align}\n", 740 | " u(t+h)&\\approx u(t)+\\frac{h}{2}\\left(u'(t)+u'(t+h)\\right)\\\\\n", 741 | " &\\approx u(t)+\\frac{h}{2}\\big(f(u(t),t)+f(u(t+h),t+h)\\big)\n", 742 | "\\end{align}$$\n", 743 | "\n", 744 | "The difficulty with this approach is that we now require $u(t+h)$ in order to calculate the final term in the equation, and that's what we set out to calculate so we don't know it yet! So at this point we have an example of an implicit algorithm and at this stage the above ODE solver would be referred to as the trapezoidal method if we could solve it exactly for $u(t+h)$." 
745 | ] 746 | }, 747 | { 748 | "cell_type": "markdown", 749 | "metadata": { 750 | "slideshow": { 751 | "slide_type": "subslide" 752 | } 753 | }, 754 | "source": [ 755 | "Perhaps the simplest solution to this dilemma, the one adopted in Heun's method, is to use a first guess at $u(t+h)$ calculated using Euler's method:\n", 756 | "\n", 757 | "$$ \\tilde{u}(t+h)=u(t)+hf(u(t),t) $$\n", 758 | "\n", 759 | "This first guess is then used to solve for $u(t+h)$ using:\n", 760 | "\n", 761 | "$$ u(t+h)\\approx u(t)+\\frac{h}{2}\\big(f(u(t),t)+f(\\tilde{u}(t+h),t+h)\\big)$$\n", 762 | "\n", 763 | "The generic term for schemes of this type is **predictor-corrector**. The initial calculation of $\\tilde{u}(t+h)$ is used to predict the new value of $u$ and then this is used in a more accurate calculation to produce a more correct value. \n", 764 | "\n", 765 | "Note that Heun's method is $O(h^2)$, i.e. a second-order method." 766 | ] 767 | }, 768 | { 769 | "cell_type": "markdown", 770 | "metadata": { 771 | "slideshow": { 772 | "slide_type": "slide" 773 | } 774 | }, 775 | "source": [ 776 | "## Exercise 2.6: Implementing Heun's method\n", 777 | "\n", 778 | "Repeat the previous exercise for this method.\n", 779 | "\n", 780 | "For some ODEs you know the exact solution, so you can compare the errors between Euler's and Heun's methods, and how they vary with time step." 
781 | ] 782 | }, 783 | { 784 | "cell_type": "markdown", 785 | "metadata": { 786 | "slideshow": { 787 | "slide_type": "subslide" 788 | } 789 | }, 790 | "source": [ 791 | "You should be able to get a plot that looks like this for the case $u'=u$.\n", 792 | "\n", 793 | "![\"Comparison between the Euler and Heun method for the solution of a simple ODE.\"](images/euler_vs_heun.png)" 794 | ] 795 | } 796 | ], 797 | "metadata": { 798 | "celltoolbar": "Slideshow", 799 | "kernelspec": { 800 | "display_name": "Python 3", 801 | "language": "python", 802 | "name": "python3" 803 | }, 804 | "language_info": { 805 | "codemirror_mode": { 806 | "name": "ipython", 807 | "version": 3 808 | }, 809 | "file_extension": ".py", 810 | "mimetype": "text/x-python", 811 | "name": "python", 812 | "nbconvert_exporter": "python", 813 | "pygments_lexer": "ipython3", 814 | "version": "3.6.3" 815 | } 816 | }, 817 | "nbformat": 4, 818 | "nbformat_minor": 1 819 | } 820 | -------------------------------------------------------------------------------- /notebook/Lecture-6-Numerical-methods-1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Numerical Methods 1\n", 8 | "### [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman), [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott), [Nicolas Barral](http://www.imperial.ac.uk/people/n.barral)" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Lecture 6: Numerical Linear Algebra II\n", 16 | "\n", 17 | "## Learning objectives:\n", 18 | "\n", 19 | "* More on direct methods: LU decomposition\n", 20 | "* Doolittle's algorithm\n", 21 | "* Properties of lower-triangular matrices\n", 22 | "* Partial pivoting" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## Introduction\n", 30 | "\n", 31 | "Last week we developed and 
implemented the Gaussian elimination method to solve the linear matrix system ($A\\pmb{x}=\\pmb{b}$). \n", 32 | "\n", 33 | "This week we will consider a closely related solution method: *LU decomposition* or *LU factorisation*.\n", 34 | "\n", 35 | "Both are examples of *direct* solution methods - next week we will consider the alternate approach to solve linear systems, namely iterative or indirect methods." 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "metadata": { 41 | "collapsed": true 42 | }, 43 | "source": [ 44 | "## LU decomposition - theory\n", 45 | "\n", 46 | "Last week we implemented Gaussian elimination to solve the matrix system with *one* RHS vector $\\pmb{b}$. \n", 47 | "\n", 48 | "We often have to deal with problems where we have multiple RHS vectors, all with the same matrix $A$. \n", 49 | "\n", 50 | "We could call the same code multiple times to solve all of these corresponding linear systems, but note that as the elimination algorithm is actually performing operations based upon (the same) $A$ each time, we would actually be wasting time repeating exactly the same operations - this is therefore clearly not an efficient solution to this problem.\n", 51 | "\n", 52 | "We could easily generalise our Gaussian elimination/back substitution algorithms to include multiple RHS column vectors in the augmented system, perform the same sequence of row operations (but now only once) to transform the matrix to upper-triangular form, and then perform back substitution on each of the transformed RHS vectors from the augmented system - cf. the use of Gaussian elimination to compute the inverse to a matrix by placing the identity on the right of the augmented system.\n", 53 | "\n", 54 | "However, it is often the case that each RHS vector depends on the solutions to the matrix systems obtained from some or all of the earlier RHS vectors, and so this generalisation would not work in this case. 
Note that an example of this you will see in NM2 is where you are time-stepping the solution to a differential equation, and the RHS vector depends on the solution at the previous time level.\n", 55 | "\n", 56 | "To deal with this situation efficiently we *decompose* or *factorise* the matrix $A$ in such a way that it is cheap to compute a new solution vector $\\pmb{x}$ for any given RHS vector $\\pmb{b}$. This decomposition involves a lower- and an upper-triangular matrix, hence the name LU decomposition. These matrices essentially *encode* the steps conducted in Gaussian elimination, so we don't have to explicilty conduct all of the operations again and again.\n", 57 | "\n", 58 | "Mathematically, let's assume that we have already found/constructed a lower-triangular matrix ($L$ - where all entries above the diagonal are zero) and an upper-triangular matrix ($U$ - where all entries below the diagonal are zero) such that we can write\n", 59 | "\n", 60 | "$$ A = LU $$\n", 61 | "\n", 62 | "In this case the matrix system we need to solve for $\\pmb{x}$ becomes\n", 63 | "\n", 64 | "$$ A\\pmb{x} = \\pmb{b} \\iff (LU)\\pmb{x} = L(U\\pmb{x}) = \\pmb{b} $$\n", 65 | "\n", 66 | "Notice that the matrix-vector product $U\\pmb{x}$ is itself a vector, let's call it $\\pmb{c}$ for the time-being (i.e. \n", 67 | "$\\pmb{c}=U\\pmb{x}$).\n", 68 | "\n", 69 | "The above system then reads \n", 70 | "\n", 71 | "$$ L\\pmb{c} = \\pmb{b} $$\n", 72 | "\n", 73 | "where $L$ is a matrix and $\\pmb{c}$ is an unknown. 
\n", 74 | "\n", 75 | "As $L$ is in lower-triangular form we can use forward substitution (generalising the back subsitution algorithm/code we developed last week) to very easily find $\\pmb{c}$ in relatively few operations (we don't need to call the entire Gaussian elimination algorithm).\n", 76 | "\n", 77 | "Once we know $\\pmb{c}$ we then solve the second linear system \n", 78 | "\n", 79 | "$$ U\\pmb{x} = \\pmb{c} $$\n", 80 | "\n", 81 | "where now we can use the fact that $U$ is upper-triangular to use our back substitution algorithm again very efficiently to give the solution $\\pmb{x}$ we require.\n", 82 | "\n", 83 | "So for a given $\\pmb{b}$ we can find the corresponding $\\pmb{x}$ very efficiently, we can therefore do this repeatedly as each new $\\pmb{b}$ is given to us.\n", 84 | "\n", 85 | "Our challenge is therefore to find the matrices $L$ and $U$ allowing us to perform the decomposition $A=LU$.\n" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "metadata": {}, 91 | "source": [ 92 | "## LU decomposition - algorithm\n", 93 | "\n", 94 | "Recall the comment above on the $L$ and $U$ matrices encoding the steps taken in Gaussian elimination. 
Let's see how this works through the development of the so-called Doolittle algorithm.\n", 95 | "\n", 96 | "Let's consider an example matrix:\n", 97 | "\n", 98 | "$$\n", 99 | " A=\\begin{bmatrix}\n", 100 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 101 | "{\\color{black}5} & {\\color{black}14} & {\\color{black}7} & {\\color{black}10}\\\\\n", 102 | "{\\color{black}20} & {\\color{black}77} & {\\color{black}41} & {\\color{black}48}\\\\\n", 103 | "{\\color{black}25} & {\\color{black}91} & {\\color{black}55} & {\\color{black}67}\\\\\n", 104 | " \\end{bmatrix}\n", 105 | "$$\n", 106 | "\n", 107 | "the first step of Gaussian elimination is to set the\n", 108 | "sub-diagonal elements in the first column to zero by subtracting multiples of\n", 109 | "the first row from each of the subsequent rows. \n", 110 | "\n", 111 | "For this example, using the symbolic notation from last week\n", 112 | "this requires the row operations\n", 113 | "\n", 114 | "\\begin{align}\n", 115 | "Eq. (2) &\\leftarrow Eq. (2) - 1\\times Eq. (1)\\\\\n", 116 | "Eq. (3) &\\leftarrow Eq. (3) - 4\\times Eq. (1)\\\\\n", 117 | "Eq. (4) &\\leftarrow Eq. (4) - 5\\times Eq. 
(1)\\\\\n", 118 | "\\end{align}\n", 119 | "\n", 120 | "or mathematically, and for each element of the matrix (remembering that we are adding rows together - while one of\n", 121 | "the entries of a row will end up being zero, this also has the consequence of updating the rest of the values in that row, hence the iteration over $j$ below):\n", 122 | "\n", 123 | "\\begin{align}\n", 124 | "A_{2j} &\\leftarrow A_{2j} - \\frac{A_{21}}{A_{11}} A_{1j} = A_{2j} - \\frac{5}{5} \\times A_{1j}, \\quad j=1,2,3,4\\\\\n", 125 | "A_{3j} &\\leftarrow A_{3j} - \\frac{A_{31}}{A_{11}} A_{1j} = A_{3j} - \\frac{20}{5} \\times A_{1j}, \\quad j=1,2,3,4\\\\\n", 126 | "A_{4j} &\\leftarrow A_{4j} - \\frac{A_{41}}{A_{11}} A_{1j} = A_{4j} - \\frac{25}{5} \\times A_{1j}, \\quad j=1,2,3,4\\\\\n", 127 | "\\end{align}\n", 128 | "\n", 129 | "Notice that we can also write these exact operations on elements in terms of multiplication by a carefully chosen lower-triangular matrix where the non-zero's below the diagonal restricted to a single column, e.g. 
for the example above\n", 130 | "\n", 131 | "$$\n", 132 | " \\begin{bmatrix}\n", 133 | " {\\color{black}1} & {\\color{black}0} & {\\color{black}0} & {\\color{black}0}\\\\\n", 134 | " {\\color{Orange}{-1}} & {\\color{black}1} & {\\color{black}0} & {\\color{black}0}\\\\\n", 135 | " {\\color{Orange}{-4}} & {\\color{black}0} & {\\color{black}1} & {\\color{black}0}\\\\\n", 136 | " {\\color{Orange}{-5}} & {\\color{black}0} & {\\color{black}0} & {\\color{black}1}\\\\ \n", 137 | " \\end{bmatrix}\\qquad\\times\\qquad\\begin{bmatrix}\n", 138 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 139 | " {\\color{black}5} & {\\color{black}14} & {\\color{black}7} & {\\color{black}10}\\\\\n", 140 | " {\\color{black}20} & {\\color{black}77} & {\\color{black}41} & {\\color{black}48}\\\\\n", 141 | " {\\color{black}25} & {\\color{black}91} & {\\color{black}55} & {\\color{black}67}\\\\ \n", 142 | " \\end{bmatrix}\\qquad=\\qquad\\begin{bmatrix}\n", 143 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 144 | " {\\color{blue}{0}} & {\\color{blue}{7}} & {\\color{blue}{2}} & {\\color{blue}{1}}\\\\\n", 145 | " {\\color{blue}{0}} & {\\color{blue}{49}} & {\\color{blue}{21}} & {\\color{blue}{12}}\\\\\n", 146 | " {\\color{blue}{0}} & {\\color{blue}{56}} & {\\color{blue}{30}} & {\\color{blue}{22}}\\\\ \n", 147 | " \\end{bmatrix}\n", 148 | "$$\n", 149 | "\n", 150 | "The lower-triangular matrix (let's call this one $L_0$) is thus encoding the first step in Gaussian elimination.\n", 151 | "\n", 152 | "The next step involves taking the second row of the updated matrix as the new pivot (we will ignore partial pivoting for simplicity), and subtracting multiples of this row from those below in order to set the zeros below the diagonal in the second column to zero. 
\n", 153 | "\n", 154 | "This can be achieved here with the multiplication by the following lower-triangular matrix (call this one $L_1$)\n", 155 | "\n", 156 | "\\begin{equation*}\n", 157 | " \\begin{bmatrix}\n", 158 | " {\\color{black}1} & {\\color{black}0} & {\\color{black}0} & {\\color{black}0}\\\\\n", 159 | " {\\color{black}0} & {\\color{black}1} & {\\color{black}0} & {\\color{black}0}\\\\\n", 160 | " {\\color{black}0} & {\\color{Orange}{-7}} & {\\color{black}1} & {\\color{black}0}\\\\\n", 161 | " {\\color{black}0} & {\\color{Orange}{-8}} & {\\color{black}0} & {\\color{black}1}\\\\\n", 162 | " \\end{bmatrix}\\qquad\\times\\qquad\\begin{bmatrix}\n", 163 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 164 | " {\\color{black}0} & {\\color{black}7} & {\\color{black}2} & {\\color{black}1}\\\\\n", 165 | " {\\color{black}0} & {\\color{black}49} & {\\color{black}21} & {\\color{black}12}\\\\\n", 166 | " {\\color{black}0} & {\\color{black}56} & {\\color{black}30} & {\\color{black}22}\\\\\n", 167 | " \\end{bmatrix}\\qquad=\\qquad\\begin{bmatrix}\n", 168 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 169 | " {\\color{black}0} & {\\color{black}7} & {\\color{black}2} & {\\color{black}1}\\\\\n", 170 | " {\\color{black}0} & {\\color{blue}{0}} & {\\color{blue}{7}} & {\\color{blue}{5}}\\\\\n", 171 | " {\\color{black}0} & {\\color{blue}{0}} & {\\color{blue}{14}} & {\\color{blue}{14}}\\\\\n", 172 | " \\end{bmatrix}\n", 173 | "\\end{equation*}\n", 174 | "\n", 175 | "\n", 176 | "and finally for this example\n", 177 | "\n", 178 | "\\begin{equation*}\n", 179 | " \\begin{bmatrix}\n", 180 | " {\\color{black}1} & {\\color{black}0} & {\\color{black}0} & {\\color{black}0}\\\\\n", 181 | " {\\color{black}0} & {\\color{black}1} & {\\color{black}0} & {\\color{black}0}\\\\\n", 182 | " {\\color{black}0} & {\\color{black}0} & {\\color{black}1} & {\\color{black}0}\\\\\n", 183 | " {\\color{black}0} & 
{\\color{black}0} & {\\color{Orange}{-2}} & {\\color{black}{1}}\\\\\n", 184 | " \\end{bmatrix}\\qquad\\times\\qquad\\begin{bmatrix}\n", 185 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 186 | " {\\color{black}0} & {\\color{black}7} & {\\color{black}2} & {\\color{black}1}\\\\\n", 187 | " {\\color{black}0} & {\\color{black}0} & {\\color{black}7} & {\\color{black}5}\\\\\n", 188 | " {\\color{black}0} & {\\color{black}0} & {\\color{black}14} & {\\color{black}14}\\\\\n", 189 | " \\end{bmatrix}\\qquad=\\qquad\\begin{bmatrix}\n", 190 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 191 | " {\\color{black}0} & {\\color{black}7} & {\\color{black}2} & {\\color{black}1}\\\\\n", 192 | " {\\color{black}0} & {\\color{black}0} & {\\color{black}7} & {\\color{black}5}\\\\\n", 193 | " {\\color{black}0} & {\\color{black}0} & {\\color{blue}{0}} & {\\color{blue}{4}}\\\\\n", 194 | " \\end{bmatrix}\n", 195 | "\\end{equation*}\n", 196 | "\n", 197 | "where this lower triangular matrix we will call $L_2$, and the RHS matrix is now in upper-triangular form as we expect from Gaussian elimination (call this $U$).\n", 198 | "\n", 199 | "In summary, the above operations can be written as \n", 200 | "\n", 201 | "$$ L_2(L_1(L_0A)) = U $$\n", 202 | "\n", 203 | "(where $A$ here is the original matrix).\n", 204 | "\n", 205 | "Note that these lower-triangular matrices are examples of what is known as an *atomic* lower-triangular matrix: a special form of unitriangular matrix - the diagonals are unity, where the off-diagonal entries are all zero apart from in a single column. 
The inverse of such a matrix is the original with the sign of those off-diagonals changed:\n", 206 | "\n", 207 | "$$\n", 208 | "\\left[\n", 209 | " \\begin{array}{rrrrrrrrr}\n", 210 | " 1 & 0 & \\cdots & & & & & & 0 \\\\\n", 211 | " 0 & 1 & 0 & \\cdots & & & & & 0 \\\\\n", 212 | " 0 & \\ddots & \\ddots & \\ddots & & & & & \\vdots \\\\\n", 213 | " \\vdots & \\ddots & \\ddots & \\ddots & & & & & \\\\\n", 214 | " & & & 0 & 1 & 0 & & & & \\\\ \n", 215 | " & & & 0 & l_{i+1,i} & 1 & \\ddots & & & \\\\ \n", 216 | " & & & 0 & l_{i+2,i} & 0 & \\ddots & & & \\\\ \n", 217 | " & & & \\vdots & \\vdots & \\vdots & \\ddots & & 0 & \\\\ \n", 218 | " 0 & \\cdots & & 0 & l_{n,i} & 0 & \\cdots & 0 & 1 & \\\\ \n", 219 | "\\end{array}\n", 220 | "\\right]^{-1}\n", 221 | "=\n", 222 | "\\left[\n", 223 | " \\begin{array}{rrrrrrrrr}\n", 224 | " 1 & 0 & \\cdots & & & & & & 0 \\\\\n", 225 | " 0 & 1 & 0 & \\cdots & & & & & 0 \\\\\n", 226 | " 0 & \\ddots & \\ddots & \\ddots & & & & & \\vdots \\\\\n", 227 | " \\vdots & \\ddots & \\ddots & \\ddots & & & & & \\\\\n", 228 | " & & & 0 & 1 & 0 & & & & \\\\ \n", 229 | " & & & 0 & -l_{i+1,i} & 1 & \\ddots & & & \\\\ \n", 230 | " & & & 0 & -l_{i+2,i} & 0 & \\ddots & & & \\\\ \n", 231 | " & & & \\vdots & \\vdots & \\vdots & \\ddots & & 0 & \\\\ \n", 232 | " 0 & \\cdots & & 0 & -l_{n,i} & 0 & \\cdots & 0 & 1 & \\\\ \n", 233 | "\\end{array}\n", 234 | "\\right]\n", 235 | "$$\n" 236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "metadata": {}, 241 | "source": [ 242 | "### Exercise 6.1: lower-triangular matrices\n", 243 | "\n", 244 | "Convince yourselves of the following facts:\n", 245 | "\n", 246 | "* The multiplication of arbitrary lower-triangular square matrices is also lower-triangular.\n", 247 | "\n", 248 | "* $L_2(L_1(L_0A)) = U \\implies A = L_0^{-1}(L_1^{-1}(L_2^{-1}U))$\n", 249 | "\n", 250 | "* and hence that $A=LU$ where $U$ is the upper-triangular matrix found at the end of Gaussian elimination, and where $L$ is the \n", 251 | 
"following matrix\n", 252 | "$$ L = L_0^{-1}L_1^{-1}L_2^{-1} $$\n", 253 | "\n", 254 | "* Finally, compute this product of these lower-triangular matrices to show that \n", 255 | "$$L = \n", 256 | " \\begin{bmatrix}\n", 257 | " {\\color{black}1} & {\\color{black}0} & {\\color{black}0} & {\\color{black}0}\\\\\n", 258 | " {\\color{black}{1}} & {\\color{black}1} & {\\color{black}0} & {\\color{black}0}\\\\\n", 259 | " {\\color{black}{4}} & {\\color{black}7} & {\\color{black}1} & {\\color{black}0}\\\\\n", 260 | " {\\color{black}{5}} & {\\color{black}8} & {\\color{black}2} & {\\color{black}1}\\\\ \n", 261 | " \\end{bmatrix}\n", 262 | "$$\n", 263 | "i.e. that the multiplication of these individual atomic matrices (importantly in this order) simply merges the entries from the non-zero columns of each atomic matrix, and hence is both lower-triangular, as well as trivial to compute." 264 | ] 265 | }, 266 | { 267 | "cell_type": "code", 268 | "execution_count": null, 269 | "metadata": { 270 | "collapsed": true 271 | }, 272 | "outputs": [], 273 | "source": [] 274 | }, 275 | { 276 | "cell_type": "markdown", 277 | "metadata": {}, 278 | "source": [ 279 | "## LU decomposition - implementation\n", 280 | "\n", 281 | "So we can build an LU code easily from our Gaussian elimination code. 
The final $U$ matrix we need here is as was already constructed through Gaussian elimination, the entries of $L$ we need are simply the ${A_{ik}}/{A_{kk}}$ multipliers we computed as part of the elimination, but threw away previously.\n", 282 | "\n", 283 | "For a given pivot row $k$, for each of these multipliers (for every row below the pivot), as we compute them we know that we are going to transform the augmented matrix in order to achieve a new zero below the diagonal - we can store each multiplier in this position before moving on to the following row, we implicitly know that the diagonals of $L$ will be unity and so don't need to store these (and noting that we don't actually have a space for them anyway!). We then move on to the following pivot row, replacing the zeros in the corresponding column we are zero'ing, but again using the now spare space to store the multipliers.\n", 284 | "\n", 285 | "For example, for the case above \n", 286 | "\n", 287 | "$$ A = \n", 288 | " \\begin{bmatrix}\n", 289 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 290 | " {\\color{black}5} & {\\color{black}14} & {\\color{black}7} & {\\color{black}10}\\\\\n", 291 | " {\\color{black}20} & {\\color{black}77} & {\\color{black}41} & {\\color{black}48}\\\\\n", 292 | " {\\color{black}25} & {\\color{black}91} & {\\color{black}55} & {\\color{black}67}\\\\ \n", 293 | " \\end{bmatrix}\\quad\\rightarrow\\quad\n", 294 | " \\begin{bmatrix}\n", 295 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 296 | " {\\color{blue}{1}} & {\\color{black}{7}} & {\\color{black}{2}} & {\\color{black}{1}}\\\\\n", 297 | " {\\color{blue}{4}} & {\\color{black}{49}} & {\\color{black}{21}} & {\\color{black}{12}}\\\\\n", 298 | " {\\color{blue}{5}} & {\\color{black}{56}} & {\\color{black}{30}} & {\\color{black}{22}}\\\\ \n", 299 | " \\end{bmatrix}\\quad\\rightarrow\\quad\n", 300 | " \\begin{bmatrix}\n", 301 | " {\\color{black}5} & 
{\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 302 | " {\\color{blue}1} & {\\color{black}7} & {\\color{black}2} & {\\color{black}1}\\\\\n", 303 | " {\\color{blue}4} & {\\color{blue}{7}} & {\\color{black}{7}} & {\\color{black}{5}}\\\\\n", 304 | " {\\color{blue}5} & {\\color{blue}{8}} & {\\color{black}{14}} & {\\color{black}{14}}\\\\\n", 305 | " \\end{bmatrix}\\quad\\rightarrow\\quad\n", 306 | " \\begin{bmatrix}\n", 307 | " {\\color{black}5} & {\\color{black}7} & {\\color{black}5} & {\\color{black}9}\\\\\n", 308 | " {\\color{blue}1} & {\\color{black}7} & {\\color{black}2} & {\\color{black}1}\\\\\n", 309 | " {\\color{blue}4} & {\\color{blue}7} & {\\color{black}7} & {\\color{black}5}\\\\\n", 310 | " {\\color{blue}5} & {\\color{blue}8} & {\\color{blue}{2}} & {\\color{black}{4}}\\\\\n", 311 | " \\end{bmatrix}\n", 312 | " = [\\color{blue}L\\backslash U]\n", 313 | "$$\n", 314 | "\n" 315 | ] 316 | }, 317 | { 318 | "cell_type": "markdown", 319 | "metadata": {}, 320 | "source": [ 321 | "### Exercise 6.2: LU decomposition\n", 322 | "\n", 323 | "Starting from your Gaussian elimination code produce a new code to compute the LU decomposition of a matrix. First, store L and U in two different matrices, then store L and U in A as in above." 324 | ] 325 | }, 326 | { 327 | "cell_type": "code", 328 | "execution_count": null, 329 | "metadata": { 330 | "collapsed": true 331 | }, 332 | "outputs": [], 333 | "source": [] 334 | }, 335 | { 336 | "cell_type": "markdown", 337 | "metadata": {}, 338 | "source": [ 339 | "## Partial pivoting\n", 340 | "\n", 341 | "At the end of last week we commented that a problem could occur where the $A_{kk}$ we divide through by in the Gaussian elimination and/or back substitution algorithms might be (near) zero.\n", 342 | "\n", 343 | "Using Gaussian elimination as an example, let's again consider the algorithm mid-way working on an arbitrary matrix system, i.e. 
assume that the first $k$ rows have already been transformed into upper-triangular form, while the equations/rows below are not yet in this form:\n", 344 | "\n", 345 | "$$\n", 346 | "\\left[\n", 347 | " \\begin{array}{rrrrrrr|r}\n", 348 | " A_{11} & A_{12} & A_{13} & \\cdots & A_{1k} & \\cdots & A_{1n} & b_1 \\\\\n", 349 | " 0 & A_{22} & A_{23} & \\cdots & A_{2k} & \\cdots & A_{2n} & b_2 \\\\\n", 350 | " 0 & 0 & A_{33} & \\cdots & A_{3k} & \\cdots & A_{3n} & b_3 \\\\\n", 351 | " \\vdots & \\vdots & \\vdots & \\ddots & \\vdots & \\ddots & \\vdots & \\vdots \\\\\n", 352 | "\\hdashline \n", 353 | " 0 & 0 & 0 & \\cdots & A_{kk} & \\cdots & A_{kn} & b_k \\\\ \n", 354 | " \\vdots & \\vdots & \\vdots & \\ddots & \\vdots & \\ddots & \\vdots & \\vdots \\\\\n", 355 | " 0 & 0 & 0 & \\cdots & A_{nk} & \\cdots & A_{nn} & b_n \\\\\n", 356 | "\\end{array}\n", 357 | "\\right]\n", 358 | "$$\n", 359 | "\n", 360 | "Note we have drawn the horizontal dashed line one row higher, as we are not going to blindly asssume that it is wise to take the current row $k$ as the pivot row, and $A_{kk}$ as the so-called pivot element.\n", 361 | "\n", 362 | "*Partial pivoting* selects the best pivot (row or element) as the one where the $A_{ik}$ (for $i\\ge k$) value is largest (relative to the other values of components in its own row $i$), and then swaps this row with the current $k$ row.\n", 363 | "\n", 364 | "To generalise our codes above we would simply need to search for this row, and perform the row swap operation.\n", 365 | "\n", 366 | "Python's `scipy.linalg` library has its own implementation of the LU decomposition, that uses partial pivoting." 367 | ] 368 | }, 369 | { 370 | "cell_type": "code", 371 | "execution_count": 1, 372 | "metadata": {}, 373 | "outputs": [ 374 | { 375 | "name": "stdout", 376 | "output_type": "stream", 377 | "text": [ 378 | "[[ 5. 7. 5. 9.]\n", 379 | " [ 5. 14. 7. 10.]\n", 380 | " [ 20. 77. 41. 48.]\n", 381 | " [ 25. 91. 55. 
67.]]\n" 382 | ] 383 | } 384 | ], 385 | "source": [ 386 | "import numpy\n", 387 | "from scipy import linalg\n", 388 | "A=numpy.array([[ 5., 7., 5., 9.],\n", 389 | " [ 5., 14., 7., 10.],\n", 390 | " [20., 77., 41., 48.],\n", 391 | " [25., 91. ,55., 67.]])\n", 392 | "print(A)" 393 | ] 394 | }, 395 | { 396 | "cell_type": "code", 397 | "execution_count": 2, 398 | "metadata": {}, 399 | "outputs": [ 400 | { 401 | "name": "stdout", 402 | "output_type": "stream", 403 | "text": [ 404 | "[[ 0. 1. 0. 0.]\n", 405 | " [ 0. 0. 0. 1.]\n", 406 | " [ 0. 0. 1. 0.]\n", 407 | " [ 1. 0. 0. 0.]]\n" 408 | ] 409 | } 410 | ], 411 | "source": [ 412 | "P,L,U=linalg.lu(A)\n", 413 | "\n", 414 | "# P here is a 'permutation matrix' that performs swaps based upon partial pivoting\n", 415 | "print(P) " 416 | ] 417 | }, 418 | { 419 | "cell_type": "code", 420 | "execution_count": 3, 421 | "metadata": {}, 422 | "outputs": [ 423 | { 424 | "name": "stdout", 425 | "output_type": "stream", 426 | "text": [ 427 | "[[ 1. 0. 0. 0. ]\n", 428 | " [ 0.2 1. 0. 0. ]\n", 429 | " [ 0.8 -0.375 1. 0. ]\n", 430 | " [ 0.2 0.375 0.33333333 1. ]]\n" 431 | ] 432 | } 433 | ], 434 | "source": [ 435 | "# the lower-triangular matrix\n", 436 | "print(L) " 437 | ] 438 | }, 439 | { 440 | "cell_type": "code", 441 | "execution_count": 4, 442 | "metadata": {}, 443 | "outputs": [ 444 | { 445 | "name": "stdout", 446 | "output_type": "stream", 447 | "text": [ 448 | "[[ 25. 91. 55. 67. ]\n", 449 | " [ 0. -11.2 -6. -4.4 ]\n", 450 | " [ 0. 0. -5.25 -7.25 ]\n", 451 | " [ 0. 0. 0. 0.66666667]]\n" 452 | ] 453 | } 454 | ], 455 | "source": [ 456 | "# the upper-triangular matrix\n", 457 | "print(U) " 458 | ] 459 | }, 460 | { 461 | "cell_type": "code", 462 | "execution_count": 5, 463 | "metadata": {}, 464 | "outputs": [ 465 | { 466 | "name": "stdout", 467 | "output_type": "stream", 468 | "text": [ 469 | "[[ 5. 7. 5. 9.]\n", 470 | " [ 5. 14. 7. 10.]\n", 471 | " [ 20. 77. 41. 48.]\n", 472 | " [ 25. 91. 55. 
67.]]\n" 473 | ] 474 | } 475 | ], 476 | "source": [ 477 | "# double check that P*L*U does indeed equal A\n", 478 | "print(numpy.dot(P, numpy.dot(L, U)))" 479 | ] 480 | }, 481 | { 482 | "cell_type": "markdown", 483 | "metadata": {}, 484 | "source": [ 485 | "Looking at the form of $P$ above, we can re-order the rows in advance and consider the LU decomposition of the matrix where $P=I$, as below. As we haven't bothered implementing pivoting ourselves, check that your LU implementation recreates the $A$, $L$ and $U$ below." 486 | ] 487 | }, 488 | { 489 | "cell_type": "code", 490 | "execution_count": 6, 491 | "metadata": {}, 492 | "outputs": [ 493 | { 494 | "name": "stdout", 495 | "output_type": "stream", 496 | "text": [ 497 | "[[ 25. 91. 55. 67.]\n", 498 | " [ 5. 7. 5. 9.]\n", 499 | " [ 20. 77. 41. 48.]\n", 500 | " [ 5. 14. 7. 10.]]\n" 501 | ] 502 | } 503 | ], 504 | "source": [ 505 | "import numpy\n", 506 | "from scipy import linalg\n", 507 | "A=numpy.array([[25. ,91. ,55. ,67.],\n", 508 | " [ 5., 7., 5., 9.], \n", 509 | " [20., 77., 41., 48.],\n", 510 | " [ 5., 14., 7., 10.]])\n", 511 | "print(A)" 512 | ] 513 | }, 514 | { 515 | "cell_type": "code", 516 | "execution_count": 7, 517 | "metadata": {}, 518 | "outputs": [ 519 | { 520 | "name": "stdout", 521 | "output_type": "stream", 522 | "text": [ 523 | "[[ 1. 0. 0. 0.]\n", 524 | " [ 0. 1. 0. 0.]\n", 525 | " [ 0. 0. 1. 0.]\n", 526 | " [ 0. 0. 0. 1.]]\n" 527 | ] 528 | } 529 | ], 530 | "source": [ 531 | "P,L,U=linalg.lu(A)\n", 532 | "# P now should be the identity as pivoting no longer actually actions any row swaps with this A\n", 533 | "print(P)" 534 | ] 535 | }, 536 | { 537 | "cell_type": "code", 538 | "execution_count": 8, 539 | "metadata": {}, 540 | "outputs": [ 541 | { 542 | "name": "stdout", 543 | "output_type": "stream", 544 | "text": [ 545 | "[[ 1. 0. 0. 0. ]\n", 546 | " [ 0.2 1. 0. 0. ]\n", 547 | " [ 0.8 -0.375 1. 0. ]\n", 548 | " [ 0.2 0.375 0.33333333 1. 
]]\n" 549 | ] 550 | } 551 | ], 552 | "source": [ 553 | "print(L)" 554 | ] 555 | }, 556 | { 557 | "cell_type": "code", 558 | "execution_count": 9, 559 | "metadata": {}, 560 | "outputs": [ 561 | { 562 | "name": "stdout", 563 | "output_type": "stream", 564 | "text": [ 565 | "[[ 25. 91. 55. 67. ]\n", 566 | " [ 0. -11.2 -6. -4.4 ]\n", 567 | " [ 0. 0. -5.25 -7.25 ]\n", 568 | " [ 0. 0. 0. 0.66666667]]\n" 569 | ] 570 | } 571 | ], 572 | "source": [ 573 | "print(U)" 574 | ] 575 | }, 576 | { 577 | "cell_type": "code", 578 | "execution_count": 10, 579 | "metadata": {}, 580 | "outputs": [ 581 | { 582 | "name": "stdout", 583 | "output_type": "stream", 584 | "text": [ 585 | "[[ 25. 91. 55. 67.]\n", 586 | " [ 5. 7. 5. 9.]\n", 587 | " [ 20. 77. 41. 48.]\n", 588 | " [ 5. 14. 7. 10.]]\n" 589 | ] 590 | } 591 | ], 592 | "source": [ 593 | "print(numpy.dot(P, numpy.dot(L, U)))" 594 | ] 595 | }, 596 | { 597 | "cell_type": "markdown", 598 | "metadata": {}, 599 | "source": [ 600 | "### Exercise 6.3: Partial pivoting\n", 601 | "\n", 602 | "Implement partial pivoting." 
603 | ] 604 | }, 605 | { 606 | "cell_type": "code", 607 | "execution_count": null, 608 | "metadata": { 609 | "collapsed": true 610 | }, 611 | "outputs": [], 612 | "source": [] 613 | } 614 | ], 615 | "metadata": { 616 | "kernelspec": { 617 | "display_name": "Python 3", 618 | "language": "python", 619 | "name": "python3" 620 | }, 621 | "language_info": { 622 | "codemirror_mode": { 623 | "name": "ipython", 624 | "version": 3 625 | }, 626 | "file_extension": ".py", 627 | "mimetype": "text/x-python", 628 | "name": "python", 629 | "nbconvert_exporter": "python", 630 | "pygments_lexer": "ipython3", 631 | "version": "3.5.4" 632 | } 633 | }, 634 | "nbformat": 4, 635 | "nbformat_minor": 1 636 | } 637 | -------------------------------------------------------------------------------- /notebook/Lecture-7-Numerical-methods-1-extra-exercises-Solutions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Numerical Methods 1\n", 8 | "### [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman), [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott), [Nicolas Barral](http://www.imperial.ac.uk/people/n.barral)" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Numerical Linear Algebra: Extra exercises" 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "### Exercise 1: diagonally dominant matrices ($*$)\n", 23 | "\n", 24 | "A matrix $A$ is said to be diagonally dominant if for each row $i$ the absolute value of the diagonal element is larger than the sum of the absolute values of all the other terms of the row.\n", 25 | "\n", 26 | "- write this definition in a mathematical form.\n", 27 | "- write a code that checks if a matrix is diagonally dominant.\n", 28 | "- test it with well chosen 2x2 and 3x3 examples.\n" 29 | ] 30 | }, 31 | { 32 | "cell_type": 
"markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "*Def*: $A = (A_{i,j})_{i=1..n, j=1..n}$ is diagonally dominant if: \n", 36 | "$$\\forall i \\in [|1,n|]\\,,\\quad \\sum_{\\substack{j=1\\\\j\\neq i}}^n |A_{i,j}| \\leq |A_{i,i}|$$" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 1, 42 | "metadata": {}, 43 | "outputs": [ 44 | { 45 | "name": "stdout", 46 | "output_type": "stream", 47 | "text": [ 48 | "A1 is diagonally dominant\n", 49 | "A2 is diagonally dominant\n", 50 | "A3 is diagonally dominant\n", 51 | "!! Row 0 is not diagonally dominant\n" 52 | ] 53 | } 54 | ], 55 | "source": [ 56 | "import numpy as np\n", 57 | "\n", 58 | "def is_diag_dom(A):\n", 59 | " \n", 60 | " (n,m) = A.shape\n", 61 | " if n != m: \n", 62 | " print(\"Error: matrix must be square, not %dx%d\" % (n,m))\n", 63 | " exit(1)\n", 64 | " \n", 65 | " for i in range(n):\n", 66 | " abs_row = np.sum(np.abs(A[i,0:i])) + np.sum(np.abs(A[i,i+1:n]))\n", 67 | " if abs_row > abs(A[i,i]):\n", 68 | " print(\"!! 
Row %d is not diagonally dominant\" %i)\n", 69 | " return False\n", 70 | " \n", 71 | " return True\n", 72 | "\n", 73 | "\n", 74 | "A1 = np.array([[2,1],[2,3]])\n", 75 | "if is_diag_dom(A1): \n", 76 | " print(\"A1 is diagonally dominant\")\n", 77 | " \n", 78 | "A2 = np.array([[-2,1],[2,3]])\n", 79 | "if is_diag_dom(A2): \n", 80 | " print(\"A2 is diagonally dominant\")\n", 81 | " \n", 82 | "A3 = np.array([[-2,2],[2,3]])\n", 83 | "if is_diag_dom(A3): \n", 84 | " print(\"A3 is diagonally dominant\")\n", 85 | " \n", 86 | "A4 = np.array([[-2,4],[2,3]])\n", 87 | "if is_diag_dom(A4): \n", 88 | " print(\"A4 is diagonally dominant\")" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "### Exercise 2: singular matrices and ill-conditioning ($*$)\n", 96 | "\n", 97 | "For the following matrixes, compute the determinant and the condition number, and classify them as singular, ill conditioned or well conditioned:\n", 98 | "$$ (i)\\quad A = \n", 99 | " \\begin{pmatrix}\n", 100 | " 1 & 2 & 3 \\\\\n", 101 | " 2 & 3 & 4 \\\\\n", 102 | " 3 & 4 & 5 \\\\\n", 103 | " \\end{pmatrix}\n", 104 | "\\quad\\quad\\quad\\quad\n", 105 | "(ii)\\quad A = \n", 106 | " \\begin{pmatrix}\n", 107 | " 2.11 & -0.80 & 1.72 \\\\\n", 108 | " -1.84 & 3.03 & 1.29 \\\\\n", 109 | " -1.57 & 5.25 & 4.30 \\\\\n", 110 | " \\end{pmatrix}\n", 111 | "$$\n", 112 | "$$ (iii)\\quad A = \n", 113 | " \\begin{pmatrix}\n", 114 | " 2 & -1 & 0 \\\\\n", 115 | " -1 & 2 & -1 \\\\\n", 116 | " 0 & -1 & 2 \\\\\n", 117 | " \\end{pmatrix}\n", 118 | "\\quad\\quad\\quad\\quad\n", 119 | "(iv)\\quad A = \n", 120 | " \\begin{pmatrix}\n", 121 | " 4 & 3 & -1 \\\\\n", 122 | " 7 & -2 & 3 \\\\\n", 123 | " 5 & -18 & 13 \\\\\n", 124 | " \\end{pmatrix}\\,.\n", 125 | "$$\n" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 2, 131 | "metadata": {}, 132 | "outputs": [ 133 | { 134 | "name": "stdout", 135 | "output_type": "stream", 136 | "text": [ 137 | "determinant: 
-7.40148683083e-17 condition number: 4.06529463355e+16\n", 138 | "determinant: 0.058867 condition number: 3218.33254148\n", 139 | "determinant: 4.0 condition number: 5.82842712475\n", 140 | "determinant: 290.0 condition number: 10.2951336645\n" 141 | ] 142 | } 143 | ], 144 | "source": [ 145 | "A = np.array([[1.,2.,3.],[2.,3.,4.],[3.,4.,5.]])\n", 146 | "print(\"determinant: \",np.linalg.det(A), \" condition number: \", np.linalg.cond(A))\n", 147 | "# => singular\n", 148 | "\n", 149 | "A = np.array([[2.11,-0.8,1.72],[-1.84,3.03,1.29],[-1.57,5.25,4.30]])\n", 150 | "print(\"determinant: \",np.linalg.det(A), \" condition number: \", np.linalg.cond(A))\n", 151 | "# => ill conditioned\n", 152 | "\n", 153 | "A = np.array([[2,-1,0],[-1,2,-1],[0,-1,2]])\n", 154 | "print(\"determinant: \",np.linalg.det(A), \" condition number: \", np.linalg.cond(A))\n", 155 | "# => well conditioned\n", 156 | "\n", 157 | "A = np.array([[4,3,-1],[7,-2,3],[5,-18,3]])\n", 158 | "print(\"determinant: \",np.linalg.det(A), \" condition number: \", np.linalg.cond(A))\n", 159 | "# => well conditioned\n" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "### Exercise 3: Hilbert matrices ($**$) \n", 167 | "\n", 168 | "The *Hilbert matrix* is a classic example of ill-conditioned matrix:\n", 169 | "\n", 170 | "$$\n", 171 | "A = \n", 172 | " \\begin{pmatrix}\n", 173 | " 1 & 1/2 & 1/3 & \\cdots \\\\\n", 174 | " 1/2 & 1/3 & 1/4 & \\cdots \\\\\n", 175 | " 1/3 & 1/4 & 1/5 & \\cdots \\\\\n", 176 | " \\vdots & \\vdots & \\vdots & \\ddots \\\\\n", 177 | "\\end{pmatrix}\\,.\n", 178 | "$$\n", 179 | "\n", 180 | "Let's consider the linear system $A\\pmb{x}=\\pmb{b}$ where \n", 181 | "$$ b_i = \\sum_{j=1}^n A_{ij},\\quad \\textrm{for}\\quad i=1,2,\\ldots, n.$$\n", 182 | "\n", 183 | " - How can you write entry $A_{ij}$ for any $i$ and $j$ ?\n", 184 | " - Convince yourself by pen and paper that $ \\pmb{x} = \\left[ 1, 1, \\cdots 1\\right]^T$ is the solution of the 
system.\n", 185 | " - Write a function that returns $A$ and $b$ for a given $n$.\n", 186 | " - For a range of $n$, compute the condition number of $A$, solve the linear system and compute the error ($err = \\sum_{i=1}^n \\left|x_{computed, i}-x_{exact, i}\\right|$). What do you observe ?" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": 3, 192 | "metadata": {}, 193 | "outputs": [ 194 | { 195 | "name": "stdout", 196 | "output_type": "stream", 197 | "text": [ 198 | "0.0\n", 199 | "7.77156117238e-16\n", 200 | "2.09832151654e-14\n", 201 | "6.70352662269e-13\n", 202 | "2.11604067601e-11\n", 203 | "1.44516076883e-09\n", 204 | "3.66414778519e-08\n", 205 | "8.40174469996e-07\n", 206 | "3.85210817418e-05\n", 207 | "0.000201421110662\n", 208 | "0.0491608428805\n", 209 | "2.15017646028\n", 210 | "33.4805161651\n", 211 | "82.1156811794\n", 212 | "57.8465442914\n", 213 | "50.3157179178\n", 214 | "184.669016639\n", 215 | "717.132665317\n", 216 | "85.0752753201\n", 217 | "132.220624402\n", 218 | "205.685184874\n", 219 | "558.112735072\n", 220 | "1494.06697171\n", 221 | "251.210568653\n", 222 | "474.557513307\n", 223 | "3568.99989388\n", 224 | "473.777975789\n", 225 | "966.807705233\n", 226 | "1962.26407119\n" 227 | ] 228 | } 229 | ], 230 | "source": [ 231 | "import numpy as np\n", 232 | "\n", 233 | "def hilbert(n):\n", 234 | " A = np.zeros((n,n))\n", 235 | " b = np.zeros(n)\n", 236 | " for i in range(n):\n", 237 | " for j in range(n):\n", 238 | " A[i,j] = 1./(i+j+1)\n", 239 | " b[i] = np.sum(A[i,:])\n", 240 | " return A,b\n", 241 | "\n", 242 | "for n in range(1,30):\n", 243 | " A,b = hilbert(n)\n", 244 | " x = np.linalg.solve(A,b)\n", 245 | " x_exact = np.ones(n)\n", 246 | " error = np.sum(abs(x-x_exact))\n", 247 | " print(error)\n", 248 | " \n", 249 | "# Error between numerical solution and exact solution is large and varies a lot for n > 14 => ill conditioned" 250 | ] 251 | }, 252 | { 253 | "cell_type": "markdown", 254 | "metadata": {}, 255 | 
"source": [ 256 | "### Exercise 4: Vandermonde matrices ($**$) \n", 257 | "\n", 258 | "A *Vandermonde matrix* is defined as follows, for any $\\alpha_1, \\dots, \\alpha_n$ real numbers:\n", 259 | "$$V=\\begin{pmatrix}\n", 260 | "1 & \\alpha_1 & {\\alpha_1}^2 & \\dots & {\\alpha_1}^{n-1}\\\\\n", 261 | "1 & \\alpha_2 & {\\alpha_2}^2 & \\dots & {\\alpha_2}^{n-1}\\\\\n", 262 | "1 & \\alpha_3 & {\\alpha_3}^2 & \\dots & {\\alpha_3}^{n-1}\\\\\n", 263 | "\\vdots & \\vdots & \\vdots & &\\vdots \\\\\n", 264 | "1 & \\alpha_n & {\\alpha_n}^2 & \\dots & {\\alpha_n}^{n-1}\\\\\n", 265 | "\\end{pmatrix}$$\n", 266 | "\n", 267 | " - Write a function that takes a real number $\\alpha$ and an integer $n$ as input, and returns a **vector** $v = \\left(1, \\alpha, \\alpha^2, \\dots, \\alpha^{n-1}\\right)$\n", 268 | " - Using this function, write a function that takes a vector $a = \\left(\\alpha_1, \\alpha_2, \\dots, \\alpha_n\\right)$ as input and returns the corresponding Vandermonde matrix.\n", 269 | " - For different sets of randomly chosen $(\\alpha_i)$, compute the determinant of the corresponding Vandermonde matrix. 
What does it tell us regarding the matrix conditioning ?\n" 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": 4, 275 | "metadata": {}, 276 | "outputs": [ 277 | { 278 | "data": { 279 | "text/plain": [ 280 | "287.99999999999676" 281 | ] 282 | }, 283 | "execution_count": 4, 284 | "metadata": {}, 285 | "output_type": "execute_result" 286 | } 287 | ], 288 | "source": [ 289 | "import scipy.linalg as la\n", 290 | "from pprint import pprint\n", 291 | "\n", 292 | "def vdm_row(alpha,n):\n", 293 | " row = np.zeros(n)\n", 294 | " cur_alpha = 1\n", 295 | " for i in range(n):\n", 296 | " row[i] = cur_alpha\n", 297 | " cur_alpha *= alpha\n", 298 | " return row\n", 299 | "\n", 300 | "def vdm(alpha_vec):\n", 301 | " n = alpha_vec.size\n", 302 | " A = np.zeros((n,n))\n", 303 | " for i in range(n):\n", 304 | " A[i,:] = vdm_row(alpha_vec[i],n)\n", 305 | " return A\n", 306 | "\n", 307 | "alphas = np.array([1,2,3, 4, 5])\n", 308 | "V = vdm(alphas)\n", 309 | "\n", 310 | "la.det(V)\n", 311 | "\n", 312 | "# det is large compared to matrix entries => not very well conditioned" 313 | ] 314 | }, 315 | { 316 | "cell_type": "markdown", 317 | "metadata": {}, 318 | "source": [ 319 | "### Exercise 5: LU solve ($**$) \n", 320 | "\n", 321 | "Write a function that solves a linear system $A\pmb{x}=\pmb{b}$ using the LU decomposition method.\n", 322 | "\n", 323 | "Hint: you can re-use the function you have written in lecture 6, or use the built-in function *linalg.lu* to compute the LU decomposition. Write code that performs the forward substitution and backward substitution. 
Compare your result to the one given by *linalg.solve*.\n" 324 | ] 325 | }, 326 | { 327 | "cell_type": "code", 328 | "execution_count": 5, 329 | "metadata": {}, 330 | "outputs": [], 331 | "source": [ 332 | "def LU_dec(A):\n", 333 | " # upper triangular matrix contains gaussian elimination result\n", 334 | " # we won't change A in-place but create a local copy\n", 335 | " A = A.copy()\n", 336 | " m, n = A.shape\n", 337 | " # lower triangular matrix has identity diagonal and scaling factors\n", 338 | " L = np.identity(n)\n", 339 | " # Loop over each pivot row.\n", 340 | " for k in range(n):\n", 341 | " # Loop over each equation below the pivot row.\n", 342 | " for i in range(k+1, n):\n", 343 | " # Define the scaling factor outside the innermost\n", 344 | " # loop otherwise its value gets changed.\n", 345 | " s = (A[i,k]/A[k,k])\n", 346 | " for j in range(k, n):\n", 347 | " A[i,j] = A[i,j] - s*A[k,j]\n", 348 | " # scaling factors make up the lower matrix \n", 349 | " L[i,k] = s\n", 350 | " # A now is the upper triangular matrix U\n", 351 | " return L, A\n", 352 | "\n", 353 | "# This function assumes that A is already an upper triangular matrix.\n", 354 | "def backward_substitution(A, b):\n", 355 | " n = np.size(b)\n", 356 | " \n", 357 | " x = np.zeros(n)\n", 358 | " for k in range(n-1, -1, -1):\n", 359 | " s = 0.\n", 360 | " for j in range(k+1, n):\n", 361 | " s = s + A[k, j]*x[j]\n", 362 | " x[k] = (b[k] - s)/A[k, k]\n", 363 | " \n", 364 | " return x\n", 365 | "\n", 366 | "\n", 367 | "# This function assumes that A is already a lower triangular matrix.\n", 368 | "def forward_substitution(A, b):\n", 369 | " n = np.size(b)\n", 370 | " \n", 371 | " x = np.zeros(n)\n", 372 | " for k in range(n):\n", 373 | " s = 0.\n", 374 | " for j in range(k):\n", 375 | " s = s + A[k, j]*x[j]\n", 376 | " x[k] = (b[k] - s)/A[k, k]\n", 377 | " \n", 378 | " return x" 379 | ] 380 | }, 381 | { 382 | "cell_type": "markdown", 383 | "metadata": {}, 384 | "source": [ 385 | "*Reminder*: if $A = 
LU$ then:\n", 386 | "$$ A {\\bf x} = {\\bf b} \\Leftrightarrow LU{\\bf x} = {\\bf b} \\Leftrightarrow L(U{\\bf x}) = {\\bf b} \\Leftrightarrow \\left\\{\\begin{align*}L{\\bf y} = {\\bf b} \\quad (1) \\\\ U{\\bf x} = {\\bf y} \\quad (2) \\end{align*} \\right. $$\n", 387 | "Hence the algorithm to solve $A {\\bf x} = {\\bf b}$: first, solve (1) with forward substitution, then solve (2) with backward substitution." 388 | ] 389 | }, 390 | { 391 | "cell_type": "code", 392 | "execution_count": 6, 393 | "metadata": {}, 394 | "outputs": [ 395 | { 396 | "name": "stdout", 397 | "output_type": "stream", 398 | "text": [ 399 | "True\n" 400 | ] 401 | } 402 | ], 403 | "source": [ 404 | "def LU_solve(A,b):\n", 405 | " \n", 406 | " L,U = LU_dec(A)\n", 407 | " y = forward_substitution(L,b)\n", 408 | " x = backward_substitution(U,y)\n", 409 | " \n", 410 | " return x\n", 411 | "\n", 412 | "\n", 413 | "A = np.array([[1.,0.,3.,7.],[2.,1.,0.,4.],[5.,4.,1.,-2.],[4.,1.,6.,2.]])\n", 414 | "b = np.array([1.,2.,-3.,2.])\n", 415 | "\n", 416 | "# Always check that our implementation produces the right result\n", 417 | "print(np.allclose(np.linalg.solve(A,b), LU_solve(A,b)))" 418 | ] 419 | }, 420 | { 421 | "cell_type": "markdown", 422 | "metadata": {}, 423 | "source": [ 424 | "### Exercise 6: Gauss-Seidel relaxation ($***$)\n", 425 | "\n", 426 | "Convergence of the Gauss-Seidel method can be improved by a technique known\n", 427 | "as relaxation. The idea is to take the new value of $x_i$ as a weighted average of its previous value and the value predicted by the regular Gauss-Seidel iteration. 
\n", 428 | "\n", 429 | "The corresponding formula for the $k^{th}$ iteration of the algorithm and the $i^{th}$ row is:\n", 430 | "\n", 431 | "$$x_i^{(k)} = \\frac{\\omega}{A_{ii}}\\left(b_i- \\sum_{\\substack{j=1}}^{i-1} A_{ij}x_j^{(k)} - \\sum_{\\substack{j=i+1}}^n A_{ij}x_j^{(k-1)}\\right) + (1-\\omega)x_i^{(k-1)},\\quad i=1,2,\\ldots, n.$$\n", 432 | "\n", 433 | "where the weight $\\omega$ is called the relaxation factor and is usually positive.\n", 434 | "\n", 435 | "- What does the algorithm give for $\\omega = 0$ ? for $\\omega = 1$ ? for $0 < \\omega < 1$ ? When $\\omega > 1$, the method is called \"over-relaxation\".\n", 436 | "- Write a function that solves a system with the relaxed Gauss-Seidel's algorithm, for a given $\\omega$.\n", 437 | "- Use this function to solve the system from Lecture 7, for different values of $\\omega$. How many iterations are necessary to reach a tolerance of $10^{-6}$ for each value of $\\omega$ ?\n", 438 | "\n", 439 | "\n" 440 | ] 441 | }, 442 | { 443 | "cell_type": "code", 444 | "execution_count": 7, 445 | "metadata": {}, 446 | "outputs": [ 447 | { 448 | "name": "stdout", 449 | "output_type": "stream", 450 | "text": [ 451 | "For omega=0.50 48 relaxed GS iterations are needed to reach a tolerance of 1e-6\n", 452 | "For omega=0.90 18 relaxed GS iterations are needed to reach a tolerance of 1e-6\n", 453 | "For omega=0.95 16 relaxed GS iterations are needed to reach a tolerance of 1e-6\n", 454 | "For omega=1.00 14 relaxed GS iterations are needed to reach a tolerance of 1e-6\n", 455 | "For omega=1.10 12 relaxed GS iterations are needed to reach a tolerance of 1e-6\n", 456 | "For omega=1.50 29 relaxed GS iterations are needed to reach a tolerance of 1e-6\n" 457 | ] 458 | } 459 | ], 460 | "source": [ 461 | "A = np.array([[10., 2., 3., 5.],\n", 462 | " [1., 14., 6., 2.],\n", 463 | " [-1., 4., 16.,-4],\n", 464 | " [5. ,4. ,3. 
,11.]])\n", 465 | "b = np.array([1., 2., 3., 4.])\n", 466 | "\n", 467 | "def gauss_seidel_rel_V1(A, b, omega, maxit=500, tol=1.e-6):\n", 468 | " m, n = A.shape\n", 469 | " x = np.zeros_like(b)\n", 470 | " for k in range(maxit):\n", 471 | " for i in range(m):\n", 472 | " x[i] = omega/A[i,i] * (b[i] - np.dot(A[i,:i], x[:i]) - np.dot(A[i,i+1:], x[i+1:])) + (1-omega)*x[i]\n", 473 | " residual = np.linalg.norm(np.dot(A, x) - b)\n", 474 | "# print(\"iteration: %d residual: %e\" %(k,residual))\n", 475 | " if (residual < tol): break\n", 476 | " \n", 477 | " return x,k\n", 478 | "\n", 479 | "# for omega = 0: nothing happens and the system is not solved\n", 480 | "# for omega = 1: standard Gauss-Seidel\n", 481 | "# for 0 < omega < 1: the new x is taken as an average of the old x and the new one. \n", 482 | "# Instead of moving directly from x^(k) to the x^(k+1) given by GS, you stop somewhere in the middle\n", 483 | "# ==> usually slows down the convergence\n", 484 | "\n", 485 | "for omega in [0.5, 0.9, 0.95, 1, 1.1, 1.5]:\n", 486 | " x,i = gauss_seidel_rel_V1(A, b, omega)\n", 487 | " print(\"For omega=%1.2f %d relaxed GS iterations are needed to reach a tolerance of 1e-6\" % (omega,i))" 488 | ] 489 | }, 490 | { 491 | "cell_type": "markdown", 492 | "metadata": {}, 493 | "source": [ 494 | "$\\omega$ cannot be determined beforehand for an arbitrary system, \n", 495 | "however, an estimate can be computed during run time. \n", 496 | "\n", 497 | "Let $\\Delta x^{(k)} = | x^{(k)} - x^{(k-1)} |$ be the magnitude of the change in x during the $k^{th}$ iteration. 
\n", 498 | "If $k$ is sufficiently large (say $k \\geq 5$), it can be shown that an approximation of the optimal value of $\\omega$ is:\n", 499 | "$$\n", 500 | "\\omega_{opt} \\approx \\frac{2}{1+\\sqrt{1-\\Delta x^{(k+1)} / \\Delta x^{(k)}}} \\,.\n", 501 | "$$\n", 502 | "\n", 503 | "The relaxed Gauss-Seidel algorithm can be summarised as follows: \n", 504 | "Carry out $k$ iterations with $\\omega = 1$ (usually $k=10$ for big systems) \n", 505 | "Record \t$\\Delta x^{(k)}$ \n", 506 | "Perform an additional iteration \n", 507 | "Record \t$\\Delta x^{(k+1)}$ \n", 508 | "Compute $\\omega_{opt}$ \n", 509 | "Perform all subsequent iterations with $\\omega = \\omega_{opt}$\n", 510 | "\n", 511 | "\n", 512 | " - Modify previous function to compute automatically the relaxation parameter $\\omega$. Compute $\\omega_{opt}$ after $k=6$ iterations as the system is small.\n", 513 | " - Solve the previous system with this new function. What is the value of $\\omega$ ? How many iterations are necessary to reach a tolerance of $10^{-6}$ ?\n", 514 | " \n", 515 | " \n" 516 | ] 517 | }, 518 | { 519 | "cell_type": "code", 520 | "execution_count": 8, 521 | "metadata": {}, 522 | "outputs": [ 523 | { 524 | "name": "stdout", 525 | "output_type": "stream", 526 | "text": [ 527 | "Omega given by the formula after 6 iterations: 1.100. 
12 relaxed GS iterations are needed to reach a tolerance of 1e-6\n" 528 | ] 529 | } 530 | ], 531 | "source": [ 532 | "from math import sqrt\n", 533 | "\n", 534 | "def gauss_seidel_rel_V2(A, b, k_relax=10, omega_ini=1., maxit=500, tol=1.e-6):\n", 535 | " m, n = A.shape\n", 536 | " x = np.zeros_like(b)\n", 537 | " omega = omega_ini\n", 538 | " for k in range(maxit):\n", 539 | " for i in range(m):\n", 540 | " x[i] = omega/A[i,i] * (b[i] - np.dot(A[i,:i], x[:i]) - np.dot(A[i,i+1:], x[i+1:])) + (1-omega)*x[i]\n", 541 | " residual = np.linalg.norm(np.dot(A, x) - b)\n", 542 | "# print(\"iteration: %d residual: %e\" %(k,residual))\n", 543 | " if (residual < tol): break\n", 544 | " \n", 545 | " if (k == k_relax): \n", 546 | " res_prev = residual\n", 547 | " if (k == k_relax+1): \n", 548 | " res = residual\n", 549 | " omega = 2/(1+sqrt(1-res/res_prev))\n", 550 | " return x,k,omega\n", 551 | "\n", 552 | "k = 6\n", 553 | "x,i,omega = gauss_seidel_rel_V2(A, b, k_relax=k)\n", 554 | "print(\"Omega given by the formula after %d iterations: %1.3f. %d relaxed GS iterations are needed to reach a tolerance of 1e-6\" % (k,omega,i))" 555 | ] 556 | }, 557 | { 558 | "cell_type": "markdown", 559 | "metadata": {}, 560 | "source": [ 561 | "#### A bigger example\n", 562 | "\n", 563 | "Let's consider $A\\pmb{x}=\\pmb{b}$ where:\n", 564 | "\n", 565 | "$$\n", 566 | "A = \\begin{pmatrix}\n", 567 | "5 & -2 & 0 & 0 & \\cdots & 0 \\\\\n", 568 | "-2 & 5 & -2 & 0 & \\cdots & 0 \\\\\n", 569 | "0 & -2 & 5 & -2 & \\cdots & 0 \\\\\n", 570 | "\\vdots & & & \\ddots & & \\vdots \\\\ \n", 571 | " & & & & 5 & -2 \\\\\n", 572 | "0 & \\cdots & & & -2 & 5 \\\\ \n", 573 | "\\end{pmatrix}\n", 574 | "$$\n", 575 | "and\n", 576 | "$$\n", 577 | "b = \\left(0, 0, \\cdots 0, 1000 \\right)^T\n", 578 | "$$\n", 579 | "\n", 580 | " - Solve $A\\pmb{x}=\\pmb{b}$ using the relaxed Gauss-Seidel algorithm for $n=3000$. Compare the number of iterations with the algorithm without relaxation." 
581 | ] 582 | }, 583 | { 584 | "cell_type": "code", 585 | "execution_count": 9, 586 | "metadata": {}, 587 | "outputs": [ 588 | { 589 | "name": "stdout", 590 | "output_type": "stream", 591 | "text": [ 592 | "Without relaxation (omega=1.000) 44 iterations are required to reach a tolerance of 1e-6\n", 593 | "With relaxation (omega=1.250) 31 iterations are required to reach a tolerance of 1e-6\n" 594 | ] 595 | } 596 | ], 597 | "source": [ 598 | "n = 3000\n", 599 | "A_big = 5*np.eye(n)\n", 600 | "for i in range(n-1):\n", 601 | " A_big[i,i+1] = -2.\n", 602 | " A_big[i+1,i] = -2.\n", 603 | "b_big = np.zeros(n)\n", 604 | "b_big[n-1] = 1000.\n", 605 | "\n", 606 | "x,i,omega = gauss_seidel_rel_V2(A_big, b_big, k_relax=-2)\n", 607 | "print(\"Without relaxation (omega=%1.3f) %d iterations are required to reach a tolerance of 1e-6\" % (omega,i))\n", 608 | "\n", 609 | "x,i,omega = gauss_seidel_rel_V2(A_big, b_big, k_relax=10)\n", 610 | "print(\"With relaxation (omega=%1.3f) %d iterations are required to reach a tolerance of 1e-6\" % (omega,i))\n", 611 | "\n" 612 | ] 613 | } 614 | ], 615 | "metadata": { 616 | "kernelspec": { 617 | "display_name": "Python 3", 618 | "language": "python", 619 | "name": "python3" 620 | }, 621 | "language_info": { 622 | "codemirror_mode": { 623 | "name": "ipython", 624 | "version": 3 625 | }, 626 | "file_extension": ".py", 627 | "mimetype": "text/x-python", 628 | "name": "python", 629 | "nbconvert_exporter": "python", 630 | "pygments_lexer": "ipython3", 631 | "version": "3.6.2" 632 | } 633 | }, 634 | "nbformat": 4, 635 | "nbformat_minor": 1 636 | } 637 | -------------------------------------------------------------------------------- /notebook/Lecture-7-Numerical-methods-1-extra-exercises.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Numerical Methods 1\n", 8 | "### [Gerard 
Gorman](http://www.imperial.ac.uk/people/g.gorman), [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott), [Nicolas Barral](http://www.imperial.ac.uk/people/n.barral)" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "# Numerical Linear Algebra: Extra exercises" 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "### Exercise 1: diagonally dominant matrices ($*$)\n", 23 | "\n", 24 | "A matrix $A$ is said to be diagonally dominant if for each row $i$ the absolute value of the diagonal element is larger than the sum of the absolute values of all the other terms of the row.\n", 25 | "\n", 26 | "- write this definition in a mathematical form.\n", 27 | "- write a code that checks if a matrix is diagonally dominant.\n", 28 | "- test it with well chosen 2x2 and 3x3 examples.\n" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": { 35 | "collapsed": true 36 | }, 37 | "outputs": [], 38 | "source": [] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "### Exercise 2: singular matrices and ill-conditioning ($*$)\n", 45 | "\n", 46 | "For the following matrixes, compute the determinant and the condition number, and classify them as singular, ill conditioned or well conditioned:\n", 47 | "$$ (i)\\quad A = \n", 48 | " \\begin{pmatrix}\n", 49 | " 1 & 2 & 3 \\\\\n", 50 | " 2 & 3 & 4 \\\\\n", 51 | " 3 & 4 & 5 \\\\\n", 52 | " \\end{pmatrix}\n", 53 | "\\quad\\quad\\quad\\quad\n", 54 | "(ii)\\quad A = \n", 55 | " \\begin{pmatrix}\n", 56 | " 2.11 & -0.80 & 1.72 \\\\\n", 57 | " -1.84 & 3.03 & 1.29 \\\\\n", 58 | " -1.57 & 5.25 & 4.30 \\\\\n", 59 | " \\end{pmatrix}\n", 60 | "$$\n", 61 | "$$ (iii)\\quad A = \n", 62 | " \\begin{pmatrix}\n", 63 | " 2 & -1 & 0 \\\\\n", 64 | " -1 & 2 & -1 \\\\\n", 65 | " 0 & -1 & 2 \\\\\n", 66 | " \\end{pmatrix}\n", 67 | "\\quad\\quad\\quad\\quad\n", 68 | "(iv)\\quad A = \n", 69 | " 
\\begin{pmatrix}\n", 70 | " 4 & 3 & -1 \\\\\n", 71 | " 7 & -2 & 3 \\\\\n", 72 | " 5 & -18 & 13 \\\\\n", 73 | " \\end{pmatrix}\\,.\n", 74 | "$$\n" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": { 81 | "collapsed": true 82 | }, 83 | "outputs": [], 84 | "source": [] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "metadata": {}, 89 | "source": [ 90 | "### Exercise 3: Hilbert matrices ($**$) \n", 91 | "\n", 92 | "The *Hilbert matrix* is a classic example of ill-conditioned matrix:\n", 93 | "\n", 94 | "$$\n", 95 | "A = \n", 96 | " \\begin{pmatrix}\n", 97 | " 1 & 1/2 & 1/3 & \\cdots \\\\\n", 98 | " 1/2 & 1/3 & 1/4 & \\cdots \\\\\n", 99 | " 1/3 & 1/4 & 1/5 & \\cdots \\\\\n", 100 | " \\vdots & \\vdots & \\vdots & \\ddots \\\\\n", 101 | "\\end{pmatrix}\\,.\n", 102 | "$$\n", 103 | "\n", 104 | "Let's consider the linear system $A\\pmb{x}=\\pmb{b}$ where \n", 105 | "$$ b_i = \\sum_{j=1}^n A_{ij},\\quad \\textrm{for}\\quad i=1,2,\\ldots, n.$$\n", 106 | "\n", 107 | " - How can you write entry $A_{ij}$ for any $i$ and $j$ ?\n", 108 | " - Convince yourself by pen and paper that $ \\pmb{x} = \\left[ 1, 1, \\cdots 1\\right]^T$ is the solution of the system.\n", 109 | " - Write a function that returns $A$ and $b$ for a given $n$.\n", 110 | " - For a range of $n$, compute the condition number of $A$, solve the linear system and compute the error ($err = \\sum_{i=1}^n \\left|x_{computed, i}-x_{exact, i}\\right|$). What do you observe ?" 
111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "metadata": {}, 117 | "outputs": [], 118 | "source": [] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": {}, 123 | "source": [ 124 | "### Exercise 4: Vandermonde matrices ($**$) \n", 125 | "\n", 126 | "A *Vandermonde matrix* is defined as follows, for any $\\alpha_1, \\dots, \\alpha_n$ real numbers:\n", 127 | "$$V=\\begin{pmatrix}\n", 128 | "1 & \\alpha_1 & {\\alpha_1}^2 & \\dots & {\\alpha_1}^{n-1}\\\\\n", 129 | "1 & \\alpha_2 & {\\alpha_2}^2 & \\dots & {\\alpha_2}^{n-1}\\\\\n", 130 | "1 & \\alpha_3 & {\\alpha_3}^2 & \\dots & {\\alpha_3}^{n-1}\\\\\n", 131 | "\\vdots & \\vdots & \\vdots & &\\vdots \\\\\n", 132 | "1 & \\alpha_n & {\\alpha_n}^2 & \\dots & {\\alpha_n}^{n-1}\\\\\n", 133 | "\\end{pmatrix}$$\n", 134 | "\n", 135 | " - Write a function that takes a real number $\\alpha$ and an integer $n$ as input, and returns a **vector** $v = \\left(1, \\alpha, \\alpha^2, \\dots, \\alpha^{n-1}\\right)$\n", 136 | " - Using this function, write a function that takes a vector $a = \\left(\\alpha_1, \\alpha_2, \\dots, \\alpha_n\\right)$ as input and returns the corresponding Vandermonde matrix.\n", 137 | " - For different sets of randomly chosen $(\\alpha_i)$, compute the determinant of the corresponding Vandermonde matrix. What does it tell us regarding the matrix conditioning ?\n" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "metadata": {}, 144 | "outputs": [], 145 | "source": [] 146 | }, 147 | { 148 | "cell_type": "markdown", 149 | "metadata": {}, 150 | "source": [ 151 | "### Exercise 5: LU solve ($**$) \n", 152 | "\n", 153 | "Write a function that solves a linear system $A\\pmb{x}=\\pmb{b}$ using the LU decomposition method.\n", 154 | "\n", 155 | "Hint: you can re-use the function you have written in lecture 6, or use the built-in function *linalg.lu* to compute the LU decomposition. 
Write code that performs the forward substitution and backward substitution. Compare your result to the one given by *linalg.solve*.\n" 156 | ] 157 | }, 158 | { 159 | "cell_type": "markdown", 160 | "metadata": {}, 161 | "source": [ 162 | "### Exercise 6: Gauss-Seidel relaxation ($***$)\n", 163 | "\n", 164 | "Convergence of the Gauss-Seidel method can be improved by a technique known\n", 165 | "as relaxation. The idea is to take the new value of $x_i$ as a weighted average of its previous value and the value predicted by the regular Gauss-Seidel iteration. \n", 166 | "\n", 167 | "The corresponding formula for the $k^{th}$ iteration of the algorithm and the $i^{th}$ row is:\n", 168 | "\n", 169 | "$$x_i^{(k)} = \\frac{\\omega}{A_{ii}}\\left(b_i- \\sum_{\\substack{j=1}}^{i-1} A_{ij}x_j^{(k)} - \\sum_{\\substack{j=i+1}}^n A_{ij}x_j^{(k-1)}\\right) + (1-\\omega)x_i^{(k-1)},\\quad i=1,2,\\ldots, n.$$\n", 170 | "\n", 171 | "where the weight $\\omega$ is called the relaxation factor and is usually positive.\n", 172 | "\n", 173 | "- What does the algorithm give for $\\omega = 0$ ? for $\\omega = 1$ ? for $0 < \\omega < 1$ ? When $\\omega > 1$, the method is called \"over-relaxation\".\n", 174 | "- Write a function that solves a system with the relaxed Gauss-Seidel algorithm, for a given $\\omega$.\n", 175 | "- Use this function to solve the system from Lecture 7, for different values of $\\omega$. How many iterations are necessary to reach a tolerance of $10^{-6}$ for each value of $\\omega$ ?\n", 176 | "\n", 177 | "$\\omega$ cannot be determined beforehand for an arbitrary system, \n", 178 | "however, an estimate can be computed during run time. \n", 179 | "\n", 180 | "Let $\\Delta x^{(k)} = | x^{(k)} - x^{(k-1)} |$ be the magnitude of the change in $x$ during the $k^{th}$ iteration. 
\n", 181 | "If $k$ is sufficiently large (say $k \\geq 5$), it can be shown that an approximation of the optimal value of $\\omega$ is:\n", 182 | "$$\n", 183 | "\\omega_{opt} \\approx \\frac{2}{1+\\sqrt{1-\\Delta x^{(k+1)} / \\Delta x^{(k)}}} \\,.\n", 184 | "$$\n", 185 | "\n", 186 | "The relaxed Gauss-Seidel algorithm can be summarised as follows: \n", 187 | "Carry out $k$ iterations with $\\omega = 1$ (usually $k=10$ for big systems) \n", 188 | "Record \t$\\Delta x^{(k)}$ \n", 189 | "Perform an additional iteration \n", 190 | "Record \t$\\Delta x^{(k+1)}$ \n", 191 | "Compute $\\omega_{opt}$ \n", 192 | "Perform all subsequent iterations with $\\omega = \\omega_{opt}$\n", 193 | "\n", 194 | "\n", 195 | " - Modify the previous function to automatically compute the relaxation parameter $\\omega$. Compute $\\omega_{opt}$ after $k=6$ iterations, as the system is small.\n", 196 | " - Solve the previous system with this new function. What is the value of $\\omega$ ? How many iterations are necessary to reach a tolerance of $10^{-6}$ ?\n", 197 | " \n", 198 | " \n", 199 | "#### A bigger example\n", 200 | "\n", 201 | "Let's consider $A\\pmb{x}=\\pmb{b}$ where:\n", 202 | "\n", 203 | "$$\n", 204 | "A = \\begin{pmatrix}\n", 205 | "5 & -2 & 0 & 0 & \\cdots & 0 \\\\\n", 206 | "-2 & 5 & -2 & 0 & \\cdots & 0 \\\\\n", 207 | "0 & -2 & 5 & -2 & \\cdots & 0 \\\\\n", 208 | "\\vdots & & & \\ddots & & \\vdots \\\\ \n", 209 | " & & & & 5 & -2 \\\\\n", 210 | "0 & \\cdots & & & -2 & 5 \\\\ \n", 211 | "\\end{pmatrix}\n", 212 | "$$\n", 213 | "and\n", 214 | "$$\n", 215 | "b = \\left(0, 0, \\cdots 0, 1000 \\right)^T\n", 216 | "$$\n", 217 | "\n", 218 | " - Solve $A\\pmb{x}=\\pmb{b}$ using the relaxed Gauss-Seidel algorithm for $n=3000$. 
Compare the number of iterations with the algorithm without relaxation.\n" 219 | ] 220 | }, 221 | { 222 | "cell_type": "code", 223 | "execution_count": null, 224 | "metadata": {}, 225 | "outputs": [], 226 | "source": [] 227 | } 228 | ], 229 | "metadata": { 230 | "kernelspec": { 231 | "display_name": "Python 3", 232 | "language": "python", 233 | "name": "python3" 234 | }, 235 | "language_info": { 236 | "codemirror_mode": { 237 | "name": "ipython", 238 | "version": 3 239 | }, 240 | "file_extension": ".py", 241 | "mimetype": "text/x-python", 242 | "name": "python", 243 | "nbconvert_exporter": "python", 244 | "pygments_lexer": "ipython3", 245 | "version": "3.6.2" 246 | } 247 | }, 248 | "nbformat": 4, 249 | "nbformat_minor": 1 250 | } 251 | -------------------------------------------------------------------------------- /notebook/auxilliary_plots.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib 3 | import matplotlib.pyplot as plt 4 | import scipy.optimize as scop 5 | import scipy.interpolate as scip 6 | from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes 7 | from mpl_toolkits.axes_grid1.inset_locator import mark_inset 8 | 9 | 10 | matplotlib.rcParams['font.size'] = 14 11 | matplotlib.rcParams['font.family'] = 'sans-serif' 12 | matplotlib.rcParams['font.sans-serif'] = ['Arial'] 13 | 14 | 15 | def picard_convergence_pattern(fct, x, max_labels=6, tol=1.0E-4, flabel=''): 16 | fig, ax = plt.subplots(figsize=(8,6)) 17 | 18 | # solve 19 | fp_p = [] 20 | h_p = [] 21 | while 1: 22 | print('x_{0:0>2d} = {1:7.5f}'.format(len(h_p), x)) 23 | h_p.append(x) 24 | x = fct(x) 25 | fp_p.append(x) 26 | if abs(h_p[-1]-fp_p[-1]) < tol: 27 | break 28 | 29 | # the iteration results 30 | ax.scatter(h_p, fp_p, marker='x', color='blue', s=30) 31 | 32 | # the convergence pattern 33 | x_pattern = [h_p[0]] 34 | fp_pattern = [fp_p[0]] 35 | for i in range(1,len(h_p)): 36 | x_pattern.append(h_p[i]) 37 | 
fp_pattern.append(fp_p[i-1]) 38 | x_pattern.append(h_p[i]) 39 | fp_pattern.append(fp_p[i]) 40 | ax.plot(x_pattern, fp_pattern, color='green', ls='--') 41 | 42 | # the function 43 | idx_sort = np.argsort(h_p) 44 | ax.plot(np.array(h_p)[idx_sort], np.array(fp_p)[idx_sort], color='red', alpha=0.5) 45 | 46 | # initial guess 47 | dxt = (np.max(x_pattern)-np.min(x_pattern))/35. 48 | dyt = (np.max(fp_pattern)-np.min(fp_pattern))/35. 49 | ax.text(h_p[0]+dxt, fp_p[0]+dyt, '$x_0$') 50 | for i in range(1, min(max_labels+1, len(h_p))): 51 | label = ''.join(['$x_', str(i), '$']) 52 | ax.text(h_p[i]+dxt, fp_p[i]+dyt, label) 53 | 54 | ax.set_xlabel('$x$', fontsize=16) 55 | if not flabel: 56 | fl = '$f(x)$' 57 | else: 58 | fl = flabel 59 | ax.set_ylabel(fl, fontsize=16) 60 | plt.title('Successive Approximations', fontsize=20) 61 | plt.show() 62 | 63 | 64 | def root_bracketing_pattern(f, a, b, dx, xbounds=(-0.1,1.4), ybounds=(-5,6), flabel=''): 65 | x = np.linspace(a, b, int((b-a)/dx)+1) 66 | y = f(x) 67 | fig, ax = plt.subplots(figsize=(14,6)) 68 | ax.plot(x, y, marker='x', color='r') 69 | switched = False 70 | for i, xt in enumerate(x): 71 | ft = str('$f_' + str(i) + '=$' + '${:4.2f}$'.format(y[i])) 72 | ax.text(xt, y[i], ft) 73 | if i > 0: 74 | if not switched and np.sign(y[i]) != np.sign(y[i-1]): 75 | pass 76 | ax.plot([x[i],x[i-1]], [y[i],y[i-1]], color='b') 77 | ax.set_xlabel('$x$', fontsize=16) 78 | if not flabel: 79 | fl = '$f(x)$' 80 | else: 81 | fl = flabel 82 | ax.set_ylabel(fl, fontsize=16) 83 | ax.plot([a,b],[0.,0.],color='gray',ls='-.',alpha=0.75) 84 | ax.set_xlim(a,b) 85 | ax.set_ylim(-4,1) 86 | plt.title('Bracketing', fontsize=20) 87 | plt.show() 88 | 89 | 90 | def bisection_pattern(f, x1, x2, tol=1.0e-5, inset=True, ixmin=0.4, ixmax=0.7, iymin=-0.25, iymax=0.2, zoom=5, loc0=4, loc1=2, loc2=1, flabel=''): 91 | fig, ax = plt.subplots(figsize=(10,6)) 92 | plt.title('Bisection', fontsize=20) 93 | x = np.linspace(x1, x2, 100) 94 | y = f(x) 95 | ax.plot(x, y, color='r') 
96 | f1 = f(x1) 97 | f2 = f(x2) 98 | x1s = x1 99 | x2s = x2 100 | ax.scatter( [x1,x2], [f1,f2], marker='x', s=50) 101 | virgin = True 102 | while abs(x1-x2) > tol: 103 | x3 = 0.5*(x1 + x2) 104 | f3 = f(x3) 105 | ax.scatter( [x3], [f3], marker='x', s=50) 106 | if f2*f3 < 0.0: 107 | x1 = x3 108 | f1 = f3 109 | else: 110 | x2 = x3 111 | f2 = f3 112 | 113 | # zero line 114 | xlim = ax.get_xlim() 115 | ax.plot([xlim[0],xlim[1]],[0.,0.],color='gray',ls='-.',alpha=0.75) 116 | ax.set_xlim(xlim) 117 | 118 | # zoomed inset 119 | if inset: 120 | x1, x2 = x1s, x2s 121 | axins = zoomed_inset_axes(ax, zoom, loc=loc0) 122 | x = np.linspace(x1, x2, 100) 123 | y = f(x) 124 | axins.plot(x, y, color='r') 125 | f1 = f(x1) 126 | f2 = f(x2) 127 | axins.scatter( [x1,x2], [f1,f2], marker='x', s=50) 128 | virgin = True 129 | while abs(x1-x2) > tol: 130 | x3 = 0.5*(x1 + x2) 131 | f3 = f(x3) 132 | axins.scatter( [x3], [f3], marker='x', s=50) 133 | if f2*f3 < 0.0: 134 | x1 = x3 135 | f1 = f3 136 | else: 137 | x2 = x3 138 | f2 = f3 139 | axins.plot([ixmin,ixmax],[0.,0.],color='gray',ls='-.',alpha=0.75) 140 | axins.set_xlim(ixmin, ixmax) 141 | axins.set_ylim(iymin, iymax) 142 | axins.get_xaxis().set_visible(False) 143 | axins.get_yaxis().set_visible(False) 144 | mark_inset(ax, axins, loc1=loc1, loc2=loc2, fc="none", ec="0.5") 145 | 146 | xf = (x1 + x2)/2.0 147 | ax.set_xlabel('$x$', fontsize=16) 148 | if not flabel: 149 | fl = '$f(x)$' 150 | else: 151 | fl = flabel 152 | ax.set_ylabel(fl, fontsize=16) 153 | plt.show() 154 | 155 | 156 | def newton_convergence_pattern(fn, dx, x_0, tol, max_labels0=1, max_labels1=2, inset=True, ixmin=3.0, ixmax=3.2, iymin=-0.1, iymax=0.1, zoom=8, loc0=1, loc1=3, loc2=2, maxiter=100, resfct=100, flabel=''): 157 | fig, ax = plt.subplots(figsize=(10,6)) 158 | plt.title('Newton', fontsize=20) 159 | x_n = [x_0] 160 | y_n = [fn(x_0)] 161 | fx_n = [x_0] 162 | fy_n = [fn(x_0)] 163 | 164 | # solving 165 | i = 0 166 | while 1: 167 | dfdx = (fn(x_n[-1]+dx) - fn(x_n[-1])) / 
dx 168 | a = fn(x_n[-1])-(dfdx*x_n[-1]) 169 | x_zero = -a/dfdx 170 | x_n.append(x_zero) 171 | y_n.append(0.) 172 | fx_n.append(x_zero) 173 | fy_n.append(fn(x_zero)) 174 | if abs(x_n[-1]-x_n[-2]) < tol: 175 | break 176 | x_n.append(x_zero) 177 | y_n.append(fn(x_zero)) 178 | i = i+1 179 | if i >= maxiter: 180 | break 181 | 182 | # the iteration results 183 | ax.scatter(fx_n, fy_n, marker='x', color='blue', s=30) 184 | 185 | # the convergence pattern 186 | ax.plot(x_n, y_n, color='green', ls='--') 187 | 188 | # the function 189 | x = np.linspace( np.min(x_n), np.max(x_n), resfct) 190 | f = fn(x) 191 | ax.plot(x, f, color='red', alpha=0.5) 192 | 193 | # initial guess 194 | dxt = (np.max(x_n)-np.min(x_n))/35. 195 | dyt = (np.max(y_n)-np.min(y_n))/35. 196 | ax.text(fx_n[0]+dxt, fy_n[0]+dyt, '$x_0$') 197 | for i in range(1, min(max_labels0+1, len(fx_n))): 198 | label = ''.join(['$x_', str(i), '$']) 199 | ax.text(fx_n[i]+dxt, fy_n[i]+dyt, label) 200 | 201 | # zero line 202 | xlim = ax.get_xlim() 203 | ax.plot([xlim[0],xlim[1]],[0.,0.],color='gray',ls='-.',alpha=0.75) 204 | ax.set_xlim(xlim) 205 | 206 | # zoomed inset 207 | if inset: 208 | axins = zoomed_inset_axes(ax, zoom, loc=loc0) 209 | axins.scatter(fx_n, fy_n, marker='x', color='blue', s=30) 210 | axins.plot(x_n, y_n, color='green', ls='--') 211 | axins.plot(x, f, color='red', alpha=0.5) 212 | for i in range(max_labels0+1, min(max_labels0+1+max_labels1,len(fx_n))): 213 | label = ''.join(['$x_', str(i), '$']) 214 | axins.text(fx_n[i]+dxt/8., fy_n[i]+dyt/8., label) 215 | axins.plot([ixmin,ixmax],[0.,0.],color='gray',ls='-.',alpha=0.75) 216 | axins.set_xlim(ixmin, ixmax) 217 | axins.set_ylim(iymin, iymax) 218 | axins.get_xaxis().set_visible(False) 219 | axins.get_yaxis().set_visible(False) 220 | mark_inset(ax, axins, loc1=loc1, loc2=loc2, fc="none", ec="0.5") 221 | 222 | ax.set_xlabel('$x$', fontsize=16) 223 | if not flabel: 224 | fl = '$f(x)$' 225 | else: 226 | fl = flabel 227 | ax.set_ylabel(fl, fontsize=16) 228 | 
plt.show() 229 | 230 | 231 | def secant_convergence_pattern(fn, x_0, x_1, tol, max_labels0=1, max_labels1=2, inset=True, ixmin=3.0, ixmax=3.2, iymin=-0.1, iymax=0.1, zoom=8, loc0=1, loc1=3, loc2=2, maxiter=100, resfct=100, flabel=''): 232 | fig, ax = plt.subplots(figsize=(10,6)) 233 | plt.title('Secant', fontsize=20) 234 | x_n = [x_0, x_1] # conv pattern 235 | y_n = [fn(x_0), fn(x_1)] # conv pattern 236 | fx_n = [x_0, x_1] # x_n 237 | fy_n = [fn(x_0), fn(x_1)]# f(x_n) 238 | 239 | # solving 240 | i = 0 241 | while 1: 242 | b = (fn(fx_n[-1]) - fn(fx_n[-2])) / (fx_n[-1]-fx_n[-2]) 243 | a = fn(fx_n[-1])-b*fx_n[-1] 244 | x_zero = -a/b 245 | x_n.append(x_zero) 246 | y_n.append(0.) 247 | 248 | fx_n.append(x_zero) 249 | fy_n.append(fn(x_zero)) 250 | if abs(x_n[-1]-x_n[-2]) < tol: 251 | break 252 | x_n.append(x_zero) 253 | y_n.append(fn(x_zero)) 254 | i = i+1 255 | if i >= maxiter: 256 | break 257 | 258 | # the iteration results 259 | ax.scatter(fx_n, fy_n, marker='x', color='blue', s=30) 260 | 261 | # the convergence pattern 262 | ax.plot(x_n, y_n, color='green', ls='--') 263 | 264 | # the function 265 | x = np.linspace( np.min(x_n), np.max(x_n), resfct) 266 | f = fn(x) 267 | ax.plot(x, f, color='red', alpha=0.5) 268 | 269 | # initial guess 270 | dxt = (np.max(x_n)-np.min(x_n))/35. 271 | dyt = (np.max(y_n)-np.min(y_n))/35. 
272 | ax.text(fx_n[0]+dxt, fy_n[0]+dyt, '$x_0$') 273 | for i in range(1, min(max_labels0+1, len(fx_n))): 274 | label = ''.join(['$x_', str(i), '$']) 275 | ax.text(fx_n[i]+dxt, fy_n[i]+dyt, label) 276 | 277 | # zero line 278 | xlim = ax.get_xlim() 279 | ax.plot([xlim[0],xlim[1]],[0.,0.],color='gray',ls='-.',alpha=0.75) 280 | ax.set_xlim(xlim) 281 | 282 | # zoomed inset 283 | if inset: 284 | axins = zoomed_inset_axes(ax, zoom, loc=loc0) 285 | axins.scatter(fx_n, fy_n, marker='x', color='blue', s=30) 286 | axins.plot(x_n, y_n, color='green', ls='--') 287 | axins.plot(x, f, color='red', alpha=0.5) 288 | for i in range(max_labels0+1, min(max_labels0+1+max_labels1,len(fx_n))): 289 | label = ''.join(['$x_', str(i), '$']) 290 | axins.text(fx_n[i]+dxt/8., fy_n[i]+dyt/8., label) 291 | axins.plot([ixmin,ixmax],[0.,0.],color='gray',ls='-.',alpha=0.75) 292 | axins.set_xlim(ixmin, ixmax) 293 | axins.set_ylim(iymin, iymax) 294 | axins.get_xaxis().set_visible(False) 295 | axins.get_yaxis().set_visible(False) 296 | mark_inset(ax, axins, loc1=loc1, loc2=loc2, fc="none", ec="0.5") 297 | 298 | ax.set_xlabel('$x$', fontsize=16) 299 | if not flabel: 300 | fl = '$f(x)$' 301 | else: 302 | fl = flabel 303 | ax.set_ylabel(fl, fontsize=16) 304 | plt.show() 305 | -------------------------------------------------------------------------------- /notebook/data/Length-Width.dat: -------------------------------------------------------------------------------- 1 | 2.69142357103 0.303460755321 2 | 1.00657132848 0.401874533619 3 | 0.692383317399 0.505228773903 4 | 0.4865575686 0.5 5 | 0.294389750668 0.59055338197 6 | 0.196108459755 0.5 7 | 1.20718029124 0.5 8 | 1.00657132848 0.697506593912 9 | 1.68150785858 0.59055338197 10 | 3.99728956383 0.596729122194 11 | 7.6731968653 0.895327764846 12 | 2.90055859801 1.00387779414 13 | 2.47081176508 1.00387779414 14 | 2.69142357103 1.18568685283 15 | 1.59398748404 0.798515328441 16 | 1.19434373985 0.858835056352 17 | 1.18164368593 1.01437589417 18 | 
0.848320132225 1.01437589417 19 | 0.635629733271 1.01437589417 20 | 0.804166133583 1.28858948353 21 | 0.894898475184 1.4906222335 22 | 0.995867952457 1.3715859142 23 | 1.18164368593 1.50621048677 24 | 1.31496601087 1.47519530804 25 | 1.34338385663 1.38592933947 26 | 1.00657132848 1.76058400246 27 | 1.73631012572 1.47519530804 28 | 1.71784707235 1.76058400246 29 | 2.36738267561 1.4906222335 30 | 2.99509112509 1.47519530804 31 | 3.82996155214 1.17341580099 32 | 5.45012893873 1.50621048677 33 | 3.33302083854 2.03661887013 34 | 4.21676699592 2.40546432308 35 | 0.795615033162 2.40546432308 36 | 1.79289846153 2.53389889102 37 | 2.29266218882 2.53389889102 38 | 2.47081176508 2.53389889102 39 | 2.86971550874 2.6691909451 40 | 2.60647554743 2.6691909451 41 | 2.69142357103 2.99280511933 42 | 2.26828316589 3.02410252194 43 | 1.87122878695 3.02410252194 44 | 1.91166807634 3.60914008668 45 | 2.29266218882 3.72355656923 46 | 2.47081176508 3.46203496801 47 | 1.99518735293 4.35240239373 48 | 2.24416337729 4.30735799161 49 | 2.52420869478 4.30735799161 50 | 2.55133835062 5.0874496586 51 | 2.24416337729 5.03479801763 52 | 1.73631012572 5.0874496586 53 | 1.99518735293 5.9466339937 54 | 2.47081176508 6.00882120298 55 | 2.60647554743 6.73733398146 56 | 2.96324282421 6.66760716082 57 | 3.29757916211 5.9466339937 58 | 3.40505104413 5.14065190662 59 | 3.78923565674 4.08903291772 60 | 4.1719280049 4.0467142104 61 | 3.44164780538 3.60914008668 62 | 3.36884343453 2.99280511933 63 | 3.78923565674 3.05572721863 64 | 4.95018679233 2.96183162353 65 | 5.93675556276 2.99280511933 66 | 5.93675556276 4.0467142104 67 | 7.04423684722 5.03479801763 68 | 7.43101168291 5.41512590327 69 | 9.91752978731 4.08903291772 70 | 7.6731968653 4.9311238222 71 | 0.804166133583 4.0467142104 72 | 1.51102243519 7.55417204879 73 | 2.0166312004 8.92228370279 74 | 2.0166312004 9.90049141055 75 | 2.03830552176 11.9393876323 76 | 3.33302083854 8.64812207839 77 | 3.33302083854 7.71299440604 78 | 5.11151904995 7.47599151017 
79 | 5.45012893873 7.55417204879 80 | 6.74936250132 6.80778997366 81 | 7.43101168291 7.79365341377 82 | 8.26943680714 6.80778997366 83 | 12.6820040497 6.80778997366 84 | 14.8877775338 5.03479801763 85 | 5.87362707367 9.20513676281 86 | 7.04423684722 12.9755755544 87 | 3.99728956383 11.9393876323 88 | 8.35831494747 16.6556032017 89 | 32.1448489335 24.7313095175 90 | 5.45012893873 33.4403804044 91 | 4.4482951795 33.4403804044 92 | 6.46683170402 39.4966594843 93 | 2.22030006647 33.7900847811 94 | 11.1551214021 42.0405897294 95 | 13.962818005 60.5064021621 96 | 25.4079609926 49.6544248894 97 | 34.2742675302 67.1401106313 98 | 14.4178822372 99.6939490548 99 | 4.95018679233 85.2899215961 100 | 24.6060225339 217.532535154 101 | 31.1302775315 188.049078315 102 | 49.2973238516 188.049078315 103 | 1.75497161605 9.79802806366 104 | 3.44164780538 11.8158230974 105 | 10.0241212789 19.8777669563 106 | 121.009216449 14.8546208251 107 | 8.53894722468 60.5064021621 108 | 22.1112568103 69.2685775546 109 | 22.1112568103 111.78089812 110 | 1.18164368593 151.14427129 111 | 13.6674495543 143.483291111 112 | 13.962818005 160.879284026 113 | 55.4490527537 125.33327552 114 | 54.8594352623 198.089552369 115 | 69.4053434512 190.015610541 116 | 210.981312641 143.483291111 117 | -------------------------------------------------------------------------------- /notebook/data/shot.txt: -------------------------------------------------------------------------------- 1 | 0.000000 2 | 0.000000 3 | 0.000000 4 | 0.000000 5 | 0.000000 6 | 0.000000 7 | 0.000000 8 | 0.000000 9 | 0.000000 10 | 0.000000 11 | 0.000000 12 | 0.000000 13 | 0.000000 14 | 0.000000 15 | 0.000000 16 | 0.000000 17 | 0.000000 18 | 0.000000 19 | 0.000000 20 | 0.000000 21 | 0.000000 22 | 0.000000 23 | 0.000000 24 | 0.000000 25 | 0.000000 26 | 0.000000 27 | 0.000000 28 | 0.000000 29 | 0.000000 30 | 0.000000 31 | 0.000000 32 | 0.000000 33 | 0.000000 34 | 0.000000 35 | 0.000000 36 | 0.000000 37 | 0.000000 38 | 0.000000 39 | 0.000000 40 | 
0.000000 41 | 0.000000 42 | 0.000000 43 | 0.000000 44 | 0.000000 45 | 0.000000 46 | 0.000000 47 | 0.000000 48 | 0.000000 49 | 0.000000 50 | 0.000000 51 | 0.000000 52 | 0.000000 53 | 0.000000 54 | 0.000000 55 | 0.000000 56 | 0.000000 57 | 0.000000 58 | 0.000000 59 | 0.000000 60 | 0.000000 61 | 0.000000 62 | 0.000000 63 | 0.000000 64 | 0.000000 65 | 0.000000 66 | 0.000000 67 | -0.000000 68 | -0.000000 69 | 0.000000 70 | 0.000000 71 | -0.000000 72 | -0.000000 73 | 0.000000 74 | 0.000000 75 | 0.000000 76 | -0.000000 77 | -0.000000 78 | 0.000000 79 | 0.000000 80 | -0.000000 81 | -0.000000 82 | -0.000000 83 | 0.000000 84 | 0.000000 85 | -0.000000 86 | -0.000000 87 | -0.000000 88 | 0.000000 89 | 0.000000 90 | -0.000000 91 | -0.000000 92 | -0.000000 93 | 0.000000 94 | 0.000000 95 | -0.000000 96 | -0.000000 97 | -0.000000 98 | 0.000000 99 | 0.000000 100 | 0.000000 101 | -0.000000 102 | -0.000000 103 | -0.000000 104 | 0.000000 105 | 0.000000 106 | -0.000000 107 | -0.000000 108 | -0.000000 109 | 0.000000 110 | 0.000000 111 | 0.000000 112 | 0.000000 113 | -0.000000 114 | -0.000000 115 | -0.000000 116 | 0.000000 117 | 0.000000 118 | 0.000000 119 | -0.000000 120 | -0.000000 121 | -0.000000 122 | -0.000000 123 | 0.000000 124 | 0.000000 125 | 0.000000 126 | -0.000000 127 | -0.000002 128 | -0.000007 129 | -0.000011 130 | -0.000006 131 | 0.000028 132 | 0.000114 133 | 0.000274 134 | 0.000486 135 | 0.000602 136 | 0.000155 137 | -0.001967 138 | -0.007898 139 | -0.020893 140 | -0.044616 141 | -0.081151 142 | -0.127841 143 | -0.174200 144 | -0.201148 145 | -0.184999 146 | -0.107042 147 | 0.033723 148 | 0.210889 149 | 0.371727 150 | 0.453517 151 | 0.411432 152 | 0.245086 153 | 0.007640 154 | -0.211709 155 | -0.326387 156 | -0.293291 157 | -0.133844 158 | 0.076645 159 | 0.245917 160 | 0.308257 161 | 0.252047 162 | 0.119071 163 | -0.020905 164 | -0.105026 165 | -0.104518 166 | -0.033102 167 | 0.064563 168 | 0.136442 169 | 0.146038 170 | 0.084413 171 | -0.031295 172 | -0.168600 173 | 
-0.292850 174 | -0.377234 175 | -0.407153 176 | -0.380042 177 | -0.303098 178 | -0.190487 179 | -0.060536 180 | 0.066849 181 | 0.172700 182 | 0.241798 183 | 0.265426 184 | 0.243225 185 | 0.183549 186 | 0.101808 187 | 0.016864 188 | -0.053555 189 | -0.097158 190 | -0.109910 191 | -0.096083 192 | -0.065411 193 | -0.028806 194 | 0.005756 195 | 0.035652 196 | 0.062941 197 | 0.091144 198 | 0.121581 199 | 0.150887 200 | 0.171021 201 | 0.172198 202 | 0.147154 203 | 0.094689 204 | 0.021455 205 | -0.059066 206 | -0.130407 207 | -0.177953 208 | -0.193191 209 | -0.176264 210 | -0.135303 211 | -0.082927 212 | -0.032105 213 | 0.007530 214 | 0.032055 215 | 0.042949 216 | 0.044887 217 | 0.043217 218 | 0.041498 219 | 0.040163 220 | 0.037203 221 | 0.029866 222 | 0.016460 223 | -0.002368 224 | -0.023631 225 | -0.042951 226 | -0.056068 227 | -0.060358 228 | -0.055284 229 | -0.042122 230 | -0.023342 231 | -0.001236 232 | 0.022853 233 | 0.047942 234 | 0.072705 235 | 0.094807 236 | 0.110290 237 | 0.114475 238 | 0.104106 239 | 0.078820 240 | 0.041925 241 | 0.000379 242 | -0.037121 243 | -0.062785 244 | -0.071757 245 | -0.063716 246 | -0.043387 247 | -0.019046 248 | 0.000215 249 | 0.007314 250 | -0.000442 251 | -0.020345 252 | -0.045531 253 | -0.067669 254 | -0.079531 255 | -0.077151 256 | -0.060909 257 | -0.034757 258 | -0.004778 259 | 0.022324 260 | 0.041043 261 | 0.048404 262 | 0.044516 263 | 0.032326 264 | 0.016608 265 | 0.002324 266 | -0.007056 267 | -0.010264 268 | -0.007961 269 | -0.001968 270 | 0.005651 271 | 0.013344 272 | 0.019916 273 | 0.023965 274 | 0.024058 275 | 0.018771 276 | 0.007110 277 | -0.010302 278 | -0.030898 279 | -0.050374 280 | -0.063177 281 | -0.063940 282 | -0.049264 283 | -0.018946 284 | 0.023335 285 | 0.070287 286 | 0.112571 287 | 0.141011 288 | 0.148995 289 | 0.133985 290 | 0.098226 291 | 0.048278 292 | -0.007018 293 | -0.058465 294 | -0.098384 295 | -0.122387 296 | -0.129763 297 | -0.122836 298 | -0.106135 299 | -0.084824 300 | -0.062873 301 | -0.042503 302 
| -0.023866 303 | -0.005334 304 | 0.014944 305 | 0.037850 306 | 0.062602 307 | 0.086346 308 | 0.104719 309 | 0.113200 310 | 0.108138 311 | 0.087765 312 | 0.053040 313 | 0.007764 314 | -0.041685 315 | -0.087481 316 | -0.121827 317 | -0.138593 318 | -0.134729 319 | -0.110445 320 | -0.068915 321 | -0.016027 322 | 0.040349 323 | 0.091108 324 | 0.126972 325 | 0.141048 326 | 0.131090 327 | 0.100203 328 | 0.056230 329 | 0.009091 330 | -0.032498 331 | -0.062815 332 | -0.079504 333 | -0.083230 334 | -0.076475 335 | -0.062781 336 | -0.046373 337 | -0.030863 338 | -0.018131 339 | -0.007814 340 | 0.002508 341 | 0.015760 342 | 0.033222 343 | 0.053442 344 | 0.072466 345 | 0.085215 346 | 0.087413 347 | 0.077033 348 | 0.054940 349 | 0.024488 350 | -0.009284 351 | -0.040767 352 | -0.065226 353 | -0.079844 354 | -0.084114 355 | -0.079667 356 | -0.069227 357 | -0.055395 358 | -0.040055 359 | -0.024020 360 | -0.007341 361 | 0.010012 362 | 0.027901 363 | 0.045730 364 | 0.061994 365 | 0.074364 366 | 0.080053 367 | 0.076488 368 | 0.062596 369 | 0.039600 370 | 0.010854 371 | -0.019066 372 | -0.045864 373 | -0.066628 374 | -0.079866 375 | -0.084967 376 | -0.081585 377 | -0.069503 378 | -0.049327 379 | -0.023196 380 | 0.005086 381 | 0.030806 382 | 0.049759 383 | 0.059464 384 | 0.059660 385 | 0.052005 386 | 0.039250 387 | 0.024419 388 | 0.010300 389 | -0.000946 390 | -0.007960 391 | -0.010332 392 | -0.008722 393 | -0.004628 394 | 0.000029 395 | 0.003390 396 | 0.004338 397 | 0.002849 398 | -0.000138 399 | -0.003044 400 | -0.004479 401 | -0.003940 402 | -0.001941 403 | 0.000319 404 | 0.001632 405 | 0.001503 406 | 0.000199 407 | -0.001787 408 | -0.004152 409 | -0.007146 410 | -0.011232 411 | -0.016314 412 | -0.021373 413 | -0.024578 414 | -0.023914 415 | -0.018227 416 | -0.007810 417 | 0.005602 418 | 0.019264 419 | 0.030235 420 | 0.036059 421 | 0.035334 422 | 0.028044 423 | 0.015779 424 | 0.001580 425 | -0.010810 426 | -0.017983 427 | -0.018045 428 | -0.011396 429 | -0.000499 430 | 0.010974 431 
| 0.019208 432 | 0.021538 433 | 0.016980 434 | 0.006396 435 | -0.007622 436 | -0.021569 437 | -0.031888 438 | -0.035953 439 | -0.032907 440 | -0.023724 441 | -0.010697 442 | 0.003415 443 | 0.016415 444 | 0.026864 445 | 0.033832 446 | 0.036376 447 | 0.033055 448 | 0.022436 449 | 0.004034 450 | -0.020832 451 | -0.048515 452 | -0.073686 453 | -0.090687 454 | -0.094741 455 | -0.083204 456 | -0.056079 457 | -0.015968 458 | 0.032016 459 | 0.080978 460 | 0.123223 461 | 0.151424 462 | 0.160073 463 | 0.146698 464 | 0.112565 465 | 0.062868 466 | 0.005933 467 | -0.048467 468 | -0.091441 469 | -0.117208 470 | -0.124259 471 | -0.114976 472 | -0.093907 473 | -0.065808 474 | -0.034530 475 | -0.003108 476 | 0.025640 477 | 0.048582 478 | 0.062842 479 | 0.066831 480 | 0.060969 481 | 0.047897 482 | 0.031609 483 | 0.016283 484 | 0.005174 485 | -0.000041 486 | 0.000422 487 | 0.004573 488 | 0.009026 489 | 0.009605 490 | 0.002358 491 | -0.015140 492 | -0.042546 493 | -0.076203 494 | -0.109365 495 | -0.133823 496 | -0.142130 497 | -0.129490 498 | -0.095350 499 | -0.043798 500 | 0.017226 501 | 0.077235 502 | 0.125577 503 | 0.154175 504 | 0.159405 505 | 0.142903 506 | 0.110637 507 | 0.070528 508 | 0.030088 509 | -0.004991 510 | -0.031352 511 | -0.047626 512 | -0.053948 513 | -0.051734 514 | -0.043512 515 | -0.032383 516 | -0.021335 517 | -0.012992 518 | -0.009218 519 | -0.011068 520 | -0.018809 521 | -0.031034 522 | -0.044185 523 | -0.052924 524 | -0.050973 525 | -0.033449 526 | 0.000688 527 | 0.047648 528 | 0.098974 529 | 0.143583 530 | 0.171104 531 | 0.174616 532 | 0.152368 533 | 0.108209 534 | 0.050273 535 | -0.011146 536 | -0.066119 537 | -0.107619 538 | -0.133086 539 | -0.144469 540 | -0.146471 541 | -0.143925 542 | -0.139480 543 | -0.132521 544 | -0.120058 545 | -0.098525 546 | -0.065454 547 | -0.021077 548 | 0.031018 549 | 0.084144 550 | 0.129494 551 | 0.158595 552 | 0.166414 553 | 0.153244 554 | 0.124745 555 | 0.089782 556 | 0.056665 557 | 0.030274 558 | 0.011094 559 | -0.004028 560 
| -0.019726 561 | -0.039707 562 | -0.065221 563 | -0.094344 564 | -0.122205 565 | -0.142586 566 | -0.149842 567 | -0.140699 568 | -0.115426 569 | -0.077640 570 | -0.033082 571 | 0.012157 572 | 0.053325 573 | 0.088034 574 | 0.116151 575 | 0.138605 576 | 0.155892 577 | 0.166889 578 | 0.168696 579 | 0.157765 580 | 0.131554 581 | 0.089782 582 | 0.035173 583 | -0.026512 584 | -0.087212 585 | -0.137623 586 | -0.169060 587 | -0.175903 588 | -0.157508 589 | -0.118726 590 | -0.068626 591 | -0.017612 592 | 0.025409 593 | 0.054560 594 | 0.067470 595 | 0.064834 596 | 0.049835 597 | 0.027820 598 | 0.005586 599 | -0.010207 600 | -0.015078 601 | -0.008433 602 | 0.005965 603 | 0.021362 604 | 0.030652 605 | 0.029071 606 | 0.015786 607 | -0.006095 608 | -0.031075 609 | -0.053409 610 | -0.068656 611 | -0.074438 612 | -0.070310 613 | -0.057114 614 | -0.036829 615 | -0.012647 616 | 0.011178 617 | 0.030016 618 | 0.040600 619 | 0.042820 620 | 0.040210 621 | 0.038567 622 | 0.043059 623 | 0.055211 624 | 0.071746 625 | 0.085929 626 | 0.090595 627 | 0.081340 628 | 0.058156 629 | 0.024959 630 | -0.012258 631 | -0.047549 632 | -0.076511 633 | -0.096738 634 | -0.107579 635 | -0.109498 636 | -0.103371 637 | -0.089945 638 | -0.069429 639 | -0.041718 640 | -0.007282 641 | 0.031834 642 | 0.071273 643 | 0.104820 644 | 0.126063 645 | 0.130494 646 | 0.117093 647 | 0.088508 648 | 0.050027 649 | 0.008170 650 | -0.030433 651 | -0.059739 652 | -0.075086 653 | -0.074522 654 | -0.060131 655 | -0.038338 656 | -0.018255 657 | -0.008135 658 | -0.011934 659 | -0.027645 660 | -0.048308 661 | -0.065411 662 | -0.072332 663 | -0.066254 664 | -0.048245 665 | -0.021849 666 | 0.008477 667 | 0.038229 668 | 0.063127 669 | 0.079474 670 | 0.084602 671 | 0.077659 672 | 0.060426 673 | 0.036980 674 | 0.012622 675 | -0.007592 676 | -0.020469 677 | -0.025311 678 | -0.023369 679 | -0.016937 680 | -0.008273 681 | 0.000977 682 | 0.009501 683 | 0.016043 684 | 0.019415 685 | 0.018736 686 | 0.014019 687 | 0.006470 688 | -0.001980 
689 | -0.009431 690 | -0.014745 691 | -0.017921 692 | -0.019835 693 | -0.021383 694 | -0.022811 695 | -0.023493 696 | -0.022072 697 | -0.017113 698 | -0.007823 699 | 0.005659 700 | 0.022082 701 | 0.039069 702 | 0.053523 703 | 0.061993 704 | 0.061481 705 | 0.050569 706 | 0.030178 707 | 0.003648 708 | -0.023931 709 | -0.047140 710 | -0.061781 711 | -0.065715 712 | -0.059176 713 | -0.044452 714 | -0.025169 715 | -0.005504 716 | 0.010511 717 | 0.019913 718 | 0.021534 719 | 0.016148 720 | 0.006325 721 | -0.004620 722 | -0.013892 723 | -0.019933 724 | -0.022615 725 | -0.022649 726 | -0.020641 727 | -0.016686 728 | -0.010669 729 | -0.002899 730 | 0.005296 731 | 0.011686 732 | 0.014154 733 | 0.011799 734 | 0.005699 735 | -0.001329 736 | -0.005824 737 | -0.005028 738 | 0.001936 739 | 0.013880 740 | 0.027906 741 | 0.040473 742 | 0.048628 743 | 0.050675 744 | 0.046460 745 | 0.037094 746 | 0.024304 747 | 0.009977 748 | -0.004015 749 | -0.016074 750 | -0.024980 751 | -0.030117 752 | -0.031703 753 | -0.030554 754 | -0.027636 755 | -0.023564 756 | -0.018391 757 | -0.011895 758 | -0.004030 759 | 0.004704 760 | 0.013342 761 | 0.020750 762 | 0.025838 763 | 0.027842 764 | 0.026298 765 | 0.021157 766 | 0.013159 767 | 0.003812 768 | -0.004980 769 | -0.011474 770 | -0.014907 771 | -0.015819 772 | -0.015519 773 | -0.015442 774 | -0.016346 775 | -0.017936 776 | -0.019264 777 | -0.019293 778 | -0.017459 779 | -0.014086 780 | -0.010243 781 | -0.007283 782 | -0.006433 783 | -0.008197 784 | -0.012009 785 | -0.016301 786 | -0.018834 787 | -0.017408 788 | -0.010801 789 | 0.000734 790 | 0.015520 791 | 0.030948 792 | 0.044368 793 | 0.053593 794 | 0.057106 795 | 0.054127 796 | 0.044757 797 | 0.030294 798 | 0.013244 799 | -0.003116 800 | -0.015773 801 | -0.023001 802 | -0.024726 803 | -0.022257 804 | -0.017601 805 | -0.012807 806 | -0.009608 807 | -0.009168 808 | -0.011691 809 | -0.016203 810 | -0.020858 811 | -0.023603 812 | -0.023219 813 | -0.019784 814 | -0.014208 815 | -0.007503 816 | -0.000015 
817 | 0.008539 818 | 0.018287 819 | 0.028513 820 | 0.037450 821 | 0.042733 822 | 0.042391 823 | 0.035626 824 | 0.022966 825 | 0.006066 826 | -0.012830 827 | -0.031270 828 | -0.046638 829 | -0.056361 830 | -0.058298 831 | -0.051458 832 | -0.036785 833 | -0.017195 834 | 0.003145 835 | 0.020172 836 | 0.031179 837 | 0.035258 838 | 0.033062 839 | 0.026187 840 | 0.016695 841 | 0.006890 842 | -0.000924 843 | -0.005004 844 | -0.004774 845 | -0.001102 846 | 0.004091 847 | 0.008736 848 | 0.011354 849 | 0.011309 850 | 0.008532 851 | 0.003160 852 | -0.004390 853 | -0.013058 854 | -0.020810 855 | -0.024929 856 | -0.023037 857 | -0.014265 858 | 0.000032 859 | 0.016517 860 | 0.031049 861 | 0.040125 862 | 0.041863 863 | 0.036309 864 | 0.025030 865 | 0.010389 866 | -0.004915 867 | -0.018184 868 | -0.027128 869 | -0.030245 870 | -0.027418 871 | -0.020141 872 | -0.011047 873 | -0.003026 874 | 0.001855 875 | 0.002929 876 | 0.000803 877 | -0.003233 878 | -0.007753 879 | -0.011473 880 | -0.013291 881 | -0.012324 882 | -0.008206 883 | -0.001384 884 | 0.006765 885 | 0.014363 886 | 0.019706 887 | 0.021799 888 | 0.020622 889 | 0.016712 890 | 0.010736 891 | 0.003392 892 | -0.004556 893 | -0.011895 894 | -0.017018 895 | -0.018557 896 | -0.016166 897 | -0.010982 898 | -0.005351 899 | -0.001769 900 | -0.001665 901 | -0.004903 902 | -0.009921 903 | -0.014583 904 | -0.017102 905 | -0.016381 906 | -0.012122 907 | -0.004688 908 | 0.004873 909 | 0.014837 910 | 0.023040 911 | 0.027419 912 | 0.026891 913 | 0.021875 914 | 0.014150 915 | 0.006136 916 | -0.000120 917 | -0.003596 918 | -0.004234 919 | -0.002589 920 | 0.000556 921 | 0.004448 922 | 0.008229 923 | 0.010843 924 | 0.011354 925 | 0.009267 926 | 0.004785 927 | -0.001082 928 | -0.006927 929 | -0.011434 930 | -0.013631 931 | -0.013091 932 | -0.009926 933 | -0.004561 934 | 0.002162 935 | 0.008908 936 | 0.014100 937 | 0.016035 938 | 0.013407 939 | 0.005967 940 | -0.005282 941 | -0.018135 942 | -0.029589 943 | -0.036761 944 | -0.037676 945 | 
-0.031734 946 | -0.019985 947 | -0.004819 948 | 0.010713 949 | 0.023550 950 | 0.031310 951 | 0.032838 952 | 0.028293 953 | 0.018988 954 | 0.007105 955 | -0.004833 956 | -0.014562 957 | -0.020345 958 | -0.021330 959 | -0.017698 960 | -0.010347 961 | -0.000792 962 | 0.009121 963 | 0.017639 964 | 0.023179 965 | 0.024459 966 | 0.020830 967 | 0.012312 968 | -0.000236 969 | -0.014898 970 | -0.029017 971 | -0.039643 972 | -0.044136 973 | -0.041020 974 | -0.030554 975 | -0.014683 976 | 0.003423 977 | 0.020293 978 | 0.033239 979 | 0.040884 980 | 0.043084 981 | 0.040721 982 | 0.035122 983 | 0.027697 984 | 0.020015 985 | 0.013452 986 | 0.008771 987 | 0.005906 988 | 0.003795 989 | 0.000876 990 | -0.003954 991 | -0.010841 992 | -0.018858 993 | -0.026411 994 | -0.031994 995 | -0.034586 996 | -0.033712 997 | -0.029403 998 | -0.022188 999 | -0.013061 1000 | -0.003343 1001 | 0.000000 1002 | -------------------------------------------------------------------------------- /notebook/exam-2017.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 3.09 Numerical methods 1 exam - 14:00-16:00 5th May 2017" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "# Test instructions\n", 15 | "\n", 16 | "* This test contains **3** questions each of which should be answered.\n", 17 | "* Write your program in a Python cell just under each question.\n", 18 | "* You should write an explanation of your solution as comments in your code.\n", 19 | "* In each case your solution program must fulfil all of the instructions. Please check the instructions carefully and double check that your program fulfils all of the given instructions.\n", 20 | "* Save your work regularly.\n", 21 | "* At the end of the test you should email your IPython notebook document (i.e. 
this document) to [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman) at **and also** [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott) at \n", 22 |  " 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## Question 1\n", 30 | "\n", 31 | "Consider the matrix:\n", 32 | "\n", 33 | "\\begin{equation}\n", 34 | "\\bf{A} = \n", 35 | "\\begin{pmatrix}\n", 36 | "1 & 3 & −9 & 6 & 4\\\\\n", 37 | "2 &−1 & 6 & 7 & 1\\\\\n", 38 | "3 & 2 & −3 & 15 & 5\\\\\n", 39 | "8 &−1 & 1 & 4 & 2\\\\\n", 40 | "11 & 1 & −2 & 18 & 7\n", 41 | "\\end{pmatrix}.\n", 42 | "\\end{equation}\n", 43 | "\n", 44 | " 1. Write a Python Gaussian elimination function that takes as input a matrix and returns the upper triangular matrix. Demonstrate the correctness by applying it to the matrix A. [17 marks]\n", 45 | " 2. Determine whether $\bf{A}$ is invertible and explain why. [17 marks]" 46 | ] 47 | }, 48 | { 49 | "cell_type": "markdown", 50 | "metadata": {}, 51 | "source": [ 52 | "## Question 2\n", 53 | "\n", 54 | "Consider the expression\n", 55 | "\\begin{equation}\n", 56 | "f(x) = x \mathrm{sin}(\pi x)-\mathrm{e}^{-x}.\n", 57 | "\\end{equation}\n", 58 | "\n", 59 | " 1. Write a function, $f$, to evaluate this expression for a given $x$. Show that it works by plotting the function with respect to $100$ evenly spaced points between $x=-3$ and $x=3$. [8 marks]\n", 60 | " 2. Write another function, $g$, that uses central finite differences to numerically evaluate the derivative of an arbitrary given function with respect to $x$ (you may assume $dx\approx0.01$). Show that it works by plotting the derivative of the function defined above between $x=-3$ and $x=3$. [8 marks]\n", 61 | " 3. Write a function that implements Newton's method for root finding. It should take as input parameters a function, $f$, whose root is to be found, its derivative function, $g$, and an initial guess, $x0$. [9 marks]\n", 62 | " 4. 
Use your implementation of Newton's method to calculate all the roots of $f$ between $-3$ and $3$.[8 marks]" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "## Question 3\n", 70 | "\n", 71 | "The discrete function $y$ is defined as:\n", 72 | "\n", 73 | "y = [0.0109439010693,0.292339081461,0.925277149533,\n", 74 | " 2.01332091034,3.37454077451,4.68171817154,\n", 75 | " 5.64468727334,6.14851510939,6.28859790888,\n", 76 | " 6.2961542923,6.4,6.69882629738,7.11010256048,\n", 77 | " 7.42182227368,7.42089923772,7.03212055883,\n", 78 | " 6.39346348534,5.82192391325,5.67641305593,\n", 79 | " 6.17668765765,7.26466471676]\n", 80 | "\n", 81 | "for positions $x$:\n", 82 | "\n", 83 | "x = [-2.0,-1.8,-1.6,-1.4,-1.2,-1.0,-0.8,-0.6,-0.4,-0.2,\n", 84 | " 0.0,0.2,0.4,0.6,0.8,1.0,1.2,1.4,1.6,1.8,2.0]\n", 85 | "\n", 86 | " \n", 87 | " 1. Fit a polynomial to the data such that the polynomial passes through each point. Plot the polynomial fit with the original data.[11 marks]\n", 88 | " 2. Write a Python function that integrates a given function using Simpson's rule. The function should look like *simpson_rule(start_point, end_point, f, atol=1.0e-6)* where *start_point* and *end_point* are the bound of the integral, *f* is the function to be integrated and *atol* is the target error in the integral (you may approximate the error as the amount the integral changes when the number of segments is doubled).[11 marks]\n", 89 | " 3. 
Plot the value of the integral as a function of bin width.[11 marks]" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": { 96 | "collapsed": true 97 | }, 98 | "outputs": [], 99 | "source": [] 100 | } 101 | ], 102 | "metadata": { 103 | "kernelspec": { 104 | "display_name": "Python 3", 105 | "language": "python", 106 | "name": "python3" 107 | }, 108 | "language_info": { 109 | "codemirror_mode": { 110 | "name": "ipython", 111 | "version": 3 112 | }, 113 | "file_extension": ".py", 114 | "mimetype": "text/x-python", 115 | "name": "python", 116 | "nbconvert_exporter": "python", 117 | "pygments_lexer": "ipython3", 118 | "version": "3.6.2" 119 | } 120 | }, 121 | "nbformat": 4, 122 | "nbformat_minor": 1 123 | } 124 | -------------------------------------------------------------------------------- /notebook/exam_2016.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 3.09 Numerical methods 1 exam - 14:00-16:00 26 April 2016" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "# Test instructions\n", 15 | "\n", 16 | "* This test contains **5** questions each of which should be answered.\n", 17 | "* Write your program in a Python cell just under each question.\n", 18 | "* You can write an explanation of your solution as comments in your code.\n", 19 | "* In each case your solution program must fulfil all of the instructions. Please check the instructions carefully and double check that your program fulfils all of the given instructions.\n", 20 | "* Save your work regularly.\n", 21 | "* At the end of the test you should email your IPython notebook document (i.e. 
this document) to [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman) at **and also** [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott) at\n", 22 | " " 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## Question 1\n", 30 | "\n", 31 | "Given the data points\n", 32 | "\n", 33 | "| x | y |\n", 34 | "|-----|-----|\n", 35 | "| 0.1 | 0.3 |\n", 36 | "| 0.3 | 0.1 |\n", 37 | "| 1.0 | 0.2 |\n", 38 | "| 1.4 | 0.3 |\n", 39 | "| 2.8 | 0.7 |\n", 40 | "| 4.5 | 0.9 |\n", 41 | "\n", 42 | " 1. Calculate the Lagrange polynomial for these points. Plot both the Lagrange polynomial and the raw data points. [10 marks]\n", 43 | " 2. Fit a $3^{rd}$ order polynomial (cubic function) to fit a curve to the given data points. Plot the resulting polynomial function as well as the raw data points. [10 marks]" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "## Question 2\n", 51 | "\n", 52 | "For this exercise, consider the system of linear equations\n", 53 | "\n", 54 | "\\begin{align*}\n", 55 | " 2x + -3y + 4z &= 5 \\\\\n", 56 | " x - y + 5z &= 0 \\\\\n", 57 | " -x + 2y + 7z &= -2\n", 58 | "\\end{align*}\n", 59 | "\n", 60 | " 1. Rewriting the above equation in matrix form, $\\pmb{A} \\pmb{x} = \\pmb{b}$, show that $\\pmb{A}$ has an inverse and therefore the above system as a solution. [5 marks]\n", 61 | " 2. Write a function that takes a matrix $\\pmb{A}$ and a vector $\\pmb{b}$ and converts it into upper-triangular form. [5 marks]\n", 62 | " 3. Write a python function that solves for $\\pmb{x}$ given $\\pmb{A}$ and $\\pmb{b}$ as inputs. This python function should call the function you developed in the previous question, 2.2. [5 marks]\n", 63 | " 4. Use `numpy.dot(linalg.inv(A),b)` to verify your solution. 
[5 marks]" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "## Question 3\n", 71 | "\n", 72 | "Consider the function $f(x) = \cos(x)$.\n", 73 | " 1. Write a function that computes the derivative $df/dx$ using central differencing. The interface to your function should look like *central_diff(f, x, h)* where $f$ is the function to be differentiated, $x$ is the position at which the derivative should be estimated, and $h (= \Delta x)$ is the step size. [10 marks]\n", 74 | " 2. Use this function to compute the derivative at $x = 2.5$ for decreasing values of $\Delta x$. Start with $\Delta x=1.2$ and keep halving until the relative difference between solutions falls below $10^{-6}$. Plot the convergence of the method, i.e. plot $\Delta x$ against the absolute difference between the analytical value and the finite difference approximation. [10 marks]" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": {}, 80 | "source": [ 81 | "## Question 4\n", 82 | "\n", 83 | " 1. Write a Python function that integrates a given function using the trapezoidal rule. The function should look like *trapezoid_rule(start_point, end_point, f, atol=1.0e-6)* where *start_point* and *end_point* are the bounds of the integral, *f* is the function to be integrated and *atol* is the target error in the integral (approximate the error as the absolute difference between successive estimates of the integral for increasing numbers of segments). [10 marks]\n", 84 | " 2. Use this function to evaluate the integral $\int_0^\pi \sqrt{x}\cos(x) dx$ to two decimal places only. How many subdivisions are needed to achieve this result? [10 marks]" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "## Question 5\n", 92 | "\n", 93 | "Consider the function:\n", 94 | "$$f(x) = \dfrac{1}{(x − 0.3)^2 + 0.01} - \dfrac{1}{(x − 0.8)^2 + 0.04}$$.\n", 95 | "\n", 96 | " 1. 
Write a general Python function that computes the root of a function using the bisection method. The function interface should look like *bisection(f, lower_bound, upper_bound, atol=1.0E-4, max_iterations=100)* where *f* is the function to whose root is sought, *lower_bound* and the *upper_bound* are two values that bracket the root, *atol* is the target absolute error in the solution and *max_iterations* is the maximum number of iterations to be executed. The function should return as a tuple the root and number of iterations required to achieve the specified tolerance. [5 marks]\n", 97 | " 2. Write a general Python function that computes the root of a function using the Newton-Raphson method. The function interface should look like *newton(fxn, x0, atol=1.0E-4, max_iterations=100)* and return as a tuple the root and number of iterations required to achieve the specified tolerance. For this you should evaluate numerically the first derivative of $f(x)$ using finite differencing. [5 marks]\n", 98 | " 3. Using both approaches, calculate the root of $f(x)$ and the number of iterations required in both cases. [5 marks]\n", 99 | " 4. Give an example where the Newton-Raphson method will fail in this case and explain why. 
[5 marks]" 100 | ] 101 | } 102 | ], 103 | "metadata": { 104 | "kernelspec": { 105 | "display_name": "Python 3", 106 | "language": "python", 107 | "name": "python3" 108 | }, 109 | "language_info": { 110 | "codemirror_mode": { 111 | "name": "ipython", 112 | "version": 3 113 | }, 114 | "file_extension": ".py", 115 | "mimetype": "text/x-python", 116 | "name": "python", 117 | "nbconvert_exporter": "python", 118 | "pygments_lexer": "ipython3", 119 | "version": "3.6.2" 120 | } 121 | }, 122 | "nbformat": 4, 123 | "nbformat_minor": 1 124 | } 125 | -------------------------------------------------------------------------------- /notebook/images/Width-Length.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/notebook/images/Width-Length.png -------------------------------------------------------------------------------- /notebook/images/central_diff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/notebook/images/central_diff.png -------------------------------------------------------------------------------- /notebook/images/circuit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/notebook/images/circuit.png -------------------------------------------------------------------------------- /notebook/images/circuit.tex: -------------------------------------------------------------------------------- 1 | \documentclass{article} 2 | \usepackage{tikz} 3 | \usepackage[european]{circuitikz} 4 | \begin{document} 5 | \thispagestyle{empty} 6 | \begin{circuitikz} 7 | \foreach \y in {0,-2,-4,-6,-8} 8 | \draw (2,\y) 9 | to[R, o-o] (4,\y) 10 | to[R, o-o] (6,\y) 11 | to[short] 
(6.5,\y) 12 | (7,\y) node {$\cdots$} 13 | (7.5,\y) to[short] (8,\y) 14 | to[R, o-o] (10,\y) 15 | ; 16 | 17 | \draw 18 | (2,0) node[anchor=south] {$V_0$} 19 | (4,0) node[anchor=south] {$V_1$} 20 | (6,0) node[anchor=south] {$V_2$} 21 | (8,0) node[anchor=south east] {$V_{m-2}$} 22 | (10,0) node[anchor=south] {$V_{m-1}$} 23 | 24 | (2,-2) node[anchor=south east] {$V_{m}$} 25 | (4,-2)node[anchor=south] {$V_{m+1}$} 26 | (6,-2) node[anchor=south] {$V_{m+2}$} 27 | (8,-2) node[anchor=south east] {$V_{2m-2}$} 28 | (10,-2) node[anchor=south west] {$V_{2m-1}$} 29 | 30 | (2,-4) node[anchor=south east] {$V_{2m}$} 31 | (4,-4) node[anchor=south] {$V_{2m+1}$} 32 | (6,-4) node[anchor=south] {$V_{2m+2}$} 33 | (8,-4) node[anchor=south east] {$V_{3m-2}$} 34 | (10,-4) node[anchor=south west] {$V_{3m-1}$} 35 | 36 | (2,-5.8) node[anchor=south] {$V_{(n-2)m}$} 37 | (4,-5.8) node[anchor=south] {$V_{(n-2)m+1}$} 38 | (6,-5.8) node[anchor=south] {$V_{(n-2)m+2}$} 39 | (8,-5.8) node[anchor=south] {$V_{(n-1)m-2}$} 40 | (10,-5.8) node[anchor=south] {$V_{(n-1)m-1}$} 41 | 42 | (2,-8.2) node[anchor=north] {$V_{(n-1)m}$} 43 | (4,-8.2) node[anchor=north] {$V_{(n-1)m+1}$} 44 | (6,-8.2) node[anchor=north] {$V_{(n-1)m+2}$} 45 | (8,-8.2) node[anchor=north] {$V_{nm-2}$} 46 | (10,-8.2) node[anchor=north] {$V_{nm-1}$}; 47 | 48 | \foreach \x in {2,4,6,8,10} 49 | \draw 50 | (\x,0) 51 | to[R, o-o] (\x,-2) 52 | to[R, o-o] (\x,-4) 53 | to[short] (\x,-4.5) 54 | (\x,-5) node {$\vdots$} 55 | (\x,-5.5) to[short] (\x,-6) 56 | to[R, o-o] (\x,-8) 57 | ; 58 | 59 | \draw 60 | (2,0) to [short,out=180,in=180,o-] (2,-10) 61 | to [R, -] +(4,0) 62 | node [ground] {} 63 | to [battery1] +(2,0) 64 | to [R, -] +(2,0) 65 | to [short,out=0,in=0,-o] +(0,2) 66 | ; 67 | \end{circuitikz} 68 | \end{document} 69 | -------------------------------------------------------------------------------- /notebook/images/euler_vs_heun.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/notebook/images/euler_vs_heun.png -------------------------------------------------------------------------------- /notebook/images/fd_cd_convergence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/notebook/images/fd_cd_convergence.png -------------------------------------------------------------------------------- /notebook/images/forward_diff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/notebook/images/forward_diff.png -------------------------------------------------------------------------------- /notebook/revision_exercises.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Numerical Methods 1\n", 8 | "### [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman), [Matthew Piggott](http://www.imperial.ac.uk/people/m.d.piggott), [Nicolas Barral](http://www.imperial.ac.uk/people/n.barral)" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": { 14 | "collapsed": true 15 | }, 16 | "source": [ 17 | "# Revision exercises" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "## Question 1\n", 25 | "\n", 26 | "Below is a extract of shot data from a seismic survey.\n", 27 | "\n", 28 | "| Time (ms) | Shot |\n", 29 | "|-----|-----|\n", 30 | "| 0 | -0.021373 |\n", 31 | "| 4 | -0.024578 |\n", 32 | "| 8 | -0.023914 |\n", 33 | "| 12 | -0.018227 |\n", 34 | "| 16 | -0.00781 |\n", 35 | "| 20 | 0.005602 |\n", 36 | "| 24 | 0.019264 |\n", 37 | "| 28 | 0.030235 |\n", 38 | "| 32 | 0.036059 |\n", 39 | "| 36 | 
0.035334 |\n", 40 | "\n", 41 | " 1. Calculate the Lagrange polynomial for these points. Plot both the Lagrange polynomial and the raw data points.\n", 42 | " 2. The full shot is available in the file [shot.txt](data/shot.txt) (in the data folder) - where the sample interval is 4ms as above. Note that the file only contains one column as you can calculate the time column yourself. Use cubic-polynomial splines to re-interpolate the data for a sample interval of 7.07ms. Plot both the original shot data and the interpolated time series." 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "## Question 2\n", 50 | "\n", 51 | "1. Calculate the upper triangular form for the matrix:\n", 52 | "\\begin{align*}\n", 53 | "A = \\begin{bmatrix}\n", 54 | "−5 & 3 & 4\\\\\n", 55 | "10 & −8 & −9\\\\\n", 56 | "15 & 1 & 2\n", 57 | "\\end{bmatrix}\n", 58 | "\\end{align*}\n", 59 | "\n", 60 | "2. Consider the matrix:\n", 61 | "\\begin{align*}\n", 62 | "C = \\begin{bmatrix}\n", 63 | "−5 & 3 & 4\\\\\n", 64 | "10 & −8 & −9\\\\\n", 65 | "15 & 1 & 2\n", 66 | "\\end{bmatrix}\n", 67 | "\\end{align*}\n", 68 | "Does matrix $C$ have an inverse? If not, then why not? " 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "## Question 3\n", 76 | "\n", 77 | "Consider the function\n", 78 | "$$f(x) = \\dfrac{1}{(x − 0.3)^2 + 0.01} - \\dfrac{1}{(x − 0.8)^2 + 0.04}$$.\n", 79 | "\n", 80 | "\n", 81 | " 1. Write a function that computes the second derivative of $f(x)$ using central differencing. The interface to your function should look like *central_diff2(f, x, h)* where $f$ is the function to be differentiated, $x$ is the position at which the derivative should be estimated, and $h (= \\Delta x)$ is the step size.\n", 82 | " 2. Use this function to compute the derivative at $x = 0.5$ for decreasing values of $\\Delta x$. 
Start with $\\Delta x=1.2$ and keep halving until the relative difference between solutions falls below $1.0^{-6}$ Plot the convergence of the method, i.e. plot $\\Delta x$ against the absolute difference between the analytical value and the finite difference approximation." 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## Question 4\n", 90 | "\n", 91 | "Consider the integral:\n", 92 | "$$\\int_0^{2\\pi} x^2\\cos(x) dx.$$\n", 93 | "\n", 94 | "Show how the absolute error between the exact solution and the numerical solution varies with the size of the integration step for the four numerical integration methods: trapezoid rule; Simpson's rule; composite Simpson's rule; and Weddle's rule. Show your result by plotting error against integration step, $dx$." 95 | ] 96 | } 97 | ], 98 | "metadata": { 99 | "kernelspec": { 100 | "display_name": "Python 3", 101 | "language": "python", 102 | "name": "python3" 103 | }, 104 | "language_info": { 105 | "codemirror_mode": { 106 | "name": "ipython", 107 | "version": 3 108 | }, 109 | "file_extension": ".py", 110 | "mimetype": "text/x-python", 111 | "name": "python", 112 | "nbconvert_exporter": "python", 113 | "pygments_lexer": "ipython3", 114 | "version": "3.6.2" 115 | } 116 | }, 117 | "nbformat": 4, 118 | "nbformat_minor": 1 119 | } 120 | -------------------------------------------------------------------------------- /past_exams/assessed-coursework-2016.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 3.09 Numerical methods 1 - coursework assessment 10:00-16:00 1 June 2016" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "# Instructions\n", 15 | "\n", 16 | "* This test contains **4** questions each of which should be answered.\n", 17 | "* Write your program in a Python cell just under each question.\n", 
18 | "* You should write an explanation of your solution as comments in your code.\n", 19 | "* In each case your solution program must fulfil all of the instructions. Please check the instructions carefully and double check that your program fulfils all of the given instructions.\n", 20 | "* Save your work regularly.\n", 21 | "* Before 16:00 you should submit your IPython notebook on **ESESIS**." 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "## Question 1\n", 29 | "\n", 30 | "Below is a extract of shot data from a seismic survey.\n", 31 | "\n", 32 | "| Time (ms) | Shot |\n", 33 | "|-----|-----|\n", 34 | "| 0 | -0.021373 |\n", 35 | "| 4 | -0.024578 |\n", 36 | "| 8 | -0.023914 |\n", 37 | "| 12 | -0.018227 |\n", 38 | "| 16 | -0.00781 |\n", 39 | "| 20 | 0.005602 |\n", 40 | "| 24 | 0.019264 |\n", 41 | "| 28 | 0.030235 |\n", 42 | "| 32 | 0.036059 |\n", 43 | "| 36 | 0.035334 |\n", 44 | "\n", 45 | " 1. Calculate the Lagrange polynomial for these points. Plot both the Lagrange polynomial and the raw data points.\n", 46 | " 2. The full shot is available in the file [shot.txt](https://raw.githubusercontent.com/ggorman/Numerical-methods-1/master/notebook/data/shot.txt) - where the sample interval is 4ms as above. Note that the file only contains one column as you can calculate the time column yourself. Use cubic-polynomial splines to re-interpolate the data for a sample interval of 7.07ms. Plot both the original shot data and the interpolated time series." 47 | ] 48 | }, 49 | { 50 | "cell_type": "markdown", 51 | "metadata": {}, 52 | "source": [ 53 | "## Question 2\n", 54 | "\n", 55 | "1. Calculate the upper triangular form for the matrix:\n", 56 | "\\begin{align*}\n", 57 | "A = \\begin{bmatrix}\n", 58 | "−5 & 3 & 4\\\\\n", 59 | "10 & −8 & −9\\\\\n", 60 | "15 & 1 & 2\n", 61 | "\\end{bmatrix}\n", 62 | "\\end{align*}\n", 63 | "\n", 64 | "2. 
Consider the matrix:\n", 65 | "\\begin{align*}\n", 66 | "C = \\begin{bmatrix}\n", 67 | "−5 & 3 & 4\\\\\n", 68 | "10 & −8 & −9\\\\\n", 69 | "15 & 1 & 2\n", 70 | "\\end{bmatrix}\n", 71 | "\\end{align*}\n", 72 | "Does matrix $C$ have an inverse? If not, then why not? " 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "## Question 3\n", 80 | "\n", 81 | "Consider the function\n", 82 | "$$f(x) = \\dfrac{1}{(x − 0.3)^2 + 0.01} - \\dfrac{1}{(x − 0.8)^2 + 0.04}$$.\n", 83 | "\n", 84 | "\n", 85 | " 1. Write a function that computes the second derivative of $f(x)$ using central differencing. The interface to your function should look like *central_diff2(f, x, h)* where $f$ is the function to be differentiated, $x$ is the position at which the derivative should be estimated, and $h (= \\Delta x)$ is the step size.\n", 86 | " 2. Use this function to compute the derivative at $x = 0.5$ for decreasing values of $\\Delta x$. Start with $\\Delta x=1.2$ and keep halving until the relative difference between solutions falls below $1.0^{-6}$ Plot the convergence of the method, i.e. plot $\\Delta x$ against the absolute difference between the analytical value and the finite difference approximation." 87 | ] 88 | }, 89 | { 90 | "cell_type": "markdown", 91 | "metadata": {}, 92 | "source": [ 93 | "## Question 4\n", 94 | "\n", 95 | "Consider the integral:\n", 96 | "$$\\int_0^\\pi \\sqrt{x}\\cos(x) dx.$$\n", 97 | "\n", 98 | "Show how the absolute error between the exact solution and the numerical solution varies with the size of the integration step for the four numerical integration methods: trapezoid rule; Simpson's rule; composite Simpson's rule; and Weddle's rule. Show your result by plotting error against integration step, $dx$." 
99 | ] 100 | } 101 | ], 102 | "metadata": { 103 | "kernelspec": { 104 | "display_name": "Python 2", 105 | "language": "python", 106 | "name": "python2" 107 | }, 108 | "language_info": { 109 | "codemirror_mode": { 110 | "name": "ipython", 111 | "version": 2 112 | }, 113 | "file_extension": ".py", 114 | "mimetype": "text/x-python", 115 | "name": "python", 116 | "nbconvert_exporter": "python", 117 | "pygments_lexer": "ipython2", 118 | "version": "2.7.12" 119 | } 120 | }, 121 | "nbformat": 4, 122 | "nbformat_minor": 0 123 | } 124 | -------------------------------------------------------------------------------- /past_exams/exam-2.18-2012.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/past_exams/exam-2.18-2012.pdf -------------------------------------------------------------------------------- /past_exams/exam-3.08-2012.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ggorman/Numerical-methods-1/72ae6dc6877e4a2b7ecfe9059d42e5caac4eda45/past_exams/exam-3.08-2012.pdf --------------------------------------------------------------------------------