├── cover.jpg ├── Programs.pdf ├── Matlab_code ├── randomTridiag.m ├── HagersAlg.m ├── evaluate_polynomial_by_Horners_rule.m ├── BackSub.m ├── testDC.m ├── testRayleigh.m ├── Negcount.m ├── testSVDJacobi.m ├── ForwSub.m ├── fihatt.m ├── LLSSVD.m ├── TestHagersCondAlg.m ├── DiscretePoisson2D.m ├── RayleighQuotient.m ├── newtonIR.m ├── testClassicalJacobi.m ├── Cholesky.m ├── fbell.m ├── LLSQR.m ├── LU_PP.m ├── LU_factor.m ├── CGS.m ├── ChangeRowInWorklist.m ├── LLSChol.m ├── Bisection.m ├── RunJacobi.m ├── FitFunctionQRCGS.m ├── PowerM.m ├── Hornersrule_5roots.m ├── FitFunctionNormaleq.m ├── InverseIteration.m ├── RunSVDJacobi.m ├── Hornersrule_10roots.m ├── Poisson2D_Chol.m ├── MethodQR_shift.m ├── MethodQR_iter.m ├── MainBellspline.m ├── HessenbergQR.m ├── MethodOrtIter.m ├── MethodQR_Wshift.m ├── Hornersrule_bisection_classic.m ├── Poisson2D_LU.m ├── MainHatFit.m ├── Poisson2D_ConjugateGrad.m ├── Poisson2D_Gauss_Seidel.m ├── Poisson2D_PrecConjugateGrad.m ├── DivideandConq.m ├── Poisson2D_Gauss_SeidelRedBlack.m ├── Poisson2D_Jacobi.m └── Poisson2D_SOR.m ├── PETSc_code └── PoissonIterative │ ├── Jacobi.cpp │ ├── Jacobi.cpp~ │ ├── SOR.cpp │ ├── SOR.cpp~ │ ├── Makefile │ ├── GaussSeidel.cpp │ ├── GaussSeidel.cpp~ │ ├── Create.cpp │ ├── Create.cpp~ │ ├── include │ └── Poisson.h │ ├── Solver.cpp~ │ ├── Solver.cpp │ ├── CG.cpp │ ├── CG.cpp~ │ ├── DiscretePoisson2D.cpp │ ├── DiscretePoisson2D.cpp~ │ ├── Main.cpp │ ├── Main.cpp~ │ ├── PCG.cpp │ └── PCG.cpp~ ├── README.md └── README_programs.txt /cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/springer-math/Numerical_Linear_Algebra_Theory_and_Applications/HEAD/cover.jpg -------------------------------------------------------------------------------- /Programs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/springer-math/Numerical_Linear_Algebra_Theory_and_Applications/HEAD/Programs.pdf -------------------------------------------------------------------------------- /Matlab_code/randomTridiag.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % generation of the random tridiagonal symmetric matrix 3 | % ---------------------------------------- 4 | 5 | function [A] = randomTridiag(n) 6 | 7 | A=zeros(n); 8 | 9 | for i=2:n 10 | num = rand*30; 11 | A(i,i)=rand*20; 12 | A(i,i-1)=num; 13 | A(i-1,i)=num; 14 | end 15 | A(1,1)=22*rand; 16 | end 17 | 18 | % ---------------------------------------- 19 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/Jacobi.cpp: -------------------------------------------------------------------------------- 1 | 2 | 3 | /* Jacobi's method */ 4 | 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | /** 13 | * Returns the preconditioner used for Jacobi's method 14 | */ 15 | PetscErrorCode Jacobi(PC preconditioner) { 16 | PetscErrorCode ierr; 17 | ierr = PCSetType(preconditioner, PCJACOBI); CHKERRQ(ierr); 18 | 19 | return 0; 20 | } 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Springer Source Code 2 | 3 | This repository accompanies [*Numerical Linear Algebra Theory*](http://www.springer.com/us/book/9783319573021) by Larisa Beilina, Evgenii Karchevskii, and Mikhail 
Karchevskii (Springer, 2017). 4 | 5 | ![Cover image](cover.jpg) 6 | 7 | Download the files as a zip using the green button, or clone the repository to your machine using Git. 8 | 9 | ## Releases 10 | 11 | Release v1.0 corresponds to the code on SpringerLink, without corrections or updates. 12 | 13 | -------------------------------------------------------------------------------- /Matlab_code/HagersAlg.m: -------------------------------------------------------------------------------- 1 | 2 | % ---------------------------------------- 3 | % Run Hager's algorithm. 4 | % ---------------------------------------- 5 | 6 | function [LowerBound] = HagersAlg(B) 7 | 8 | x=(1/length(B))*ones(length(B),1); 9 | 10 | iter=1; 11 | while iter < 1000 12 | w=B*x; xi=sign(w); z = B'*xi; 13 | if max(abs(z)) <= z'*x 14 | break 15 | else 16 | x= (max(abs(z))== abs(z)); 17 | end 18 | iter = iter + 1; 19 | end 20 | LowerBound = norm(w,1); 21 | end 22 | 23 | -------------------------------------------------------------------------------- /Matlab_code/evaluate_polynomial_by_Horners_rule.m: -------------------------------------------------------------------------------- 1 | function [P,bp] = evaluate_polynomial_by_Horners_rule(a,x,eps) 2 | % Parameters: a contains the coeficients of the plynomial p(x) 3 | % P is the value of p(x) at x. 4 | % eps is the mechine epsilon 5 | d = numel(a); 6 | 7 | P = a(d); 8 | bp = abs(a(d)); 9 | 10 | for i = d - 1:(-1):1 11 | 12 | P = x*P + a(i); 13 | bp = abs(x)*bp + abs(a(i)); 14 | 15 | end 16 | 17 | %error bound 18 | bp = 2*(d - 1)*eps*bp; 19 | 20 | end 21 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/Jacobi.cpp~: -------------------------------------------------------------------------------- 1 | \begin{lstlisting} 2 | 3 | /*Program for using Jacobi's method */ 4 | 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | /** 13 | * Returns the preconditioner used for Jacobi's method 14 | */ 15 | PetscErrorCode Jacobi(PC preconditioner) { 16 | PetscErrorCode ierr; 17 | ierr = PCSetType(preconditioner, PCJACOBI); CHKERRQ(ierr); 18 | 19 | return 0; 20 | } 21 | \end{lstlisting} 22 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/SOR.cpp: -------------------------------------------------------------------------------- 1 | 2 | /* Program for computing SOR */ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "Poisson.h" 10 | 11 | const PetscScalar omega = 1.5; 12 | 13 | PetscErrorCode SOR(PC preconditioner) { 14 | PetscErrorCode ierr; 15 | 16 | ierr = PCSetType(preconditioner, PCSOR); CHKERRQ(ierr); 17 | ierr = PCSORSetOmega(preconditioner, omega); CHKERRQ(ierr); 18 | 19 | return 0; 20 | } 21 | -------------------------------------------------------------------------------- /Matlab_code/BackSub.m: -------------------------------------------------------------------------------- 1 | function x=BackSub(U,b) 2 | % This function computes the vector $x$ by backward substitution. 3 | % We solve $Ux=b$, where $U$ is an $n \times n$ nonsingular upper triangular matrix 4 | % and $b$ is a known vector of the length $n$, finding the vector $x$. 5 | 6 | %% Compute x by backward substitution. 
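% At step i the recurrence is
% $x(i) = (b(i) - \sum_{j=i+1}^{n} U(i,j)x(j))/U(i,i)$, applied for i = n, n-1, ..., 1.
% Usage sketch (with hypothetical data): U = triu(rand(4)) + 4*eye(4); b = rand(4,1); x = BackSub(U,b);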
7 | s=size(U); 8 | n=s(1); 9 | x=zeros(n,1); 10 | % $U(i,i)*x(i) = b(i) - \sum_{j=i+1}^{n}$ 11 | x(n)=b(n)/U(n,n); 12 | for i=n-1:-1:1 13 | x(i)=(b(i)-U(i,(i+1):n)*x((i+1):n))/U(i,i); 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /Matlab_code/testDC.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Program which generates predefined random tridiagonal matrices A of dim(A)=n 3 | % and then calls the function DivideandConq.m 4 | % ---------------------------------------- 5 | 6 | %Program which generates some random symmetric tridiagonal matrices 7 | 8 | n=5; 9 | A=zeros(n); 10 | 11 | for i=2:n 12 | tal = rand*30; 13 | A(i,i)=rand*20; 14 | A(i,i-1)=tal; 15 | A(i-1,i)=tal; 16 | end 17 | A(1,1)=22*rand; 18 | 19 | %run Divide-and-Conquer algorithm 20 | [Q,L]=DivideandConq(A) 21 | 22 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/SOR.cpp~: -------------------------------------------------------------------------------- 1 | \begin{lstlisting} 2 | 3 | /* Program implementing SOR */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "Poisson.h" 11 | 12 | const PetscScalar omega = 1.5; 13 | 14 | PetscErrorCode SOR(PC preconditioner) { 15 | PetscErrorCode ierr; 16 | 17 | ierr = PCSetType(preconditioner, PCSOR); CHKERRQ(ierr); 18 | ierr = PCSORSetOmega(preconditioner, omega); CHKERRQ(ierr); 19 | 20 | return 0; 21 | } 22 | 23 | \end{lstlisting} 24 | -------------------------------------------------------------------------------- /Matlab_code/testRayleigh.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Program which generates predefined random tridiagonal matrices A of dim(A)=n 3 | % and then calls the function RayleighQuotient.m 4 | % ---------------------------------------- 5 | 6 | n=10; 7 | A=zeros(n); 8 | 9 | for i=2:n 10 | tal = rand*30; 11 | A(i,i)=rand*20; 12 | A(i,i-1)=tal; 13 | A(i-1,i)=tal; 14 | end 15 | A(1,1)=22*rand; 16 | 17 | %run algorithm of Rayleigh Quotient Iteration 18 | 19 | [rq]=RayleighQuotient(A); 20 | 21 | disp('Computed Rayleigh Quotient is:') 22 | disp(rq) 23 | 24 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile 2 | 3 | PETSC_ARCH=/chalmers/sw/sup64/petsc-3.7.4 4 | 5 | include ${PETSC_ARCH}/lib/petsc/conf/variables 6 | include ${PETSC_ARCH}/lib/petsc/conf/rules 7 | 8 | CXX=g++ 9 | CXXFLAGS=-Wall -Wextra -g -O0 -c -Iinclude -I${PETSC_ARCH}/include 10 | LD=g++ 11 | LFLAGS= 12 | 13 | OBJECTS=Main.o CG.o Create.o DiscretePoisson2D.o GaussSeidel.o Jacobi.o PCG.o Solver.o SOR.o 14 | Run=Main 15 | 16 | all: $(Run) 17 | 18 | %.o: %.cpp 19 | $(CXX) $(CXXFLAGS) -o $@ $< 20 | 21 | $(Run): $(OBJECTS) 22 | $(LD) $(LFLAGS) $(OBJECTS) $(PETSC_LIB) -o $@ 23 | -------------------------------------------------------------------------------- /Matlab_code/Negcount.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | %Compute number of eigenvalues of a tridiagonal matrix A 3 | %(without pivoting) which are less then z 4 | % ---------------------------------------- 5 | 6 | function [ neg ] = Negcount( A,z ) 7 | 8 | 
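% The loop below computes the pivots d(i) of the LDL^T factorization of the
% shifted tridiagonal matrix A - z*I (no pivoting). By Sylvester's law of
% inertia, the number of negative pivots equals the number of eigenvalues of
% A that are less than z, which is returned in neg.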
d=zeros(length(A),1); 9 | d(1)=A(1,1)-z; 10 | for i = 2:length(A) 11 | d(i)=(A(i,i)-z)-(A(i,i-1)^2)/d(i-1); 12 | end 13 | 14 | %compute number of negative eigenvalues of A 15 | neg=0; 16 | for i = 1:length(A) 17 | if d(i)<0 18 | neg = neg+1; 19 | end 20 | end 21 | 22 | end 23 | 24 | -------------------------------------------------------------------------------- /Matlab_code/testSVDJacobi.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Program which generates predefined random tridiagonal matrices A 3 | % and the calls the function RunSVDJacobi.m 4 | % ---------------------------------------- 5 | 6 | n=5; 7 | A=zeros(n); 8 | 9 | for i=2:n 10 | tal = rand*30; 11 | A(i,i)=rand*20; 12 | A(i,i-1)=tal; 13 | A(i-1,i)=tal; 14 | end 15 | A(1,1)=22*rand; 16 | 17 | Ainit=A 18 | 19 | disp('computed by one-sided Jacobi algorithm SVD decomposition:'); 20 | [U,S,V]= RunSVDJacobi(Ainit) 21 | 22 | disp('computed SVD decomposition using svd command (for comparison):'); 23 | [u,sigma,v]=svd(Ainit) 24 | 25 | -------------------------------------------------------------------------------- /Matlab_code/ForwSub.m: -------------------------------------------------------------------------------- 1 | function x=ForwSub(L,b) 2 | % This function computes the vector $x$, of length $n$, 3 | % given $Lx=b$ where $L$ is an $n \times n$ nonsingular lower triangular matrix 4 | % and $b$ is a known vector of length $n$, by using forward substitution. 5 | 6 | %% Compute $x$ by forward substitution. 7 | s=size(L); 8 | n=s(1); 9 | x=zeros(n,1); 10 | % $L(i,i)*x(i)=b(i) - \sum_{j=1}^{i-1}$ 11 | % First, set $x(i)=b(i)$, then subtract the known values. 12 | % Lastly, divide by diagonal entry $L(i,i)$ 13 | x(1)=b(1)/L(1,1); 14 | for i=2:n 15 | x(i)=(b(i)-L(i,1:(i-1))*x(1:(i-1)))/L(i,i); 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /Matlab_code/fihatt.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Construction of columns in matrix A using linear splines. 3 | % Input arguments: T - column vector with junction points, 4 | % x are measurement ponts (discretization points). 5 | % Returns column with number k to the matrix A. 6 | % ---------------------------------------- 7 | 8 | function f=fihatt(k,x,T) 9 | 10 | h=diff(T); 11 | N=length(T); 12 | f=zeros(size(x)); 13 | if k>1 14 | I=find(x>=T(k-1) & x<=T(k)); 15 | f(I)=(x(I)-T(k-1))/h(k-1); 16 | end 17 | if k=T(k) & x<=T(k+1)); 19 | f(I)=(T(k+1)-x(I))/h(k); 20 | end 21 | 22 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/GaussSeidel.cpp: -------------------------------------------------------------------------------- 1 | 2 | 3 | /* Gauss-Seidel method */ 4 | 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "Poisson.h" 12 | 13 | PetscErrorCode GaussSeidel(PC preconditioner) { 14 | PetscErrorCode ierr; 15 | ierr = PCSetType(preconditioner, PCSOR); CHKERRQ(ierr); 16 | 17 | /** 18 | * To use the Gauss-Seidel method we set 19 | * omega = 1. 
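 * Gauss-Seidel is the special case of SOR with relaxation parameter
 * omega = 1, which is why PETSc's PCSOR preconditioner is reused here;
 * component-wise it updates
 * x_i = (b_i - sum_{j<i} a_ij x_j^{new} - sum_{j>i} a_ij x_j^{old}) / a_ii.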
20 | */ 21 | // By default, omega = 1, so the below line is not necessary 22 | //ierr = PCSORSetOmega(preconditioner, 1.0); CHKERRQ(ierr); 23 | 24 | return 0; 25 | } 26 | 27 | -------------------------------------------------------------------------------- /Matlab_code/LLSSVD.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Solution of the system of linear equations Ax = b via 3 | % SVD decomposition of a matrix A. 4 | % SVD decomposition is done via matlab function svd. 5 | % Matrix A is m-by-n, m > n, the vector of the rhs b is of the size n. 6 | % ---------------------------------------- 7 | 8 | function x=LLSSVD(A,b) 9 | 10 | [U, S, V]=svd(A); 11 | 12 | UTb=U'*b; 13 | 14 | % choose tolerance 15 | tol=max(size(A))*eps(S(1,1)); 16 | s=diag(S); 17 | n=length(A(1,:)); 18 | 19 | % compute number of singular values > tol 20 | r=sum(s > tol); 21 | 22 | w=[(UTb(1:r)./s(1:r))' zeros(1,n-r)]'; 23 | 24 | x=V*w; 25 | -------------------------------------------------------------------------------- /Matlab_code/TestHagersCondAlg.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Hager's algorithm: for the input matrix A 3 | % the function HagerCond(A) computes 4 | % the lower bound of the one-norm of the matrix A. 5 | % ---------------------------------------- 6 | 7 | % First we generate some random symmetric matrices 8 | 9 | n=5; 10 | A=zeros(n); 11 | 12 | for i=1:n 13 | for j=1:n 14 | tal = rand*30; 15 | A(i,i)=rand*20; 16 | A(i,j)=tal; 17 | A(j,i)=tal; 18 | end 19 | end 20 | disp(' The input matrix A is:'); 21 | 22 | A 23 | 24 | disp(' The computed lower bound of ||A||_1 is:'); 25 | HagersEst = HagersAlg(A) 26 | 27 | disp(' result of norm(A,1) is:'); 28 | norm(A,1) 29 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/GaussSeidel.cpp~: -------------------------------------------------------------------------------- 1 | \begin{lstlisting} 2 | 3 | /*Program for using Gauss-Seidel method */ 4 | 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "Poisson.h" 12 | 13 | PetscErrorCode GaussSeidel(PC preconditioner) { 14 | PetscErrorCode ierr; 15 | ierr = PCSetType(preconditioner, PCSOR); CHKERRQ(ierr); 16 | 17 | /** 18 | * To use the Gauss-Seidel method we set 19 | * omega = 1. 20 | */ 21 | // By default, omega = 1, so the below line is not necessary 22 | //ierr = PCSORSetOmega(preconditioner, 1.0); CHKERRQ(ierr); 23 | 24 | return 0; 25 | } 26 | \end{lstlisting} 27 | -------------------------------------------------------------------------------- /Matlab_code/DiscretePoisson2D.m: -------------------------------------------------------------------------------- 1 | function A=DiscretePoisson2D(n) 2 | % The function for 2D discretization of the Laplace operator with sign minus: - laplace 3 | % Input parameters: 4 | % n - number of inner nodes, which is assumed to be the same in both 5 | % the x_1- and x_2 directions. 
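% Output: A is the (n^2)-by-(n^2) matrix of the standard five-point stencil,
% stored densely in block-tridiagonal form with 4 on the main diagonal and
% -1 for each horizontal and vertical neighbour, so that on a uniform mesh
% with step h the discrete problem reads (1/h^2)*A*u = f.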
6 | 7 | A = zeros(n*n,n*n); 8 | 9 | % Main diagonal 10 | for i=1:n*n 11 | A(i,i)=4; 12 | end 13 | 14 | % 1st and 2nd off-diagonals 15 | for k=1:n % go through block 1 to n 16 | for i=1:(n-1) 17 | A(n*(k-1)+i,n*(k-1)+i+1)=-1; % 18 | A(n*(k-1)+i+1,n*(k-1)+i)=-1; 19 | end 20 | end 21 | 22 | % 3rd and 4th off-diagonals 23 | for i=1:n*(n-1) 24 | A(i,i+n)=-1; 25 | A(i+n,i)=-1; 26 | end 27 | 28 | end 29 | -------------------------------------------------------------------------------- /Matlab_code/RayleighQuotient.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Computes value of Rayleigh Quotient rq which is in the tolerance 3 | % tol from an eigenvalue of A 4 | % ---------------------------------------- 5 | 6 | function rq = RayleighQuotient(A) 7 | 8 | [n,~]=size(A); 9 | x0=zeros(n,1); 10 | 11 | % initialize initial vector x0 which has norm 1 12 | x0(n)=1; 13 | 14 | tol = 1e-10; 15 | xi = x0/norm(x0,2); 16 | 17 | i=0; 18 | % initialize Rayleigh Quotient for x0 19 | rq = (xi'*A*xi)/(xi'*xi); 20 | 21 | while norm((A*xi-rq*xi),2) > tol 22 | yi = (A-rq*eye(size(A)))\xi; 23 | xi=yi/norm(yi,2); 24 | rq = (xi'*A*xi)/(xi'*xi) 25 | i=i+1; 26 | end 27 | 28 | end 29 | 30 | % ---------------------------------------- 31 | -------------------------------------------------------------------------------- /Matlab_code/newtonIR.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Iterative refinement using Newton's method. 3 | % Matrix A is m-by-n, m > n, the vector of the rhs b is of the size n. 4 | % ---------------------------------------- 5 | 6 | function w=newtonIR(A,x,b,tol) 7 | 8 | relative_error=1; 9 | iter = 0; 10 | 11 | while relative_error > tol 12 | 13 | %compute residual 14 | r = A*x-b; 15 | d=A\r; 16 | x=x-d; 17 | iter = iter+1 18 | relative_error = norm(A*x - b)/norm(b) 19 | 20 | % here we introduce the maximal number of iterations 21 | % in Newton's method: if the relative error 22 | % is not rediced - we terminate computations 23 | 24 | if iter > 100 25 | break 26 | end 27 | end 28 | w=x; 29 | -------------------------------------------------------------------------------- /Matlab_code/testClassicalJacobi.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Program which generates predefined random tridiagonal matrices A 3 | % and the calls the function RunJacobi.m 4 | % ---------------------------------------- 5 | n=5; 6 | A=zeros(n); 7 | 8 | for i=2:n 9 | tal = rand*30; 10 | A(i,i)=rand*20; 11 | A(i,i-1)=tal; 12 | A(i-1,i)=tal; 13 | end 14 | A(1,1)=22*rand; 15 | 16 | % initialization of matrix 17 | %A=rand(5,5)*10; 18 | 19 | Ainit=A 20 | %Ainit =A*A' 21 | 22 | % run classical Jacobi algorithm 23 | A= RunJacobi(Ainit) 24 | 25 | %Print out computed by Jacobi algorithm eigenvalues 26 | disp('computed by Jacobi algorithm eigenvalues:'); 27 | eig(A) 28 | 29 | % Print out eigenvalues of the initial matrix A using eig(Ainit) 30 | disp('eigenvalues of the initial matrix Ainit using eig(Ainit):'); 31 | eig(Ainit) 32 | 33 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/Create.cpp: -------------------------------------------------------------------------------- 1 | 2 | /* Program to create matrix and vector in PETSc. 
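   CreateMatrix allocates and configures an empty rows-by-cols PETSc matrix,
   and CreateVector an empty vector of length N; values are set by the caller.
   A typical call sequence (sketch): CreateMatrix(&A, n*n, n*n); CreateVector(&b, n*n);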
*/ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | PetscErrorCode CreateMatrix(Mat *A, PetscInt rows, PetscInt cols) { 9 | PetscErrorCode ierr; 10 | ierr = MatCreate(PETSC_COMM_WORLD, A); CHKERRQ(ierr); 11 | ierr = MatSetSizes(*A, PETSC_DECIDE, PETSC_DECIDE, rows, cols); CHKERRQ(ierr); 12 | ierr = MatSetFromOptions(*A); CHKERRQ(ierr); 13 | ierr = MatSetUp(*A); CHKERRQ(ierr); 14 | 15 | return 0; 16 | } 17 | 18 | PetscErrorCode CreateVector(Vec *v, PetscInt N) { 19 | PetscErrorCode ierr; 20 | 21 | ierr = VecCreate(PETSC_COMM_WORLD, v); CHKERRQ(ierr); 22 | ierr = VecSetSizes(*v, PETSC_DECIDE, N); CHKERRQ(ierr); 23 | ierr = VecSetFromOptions(*v); CHKERRQ(ierr); 24 | 25 | return 0; 26 | } 27 | 28 | 29 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/Create.cpp~: -------------------------------------------------------------------------------- 1 | 2 | \begin{lstlisting} 3 | 4 | /* Program to create matrix and vector in PETSc. */ 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | PetscErrorCode CreateMatrix(Mat *A, PetscInt rows, PetscInt cols) { 11 | PetscErrorCode ierr; 12 | ierr = MatCreate(PETSC_COMM_WORLD, A); CHKERRQ(ierr); 13 | ierr = MatSetSizes(*A, PETSC_DECIDE, PETSC_DECIDE, rows, cols); CHKERRQ(ierr); 14 | ierr = MatSetFromOptions(*A); CHKERRQ(ierr); 15 | ierr = MatSetUp(*A); CHKERRQ(ierr); 16 | 17 | return 0; 18 | } 19 | 20 | PetscErrorCode CreateVector(Vec *v, PetscInt N) { 21 | PetscErrorCode ierr; 22 | 23 | ierr = VecCreate(PETSC_COMM_WORLD, v); CHKERRQ(ierr); 24 | ierr = VecSetSizes(*v, PETSC_DECIDE, N); CHKERRQ(ierr); 25 | ierr = VecSetFromOptions(*v); CHKERRQ(ierr); 26 | 27 | return 0; 28 | } 29 | 30 | 31 | 32 | \end{lstlisting} 33 | -------------------------------------------------------------------------------- /Matlab_code/Cholesky.m: -------------------------------------------------------------------------------- 1 | function L=Cholesky(A) 2 | % Function factorizes square matrix A, assuming that A is s.p.d. matrix, 3 | % into A=LL', where L' is the transpose 4 | % of L, and L is non-singular lower triangular matrix. 5 | 6 | %% 7 | s=size(A); 8 | n=s(1); 9 | L=zeros(n); 10 | 11 | % diagonal elements i=j 12 | % a_jj=v_j*v_j'=l_j1^2+l_j2^2+...+l_jj^2 (sum has j-terms) 13 | 14 | % elements below diagonal, i>j 15 | % a_ij=v_i*v_j'=l_i1 l_j1 + l_i2 l_j2 + ... + l_ij l_jj (sum has j terms) 16 | 17 | for j=1:n % go through column 1 to n 18 | % Compute diagonal elements, i=j 19 | L(j,j)=A(j,j); 20 | for k=1:(j-1) 21 | L(j,j)=L(j,j)-L(j,k)^2; 22 | end 23 | L(j,j)=L(j,j)^(1/2); 24 | % Compute elements below diagonal, i>j 25 | for i=(j+1):n 26 | L(i,j)=A(i,j); 27 | for k=1:(j-1) 28 | L(i,j)=L(i,j)-L(i,k)*L(j,k); 29 | end 30 | L(i,j)=L(i,j)/L(j,j); 31 | end 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /Matlab_code/fbell.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Matrix B is constructed using bellsplines. 3 | % Input arguments: T - column vector with junction points, 4 | % x are measurement ponts (discretization points). 5 | % ---------------------------------------- 6 | 7 | function B=fbell(x,T) 8 | 9 | m=length(x); 10 | N=length(T); 11 | epsi=1e-14; 12 | 13 | %construct N+6 column vector 14 | a=[T(1)*[1 1 1]'; T; T(N)*(1+epsi)*[1 1 1]']; 15 | n=N+5; 16 | C=zeros(m,n); 17 | for k=1:n 18 | I=find(x>=a(k) & x n, the vector of the rhs b is of the size n. 
5 | % QR decomposition of A is done via classical 6 | % Gram-Schmidt (CGM) orthogonalization procedure. 7 | % ---------------------------------------- 8 | 9 | function x=LLSQR(A,b) 10 | 11 | n=length(A(1,:)); 12 | q=[]; 13 | r=[]; 14 | 15 | for i=1:1:n 16 | q(:,i)=A(:,i); 17 | for j=1:1:i-1 18 | r(j,i)=q(:,j)'*A(:,i); 19 | q(:,i)=q(:,i)-r(j,i)*q(:,j); 20 | end 21 | r(i,i)=norm(q(:,i)); 22 | q(:,i)=q(:,i)/r(i,i); 23 | end 24 | 25 | % compute right hand side in the equation 26 | Rx=q'*b; 27 | 28 | % compute solution via backward substitution 29 | for i=n:-1:1 30 | for k=n:-1:i+1 31 | Rx(i)=Rx(i)-Rx(k)*r(i,k); 32 | end 33 | Rx(i)=Rx(i)/r(i,i); 34 | end 35 | 36 | x = Rx; 37 | -------------------------------------------------------------------------------- /Matlab_code/LU_PP.m: -------------------------------------------------------------------------------- 1 | function [L,U,P]=LU_PP(A) 2 | % LU factorization with partial pivoting 3 | % This function calculates the permutation matrix $P$, 4 | % the unit lower triangular matrix $L$, 5 | % and the nonsingular upper triangular matrix $U$ 6 | % such that $LU=PA$ for a given nonsingular $A$. 7 | 8 | [n,n]=size(A); 9 | P=eye(n); L=eye(n); U=A; 10 | for i=1:n-1 11 | [pivot m]=max(abs(U(i:n,i))); 12 | m=m+i-1; 13 | if m ~= i 14 | % swap rows $m$ and $i$ in $P$ 15 | temp=P(i,:); 16 | P(i,:)=P(m,:); 17 | P(m,:)=temp; 18 | % swap rows $m$ and $i$ in $U$ 19 | temp=U(i,:); 20 | U(i,:)=U(m,:); 21 | U(m,:)=temp; 22 | % swap elements $L(m,1:i-1)$ and $L(i,1:i-1)$ in $L$ 23 | if i >= 2 24 | temp=L(i,1:i-1); 25 | L(i,1:i-1)=L(m,1:i-1); 26 | L(m,1:i-1)=temp; 27 | end 28 | end 29 | L(i+1:n,i)=U(i+1:n,i)/U(i,i); 30 | U(i+1:n,i+1:n)=U(i+1:n,i+1:n)-L(i+1:n,i)*U(i,i+1:n); 31 | U(i+1:n,i)=0; 32 | end 33 | -------------------------------------------------------------------------------- /Matlab_code/LU_factor.m: -------------------------------------------------------------------------------- 1 | function [L,U]=LU_factor(A) 2 | % Here, factorization A=LU is done without pivoting, 3 | % permutations of the rows and columns in A. 4 | % This function overwrites L and U on A. 5 | 6 | % input A is an n by n matrix. 7 | 8 | % Output L is a unit lower triangular matrix. 9 | % Output U is non-singular upper matrix. 10 | 11 | %% Pre-defining matrices and indices 12 | s=size(A); 13 | n=s(1); 14 | 15 | %% Compute L and U, ovewrwrite on A. 16 | 17 | for i=1:(n-1) % need to do n-1 operations on A 18 | 19 | for j=(i+1):n 20 | A(j,i)=A(j,i)/A(i,i); 21 | end 22 | 23 | for j=(i+1):n 24 | for k=(i+1):n 25 | A(j,k)=A(j,k)-A(j,i)*A(i,k); 26 | end 27 | end 28 | end 29 | 30 | %% Construct L, copy values from A 31 | L=eye(n); % pre-define as identity matrix. 32 | for i=2:n 33 | for j=1:(i-1) 34 | L(i,j)=A(i,j); 35 | end 36 | end 37 | 38 | %% Construct U, copy values from A 39 | U=zeros(n); % pre-define as zero matrix. 40 | for i=1:n 41 | for j=i:n 42 | U(i,j)=A(i,j); 43 | end 44 | end 45 | 46 | end 47 | -------------------------------------------------------------------------------- /Matlab_code/CGS.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Classical Gram-Schmidt (CGS) orthogonalization process 3 | % and solution of the linear least square problem using CGS. 
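% Note: classical Gram-Schmidt may lose orthogonality in floating-point
% arithmetic; modified Gram-Schmidt or Householder QR is more robust when
% the columns of A are nearly linearly dependent.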
4 | % ---------------------------------------- 5 | 6 | % size of our matrix A is m-by-n 7 | m= 6; 8 | n=3; 9 | 10 | % vector of the right hand side 11 | y=zeros(1,m); 12 | 13 | A=[1,0,0; 14 | 0,1,0; 15 | 0,0,1; 16 | -1, 1,0; 17 | -1,0,1; 18 | 0,-1,1]; 19 | 20 | y = [1237,1941,2417,711,1177,475]; 21 | 22 | % allocate matrices q and r for QR decomposition 23 | 24 | q=[]; 25 | r=[]; 26 | 27 | %QR decomposition using classical Gram-Schmidt orthogonalization 28 | for k=1:1:n 29 | q(:,k)=A(:,k); 30 | for j=1:1:k-1 31 | r(j,k)=q(:,j)'*A(:,k); 32 | q(:,k)=q(:,k)-r(j,k)*q(:,j); 33 | end 34 | r(k,k)=norm(q(:,k)); 35 | q(:,k)=q(:,k)/r(k,k); 36 | end 37 | 38 | %compute solution of the system Ax = QR x = y 39 | % by backward substitution: R x = Q^T y 40 | 41 | b=[]; 42 | 43 | % compute right hand side Q^T y 44 | b=q'*y'; 45 | 46 | % perform backward substitution to get solution x = R^(-1) Q^T y 47 | % obtain solution in b 48 | for i=n:-1:1 49 | for k=n:-1:i+1 50 | b(i)=b(i)-b(k)*r(i,k); 51 | end 52 | b(i)=b(i)/r(i,i); 53 | end 54 | 55 | -------------------------------------------------------------------------------- /Matlab_code/ChangeRowInWorklist.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Add or remove rows to the WorkList 3 | % If action = 'add' then add a line to the Worklist, return Worklist and 4 | % new line 5 | % If action = 'delete' then delete the given line from the Worklist, return 6 | % Worklist and deleted line 7 | % ---------------------------------------- 8 | 9 | function [ Worklist , LineInQuestion] = ChangeRowInWorklist(Worklist,LINE,action) 10 | 11 | if strcmp(action,'delete') 12 | if (length(Worklist(:,1)) == 1) 13 | LineInQuestion=Worklist; 14 | Worklist=[]; 15 | elseif (LINE==length(Worklist(:,1))) 16 | LineInQuestion = Worklist(LINE,:); 17 | Worklist=Worklist(1:(end-1),:); 18 | elseif (LINE==1) 19 | LineInQuestion = Worklist(LINE,:); 20 | Worklist=Worklist(2:end,:); 21 | else 22 | LineInQuestion = Worklist(LINE,:); 23 | Worklist=[Worklist(1:(LINE-1),:);Worklist((LINE+1):end,:)]; 24 | end 25 | 26 | elseif strcmp(action,'add') 27 | LineInQuestion = LINE; 28 | if (length(Worklist) == 0) 29 | Worklist=LINE; 30 | else 31 | Worklist = [Worklist;LINE]; 32 | end 33 | 34 | else 35 | fprintf('The third argument must be either delete or add!') 36 | 37 | end 38 | end 39 | 40 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/include/Poisson.h: -------------------------------------------------------------------------------- 1 | #ifndef _CE3_H 2 | #define _CE3_H 3 | 4 | #include 5 | 6 | PetscErrorCode CreateMatrix(Mat*, PetscInt, PetscInt); 7 | PetscErrorCode CreateVector(Vec*, PetscInt); 8 | 9 | PetscErrorCode DiscretePoisson2D(PetscInt, Mat*); 10 | //PetscErrorCode DiscretePoisson2D_coeffs(PetscInt, PetscScalar, Mat*, Mat*, Vec*); 11 | PetscErrorCode DiscretePoisson2D_coeffs(PetscInt, PetscScalar, Vec*); 12 | 13 | /***********************/ 14 | /* METHODS OF SOLUTION */ 15 | /***********************/ 16 | PetscErrorCode Solve(Mat, Vec, Vec, PetscInt, bool); 17 | PetscErrorCode Jacobi(PC); 18 | PetscErrorCode GaussSeidel(PC); 19 | PetscErrorCode SOR(PC); 20 | PetscErrorCode ConjugateGradient(KSP, PC); 21 | PetscErrorCode ConjugateGradient_full(Mat, Vec, Vec, bool); 22 | PetscErrorCode ConjugateGradient_inner(Mat, Vec, Vec, Mat, bool); 23 | PetscErrorCode PreconditionedConjugateGradient(KSP, PC); 24 | PetscErrorCode 
PreconditionedConjugateGradient_full(Mat, Vec, Vec, bool); 25 | PetscErrorCode PreconditionedConjugateGradient_inner(Mat, Vec, Vec, Mat, bool); 26 | 27 | enum SolverMethod { 28 | METHOD_INVALID=0, 29 | METHOD_JACOBI=1, 30 | METHOD_GAUSS_SEIDEL=2, 31 | METHOD_SOR=3, 32 | METHOD_CG=4, 33 | METHOD_CG_FULL=5, 34 | METHOD_PCG=6, 35 | METHOD_PCG_FULL=7 36 | }; 37 | 38 | #endif/*_CE3_H*/ 39 | -------------------------------------------------------------------------------- /Matlab_code/LLSChol.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Solution of the system of linear equations A^T Ax = A^T b 3 | % using Cholesky factorization of A^T A. 4 | % Matrix A is m-by-n, m > n, the vector of the rhs b is of the size n. 5 | % ---------------------------------------- 6 | 7 | function x=LLSChol(A,b) 8 | 9 | ATb=A'*b; 10 | ATA=A'*A; 11 | n=length(A(1,:)); 12 | lowerChol=zeros(n); 13 | 14 | %Cholesky factorization 15 | for j=1:1:n 16 | s1=0; 17 | for k=1:1:j-1 18 | s1=s1+lowerChol(j,k)*lowerChol(j,k); 19 | end 20 | lowerChol(j,j)=(ATA(j,j)-s1)^(1/2); 21 | for i=j+1:1:n 22 | s2=0; 23 | for k=1:1:j-1 24 | s2=s2+lowerChol(i,k)*lowerChol(j,k); 25 | end 26 | lowerChol(i,j)=(ATA(i,j)-s2)/lowerChol(j,j); 27 | end 28 | end 29 | 30 | % Solver for LL^T x = A^Tb: 31 | % Define z=L^Tx, then solve 32 | % Lz=A^T b to find z. 33 | % After by known z we get x. 34 | 35 | % forward substitution Lz=A^T b to obtain z 36 | 37 | for i=1:1:n 38 | for k=1:1:i-1 39 | ATb(i)=ATb(i)-ATb(k)*lowerChol(i,k); 40 | end 41 | ATb(i)=ATb(i)/lowerChol(i,i); 42 | end 43 | 44 | % Solution of L^Tx=z , backward substitution 45 | 46 | for i=n:-1:1 47 | for k=n:-1:i+1 48 | ATb(i)=ATb(i)-ATb(k)*lowerChol(k,i); 49 | end 50 | ATb(i)=ATb(i)/lowerChol(i,i); 51 | end 52 | 53 | % Obtained solution 54 | x=ATb; 55 | 56 | -------------------------------------------------------------------------------- /Matlab_code/Bisection.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Find all eigenvalues of the matrix A ion the input interval [a,b) 3 | % ---------------------------------------- 4 | 5 | % define size n of the n-by-n matrix A 6 | n=5; 7 | 8 | % Generate the symmetric tridiagonal matrix A 9 | A=randomTridiag(n); 10 | 11 | % Set bounds for the interval [a,b) in the algorithm and the tolerance 12 | a=-100;b=100; 13 | tol=0.000001; 14 | 15 | %Define functions for the worklist 16 | 17 | DeleteRowInWorklist=@(Worklist,linenr) ChangeRowInWorklist(Worklist,linenr,'delete'); 18 | InsertRowInWorklist=@(Worklist,LineToAdd)... 19 | ChangeRowInWorklist(Worklist,LineToAdd,'add'); 20 | 21 | % Set the info for the first worklist 22 | na=Negcount(A,a); 23 | nb=Negcount(A,b); 24 | Worklist=[]; 25 | 26 | %If no eigenvalues are found on the interval [a,b) then save an empty worklist 27 | if na ~= nb 28 | Worklist=InsertRowInWorklist(Worklist,[a,na,b,nb]); 29 | end 30 | 31 | while numel(Worklist) ~= 0 32 | [Worklist, LineToWorkWith ]= DeleteRowInWorklist(Worklist,1); 33 | 34 | low=LineToWorkWith(1); 35 | n_low=LineToWorkWith(2); 36 | up=LineToWorkWith(3); 37 | n_up=LineToWorkWith(4); 38 | 39 | % if the upper and lower bounds are close enough we print out this interval 40 | if (up-low)< tol 41 | NrOfEigVal = n_up-n_low; 42 | fprintf('We have computed %3.0f eigenvalues in the interval [%4.4f,%4.4f) \n', ... 
43 | NrOfEigVal,low,up); 44 | else 45 | % Perform the bisection step 46 | mid= (low+up)/2; 47 | n_mid= Negcount(A,mid); 48 | if n_mid > n_low 49 | Worklist = InsertRowInWorklist(Worklist,[low,n_low,mid,n_mid]); 50 | end 51 | if n_up>n_mid 52 | Worklist = InsertRowInWorklist(Worklist,[mid,n_mid,up,n_up]); 53 | end 54 | end 55 | end 56 | 57 | -------------------------------------------------------------------------------- /Matlab_code/RunJacobi.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Run Classical Jacobi rotation algorithm. 3 | % until the matrix A is sufficiently diagonal or off(A) < tol 4 | % ---------------------------------------- 5 | 6 | function [A] = RunJacobi(A) 7 | 8 | tol=0.005; 9 | 10 | iter=1; 11 | 12 | %compute initial off's 13 | [sum,v]=off(A); 14 | 15 | while sum >tol && iter<100000 16 | % search for maximal values of off's 17 | j=v(2,max(v(1,:)) == v(1,:)); %get index j 18 | k=v(3,max(v(1,:)) == v(1,:)); %get index k 19 | 20 | %perform Jacobi rotation for indices (j,k) 21 | A=jacobiRot(A,j,k); 22 | [sum,v]=off(A); 23 | iter=iter+1; 24 | end 25 | 26 | end 27 | 28 | % Run one Jacobi rotation 29 | 30 | function [A] = jacobiRot( A,j,k ) 31 | tol=0.0001; 32 | 33 | if abs(A(j,k))>tol 34 | tau=(A(j,j)-A(k,k))/(2*A(j,k)); 35 | t=sign(tau)/(abs(tau)+sqrt(1+tau^2)); 36 | c=1/(sqrt(1+t^2)); 37 | s=c*t; 38 | 39 | R=eye(length(A)); 40 | R(j,j)=c; 41 | R(k,k)=c; 42 | R(j,k)=-s; 43 | R(k,j)=s; 44 | 45 | A=R'*A*R; 46 | end 47 | 48 | end 49 | 50 | % Compute off's: the square root of the sum of squares 51 | % of the upper off-diagonal elements. 52 | % v is a matrix that holds the information needed. 53 | 54 | function [sum,v] = off(A) 55 | 56 | sum=0; 57 | %create array v for off's: 58 | % in the first row will be sum of square root of the squares of computed off's 59 | % in the second row: the index j 60 | % in the third row: the index k 61 | 62 | v=[0;0;0]; 63 | for i=1:(length(A)-1) 64 | for j=(i+1):length(A) 65 | sum=sum+A(i,j)*A(i,j); 66 | v=[v,[sqrt(A(i,j)*A(i,j));i;j]]; 67 | end 68 | end 69 | sum=sqrt(sum); 70 | v=v(:,2:end); 71 | 72 | end 73 | 74 | -------------------------------------------------------------------------------- /Matlab_code/FitFunctionQRCGS.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Solution of least squares problem min_x || Ax - y ||_2 3 | % using QR decomposition. QR decomposition is performed via classical 4 | % Gram-Schmidt (CGM) orthogonalization procedure. 5 | % Matrix A is constructed as a Vandermonde matrix. 
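% (Here the Vandermonde matrix has entries A(i,j) = x_i^(j-1), so A is
% m-by-(d+1) for a polynomial of degree d sampled at the m points x_i.)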
6 | % Program performs fitting to the function y = sin(pi*x/5) + x/5 7 | % ---------------------------------------- 8 | 9 | d=5; % degree of polynomial 10 | m=10;%number of discretization points or rows in the matrix A 11 | p=ones(1,d+1); 12 | x=zeros(1,m); 13 | y=zeros(1,m); 14 | A=[]; 15 | for i=1:1:m 16 | x = linspace(-10.0,10.0,m); 17 | % exact function which should be approximated 18 | y(i)= sin(pi*x(i)/5) + x(i)/5; 19 | end 20 | 21 | % construction of a Vamdermonde matrix 22 | for i=1:1:m 23 | for j=1:1:d+1 24 | A(i,j)=power(x(i),j-1); 25 | end 26 | end 27 | 28 | q=[]; 29 | r=[]; 30 | 31 | %QR decomposition via CGM 32 | 33 | for i=1:1:d+1 34 | q(:,i)=A(:,i); 35 | for j=1:1:i-1 36 | r(j,i)=q(:,j)'*A(:,i); 37 | q(:,i)=q(:,i)-r(j,i)*q(:,j); 38 | end 39 | r(i,i)=norm(q(:,i)); 40 | q(:,i)=q(:,i)/r(i,i); 41 | end 42 | b=[]; 43 | b=q'*y'; 44 | for i=d+1:-1:1 45 | for k=d+1:-1:i+1 46 | b(i)=b(i)-b(k)*r(i,k); 47 | end 48 | b(i)=b(i)/r(i,i); 49 | end 50 | 51 | figure(1) 52 | plot(x,y,'o- r', 'linewidth',1) 53 | hold on 54 | 55 | % compute approximation to this exact polynomial with comp. coefficients b 56 | 57 | approx = A*b; 58 | plot(x,approx,'*- b', 'linewidth',1) 59 | hold off 60 | 61 | str_xlabel = ['poly.degree d=', num2str(d)]; 62 | 63 | legend('exact sin(pi*x(i)/5) + x(i)/5',str_xlabel ); 64 | 65 | xlabel('x') 66 | 67 | % computation of the relative error as 68 | % norm(approx. value - true value) / norm(true value) 69 | e1=norm(y'- approx)/norm(y') 70 | -------------------------------------------------------------------------------- /Matlab_code/PowerM.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Power method 3 | % ---------------------------------------- 4 | clc 5 | clear all 6 | close all 7 | eps = 1e-7; 8 | fig = figure; 9 | 10 | for i =1:4 11 | if(i==1) 12 | % Matrix not diagonalizable 13 | % n=2; 14 | % A =[0 10;0 0]; 15 | % Matrix has two real eigenvalues with the same sign 16 | n=3; 17 | A =[5 0 0;0 2 0;0 0 -5]; 18 | elseif (i==2) 19 | % Matrix has four real eigenvalues with the same sign 20 | n =4; 21 | A=[3,7,8,9;5,-7,4,-7;1,-1,1,-1;9,3,2,5]; 22 | elseif (i ==3) 23 | % Largest eigenvalue is complex 24 | n =3; 25 | A =[0 -5 2; 6 0 -12; 1 3 0]; 26 | elseif (i==4) 27 | n =2; 28 | A =[7 -2;3 0]; 29 | n=5; 30 | A=rand(n); 31 | 32 | end 33 | 34 | % get reference values of eigenvalues 35 | exact_lambda = eig(A); 36 | 37 | % set initial guess for the eigenvector x0 38 | x0=rand(n,1); 39 | x0=x0/norm(x0); 40 | lambda0 = inf ; 41 | % lambda1 = 0; 42 | lambdavec =[]; 43 | % counter for number of iterations 44 | count =1; 45 | % main loop in the power method 46 | 47 | while (count <1000) 48 | 49 | y1=A*x0; 50 | 51 | % compute approximate eigenvector 52 | 53 | x1=y1/norm(y1); 54 | 55 | % compute approximate eigenvalue 56 | lambda1 = transpose(x1)*A*x1; 57 | 58 | lambdavec(count)= lambda1 ; 59 | x0=x1; 60 | if(abs(lambda1 - lambda0 ) 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "Poisson.h" 11 | 12 | PetscErrorCode Solve(Mat S, Vec h2b, Vec u, PetscInt method, bool VERBOSE) { 13 | PetscErrorCode ierr; 14 | KSP ksp; 15 | KSPConvergedReason convergedReason; 16 | PC preconditioner; 17 | PetscInt number_of_iterations; 18 | 19 | ierr = KSPCreate(PETSC_COMM_WORLD, &ksp); CHKERRQ(ierr); 20 | ierr = KSPSetOperators(ksp, S, S); CHKERRQ(ierr); 21 | //ierr = KSPSetOperators(ksp, S, S, DIFFERENT_NONZERO_PATTERN); CHKERRQ(ierr); 22 | 23 | ierr = KSPGetPC(ksp, &preconditioner); CHKERRQ(ierr); 
24 | if (method == METHOD_JACOBI) { 25 | ierr = Jacobi(preconditioner); CHKERRQ(ierr); 26 | } else if (method == METHOD_GAUSS_SEIDEL) { 27 | ierr = GaussSeidel(preconditioner); CHKERRQ(ierr); 28 | } else if (method == METHOD_SOR) { 29 | ierr = SOR(preconditioner); CHKERRQ(ierr); 30 | } else if (method == METHOD_CG) { 31 | ierr = ConjugateGradient(ksp, preconditioner); CHKERRQ(ierr); 32 | } else if (method == METHOD_PCG) { 33 | ierr = PreconditionedConjugateGradient(ksp, preconditioner); CHKERRQ(ierr); 34 | } 35 | 36 | ierr = KSPSetFromOptions(ksp); CHKERRQ(ierr); 37 | 38 | ierr = KSPSolve(ksp, h2b, u); CHKERRQ(ierr); 39 | ierr = KSPGetIterationNumber(ksp, &number_of_iterations); CHKERRQ(ierr); 40 | 41 | ierr = KSPGetConvergedReason(ksp, &convergedReason); CHKERRQ(ierr); 42 | 43 | if (convergedReason < 0) { 44 | PetscPrintf(PETSC_COMM_WORLD, 45 | "KSP solver failed to converge! Reason: %d\n", convergedReason); 46 | } 47 | 48 | if (VERBOSE) { 49 | PetscPrintf(PETSC_COMM_WORLD, "Number of iterations: %d\n", number_of_iterations); 50 | } 51 | 52 | ierr = KSPDestroy(&ksp); CHKERRQ(ierr); 53 | 54 | return 0; 55 | } 56 | 57 | \end{lstlisting} 58 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/Solver.cpp: -------------------------------------------------------------------------------- 1 | 2 | /* Program for choosing different PETSc preconditioners. */ 3 | //***************************************************************** 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "Poisson.h" 11 | 12 | PetscErrorCode Solve(Mat S, Vec h2b, Vec u, PetscInt method, bool VERBOSE) { 13 | PetscErrorCode ierr; 14 | KSP ksp; 15 | KSPConvergedReason convergedReason; 16 | PC preconditioner; 17 | PetscInt number_of_iterations; 18 | 19 | ierr = KSPCreate(PETSC_COMM_WORLD, &ksp); CHKERRQ(ierr); 20 | ierr = KSPSetOperators(ksp, S, S); CHKERRQ(ierr); 21 | //ierr = KSPSetOperators(ksp, S, S, DIFFERENT_NONZERO_PATTERN); CHKERRQ(ierr); 22 | 23 | ierr = KSPGetPC(ksp, &preconditioner); CHKERRQ(ierr); 24 | if (method == METHOD_JACOBI) { 25 | ierr = Jacobi(preconditioner); CHKERRQ(ierr); 26 | } else if (method == METHOD_GAUSS_SEIDEL) { 27 | ierr = GaussSeidel(preconditioner); CHKERRQ(ierr); 28 | } else if (method == METHOD_SOR) { 29 | ierr = SOR(preconditioner); CHKERRQ(ierr); 30 | } else if (method == METHOD_CG) { 31 | ierr = ConjugateGradient(ksp, preconditioner); CHKERRQ(ierr); 32 | } else if (method == METHOD_PCG) { 33 | ierr = PreconditionedConjugateGradient(ksp, preconditioner); CHKERRQ(ierr); 34 | } 35 | 36 | ierr = KSPSetFromOptions(ksp); CHKERRQ(ierr); 37 | 38 | ierr = KSPSolve(ksp, h2b, u); CHKERRQ(ierr); 39 | ierr = KSPGetIterationNumber(ksp, &number_of_iterations); CHKERRQ(ierr); 40 | 41 | ierr = KSPGetConvergedReason(ksp, &convergedReason); CHKERRQ(ierr); 42 | 43 | if (convergedReason < 0) { 44 | PetscPrintf(PETSC_COMM_WORLD, 45 | "KSP solver failed to converge! Reason: %d\n", convergedReason); 46 | } 47 | 48 | if (VERBOSE) { 49 | PetscPrintf(PETSC_COMM_WORLD, "Number of iterations: %d\n", number_of_iterations); 50 | } 51 | 52 | ierr = KSPDestroy(&ksp); CHKERRQ(ierr); 53 | 54 | return 0; 55 | } 56 | -------------------------------------------------------------------------------- /Matlab_code/Hornersrule_5roots.m: -------------------------------------------------------------------------------- 1 | % Polynomial p(x)=(x-1)^2(x-2)(x-3)(x-4)(x-5) 2 | % Polynomial evaluation using Horner's rule. 
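% Horner's rule evaluates p(x) = a_1 + a_2*x + ... + a_d*x^(d-1) with the
% recurrence P = a_d; P = x*P + a_i for i = d-1,...,1; the helper
% evaluate_polynomial_by_Horners_rule also returns the rounding-error bound
% bp = 2*(d-1)*eps*(|a_1| + |a_2||x| + ... + |a_d||x|^(d-1)).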
3 | % Computation of error bounds. 4 | 5 | clear 6 | close all 7 | clc 8 | 9 | N = 8e3; 10 | 11 | %exact coefficients of polynomial p(x) = (x -1)^2(x-2)(x-3)(x-4)(x-5) 12 | 13 | %input interval for p(x): can be changed by user 14 | 15 | x = linspace(-1.0, 7,N); 16 | 17 | %Get coefficients of polynomial p(x) = (x -1)^2(x-2)(x-3)(x-4)(x-5) 18 | syms t; 19 | 20 | a=double(coeffs((t-1).^2*(t-2)*(t-3)*(t-4)*(t-5),t)) 21 | 22 | %eps = 2.2204460492503131e-16; %machine epsilon in MATLAB 23 | eps = 0.5e-16; 24 | 25 | y_horner = zeros(N,1); 26 | 27 | for i = 1:N 28 | 29 | [P,bp] = evaluate_polynomial_by_Horners_rule(a,x(i),eps); 30 | y_horner(i) = P; 31 | y_horner_upper(i) = P + bp; 32 | y_horner_lower(i) = P - bp; 33 | 34 | end 35 | 36 | y_horner = zeros(N,1); 37 | 38 | y=0; 39 | 40 | for i = 1:N 41 | 42 | [P,bp] = evaluate_polynomial_by_Horners_rule(a,x(i),eps); 43 | y_horner(i) = P; 44 | y_horner_upper(i) = P + bp; 45 | y_horner_lower(i) = P - bp; 46 | 47 | error(i) = abs(bp/P); 48 | 49 | log_error(i) = -log(abs(bp/P)); 50 | 51 | % here we compute error between computed and exact values of polynomial at x(i) 52 | ComputedErrors(i) = P- (x(i)-1).^2*(x(i)-2)*(x(i)-3)*(x(i)-4)*(x(i)-5); 53 | y(i) = (x(i)-1).^2*(x(i)-2)*(x(i)-3)*(x(i)-4)*(x(i)-5); 54 | 55 | LogCompEr(i) = -log(abs(ComputedErrors(i)./P)); 56 | 57 | end 58 | 59 | figure(1) 60 | plot(x,y_horner,'k.') 61 | hold on 62 | plot(x,y,'r','linewidth',2) 63 | legend('Horners rule (8000 points)', 'exact p(x) ') 64 | xlabel('Input interval for x') 65 | 66 | hold off 67 | 68 | figure(2) 69 | plot(x,log_error); 70 | 71 | hold on 72 | plot(x, LogCompEr, '. r'); 73 | 74 | legend(' estimated bound', ' computed bound ') 75 | 76 | xlabel('input interval for x') 77 | 78 | hold off 79 | 80 | figure(3) 81 | plot(x,y,'k',x,y_horner_upper,'r--',x,y_horner_lower,'b--') 82 | xlabel('input interval for x') 83 | legend('exact p(x)','upper bound','lower bound') 84 | 85 | -------------------------------------------------------------------------------- /Matlab_code/FitFunctionNormaleq.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Solution of least squares problem min_x || Ax - y ||_2 3 | % using the method of normal equations. 4 | % Matrix A is constructed as a Vandermonde matrix. 
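% The normal equations A'*A*c = A'*y are solved below via a Cholesky
% factorization of A'*A; since cond(A'*A) = cond(A)^2, this approach is less
% robust than QR or SVD when A is ill-conditioned.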
5 | % Program performs fitting to the function y = sin(pi*x/5) + x/5 6 | % ---------------------------------------- 7 | 8 | d=5; % degree of the polynomial 9 | m=10;%number of discretization points or rows in the matrix A 10 | 11 | x=zeros(1,m); 12 | y=zeros(1,m); 13 | A=[]; 14 | for i=1:1:m 15 | x = linspace(-10.0,10.0,m); 16 | % exact function which should be approximated 17 | y(i)= sin(pi*x(i)/5) + x(i)/5; 18 | end 19 | 20 | % construction of a Vamdermonde matrix 21 | 22 | for i=1:1:m 23 | for j=1:1:d+1 24 | A(i,j)=power(x(i),j-1); 25 | end 26 | end 27 | 28 | % computing the right hand side in the method of normal equations 29 | c=A'*y'; 30 | 31 | % computing matrix in the left hand side in the method of normal equations 32 | C=A'*A; 33 | 34 | l=zeros(d+1); 35 | 36 | % solution of the normal equation using Cholesky decomposition 37 | 38 | for j=1:1:d+1 39 | s1=0; 40 | for k=1:1:j-1 41 | s1=s1+l(j,k)*l(j,k); 42 | end 43 | l(j,j)=(C(j,j)-s1)^(1/2); 44 | for i=j+1:1:d+1 45 | s2=0; 46 | for k=1:1:j-1 47 | s2=s2+l(i,k)*l(j,k); 48 | end 49 | l(i,j)=(C(i,j)-s2)/l(j,j); 50 | end 51 | end 52 | for i=1:1:d+1 53 | for k=1:1:i-1 54 | c(i)=c(i)-c(k)*l(i,k); 55 | end 56 | c(i)=c(i)/l(i,i); 57 | end 58 | for i=d+1:-1:1 59 | for k=d+1:-1:i+1 60 | c(i)=c(i)-c(k)*l(k,i); 61 | end 62 | c(i)=c(i)/l(i,i); 63 | end 64 | 65 | figure(1) 66 | plot(x,y,'o- r', 'linewidth',1) 67 | hold on 68 | 69 | % compute approximation to this exact polynomial with comp. coefficients c 70 | 71 | approx = A*c; 72 | plot(x,approx,'*- b', 'linewidth',1) 73 | hold off 74 | 75 | str_xlabel = ['poly.degree d=', num2str(d)]; 76 | 77 | legend('exact sin(pi*x(i)/5) + x(i)/5',str_xlabel); 78 | 79 | xlabel('x') 80 | 81 | % computation of the relative error as 82 | % norm(approx. value - true value) / norm(true value) 83 | e1=norm(y'- approx)/norm(y') 84 | -------------------------------------------------------------------------------- /Matlab_code/InverseIteration.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Inverse Iteration or Inverse Power method 3 | % Computes eigenvalue closest to sigma and corresponding eigenvector 4 | % ---------------------------------------- 5 | clc 6 | clear all 7 | close all 8 | eps = 1e-17; 9 | fig = figure; 10 | 11 | for i =1:4 12 | if(i==1) 13 | % Matrix not diagonalizable 14 | n=2; 15 | A =[0 10;0 0]; 16 | % Matrix has two real eigenvalues with the same sign 17 | % n=3; 18 | % A =[5 0 0;0 2 0;0 0 -5]; 19 | elseif (i==2) 20 | % Matrix has four real eigenvalues with the same sign 21 | n =4; 22 | A=[3,7,8,9;5,-7,4,-7;1,-1,1,-1;9,3,2,5]; 23 | elseif (i ==3) 24 | % Largest eigenvalue is complex 25 | n =3; 26 | A =[0 -5 2; 6 0 -12; 1 3 0]; 27 | elseif (i==4) 28 | % n =2; 29 | % A =[7 -2;3 0]; 30 | n=5; 31 | A=rand(5,5); 32 | end 33 | 34 | % get reference values of eigenvalues 35 | exact_lambda = eig(A); 36 | 37 | %make orthogonalization 38 | Q=orth(rand(n,n)); 39 | 40 | A= Q'*A*Q; 41 | 42 | % set initial guess for the eigenvector x0 43 | x0=rand(n,1); 44 | x0=x0/norm(x0); 45 | lambda0 = inf; 46 | % choose a shift: should be choosen as closest to the desired eigenvalue 47 | sigma=10; 48 | % lambda1 = 0; 49 | lambdavec =[]; 50 | count =1; 51 | % main loop in the power method 52 | while (count <1000) 53 | A_shift = A - sigma*eye(size(A)); 54 | y1= inv(A_shift)*x0; 55 | x1=y1/norm(y1); 56 | lambda1 = transpose(x1)*A*x1; 57 | lambdavec(count)= lambda1 ; 58 | x0=x1; 59 | if(abs(lambda1 - lambda0 )tol && iter<1000 17 | for 
j=1:(length(G)-1) 18 | for k=j+1:length(G) 19 | [G,J]=oneSidedJacobiRot(G,J,j,k); 20 | end 21 | end 22 | 23 | [sum,v]=off(G'*G); 24 | iter=iter+1; 25 | end 26 | 27 | % elements in the matrix sigma will be the two-norm 28 | % of i-column of the matrix G 29 | 30 | for i=1:length(G) 31 | sigma(i)=norm(G(:,i)); 32 | end 33 | 34 | U=[]; 35 | 36 | for i=1:length(G) 37 | U=[U,G(:,i)/sigma(i)]; 38 | end 39 | 40 | V=J; 41 | 42 | S=diag(sigma); 43 | 44 | end 45 | 46 | % compute one-sided Jacobi rotation for G 47 | 48 | function [G,J] = oneSidedJacobiRot(G,J,j,k ) 49 | 50 | tol=0.0001; 51 | A=(G'*G); 52 | ajj=A(j,j); 53 | ajk=A(j,k); 54 | akk=A(k,k); 55 | 56 | if abs(ajk)>tol 57 | tau=(ajj-akk)/(2*ajk); 58 | t=sign(tau)/(abs(tau)+sqrt(1+tau^2)); 59 | c=1/(sqrt(1+t^2)); 60 | s=c*t; 61 | 62 | R=eye(length(G)); 63 | R(j,j)=c; 64 | R(k,k)=c; 65 | R(j,k)=-s; 66 | R(k,j)=s; 67 | 68 | G=G*R; 69 | 70 | % if eigenvectors are desired 71 | J=J*R; 72 | end 73 | end 74 | 75 | % Compute off's: the square root of the sum of squares 76 | % of the upper off-diagonal elements. 77 | % v is a matrix that holds the information needed. 78 | 79 | function [sum,v] = off(A) 80 | 81 | sum=0; 82 | %create array v for off's: 83 | % in the first row will be sum of square root of the squares of computed off's 84 | % in the second row: the index j 85 | % in the third row: the index k 86 | 87 | v=[0;0;0]; 88 | for i=1:(length(A)-1) 89 | for j=(i+1):length(A) 90 | sum=sum+A(i,j)*A(i,j); 91 | v=[v,[sqrt(A(i,j)*A(i,j));i;j]]; 92 | end 93 | end 94 | sum=sqrt(sum); 95 | v=v(:,2:end); 96 | 97 | end 98 | -------------------------------------------------------------------------------- /Matlab_code/Hornersrule_10roots.m: -------------------------------------------------------------------------------- 1 | % Polynomial p(x) = (x -1)^2(x-2)(x-3)(x-4)(x-5)(x-7)(x-9)(x-11)(x-15)(x-17) 2 | % Polynomial evaluation using Horner's rule. 3 | % Computation of error bounds. 
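% With ten roots the polynomial coefficients are much larger than in the
% five-root example, so the relative bound bp/|p(x)| grows sharply near the
% roots, where |p(x)| is small; the -log error plots below illustrate this.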
4 | 5 | clear 6 | close all 7 | clc 8 | 9 | N = 8e3; 10 | 11 | % exact coefficients of polynomial 12 | % p(x) = (x -1)^2(x-2)(x-3)(x-4)(x-5)(x-7)(x-9)(x-11)(x-15)(x-17) 13 | 14 | %input interval for p(x) 15 | 16 | x = linspace(-1.0, 20,N); 17 | 18 | %Get coefficients of polynomial p(x) = (x -1)^2(x-2)(x-3)(x-4)(x-5) 19 | syms t; 20 | 21 | a=double(coeffs((t-1).^2*(t-2)*(t-3)*(t-4)*(t-5)*(t-7)*(t-9)*(t-11)*(t-15)*(t-17),t)) 22 | 23 | %eps = 2.2204460492503131e-16; %machine epsilon in MATLAB 24 | eps = 0.5e-16; 25 | 26 | y_horner = zeros(N,1); 27 | 28 | for i = 1:N 29 | 30 | [P,bp] = evaluate_polynomial_by_Horners_rule(a,x(i),eps); 31 | y_horner(i) = P; 32 | y_horner_upper(i) = P + bp; 33 | y_horner_lower(i) = P - bp; 34 | 35 | end 36 | 37 | y_horner = zeros(N,1); 38 | 39 | y=0; 40 | 41 | for i = 1:N 42 | 43 | [P,bp] = evaluate_polynomial_by_Horners_rule(a,x(i),eps); 44 | y_horner(i) = P; 45 | y_horner_upper(i) = P + bp; 46 | y_horner_lower(i) = P - bp; 47 | 48 | error(i) = abs(bp/P); 49 | 50 | log_error(i) = -log(abs(bp/P)); 51 | 52 | % here we compute error between computed and exact values of polynomial at x(i) 53 | ComputedErrors(i) = P- (x(i)-1).^2*(x(i)-2)*(x(i)-3)*(x(i)-4)*(x(i)-5)*(x(i)- 7)* 54 | (x(i) - 9)*(x(i) - 11)*(x(i) -15)*(x(i)-17); 55 | 56 | y(i) = (x(i)-1).^2*(x(i)-2)*(x(i)-3)*(x(i)-4)*(x(i)-5)*(x(i)- 7)*(x(i) - 9)*(x(i) -11)* 57 | (x(i) -15)*(x(i)-17); 58 | 59 | LogCompEr(i) = -log(abs(ComputedErrors(i)./P)); 60 | 61 | end 62 | 63 | figure(1) 64 | plot(x,y_horner,'k.') 65 | hold on 66 | plot(x,y,'r','linewidth',2) 67 | legend('Horners rule (8000 points)', 'exact p(x) ') 68 | xlabel('Input interval for x') 69 | 70 | hold off 71 | 72 | figure(2) 73 | plot(x,log_error); 74 | 75 | hold on 76 | plot(x, LogCompEr, '. r'); 77 | 78 | legend(' estimated bound', ' computed bound ') 79 | 80 | xlabel('input interval for x') 81 | 82 | hold off 83 | 84 | figure(3) 85 | plot(x,y,'k',x,y_horner_upper,'r--',x,y_horner_lower,'b--') 86 | xlabel('input interval for x') 87 | legend('exact p(x)','upper bound','lower bound') 88 | 89 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/CG.cpp: -------------------------------------------------------------------------------- 1 | 2 | /* 3 | * Program for two versions of the Conjugate gradient method. 4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "Poisson.h" 12 | /** 13 | * Conjugate gradient method using inbuilt PETSc functions. 14 | */ 15 | 16 | PetscErrorCode ConjugateGradient(KSP ksp, PC preconditioner) { 17 | PetscErrorCode ierr; 18 | 19 | ierr = KSPSetType(ksp, KSPCG); 20 | ierr = PCSetType(preconditioner, PCNONE); CHKERRQ(ierr); 21 | 22 | return 0; 23 | } 24 | 25 | /** 26 | * An implementation of the conjugate gradient method 27 | * not utilizing the PETSc KSP interface, but 28 | * implementing the matrix/vector operations directly. 
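 *
 * One CG iteration below: z = A p_k;  nu_k = r_{k-1}^T r_{k-1} / p_k^T z;
 * x_k = x_{k-1} + nu_k p_k;  r_k = r_{k-1} - nu_k z;
 * mu_{k+1} = r_k^T r_k / r_{k-1}^T r_{k-1};  p_{k+1} = r_k + mu_{k+1} p_k;
 * the loop stops when ||r_k||_2 <= tol.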
29 | */ 30 | PetscErrorCode ConjugateGradient_full(Mat A, Vec b, Vec x, bool VERBOSE) { 31 | PetscErrorCode ierr; 32 | PetscInt k=0, n; 33 | PetscScalar mu, nu, rTr, pTz, rNorm, tol = 1e-12; 34 | Vec p, r, z; 35 | 36 | ierr = MatGetSize(A, &n, NULL); CHKERRQ(ierr); 37 | 38 | CreateVector(&p, n); 39 | CreateVector(&r, n); 40 | CreateVector(&z, n); 41 | 42 | VecCopy(b, p); 43 | VecCopy(b, r); 44 | 45 | ierr = VecAssemblyBegin(p); CHKERRQ(ierr); 46 | ierr = VecAssemblyEnd(p); CHKERRQ(ierr); 47 | ierr = VecAssemblyBegin(r); CHKERRQ(ierr); 48 | ierr = VecAssemblyEnd(r); CHKERRQ(ierr); 49 | ierr = VecAssemblyBegin(z); CHKERRQ(ierr); 50 | ierr = VecAssemblyEnd(z); CHKERRQ(ierr); 51 | 52 | ierr = VecZeroEntries(x); 53 | 54 | // Pre-compute first (r^T r) 55 | ierr = VecDot(r, r, &rTr); CHKERRQ(ierr); 56 | 57 | do { 58 | k++; 59 | 60 | // z = A * p_k 61 | ierr = MatMult(A, p, z); CHKERRQ(ierr); 62 | 63 | // nu_k = r_{k-1}^T r_{k-1} / p_k^T z 64 | ierr = VecDot(p, z, &pTz); CHKERRQ(ierr); 65 | nu = rTr / pTz; 66 | 67 | // x_k = x_{k-1} + nu_k p_k 68 | ierr = VecAXPY(x, nu, p); CHKERRQ(ierr); 69 | 70 | // r_k = r_{k-1} - nu_k z 71 | ierr = VecAXPY(r, -nu, z); CHKERRQ(ierr); 72 | 73 | // r_k^T r_k 74 | mu = 1 / rTr; 75 | ierr = VecDot(r, r, &rTr); CHKERRQ(ierr); 76 | 77 | // mu_{k+1} 78 | mu = rTr * mu; 79 | 80 | // p_{k+1} = r_k + mu_{k+1} p_k 81 | ierr = VecAYPX(p, mu, r); 82 | 83 | // || r_k ||_2 84 | ierr = VecNorm(r, NORM_2, &rNorm); 85 | } while (rNorm > tol); 86 | 87 | if (VERBOSE) { 88 | PetscPrintf(PETSC_COMM_WORLD, "Number of iterations: %d\n", k); 89 | } 90 | 91 | return 0; 92 | } 93 | 94 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/CG.cpp~: -------------------------------------------------------------------------------- 1 | \begin{lstlisting} 2 | /** 3 | * Program for two versions of the Conjugate gradient method. 4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "Poisson.h" 12 | /** 13 | * Conjugate gradient method using inbuilt PETSc functions. 14 | */ 15 | 16 | PetscErrorCode ConjugateGradient(KSP ksp, PC preconditioner) { 17 | PetscErrorCode ierr; 18 | 19 | ierr = KSPSetType(ksp, KSPCG); 20 | ierr = PCSetType(preconditioner, PCNONE); CHKERRQ(ierr); 21 | 22 | return 0; 23 | } 24 | 25 | /** 26 | * An implementation of the conjugate gradient method 27 | * not utilizing the PETSc KSP interface, but 28 | * implementing the matrix/vector operations directly. 
29 | */ 30 | PetscErrorCode ConjugateGradient_full(Mat A, Vec b, Vec x, bool VERBOSE) { 31 | PetscErrorCode ierr; 32 | PetscInt k=0, n; 33 | PetscScalar mu, nu, rTr, pTz, rNorm, tol = 1e-12; 34 | Vec p, r, z; 35 | 36 | ierr = MatGetSize(A, &n, NULL); CHKERRQ(ierr); 37 | 38 | CreateVector(&p, n); 39 | CreateVector(&r, n); 40 | CreateVector(&z, n); 41 | 42 | VecCopy(b, p); 43 | VecCopy(b, r); 44 | 45 | ierr = VecAssemblyBegin(p); CHKERRQ(ierr); 46 | ierr = VecAssemblyEnd(p); CHKERRQ(ierr); 47 | ierr = VecAssemblyBegin(r); CHKERRQ(ierr); 48 | ierr = VecAssemblyEnd(r); CHKERRQ(ierr); 49 | ierr = VecAssemblyBegin(z); CHKERRQ(ierr); 50 | ierr = VecAssemblyEnd(z); CHKERRQ(ierr); 51 | 52 | ierr = VecZeroEntries(x); 53 | 54 | // Pre-compute first (r^T r) 55 | ierr = VecDot(r, r, &rTr); CHKERRQ(ierr); 56 | 57 | do { 58 | k++; 59 | 60 | // z = A * p_k 61 | ierr = MatMult(A, p, z); CHKERRQ(ierr); 62 | 63 | // nu_k = r_{k-1}^T r_{k-1} / p_k^T z 64 | ierr = VecDot(p, z, &pTz); CHKERRQ(ierr); 65 | nu = rTr / pTz; 66 | 67 | // x_k = x_{k-1} + nu_k p_k 68 | ierr = VecAXPY(x, nu, p); CHKERRQ(ierr); 69 | 70 | // r_k = r_{k-1} - nu_k z 71 | ierr = VecAXPY(r, -nu, z); CHKERRQ(ierr); 72 | 73 | // r_k^T r_k 74 | mu = 1 / rTr; 75 | ierr = VecDot(r, r, &rTr); CHKERRQ(ierr); 76 | 77 | // mu_{k+1} 78 | mu = rTr * mu; 79 | 80 | // p_{k+1} = r_k + mu_{k+1} p_k 81 | ierr = VecAYPX(p, mu, r); 82 | 83 | // || r_k ||_2 84 | ierr = VecNorm(r, NORM_2, &rNorm); 85 | } while (rNorm > tol); 86 | 87 | if (VERBOSE) { 88 | PetscPrintf(PETSC_COMM_WORLD, "Number of iterations: %d\n", k); 89 | } 90 | 91 | return 0; 92 | } 93 | \end{lstlisting} 94 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/DiscretePoisson2D.cpp: -------------------------------------------------------------------------------- 1 | 2 | /* Program for generatation of the discretized Laplacian */ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | const PetscScalar A_amplitude = 12.; 10 | const PetscScalar f_amplitude = 1.; 11 | const PetscScalar c_x = 1.; 12 | const PetscScalar c_y = 1.; 13 | const PetscScalar poisson_x0 = 0.5; 14 | const PetscScalar poisson_y0 = 0.5; 15 | 16 | /** 17 | * Compute coefficient matrices. 
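 * For reference: this routine assembles only the scaled right-hand side h2b,
 * with entries h2b(i*n + j) = h^2 * f(x_i, y_j) / a(x_i, y_j), where
 * x_i = (i+1)h, y_j = (j+1)h and
 *     f(x,y) = f_amplitude * exp(-((x - x0)^2/(2 c_x^2) + (y - y0)^2/(2 c_y^2))),
 *     a(x,y) = 1 + A_amplitude * exp(-((x - x0)^2/(2 c_x^2) + (y - y0)^2/(2 c_y^2))),
 * i.e. the same Gaussian load and coefficient as in the Matlab programs.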
18 | * 19 | * n: Number of rows of matrices 20 | * h: Timestep length 21 | * C: n-by-n matrix 22 | * D: (n*n)-by-(n*n) matrix 23 | * f: 24 | **/ 25 | PetscErrorCode DiscretePoisson2D_coeffs(PetscInt n, PetscScalar h, Vec *h2b) { 26 | PetscErrorCode ierr; 27 | PetscInt i, j, idx2[n*n]; 28 | PetscScalar *vecb = new PetscScalar[n*n]; 29 | 30 | // Compute C, D and f 31 | PetscScalar xarg, yarg, expfunc, a, f; 32 | for (i = 0; i < n; i++) { 33 | xarg = (((i+1) * h - poisson_x0)) / c_x; 34 | 35 | for (j = 0; j < n; j++) { 36 | idx2[i*n + j] = i*n + j; 37 | 38 | yarg = (((j+1) * h - poisson_y0)) / c_y; 39 | expfunc = exp(-(xarg*xarg/2 + yarg*yarg/2)); 40 | 41 | f = f_amplitude * expfunc; 42 | a = 1 + A_amplitude * expfunc; 43 | 44 | vecb[i*n + j] = h*h * f / a; 45 | } 46 | } 47 | 48 | ierr = VecSetValues(*h2b, n*n, idx2, vecb, INSERT_VALUES); CHKERRQ(ierr); 49 | 50 | delete [] vecb; 51 | 52 | return 0; 53 | } 54 | PetscErrorCode DiscretePoisson2D(PetscInt n, Mat *A) { 55 | PetscErrorCode ierr; 56 | PetscInt i, k, curr, next, matsize = n*n, idx[matsize]; 57 | PetscScalar *matrep = new PetscScalar[matsize*matsize]; 58 | 59 | // Initialize all elements to 0 60 | for (i = 0; i < matsize; i++) { 61 | // Create index vectors 62 | idx[i] = i; 63 | 64 | for (k = 0; k < matsize; k++) { 65 | matrep[i*matsize + k] = 0; 66 | } 67 | } 68 | 69 | // Set main diagonal 70 | for (i = 0; i < matsize; i++) 71 | matrep[i*matsize + i] = 4.; 72 | 73 | // 1st and 2nd off-diagonals 74 | for (k = 0; k < n; k++) { 75 | for (i = 0; i < n-1; i++) { 76 | curr = (n*k + i); 77 | next = (n*k + i + 1); 78 | 79 | matrep[curr*matsize + next] = -1; 80 | matrep[next*matsize + curr] = -1; 81 | } 82 | } 83 | 84 | // 3rd and 4th off-diagonals 85 | for (i = 0; i < n*(n-1); i++) { 86 | matrep[i*matsize + (i+n)] = -1; 87 | matrep[(i+n)*matsize + i] = -1; 88 | } 89 | 90 | ierr = MatSetValues(*A, matsize, idx, matsize, idx, matrep, INSERT_VALUES); 91 | CHKERRQ(ierr); 92 | 93 | delete [] matrep; 94 | 95 | return 0; 96 | } 97 | -------------------------------------------------------------------------------- /Matlab_code/Poisson2D_Chol.m: -------------------------------------------------------------------------------- 1 | % main program for the solution of Poisson's equation 2 | % - a laplace = f in 2D using Cholesky decomposition 3 | 4 | close all 5 | %Define input parameters 6 | n=20; % number of inner nodes in one direction. 7 | 8 | A_1 = 10; % amplitude 1 for the rhs 9 | A_2 = 10; % amplitude 2 for the rhs 10 | 11 | h = 1/(n+1); % define step length 12 | 13 | % ---------------------------------------- 14 | % Computing all matrices and vectors 15 | % ---------------------------------------- 16 | % Generate a n*n by n*n stiffness matrix 17 | S = DiscretePoisson2D(n); 18 | 19 | % factorize A=L*L^T using Cholesky decomposition 20 | [L]=Cholesky(S); 21 | 22 | %% generate coefficient matrix of a((x_1)_i,(x_2)_j) = a(i*h,j*h) 23 | C = zeros(n,n); 24 | for j=1:n 25 | for i=1:n 26 | C(i,j) = 1; 27 | end 28 | end 29 | 30 | %% compute load vector f 31 | 32 | f=zeros(n^2,1); 33 | for j=1:n 34 | for i=1:n 35 | f(n*(i-1)+j)= A_1*exp(-((i*h-0.25)^2/0.02... 36 | +(j*h-0.25)^2/0.02))+ A_2*exp(-((i*h-0.75)^2/0.02... 37 | +(j*h-0.75)^2/0.02)); 38 | end 39 | end 40 | 41 | % ---------------------------------------- 42 | % Solving the linear system of equations using Gaussian elimination 43 | % ---------------------------------------- 44 | % We have system A u = 1/h^2 (C*L*L^T) u = f 45 | 46 | % 1. 
Compute vector of right hand side 47 | % as b(i,j)=f(i,j)/a(i,j) 48 | 49 | b=zeros(n^2,1); 50 | for j=1:n 51 | for i=1:n 52 | b(n*(i-1)+j)=f(n*(i-1)+j)/C(i,j); % Use coefficient matrix C 53 | 54 | end 55 | end 56 | 57 | % We now have system to solve: 1/h^2 A u = b 58 | % Use first LU decomposition: 1/h^2 (L L^T) u = b 59 | % 2. Compute v = L^(-1)*b by forward substitution. 60 | 61 | v=ForwSub(L,b); 62 | 63 | % We now have system 1/h^2 L^T u = v 64 | % 3. Compute w = L^T^(-1)*v by backward substitution. 65 | 66 | w=BackSub(L',v); 67 | 68 | % 4. We now have system 1/h^2 u = w 69 | % Compute finally solution as: u=h^2*w 70 | u=h^2*w; 71 | 72 | % ---------------------------------------- 73 | % Plots and figures. 74 | % ---------------------------------------- 75 | 76 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 77 | Z = zeros(n+2,n+2); 78 | for j=1:n 79 | for i=1:n 80 | Z(i+1,j+1) = u(n*(i-1)+j); 81 | end 82 | end 83 | 84 | %% plotting 85 | x1=0:h:1; 86 | y1=0:h:1; 87 | 88 | figure(1) 89 | surf(x1,y1,Z) % same plot as above, (x1, y1 are vectors) 90 | view(2) 91 | colorbar 92 | xlabel('x_1') 93 | ylabel('x_2') 94 | zlabel('u(x_1,x_2)') 95 | title( ['u(x_1,x_2) with N = ',num2str(n)]) 96 | 97 | figure(2) 98 | surf(x1,y1,Z) % same plot as above 99 | colorbar 100 | xlabel('x_1') 101 | ylabel('x_2') 102 | zlabel('u(x_1,x_2)') 103 | title( ['u(x_1,x_2) with N = ',num2str(n)]) 104 | -------------------------------------------------------------------------------- /Matlab_code/MethodQR_shift.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Method of QR iteration with shift sigma=A(n,n) 3 | % ---------------------------------------- 4 | clc 5 | %clear all 6 | %close all 7 | eps = 1e-09; 8 | fig = figure; 9 | N=10; 10 | for i =1:6 11 | if(i==1) 12 | n=N; 13 | A=hilb(N); 14 | elseif (i==2) 15 | n=20; 16 | A=hilb(20); 17 | elseif (i ==3) 18 | % Largest eigenvalue is complex 19 | n =3; 20 | A =[0 -5 2; 6 0 -12; 1 3 0]; 21 | elseif (i==4) 22 | % Matrix has four real eigenvalues 23 | n =4; 24 | A=[3,7,8,9;5,-7,4,-7;1,-1,1,-1;9,3,2,5]; 25 | elseif (i==5) 26 | n =5; 27 | % 28 | A=[3,7,8,9,12;5,-7,4,-7,8;1,1,-1,1,-1;4,3,2,1,7;9,3,2,5,4]; 29 | elseif (i==6) 30 | n=N; 31 | A= rand(N,N); 32 | end 33 | 34 | lambda0= inf(n,1); 35 | count = 1; 36 | iter =1; 37 | %choose shift 38 | %sigma=1.0; 39 | sigma=A(n,n); 40 | %sigma=A(1,1); 41 | 42 | % get exact eigenvalues in sorted order 43 | exact_lambda = sort(eig(A)); 44 | %%% Method of QR iteration with shift 45 | 46 | for k = 1:100 47 | 48 | A = A - sigma*eye(n); 49 | 50 | [Q,R] = qr(A); 51 | % end 52 | 53 | A = R*Q + sigma*eye(n); 54 | 55 | %compute shift 56 | sigma=A(n,n); 57 | 58 | % %%%%%%%%% Find eigenvalues from Real Schur block 59 | j =2; count =1; 60 | eigs = zeros(1,n); 61 | while (j <=n) 62 | %real eigenvalues 63 | if(abs(A(j,j-1)) < 1e-7) 64 | eigs(j-1) =A(j -1,j -1); 65 | count= j -1; 66 | else 67 | % Complex eigenvalues 68 | eigs(j-1: j)= eig(A(j -1:j,j -1:j)); 69 | count =j; 70 | j=j +1; 71 | end 72 | j=j +1; 73 | end 74 | if(count < length(eigs)) 75 | eigs(n)=A(n,n); 76 | end 77 | %******************************* 78 | 79 | computed_lambda = sort(eigs)'; 80 | 81 | if(norm(abs(computed_lambda - lambda0 )) 6 | #include 7 | #include 8 | #include 9 | 10 | const PetscScalar A_amplitude = 12.; 11 | const PetscScalar f_amplitude = 1.; 12 | const PetscScalar c_x = 1.; 13 | const PetscScalar c_y = 1.; 14 | const PetscScalar poisson_x0 = 0.5; 15 | const 
PetscScalar poisson_y0 = 0.5; 16 | 17 | /** 18 | * Compute coefficient matrices. 19 | * 20 | * n: Number of rows of matrices 21 | * h: Timestep length 22 | * C: n-by-n matrix 23 | * D: (n*n)-by-(n*n) matrix 24 | * f: 25 | **/ 26 | PetscErrorCode DiscretePoisson2D_coeffs(PetscInt n, PetscScalar h, Vec *h2b) { 27 | PetscErrorCode ierr; 28 | PetscInt i, j, idx2[n*n]; 29 | PetscScalar *vecb = new PetscScalar[n*n]; 30 | 31 | // Compute C, D and f 32 | PetscScalar xarg, yarg, expfunc, a, f; 33 | for (i = 0; i < n; i++) { 34 | xarg = (((i+1) * h - poisson_x0)) / c_x; 35 | 36 | for (j = 0; j < n; j++) { 37 | idx2[i*n + j] = i*n + j; 38 | 39 | yarg = (((j+1) * h - poisson_y0)) / c_y; 40 | expfunc = exp(-(xarg*xarg/2 + yarg*yarg/2)); 41 | 42 | f = f_amplitude * expfunc; 43 | a = 1 + A_amplitude * expfunc; 44 | 45 | vecb[i*n + j] = h*h * f / a; 46 | } 47 | } 48 | 49 | ierr = VecSetValues(*h2b, n*n, idx2, vecb, INSERT_VALUES); CHKERRQ(ierr); 50 | 51 | delete [] vecb; 52 | 53 | return 0; 54 | } 55 | PetscErrorCode DiscretePoisson2D(PetscInt n, Mat *A) { 56 | PetscErrorCode ierr; 57 | PetscInt i, k, curr, next, matsize = n*n, idx[matsize]; 58 | PetscScalar *matrep = new PetscScalar[matsize*matsize]; 59 | 60 | // Initialize all elements to 0 61 | for (i = 0; i < matsize; i++) { 62 | // Create index vectors 63 | idx[i] = i; 64 | 65 | for (k = 0; k < matsize; k++) { 66 | matrep[i*matsize + k] = 0; 67 | } 68 | } 69 | 70 | // Set main diagonal 71 | for (i = 0; i < matsize; i++) 72 | matrep[i*matsize + i] = 4.; 73 | 74 | // 1st and 2nd off-diagonals 75 | for (k = 0; k < n; k++) { 76 | for (i = 0; i < n-1; i++) { 77 | curr = (n*k + i); 78 | next = (n*k + i + 1); 79 | 80 | matrep[curr*matsize + next] = -1; 81 | matrep[next*matsize + curr] = -1; 82 | } 83 | } 84 | 85 | // 3rd and 4th off-diagonals 86 | for (i = 0; i < n*(n-1); i++) { 87 | matrep[i*matsize + (i+n)] = -1; 88 | matrep[(i+n)*matsize + i] = -1; 89 | } 90 | 91 | ierr = MatSetValues(*A, matsize, idx, matsize, idx, matrep, INSERT_VALUES); 92 | CHKERRQ(ierr); 93 | 94 | delete [] matrep; 95 | 96 | return 0; 97 | } 98 | 99 | \end{lstlisting} 100 | -------------------------------------------------------------------------------- /Matlab_code/MainBellspline.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Solution of least squares problem min_x || Ax - y ||_2 3 | % using the method of normal equations, QR decomposition 4 | % and SVD decomposition. 5 | % Matrix A is constructed using bellsplines. 6 | % Program performs fitting to the function y = sin(pi*x/5) + x/5 7 | % ---------------------------------------- 8 | 9 | clc 10 | clear 11 | clf 12 | close all 13 | format short 14 | 15 | % input interval on which we fit the function 16 | interval=10; 17 | 18 | % junction points 19 | 20 | T=linspace(-10,interval,7)'; 21 | 22 | % Define number of measurement points m 23 | m=30; 24 | x=linspace(-10,interval,m)'; 25 | 26 | %exact function to be fitted 27 | b=sin(pi*x/5) +x/5; 28 | 29 | % construct matrix A with bellsplines 30 | %Number of bellsplines should be number of junction points +2 31 | 32 | A=fbell(x,T); 33 | 34 | %solution of system Ax = b using different methods for solution 35 | % of least squares problem. 
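% For reference, a minimal sketch (not part of the original routines) of the
% three approaches, assuming A has full column rank:
%   normal equations (Cholesky):  xHat = (A'*A) \ (A'*b);
%   QR decomposition:             [Q,R] = qr(A,0);    xHat = R \ (Q'*b);
%   SVD:                          [U,S,V] = svd(A,0); xHat = V*(S \ (U'*b));
% The calls to LLSChol, LLSQR and LLSSVD below are expected to follow these
% approaches; see the corresponding files in Matlab_code.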
36 | tic 37 | % use method of normal equations 38 | xHatChol = LLSChol(A,b); 39 | toc 40 | tic 41 | %use SVD decomposition of A 42 | xHatSVD = LLSSVD(A,b); 43 | toc 44 | tic 45 | % use QR decomposition of A 46 | xHatQR = LLSQR(A,b); 47 | toc 48 | 49 | % compute condition number of A 50 | cond(A) 51 | 52 | % use iterative refinement of the obtained solution 53 | % via Newton's method 54 | % choose tolerance in Newton's method 55 | tol =0.2; 56 | 57 | y= newtonIR(A,xHatChol,b,tol); 58 | y1= newtonIR(A,xHatQR,b,tol); 59 | y2= newtonIR(A,xHatSVD,b,tol); 60 | 61 | % compute relative errors 62 | 63 | eC=norm(A*xHatChol-b)/norm(b); 64 | eS=norm(A*xHatSVD-b)/norm(b); 65 | eQ=norm(A*xHatQR-b)/norm(b); 66 | 67 | disp(' --------------Computed relative errors ------------------- ') 68 | disp(' Method of normal eq. QR SVD') 69 | disp('') 70 | 71 | disp([eC eS eQ ]) 72 | 73 | disp('Computed relative errors after iterative refinement via Newton method ') 74 | disp(' Method of normal eq. QR SVD') 75 | disp('') 76 | 77 | disp([norm(A*y-b)/norm(b) norm(A*y1-b)/norm(b) norm(A*y2-b)/norm(b)]) 78 | 79 | % Plot results 80 | 81 | figure(1) 82 | %plot(t,A,'linewidth',2) 83 | 84 | plot(x,A,'linewidth',2) 85 | 86 | m =size(A,2); 87 | str_xlabel = [' number of bellsplines=', num2str(m)]; 88 | title(str_xlabel) 89 | 90 | figure('Name','Cholesky') 91 | title('Cholesky') 92 | plot(x,b,'o- r', 'linewidth',2) 93 | hold on 94 | plot(x,A*xHatChol,' *- b', 'linewidth',2) 95 | legend('exact ', 'B-spline degree 3, Cholesky'); 96 | 97 | figure('Name','QR') 98 | plot(x,b,'o- r', 'linewidth',2) 99 | hold on 100 | plot(x,A*xHatQR,'* - b', 'linewidth',2) 101 | legend('exact ', 'B-spline degree 3, QR'); 102 | 103 | figure('Name','SVD') 104 | title('SVD') 105 | plot(x,b,'o- r', 'linewidth',2) 106 | hold on 107 | plot(x,A*xHatSVD,'*- b', 'linewidth',2) 108 | legend('exact ', 'B-spline degree 3, SVD'); 109 | -------------------------------------------------------------------------------- /Matlab_code/HessenbergQR.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Matlab program HessenbergQR.m: we will first reduce the matrix A to the upper 3 | % Hessenberg matrix and then compute it's QR factorization 4 | % ---------------------------------------- 5 | clc 6 | clear all 7 | close all 8 | eps = 1e-07; 9 | fig = figure; 10 | N=10; 11 | for i =1:6 12 | if(i==1) 13 | n=N; 14 | A=hilb(N); 15 | elseif (i==2) 16 | n=20; 17 | A=hilb(20); 18 | elseif (i ==3) 19 | % Largest eigenvalue is complex 20 | n =3; 21 | A =[0 -5 2; 6 0 -12; 1 3 0]; 22 | elseif (i==4) 23 | % Matrix has four real eigenvalues 24 | n =4; 25 | A=[3,7,8,9;5,-7,4,-7;1,-1,1,-1;9,3,2,5]; 26 | elseif (i==5) 27 | n =5; 28 | % 29 | A=[3,7,8,9,12;5,-7,4,-7,8;1,1,-1,1,-1;4,3,2,1,7;9,3,2,5,4]; 30 | elseif (i==6) 31 | n=N; 32 | A= rand(N,N); 33 | 34 | end 35 | 36 | lambda0= inf(n,1); 37 | count = 1; 38 | iter =1; 39 | 40 | % get exact eigenvalues in sorted order 41 | exact_lambda = sort(eig(A)); 42 | 43 | % First we reduce matrix A to upper Hessenberg 44 | 45 | for k=1:n - 2 46 | x= A(k+1:n,k); 47 | u=x; 48 | u(1) = u(1)+ sign(x(1))*norm(x); 49 | u=u/norm (u); 50 | P= eye(n-k) - 2*(u*u') ; 51 | A(k +1:n,k:n) =P*A(k +1:n,k:n) ; 52 | A(1:n,k +1:n)=A(1:n,k+1:n)*P; 53 | end 54 | 55 | % ---------------------------------------- 56 | 57 | for k = 1:1000 58 | [Q,R] = qr(A); 59 | A = R*Q; 60 | end 61 | 62 | %%%%%%%%%% Find eigenvalues from Real Schur block 63 | j =2; count =1; 64 | eigs = zeros(1,n); 65 | while 
(j <=n) 66 | %real eigenvalues 67 | if(abs(A(j,j-1)) < 1e-3) 68 | eigs(j-1) =A(j -1,j -1); 69 | count= j -1; 70 | else 71 | % Complex eigenvalues 72 | eigs(j-1: j)= eig(A(j -1:j,j -1:j)); 73 | count =j; 74 | j=j +1; 75 | end 76 | j=j +1; 77 | end 78 | if(count < length(eigs)) 79 | eigs(n)=A(n,n); 80 | end 81 | % ---------------------------------------- 82 | 83 | computed_lambda = sort(eigs)'; 84 | 85 | if(norm(abs(computed_lambda - lambda0 )) 0 55 | 56 | [P,bp] = evaluate_polynomial_by_Horners_rule(a,x_left,eps); 57 | p_left = P; 58 | [P,bp] = evaluate_polynomial_by_Horners_rule(a,x_right,eps); 59 | p_right = P; 60 | 61 | %check 62 | 63 | if p_left > 0 || p_right < 0 64 | disp('choose another interval') 65 | exit 66 | end 67 | 68 | iterations = 0; 69 | 70 | while x_right - x_left > 2*eps 71 | 72 | iterations = iterations + 1; 73 | 74 | x_mid = (x_right + x_left)/2; 75 | [P,bp] = evaluate_polynomial_by_Horners_rule(a,x_mid,eps); 76 | p_mid = P; 77 | 78 | if p_left*p_mid < 0 79 | x_right = x_mid; 80 | p_right = p_mid; 81 | end 82 | if p_right*p_mid < 0 83 | x_left = x_mid; 84 | p_left = p_mid; 85 | end 86 | if p_mid == 0 87 | x_left = x_mid; 88 | x_right = x_mid; 89 | end 90 | 91 | end 92 | 93 | Root = (x_left + x_right)/2 94 | % iterations 95 | 96 | y_horner = zeros(N,1); 97 | 98 | for i = 1:N 99 | 100 | [P,bp] = evaluate_polynomial_by_Horners_rule(a,x(i),eps); 101 | y_horner(i) = P; 102 | y_horner_upper(i) = P + bp; 103 | y_horner_lower(i) = P - bp; 104 | 105 | error(i) = abs(bp/P); 106 | 107 | log_error(i) = -log(abs(bp/P)); 108 | 109 | % here we compute error between computed and exact values of polynomial at x(i) 110 | ComputedErrors(i) = P- ((x(i)- 9).^9); 111 | 112 | LogCompEr(i) = -log(abs(ComputedErrors(i)./P)); 113 | 114 | end 115 | 116 | figure(1) 117 | plot(x,y_horner,'k.') 118 | hold on 119 | plot(x,y,'r','linewidth',2) 120 | legend('Horners rule (8000 points)', 'exact p(x)') 121 | xlabel('Input interval for x') 122 | 123 | hold off 124 | 125 | figure(2) 126 | plot(x,log_error); 127 | 128 | hold on 129 | plot(x, LogCompEr, '. r'); 130 | 131 | legend('log error = -log(abs(bp/P))', ' -log(abs((P - p(x))/P)) ') 132 | 133 | xlabel('input interval for x') 134 | 135 | hold off 136 | 137 | figure(3) 138 | 139 | plot(x,error); 140 | 141 | legend('error = abs(bp/P)') 142 | 143 | xlabel('input interval for x') 144 | 145 | figure(4) 146 | plot(x,y,'k',x,y_horner_upper,'r--',x,y_horner_lower,'b--') 147 | xlabel('input interval for x') 148 | legend('exact polynomial ','upper bound','lower bound') 149 | 150 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/Main.cpp: -------------------------------------------------------------------------------- 1 | // The Main program which 2 | // solves the Dirichlet problem for the Poisson's equation in 2D 3 | // using PETSc. 
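//
// Summary of the option handling below: the method is selected either by the
// first command-line argument or by the PETSc option -m, with
//   1 = Jacobi, 2 = Gauss-Seidel, 3 = SOR, 4 = Conjugate Gradient,
//   5 = CG (Algorithm 12.13), 6 = Preconditioned CG, 7 = PCG (Algorithm 12.14);
// any value outside 1..7 terminates the program. With VERBOSE enabled, the
// computed solution is printed and also written to the file solution.m.
//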
4 | 5 | static char help[] =""; 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "Poisson.h" 13 | 14 | const PetscInt n = 20; 15 | const PetscScalar h = 1 / (PetscScalar)(n + 1); 16 | 17 | const bool VERBOSE = true; 18 | 19 | using namespace std; 20 | 21 | char METHOD_NAMES[8][70] = { 22 | "invalid method", 23 | "Jacobi's method", 24 | "Gauss-Seidel method", 25 | "Successive Overrelaxation method (SOR)", 26 | "Conjugate Gradient method", 27 | "Conjugate Gradient method (Algorithm 12.13)", 28 | "Preconditioned Conjugate Gradient method", 29 | "Preconditioned Conjugate Gradient method (Algorithm 12.14)"}; 30 | 31 | char *GetMethodName(PetscInt method) { 32 | if (method < 0 || method > 7) 33 | return METHOD_NAMES[0]; 34 | else 35 | return METHOD_NAMES[method]; 36 | } 37 | 38 | int main(int argc, char **argv) { 39 | PetscErrorCode ierr; 40 | ierr = PetscInitialize(&argc, &argv,(char *)0, help);CHKERRQ(ierr); 41 | 42 | PetscInt method = atoi(argv[1]); 43 | PetscBool methodSet = PETSC_FALSE; 44 | Mat S; 45 | Vec h2b, u; 46 | 47 | 48 | ierr = PetscOptionsGetInt(NULL, NULL, "-m", &method, &methodSet); 49 | if (method < 1 || method > 7) { 50 | cout << "Invalid number of the selected method: " 51 | << method << ".\nExiting..." << endl; 52 | exit(-1); 53 | } 54 | 55 | // To use SOR with omega != 1, we need to disable inodes 56 | if (method == METHOD_SOR) 57 | PetscOptionsSetValue(NULL, "-mat_no_inode", NULL); 58 | 59 | ierr = CreateMatrix(&S, n*n, n*n); CHKERRQ(ierr); 60 | ierr = CreateVector(&h2b, n*n); CHKERRQ(ierr); 61 | ierr = CreateVector(&u, n*n); CHKERRQ(ierr); 62 | 63 | // create discrete Laplacian 64 | ierr = DiscretePoisson2D(n, &S); 65 | 66 | // create right hand side 67 | ierr = DiscretePoisson2D_coeffs(n, h, &h2b); 68 | 69 | ierr = MatAssemblyBegin(S, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); 70 | ierr = MatAssemblyEnd(S, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); 71 | ierr = VecAssemblyBegin(h2b); CHKERRQ(ierr); 72 | ierr = VecAssemblyEnd(h2b); CHKERRQ(ierr); 73 | ierr = VecAssemblyBegin(u); CHKERRQ(ierr); 74 | ierr = VecAssemblyEnd(u); CHKERRQ(ierr); 75 | 76 | /* 77 | Below we solve system S*u= h2b 78 | */ 79 | if (VERBOSE) 80 | PetscPrintf(PETSC_COMM_WORLD, "Using %s\n", GetMethodName(method)); 81 | 82 | if (method == METHOD_CG_FULL) 83 | ConjugateGradient_full(S, h2b, u, VERBOSE); 84 | else if (method == METHOD_PCG_FULL) 85 | PreconditionedConjugateGradient_full(S, h2b, u, VERBOSE); 86 | else 87 | Solve(S, h2b, u, method, VERBOSE); 88 | 89 | // Print out solution 90 | FILE* resultfile = fopen("solution.m", "w"); 91 | 92 | if (VERBOSE) { 93 | PetscInt i, j, matsize, *idx = new PetscInt[n*n]; 94 | PetscScalar *vecu = new PetscScalar[n*n]; 95 | matsize = n*n; 96 | 97 | for (i = 0; i < matsize; i++) 98 | idx[i] = i; 99 | 100 | ierr = VecGetValues(u, matsize, idx, vecu); 101 | 102 | for (i = 0; i < n; i++) { 103 | for (j = 0; j < n; j++) { 104 | PetscPrintf(PETSC_COMM_WORLD, "%.12e ", vecu[n*i + j]); 105 | fprintf(resultfile, "%.12e ", vecu[n*i + j]); 106 | } 107 | PetscPrintf(PETSC_COMM_WORLD, "\n"); 108 | fprintf(resultfile, "\n"); 109 | } 110 | 111 | delete [] vecu; 112 | delete [] idx; 113 | } 114 | fclose(resultfile); 115 | ierr = PetscFinalize(); CHKERRQ(ierr); 116 | return 0; 117 | } 118 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/Main.cpp~: -------------------------------------------------------------------------------- 1 | // The Main program which 2 | // solves the Dirichlet 
problem for the Poisson's equation in 2D 3 | // using PETSc. 4 | 5 | static char help[] =""; 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "Poisson.h" 13 | 14 | const PetscInt n = 20; 15 | const PetscScalar h = 1 / (PetscScalar)(n + 1); 16 | 17 | const bool VERBOSE = true; 18 | 19 | using namespace std; 20 | 21 | char METHOD_NAMES[8][50] = { 22 | "invalid method", 23 | "Jacobi's method", 24 | "Gauss-Seidel method", 25 | "Successive Overrelaxation method (SOR)", 26 | "Conjugate Gradient method", 27 | "Conjugate Gradient method (Algorithm 12.13)", 28 | "Preconditioned Conjugate Gradient method", 29 | "Preconditioned Conjugate Gradient method (Algorithm 12.14)" 30 | }; 31 | 32 | char *GetMethodName(PetscInt method) { 33 | if (method < 0 || method > 7) 34 | return METHOD_NAMES[0]; 35 | else 36 | return METHOD_NAMES[method]; 37 | } 38 | 39 | int main(int argc, char **argv) { 40 | PetscErrorCode ierr; 41 | ierr = PetscInitialize(&argc, &argv,(char *)0, help);CHKERRQ(ierr); 42 | 43 | PetscInt method = atoi(argv[1]); 44 | PetscBool methodSet = PETSC_FALSE; 45 | Mat S; 46 | Vec h2b, u; 47 | 48 | 49 | ierr = PetscOptionsGetInt(NULL, NULL, "-m", &method, &methodSet); 50 | if (method < 1 || method > 7) { 51 | cout << "Invalid number of the selected method: " 52 | << method << ".\nExiting..." << endl; 53 | exit(-1); 54 | } 55 | 56 | // To use SOR with omega != 1, we need to disable inodes 57 | if (method == METHOD_SOR) 58 | PetscOptionsSetValue(NULL, "-mat_no_inode", NULL); 59 | 60 | ierr = CreateMatrix(&S, n*n, n*n); CHKERRQ(ierr); 61 | ierr = CreateVector(&h2b, n*n); CHKERRQ(ierr); 62 | ierr = CreateVector(&u, n*n); CHKERRQ(ierr); 63 | 64 | // create discrete Laplacian 65 | ierr = DiscretePoisson2D(n, &S); 66 | 67 | // create right hand side 68 | ierr = DiscretePoisson2D_coeffs(n, h, &h2b); 69 | 70 | ierr = MatAssemblyBegin(S, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); 71 | ierr = MatAssemblyEnd(S, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); 72 | ierr = VecAssemblyBegin(h2b); CHKERRQ(ierr); 73 | ierr = VecAssemblyEnd(h2b); CHKERRQ(ierr); 74 | ierr = VecAssemblyBegin(u); CHKERRQ(ierr); 75 | ierr = VecAssemblyEnd(u); CHKERRQ(ierr); 76 | 77 | /* 78 | Below we solve system S*u= h2b 79 | */ 80 | if (VERBOSE) 81 | PetscPrintf(PETSC_COMM_WORLD, "Using %s\n", GetMethodName(method)); 82 | 83 | if (method == METHOD_CG_FULL) 84 | ConjugateGradient_full(S, h2b, u, VERBOSE); 85 | else if (method == METHOD_PCG_FULL) 86 | PreconditionedConjugateGradient_full(S, h2b, u, VERBOSE); 87 | else 88 | Solve(S, h2b, u, method, VERBOSE); 89 | 90 | // Print out solution 91 | FILE* resultfile = fopen("solution.m", "w"); 92 | 93 | if (VERBOSE) { 94 | PetscInt i, j, matsize, *idx = new PetscInt[n*n]; 95 | PetscScalar *vecu = new PetscScalar[n*n]; 96 | matsize = n*n; 97 | 98 | for (i = 0; i < matsize; i++) 99 | idx[i] = i; 100 | 101 | ierr = VecGetValues(u, matsize, idx, vecu); 102 | 103 | for (i = 0; i < n; i++) { 104 | for (j = 0; j < n; j++) { 105 | PetscPrintf(PETSC_COMM_WORLD, "%.12e ", vecu[n*i + j]); 106 | fprintf(resultfile, "%.12e ", vecu[n*i + j]); 107 | } 108 | PetscPrintf(PETSC_COMM_WORLD, "\n"); 109 | fprintf(resultfile, "\n"); 110 | } 111 | 112 | delete [] vecu; 113 | delete [] idx; 114 | } 115 | fclose(resultfile); 116 | ierr = PetscFinalize(); CHKERRQ(ierr); 117 | return 0; 118 | } 119 | -------------------------------------------------------------------------------- /Matlab_code/Poisson2D_LU.m: -------------------------------------------------------------------------------- 1 | % 
main program for the solution of Poisson's equation 2 | % - a laplace = f in 2D 3 | 4 | close all 5 | %Define input parameters 6 | n=20; % number of inner nodes in one direction. 7 | a_amp = 12; % amplitude for the function a(x_1,x_2) 8 | f_amp = 1; % 1, 50, 100 choose const. f value 9 | x_0=0.5; 10 | y_0=0.5; 11 | c_x=1; 12 | c_y=1; 13 | 14 | h = 1/(n+1); % define step length 15 | 16 | % ---------------------------------------- 17 | % Computing all matrices and vectors 18 | % ---------------------------------------- 19 | % Generate a n*n by n*n stiffness matrix 20 | S = DiscretePoisson2D(n); 21 | 22 | % factorize A using LU decomposition with pivoting 23 | [L,U,P]=LU_PP(S); 24 | 25 | %% generate coefficient matrix of a((x_1)_i,(x_2)_j) = a(i*h,j*h) 26 | C = zeros(n,n); 27 | for i=1:n 28 | for j=1:n 29 | C(i,j) = 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 30 | +(j*h-y_0)^2/(2*c_y^2))); 31 | end 32 | end 33 | % create diagonal matrix from C 34 | D = zeros(n^2,n^2); 35 | for i=1:n 36 | for j=1:n 37 | D(j+n*(i-1),j+n*(i-1)) = C(i,j); 38 | end 39 | end 40 | 41 | %% calculate load vector f 42 | 43 | % If f is constant. 44 | % f = f_amp*ones(n^2,1); 45 | 46 | % If f is Gaussian function. 47 | f=zeros(n^2,1); 48 | for i=1:n 49 | for j=1:n 50 | f(n*(i-1)+j)=f_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 51 | +(j*h-y_0)^2/(2*c_y^2))); 52 | end 53 | end 54 | 55 | % ---------------------------------------- 56 | % Solving the linear system of equations using Gaussian elimination 57 | % ---------------------------------------- 58 | % We have system A u = 1/h^2 D L U u = f 59 | 60 | % 1. Compute vector of right hand side 61 | % b = D^(-1)*f given by b(i,j)=f(i,j)/a(i,j) 62 | 63 | b=zeros(n^2,1); 64 | for i=1:n 65 | for j=1:n 66 | b(n*(i-1)+j)=f(n*(i-1)+j)/C(i,j); % Use coefficient matrix C or 67 | % diagonal matrix D to get a(i,j) 68 | end 69 | end 70 | 71 | % We now have system to solve: 1/h^2 A u = b 72 | % Use first LU decomposition: 1/h^2 L U u = b 73 | % 2. Compute v = L^(-1)*b by forward substitution. 74 | 75 | v=ForwSub(L,P*b); 76 | 77 | % We now have system 1/h^2 U u = v 78 | % 3. Compute w = U^(-1)*v by backward substitution. 79 | 80 | w=BackSub(U,v); 81 | 82 | % 4. We now have system 1/h^2 u = w 83 | % Compute finally solution as: u=h^2*w 84 | u=h^2*w; 85 | 86 | % ---------------------------------------- 87 | % Plots and figures. 88 | % ---------------------------------------- 89 | 90 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 91 | Z = zeros(n+2,n+2); 92 | for i=1:n 93 | for j=1:n 94 | Z(i+1,j+1) = u(j+n*(i-1)); 95 | end 96 | end 97 | 98 | %% plotting 99 | x1=0:h:1; 100 | y1=0:h:1; 101 | 102 | figure(1) 103 | surf(x1,y1,Z) % same plot as above, (x1, y1 are vectors) 104 | view(2) 105 | colorbar 106 | xlabel('x_1') 107 | ylabel('x_2') 108 | zlabel('u(x_1,x_2)') 109 | title( ['u(x_1,x_2) with A = ',num2str(a_amp),... 110 | ', N = ',num2str(n)]) 111 | 112 | figure(2) 113 | surf(x1,y1,Z) % same plot as above 114 | colorbar 115 | xlabel('x_1') 116 | ylabel('x_2') 117 | zlabel('u(x_1,x_2)') 118 | title( ['u(x_1,x_2) with A = ',num2str(a_amp),... 119 | ', N = ',num2str(n)]) 120 | 121 | % Plotting a(x,y) 122 | Z_a= zeros(n+2); 123 | for i=1:(n+2) 124 | for j=1:(n+2) 125 | Z_a(i,j)= 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 
126 | +(j*h-y_0)^2/(2*c_y^2))); 127 | end 128 | end 129 | figure(3) 130 | surf(x1,y1,Z_a) 131 | xlabel('x_1') 132 | ylabel('x_2') 133 | zlabel('a(x_1,x_2)') 134 | title( ['a(x_1,x_2) with A = ',num2str(a_amp)]) 135 | 136 | % plott the function f(x,y) 137 | Z_f= zeros(n+2); 138 | for i=1:(n+2) 139 | for j=1:(n+2) 140 | Z_f(i,j)=f_amp*exp(-((x1(i)-x_0)^2/(2*c_x^2)... 141 | +(y1(j)-y_0)^2/(2*c_y^2))); 142 | end 143 | end 144 | figure(4) 145 | surf(x1,y1,Z_f) 146 | xlabel('x_1') 147 | ylabel('x_2') 148 | zlabel('f(x_1,x_2)') 149 | title( ['f(x_1,x_2) with A_f = ',num2str(f_amp)]) 150 | -------------------------------------------------------------------------------- /Matlab_code/MainHatFit.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Solution of the least squares problem $ \min_x || Ax - y ||_2 $ 3 | % using the method of normal equations, QR decomposition 4 | % and SVD decomposition. 5 | % Matrix $A$ is constructed using linear splines. 6 | % The program performs fitting to the function $y = \sin(\pi*x/5) + x/5 $ 7 | % ---------------------------------------- 8 | 9 | clc 10 | clear 11 | clf 12 | format long 13 | close all 14 | 15 | % Define the number of measurements or data points. 16 | % It is also the number of columns in matrix $A$. 17 | m = 100; 18 | 19 | % the number of junction points 20 | K = 5; 21 | 22 | x = linspace(-10, 10.0, m)'; 23 | T = linspace(-10, 10.0, K)'; 24 | 25 | % function which we want to fit 26 | b = sin(pi*x/5) + x/5; 27 | 28 | A = zeros(m, K); 29 | 30 | % construct matrix A using linear splines 31 | for k = 1:K 32 | A(:,k) = fihatt(k, x, T); 33 | end 34 | % compute condition number of A 35 | cond(A) 36 | 37 | % solution of linear system $Ax = b$ by different methods 38 | 39 | % using method of normal equations 40 | xHatChol = LLSChol(A, b); 41 | 42 | % using QR decomposition of A 43 | xHatQR = LLSQR(A, b); 44 | 45 | % using SVD decomposition of A 46 | xHatSVD = LLSSVD(A, b); 47 | 48 | disp(' Computed relative error ') 49 | disp(' Method of normal eq. QR SVD') 50 | disp('') 51 | 52 | disp([norm(A*xHatChol-b)/norm(b) norm(A*xHatQR-b)/norm(b) ... 53 | norm(A*xHatSVD-b)/norm(b)]) 54 | 55 | % Method of iterative refinement via Newton's method 56 | 57 | tol = 0.07; 58 | refinedC = newtonIR(A, xHatChol, b, tol); 59 | refinedQ = newtonIR(A, xHatQR, b, tol); 60 | refinedS = newtonIR(A, xHatSVD, b, tol); 61 | 62 | disp('Computed relative error after iterative refinement via Newton method ') 63 | disp(' Method of normal eq. 
QR SVD') 64 | disp('') 65 | 66 | disp([norm(A*refinedC-b)/norm(b) norm(A*refinedQ-b)/norm(b) norm(A*refinedS-b)/norm(b)]) 67 | 68 | % Plot exact and computed functions 69 | 70 | % choose the number of points to plot solution 71 | x = linspace(-10, 10.0, 100)'; 72 | b = (sin(pi*x/5) + x/5); 73 | A = zeros(100, K); 74 | 75 | for k = 1:K 76 | A(:,k) = fihatt(k, x, T); 77 | end 78 | 79 | % choose method to be plotted 80 | 81 | %method = 'cholesky'; 82 | %method = 'refinedcholesky'; 83 | %method = 'qr'; 84 | %method = 'refinedqr'; 85 | %method = 'svd'; 86 | method = 'refinedsvd'; 87 | 88 | switch lower(method) 89 | case 'cholesky' 90 | % Here, A is constructed by linear splines, approximated function is computed 91 | % via the method of normal equations (Cholesky decomposition) 92 | solution = A*xHatChol; 93 | case 'refinedcholesky' 94 | % Here, A is constructed by linear splines, approximated function is computed 95 | % via iterative refinement of the Cholesky-solution through the Newton method 96 | solution = A*refinedC; 97 | case 'qr' 98 | % Here, A is constructed by linear splines, approximated function is computed 99 | % via QR decomposition 100 | solution = A*xHatQR; 101 | case 'refinedqr' 102 | % Here, A is constructed by linear splines, approximated function is computed 103 | % via iterative refinement of the QR-solution through the Newton method 104 | solution = A*refinedQ; 105 | case 'svd' 106 | % Here, A is constructed by linear splines, approximated function is computed 107 | % via SVD decomposition 108 | solution = A*xHatSVD; 109 | case 'refinedsvd' 110 | % Here, A is constructed by linear splines, approximated function is computed 111 | % via iterative refinement of the SVD-solution through the Newton method 112 | solution = A*refinedS; 113 | otherwise 114 | disp('Unknown method') 115 | end 116 | 117 | figure (1) 118 | plot(x, b, 'o r', 'linewidth', 1) 119 | hold on 120 | plot(x, solution, ' - b', 'linewidth', 2) 121 | legend('function', 'approx'); 122 | figure('Name', 'Hat functions') 123 | plot(x, A, 'k') 124 | -------------------------------------------------------------------------------- /Matlab_code/Poisson2D_ConjugateGrad.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Main program for the solution of Poisson's equation 3 | % - a laplace = f in 2D using Conjugate Gradient Method 4 | % ---------------------------------------- 5 | 6 | close all 7 | %Define input parameters 8 | n=20; % number of inner nodes in one direction. 9 | a_amp = 12; % amplitude for the function a(x_1,x_2) 10 | f_amp = 1; % 1, 50, 100 choose const. f value 11 | x_0=0.5; 12 | y_0=0.5; 13 | c_x=1; 14 | c_y=1; 15 | 16 | h = 1/(n+1); % define step length 17 | 18 | % ---------------------------------------- 19 | % Computing all matrices and vectors 20 | % ---------------------------------------- 21 | % Generate a n*n by n*n stiffness matrix 22 | S = DiscretePoisson2D(n); 23 | 24 | %% generate coefficient matrix of a((x_1)_i,(x_2)_j) = a(i*h,j*h) 25 | C = zeros(n,n); 26 | for i=1:n 27 | for j=1:n 28 | C(i,j) = 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 29 | +(j*h-y_0)^2/(2*c_y^2))); 30 | end 31 | end 32 | % create diagonal matrix from C 33 | D = zeros(n^2,n^2); 34 | for i=1:n 35 | for j=1:n 36 | D(j+n*(i-1),j+n*(i-1)) = C(i,j); 37 | end 38 | end 39 | 40 | %% calculate load vector f 41 | 42 | % If f is constant. 43 | % f = f_amp*ones(n^2,1); 44 | 45 | % If f is Gaussian function. 
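% (A vectorized equivalent of the double loop below, shown only as a sketch:
%    [I,J] = ndgrid(1:n,1:n);
%    F = f_amp*exp(-((I*h-x_0).^2/(2*c_x^2)+(J*h-y_0).^2/(2*c_y^2)));
%    f = reshape(F',n^2,1);   % so that f(n*(i-1)+j) = F(i,j)
%  it produces the same load vector f.)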
46 | f=zeros(n^2,1); 47 | for i=1:n 48 | for j=1:n 49 | f(n*(i-1)+j)=f_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 50 | +(j*h-y_0)^2/(2*c_y^2))); 51 | end 52 | end 53 | 54 | % Compute vector of right hand side 55 | % b = D^(-1)*f given by b(i,j)=f(i,j)/a(i,j) 56 | 57 | b=zeros(n^2,1); 58 | for i=1:n 59 | for j=1:n 60 | b(n*(i-1)+j)=f(n*(i-1)+j)/C(i,j); % Use coefficient matrix C or 61 | % diagonal matrix D to get a(i,j) 62 | end 63 | end 64 | % ---------------------------------------- 65 | % ----------- Conjugate gradient method 66 | % ---------------------------------------- 67 | % We should solve: 1/h^2 S u = b 68 | 69 | k=0; 70 | err = 1; x=0; r0= h^2*b; p= h^2*b; tol=10^(-9); 71 | while(err>tol) 72 | k=k+1; 73 | z = S*p; 74 | nu = (r0'*r0)/(p'*z); 75 | x = x + nu*p; 76 | r1 = r0 - nu*z; 77 | mu = (r1'*r1)/(r0'*r0); 78 | p = r1 + mu*p; 79 | r0=r1; 80 | err = norm(r0); 81 | end 82 | 83 | disp('-- Number of iterations in Conjugate gradient method ----------') 84 | k 85 | 86 | % ---------------------------------------- 87 | % Plots and figures. 88 | % ---------------------------------------- 89 | 90 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 91 | Z = zeros(n+2,n+2); 92 | for i=1:n 93 | for j=1:n 94 | Z(i+1,j+1) = x(j+n*(i-1)); 95 | end 96 | end 97 | 98 | %% plotting 99 | x1=0:h:1; 100 | y1=0:h:1; 101 | 102 | subplot(2,2,1) 103 | 104 | surf(x1,y1,Z) % same plot as above, (x1, y1 are vectors) 105 | view(2) 106 | colorbar 107 | xlabel('x_1') 108 | ylabel('x_2') 109 | zlabel('u(x_1,x_2)') 110 | title( ['u(x_1,x_2) in Conjugate gradient method ',... 111 | ', N = ',num2str(n)]) 112 | 113 | subplot(2,2,2) 114 | surf(x1,y1,Z) % same plot as above 115 | colorbar 116 | xlabel('x_1') 117 | ylabel('x_2') 118 | zlabel('u(x_1,x_2)') 119 | title( ['u(x_1,x_2) in Conjugate gradient method ', ... 120 | ', N = ',num2str(n)]) 121 | 122 | % Plotting a(x,y) 123 | Z_a= zeros(n+2); 124 | for i=1:(n+2) 125 | for j=1:(n+2) 126 | Z_a(i,j)= 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 127 | +(j*h-y_0)^2/(2*c_y^2))); 128 | end 129 | end 130 | 131 | subplot(2,2,3) 132 | 133 | surf(x1,y1,Z_a) 134 | xlabel('x_1') 135 | ylabel('x_2') 136 | zlabel('a(x_1,x_2)') 137 | title( ['a(x_1,x_2) with A = ',num2str(a_amp)]) 138 | 139 | % plott the function f(x,y) 140 | Z_f= zeros(n+2); 141 | for i=1:(n+2) 142 | for j=1:(n+2) 143 | Z_f(i,j)=f_amp*exp(-((x1(i)-x_0)^2/(2*c_x^2)... 144 | +(y1(j)-y_0)^2/(2*c_y^2))); 145 | end 146 | end 147 | 148 | subplot(2,2,4) 149 | 150 | surf(x1,y1,Z_f) 151 | xlabel('x_1') 152 | ylabel('x_2') 153 | zlabel('f(x_1,x_2)') 154 | title( ['f(x_1,x_2) with A_f = ',num2str(f_amp)]) 155 | 156 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/PCG.cpp: -------------------------------------------------------------------------------- 1 | 2 | /*Program for using Preconditioned Conjugate gradient method */ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "Poisson.h" 10 | 11 | 12 | PetscErrorCode PreconditionedConjugateGradient(KSP ksp, PC preconditioner) { 13 | PetscErrorCode ierr; 14 | 15 | ierr = KSPSetType(ksp, KSPCG); 16 | 17 | //ierr = PCSetType(preconditioner, PCJACOBI); CHKERRQ(ierr); 18 | ierr = PCSetType(preconditioner, PCCHOLESKY); CHKERRQ(ierr); 19 | 20 | return 0; 21 | } 22 | 23 | /** 24 | * Implements the preconditioned conjugate gradient 25 | * method with Jacobi preconditioning. 
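 * For reference: the routine below builds the diagonal (Jacobi) preconditioner
 * explicitly, M = diag(A), stores M^{-1} = diag(1/a_11, ..., 1/a_nn) as a
 * matrix, and then calls PreconditionedConjugateGradient_inner, which runs the
 * PCG recurrences with the extra step y_k = M^{-1} r_k and the inner products
 * y_k^T r_k in place of r_k^T r_k.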
26 | */ 27 | PetscErrorCode PreconditionedConjugateGradient_full(Mat A, Vec b, Vec x, 28 | bool VERBOSE) { 29 | PetscErrorCode ierr; 30 | Mat Minv; 31 | Vec diagonal, unity; 32 | PetscInt n; 33 | 34 | ierr = MatGetSize(A, &n, NULL); CHKERRQ(ierr); 35 | ierr = CreateMatrix(&Minv, n, n); CHKERRQ(ierr); 36 | ierr = CreateVector(&diagonal, n); CHKERRQ(ierr); 37 | ierr = CreateVector(&unity, n); CHKERRQ(ierr); 38 | 39 | ierr = MatAssemblyBegin(Minv, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); 40 | ierr = MatAssemblyEnd(Minv, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); 41 | ierr = VecAssemblyBegin(diagonal); CHKERRQ(ierr); 42 | ierr = VecAssemblyEnd(diagonal); CHKERRQ(ierr); 43 | ierr = VecAssemblyBegin(unity); CHKERRQ(ierr); 44 | ierr = VecAssemblyEnd(unity); CHKERRQ(ierr); 45 | 46 | // We use the diagonal preconditioner for simplicity 47 | ierr = MatGetDiagonal(A, diagonal); CHKERRQ(ierr); 48 | 49 | // Compute inverse of all diagonal entries 50 | ierr = VecSet(unity, 1.0); CHKERRQ(ierr); 51 | ierr = VecPointwiseDivide(diagonal, unity, diagonal); 52 | 53 | // Create M^{-1} 54 | ierr = MatDiagonalSet(Minv, diagonal, INSERT_VALUES); CHKERRQ(ierr); 55 | 56 | return PreconditionedConjugateGradient_inner(A, b, x, Minv, VERBOSE); 57 | } 58 | PetscErrorCode PreconditionedConjugateGradient_inner(Mat A, Vec b, Vec x, 59 | Mat Minv, bool VERBOSE) { 60 | PetscErrorCode ierr; 61 | PetscInt k=0, n; 62 | PetscScalar mu, nu, yTr, pTz, rNorm, tol = 1e-12; 63 | Vec p, r, y, z; 64 | 65 | ierr = MatGetSize(A, &n, NULL); CHKERRQ(ierr); 66 | 67 | CreateVector(&p, n); 68 | CreateVector(&r, n); 69 | CreateVector(&y, n); 70 | CreateVector(&z, n); 71 | 72 | VecCopy(b, r); 73 | ierr = MatMult(Minv, b, p); CHKERRQ(ierr); 74 | VecCopy(p, y); 75 | 76 | ierr = VecAssemblyBegin(p); CHKERRQ(ierr); 77 | ierr = VecAssemblyEnd(p); CHKERRQ(ierr); 78 | ierr = VecAssemblyBegin(r); CHKERRQ(ierr); 79 | ierr = VecAssemblyEnd(r); CHKERRQ(ierr); 80 | ierr = VecAssemblyBegin(y); CHKERRQ(ierr); 81 | ierr = VecAssemblyEnd(y); CHKERRQ(ierr); 82 | ierr = VecAssemblyBegin(z); CHKERRQ(ierr); 83 | ierr = VecAssemblyEnd(z); CHKERRQ(ierr); 84 | 85 | ierr = VecZeroEntries(x); 86 | 87 | // Pre-compute first (y^T r) 88 | ierr = VecDot(y, r, &yTr); CHKERRQ(ierr); 89 | 90 | do { 91 | k++; 92 | 93 | // z = A * p_k 94 | ierr = MatMult(A, p, z); CHKERRQ(ierr); 95 | 96 | // nu_k = y_{k-1}^T r_{k-1} / p_k^T z 97 | ierr = VecDot(p, z, &pTz); CHKERRQ(ierr); 98 | nu = yTr / pTz; 99 | 100 | // x_k = x_{k-1} + nu_k p_k 101 | ierr = VecAXPY(x, nu, p); CHKERRQ(ierr); 102 | 103 | // r_k = r_{k-1} - nu_k z 104 | ierr = VecAXPY(r, -nu, z); CHKERRQ(ierr); 105 | 106 | // y_k = M^{-1} r_k 107 | ierr = MatMult(Minv, r, y); CHKERRQ(ierr); 108 | 109 | // y_k^T r_k 110 | mu = 1 / yTr; 111 | ierr = VecDot(y, r, &yTr); CHKERRQ(ierr); 112 | 113 | // mu_{k+1} 114 | mu = yTr * mu; 115 | 116 | // p_{k+1} = r_k + mu_{k+1} p_k 117 | ierr = VecAYPX(p, mu, y); 118 | 119 | // || r_k ||_2 120 | ierr = VecNorm(r, NORM_2, &rNorm); 121 | } while (rNorm > tol); 122 | 123 | if (VERBOSE) { 124 | PetscPrintf(PETSC_COMM_WORLD, "Number of iterations: %d\n", k); 125 | } 126 | 127 | return 0; 128 | } 129 | -------------------------------------------------------------------------------- /PETSc_code/PoissonIterative/PCG.cpp~: -------------------------------------------------------------------------------- 1 | \begin{lstlisting} 2 | 3 | /*Program for using Preconditioned Conjugate gradient method */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "Poisson.h" 11 | 12 | 13 | 
PetscErrorCode PreconditionedConjugateGradient(KSP ksp, PC preconditioner) { 14 | PetscErrorCode ierr; 15 | 16 | ierr = KSPSetType(ksp, KSPCG); 17 | 18 | //ierr = PCSetType(preconditioner, PCJACOBI); CHKERRQ(ierr); 19 | ierr = PCSetType(preconditioner, PCCHOLESKY); CHKERRQ(ierr); 20 | 21 | return 0; 22 | } 23 | 24 | /** 25 | * Implements the preconditioned conjugate gradient 26 | * method with Jacobi preconditioning. 27 | */ 28 | PetscErrorCode PreconditionedConjugateGradient_full(Mat A, Vec b, Vec x, 29 | bool VERBOSE) { 30 | PetscErrorCode ierr; 31 | Mat Minv; 32 | Vec diagonal, unity; 33 | PetscInt n; 34 | 35 | ierr = MatGetSize(A, &n, NULL); CHKERRQ(ierr); 36 | ierr = CreateMatrix(&Minv, n, n); CHKERRQ(ierr); 37 | ierr = CreateVector(&diagonal, n); CHKERRQ(ierr); 38 | ierr = CreateVector(&unity, n); CHKERRQ(ierr); 39 | 40 | ierr = MatAssemblyBegin(Minv, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); 41 | ierr = MatAssemblyEnd(Minv, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); 42 | ierr = VecAssemblyBegin(diagonal); CHKERRQ(ierr); 43 | ierr = VecAssemblyEnd(diagonal); CHKERRQ(ierr); 44 | ierr = VecAssemblyBegin(unity); CHKERRQ(ierr); 45 | ierr = VecAssemblyEnd(unity); CHKERRQ(ierr); 46 | 47 | // We use the diagonal preconditioner for simplicity 48 | ierr = MatGetDiagonal(A, diagonal); CHKERRQ(ierr); 49 | 50 | // Compute inverse of all diagonal entries 51 | ierr = VecSet(unity, 1.0); CHKERRQ(ierr); 52 | ierr = VecPointwiseDivide(diagonal, unity, diagonal); 53 | 54 | // Create M^{-1} 55 | ierr = MatDiagonalSet(Minv, diagonal, INSERT_VALUES); CHKERRQ(ierr); 56 | 57 | return PreconditionedConjugateGradient_inner(A, b, x, Minv, VERBOSE); 58 | } 59 | PetscErrorCode PreconditionedConjugateGradient_inner(Mat A, Vec b, Vec x, 60 | Mat Minv, bool VERBOSE) { 61 | PetscErrorCode ierr; 62 | PetscInt k=0, n; 63 | PetscScalar mu, nu, yTr, pTz, rNorm, tol = 1e-12; 64 | Vec p, r, y, z; 65 | 66 | ierr = MatGetSize(A, &n, NULL); CHKERRQ(ierr); 67 | 68 | CreateVector(&p, n); 69 | CreateVector(&r, n); 70 | CreateVector(&y, n); 71 | CreateVector(&z, n); 72 | 73 | VecCopy(b, r); 74 | ierr = MatMult(Minv, b, p); CHKERRQ(ierr); 75 | VecCopy(p, y); 76 | 77 | ierr = VecAssemblyBegin(p); CHKERRQ(ierr); 78 | ierr = VecAssemblyEnd(p); CHKERRQ(ierr); 79 | ierr = VecAssemblyBegin(r); CHKERRQ(ierr); 80 | ierr = VecAssemblyEnd(r); CHKERRQ(ierr); 81 | ierr = VecAssemblyBegin(y); CHKERRQ(ierr); 82 | ierr = VecAssemblyEnd(y); CHKERRQ(ierr); 83 | ierr = VecAssemblyBegin(z); CHKERRQ(ierr); 84 | ierr = VecAssemblyEnd(z); CHKERRQ(ierr); 85 | 86 | ierr = VecZeroEntries(x); 87 | 88 | // Pre-compute first (y^T r) 89 | ierr = VecDot(y, r, &yTr); CHKERRQ(ierr); 90 | 91 | do { 92 | k++; 93 | 94 | // z = A * p_k 95 | ierr = MatMult(A, p, z); CHKERRQ(ierr); 96 | 97 | // nu_k = y_{k-1}^T r_{k-1} / p_k^T z 98 | ierr = VecDot(p, z, &pTz); CHKERRQ(ierr); 99 | nu = yTr / pTz; 100 | 101 | // x_k = x_{k-1} + nu_k p_k 102 | ierr = VecAXPY(x, nu, p); CHKERRQ(ierr); 103 | 104 | // r_k = r_{k-1} - nu_k z 105 | ierr = VecAXPY(r, -nu, z); CHKERRQ(ierr); 106 | 107 | // y_k = M^{-1} r_k 108 | ierr = MatMult(Minv, r, y); CHKERRQ(ierr); 109 | 110 | // y_k^T r_k 111 | mu = 1 / yTr; 112 | ierr = VecDot(y, r, &yTr); CHKERRQ(ierr); 113 | 114 | // mu_{k+1} 115 | mu = yTr * mu; 116 | 117 | // p_{k+1} = r_k + mu_{k+1} p_k 118 | ierr = VecAYPX(p, mu, y); 119 | 120 | // || r_k ||_2 121 | ierr = VecNorm(r, NORM_2, &rNorm); 122 | } while (rNorm > tol); 123 | 124 | if (VERBOSE) { 125 | PetscPrintf(PETSC_COMM_WORLD, "Number of iterations: %d\n", k); 126 | } 127 | 128 | return 0; 129 
| } 130 | 131 | \end{lstlisting} 132 | -------------------------------------------------------------------------------- /Matlab_code/Poisson2D_Gauss_Seidel.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Main program for the solution of Poisson's equation 3 | % - a laplace = f in 2D using iterative Gauss-Seidel method 4 | % ---------------------------------------- 5 | 6 | close all 7 | clc 8 | clear 9 | clf 10 | %Define input parameters 11 | n=20; % number of inner nodes in one direction. 12 | a_amp = 12; % amplitude for the function a(x_1,x_2) 13 | f_amp = 1; % we can choose f=1, 50, 100 14 | x_0=0.5; 15 | y_0=0.5; 16 | c_x=1; 17 | c_y=1; 18 | 19 | h = 1/(n+1); % define step length 20 | 21 | % ---------------------------------------- 22 | % Computing all matrices and vectors 23 | % ---------------------------------------- 24 | % Generate a n*n by n*n stiffness matrix 25 | S = DiscretePoisson2D(n); 26 | 27 | %% generate coefficient matrix of a((x_1)_i,(x_2)_j) = a(i*h,j*h) 28 | C = zeros(n,n); 29 | for i=1:n 30 | for j=1:n 31 | C(i,j) = 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 32 | +(j*h-y_0)^2/(2*c_y^2))); 33 | end 34 | end 35 | % create diagonal matrix from C 36 | D = zeros(n^2,n^2); 37 | for i=1:n 38 | for j=1:n 39 | D(j+n*(i-1),j+n*(i-1)) = C(i,j); 40 | end 41 | end 42 | 43 | % If f is constant. 44 | % f = f_amp*ones(n^2,1); 45 | 46 | % If f is Gaussian function. 47 | f=zeros(n^2,1); 48 | for i=1:n 49 | for j=1:n 50 | f(n*(i-1)+j)=f_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 51 | +(j*h-y_0)^2/(2*c_y^2))); 52 | end 53 | end 54 | 55 | % Compute vector of right hand side 56 | % b = D^(-1)*f computed as b(i,j)=f(i,j)/a(i,j) 57 | 58 | b=zeros(n^2,1); 59 | for i=1:n 60 | for j=1:n 61 | b(n*(i-1)+j)= h^2*(f(n*(i-1)+j))/C(i,j); % Use coefficient matrix C or 62 | % diagonal matrix D to get a(i,j) 63 | 64 | end 65 | end 66 | 67 | % ---------------------------------------- 68 | % Solution of S*u = b using iterative Gauss-Seidel method 69 | % ---------------------------------------- 70 | 71 | residual = 1; k=0; tol=10^(-9); 72 | 73 | u = zeros(n^2,1); 74 | u_old = u; 75 | 76 | % use Gauss-Seidel algorithm without red-black ordering: 77 | % values u(1:(j-1)) are already updated, and u_old((j+1):n^2) 78 | % are older, computed on the previous iteration 79 | 80 | while (norm(residual)> tol) 81 | for j = 1:n^2 82 | u(j) = 1/S(j,j) * (b(j) ... 83 | - S(j,1:(j-1))*u(1:(j-1)) - S(j,(j+1):n^2)*u_old((j+1):n^2)); 84 | end 85 | u_old = u; 86 | residual = S*u- b; 87 | k = k+1; 88 | end 89 | 90 | disp('-- Number of iterations in Gauss-Seidel method ----------') 91 | k 92 | 93 | % ---------------------------------------- 94 | % Plots and figures for Gauss-Seidel method 95 | % ---------------------------------------- 96 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 97 | Z = zeros(n+2,n+2); 98 | for i=1:n 99 | for j=1:n 100 | Z(i+1,j+1) = u(j+n*(i-1)); 101 | end 102 | end 103 | 104 | %% plotting 105 | x1=0:h:1; 106 | y1=0:h:1; 107 | 108 | subplot (2,2,1) 109 | surf(x1,y1, Z) % same plot as above, (x1, y1 are vectors) 110 | view(2) 111 | colorbar 112 | xlabel('x_1') 113 | ylabel('x_2') 114 | zlabel('u(x_1,x_2)') 115 | title( ['solution u(x_1,x_2) Gauss-Seidel method ',... 116 | ', N = ',num2str(n),', iter. 
= ',num2str(k)]) 117 | 118 | subplot (2,2,2) 119 | 120 | surf(x1,y1, Z) % same plot as above 121 | colorbar 122 | xlabel('x_1') 123 | ylabel('x_2') 124 | zlabel('u(x_1,x_2)') 125 | 126 | title( ['solution u(x_1,x_2) Gauss-Seidel method',... 127 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 128 | 129 | % Plotting a(x,y) 130 | Z_a= zeros(n+2); 131 | for i=1:(n+2) 132 | for j=1:(n+2) 133 | Z_a(i,j)= 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 134 | +(j*h-y_0)^2/(2*c_y^2))); 135 | end 136 | end 137 | 138 | subplot (2,2,3) 139 | surf(x1,y1,Z_a) 140 | xlabel('x_1') 141 | ylabel('x_2') 142 | zlabel('a(x_1,x_2)') 143 | title( ['coefficient a(x_1,x_2) with A = ',num2str(a_amp)]) 144 | 145 | % plott the function f(x,y) 146 | Z_f= zeros(n+2); 147 | for i=1:(n+2) 148 | for j=1:(n+2) 149 | Z_f(i,j)=f_amp*exp(-((x1(i)-x_0)^2/(2*c_x^2)... 150 | +(y1(j)-y_0)^2/(2*c_y^2))); 151 | end 152 | end 153 | 154 | subplot (2,2,4) 155 | surf(x1,y1,Z_f) 156 | xlabel('x_1') 157 | ylabel('x_2') 158 | zlabel('f(x_1,x_2)') 159 | title( ['f(x_1,x_2) with A_f = ',num2str(f_amp)]) 160 | 161 | -------------------------------------------------------------------------------- /Matlab_code/Poisson2D_PrecConjugateGrad.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Main program for the solution of Poisson's equation 3 | % - a laplace = f in 2D using Preconditioned Conjugate Gradient Method 4 | % ---------------------------------------- 5 | 6 | close all 7 | %Define input parameters 8 | n=20; % number of inner nodes in one direction. 9 | a_amp = 12; % amplitude for the function a(x_1,x_2) 10 | f_amp = 1; % we can set f = 1, 50, 100 11 | x_0=0.5; 12 | y_0=0.5; 13 | c_x=1; 14 | c_y=1; 15 | 16 | h = 1/(n+1); % define step length 17 | 18 | % ---------------------------------------- 19 | % Computing all matrices and vectors 20 | % ---------------------------------------- 21 | % Generate a n*n by n*n stiffness matrix 22 | S = DiscretePoisson2D(n); 23 | 24 | %% generate coefficient matrix of a((x_1)_i,(x_2)_j) = a(i*h,j*h) 25 | C = zeros(n,n); 26 | for i=1:n 27 | for j=1:n 28 | C(i,j) = 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 29 | +(j*h-y_0)^2/(2*c_y^2))); 30 | end 31 | end 32 | % create diagonal matrix from C 33 | D = zeros(n^2,n^2); 34 | for i=1:n 35 | for j=1:n 36 | D(j+n*(i-1),j+n*(i-1)) = C(i,j); 37 | end 38 | end 39 | 40 | %% calculate load vector f 41 | 42 | % If f is constant. 43 | % f = f_amp*ones(n^2,1); 44 | 45 | % If f is Gaussian function. 46 | f=zeros(n^2,1); 47 | for i=1:n 48 | for j=1:n 49 | f(n*(i-1)+j)=f_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 50 | +(j*h-y_0)^2/(2*c_y^2))); 51 | end 52 | end 53 | 54 | % 1. 
Compute vector of right hand side 55 | % b = D^(-1)*f given by b(i,j)=f(i,j)/a(i,j) 56 | 57 | b=zeros(n^2,1); 58 | for i=1:n 59 | for j=1:n 60 | b(n*(i-1)+j)=f(n*(i-1)+j)/C(i,j); % Use coefficient matrix C or 61 | % diagonal matrix D to get a(i,j) 62 | end 63 | end 64 | 65 | % ---------------------------------------- 66 | % --- Preconditioned conjugate gradient method (PCGM): 67 | % choose different preconditioners: 68 | % Cholesky factorization, Jacobi preconditioner, block Jacobi preconditioner 69 | % ---------------------------------------- 70 | % We now have system to solve: 1/h^2 S u = b 71 | 72 | %initialize preconditioner 73 | Ssparse = sparse(S); 74 | 75 | % Preconditioner: preconditioner matrix here is incomplete 76 | % Cholesky factorization of S 77 | 78 | cond = ichol(Ssparse); cond=cond*cond'; cond=full(inv(cond)); 79 | 80 | % Preconditioner: preconditioner matrix here is 81 | % Jacobi preconditioner. 82 | % Results are the same as in usual conjugate gradient update 83 | 84 | %M = diag(diag(S)); 85 | %cond = diag(1.0./diag(M)); 86 | 87 | % Preconditioner: preconditioner matrix here is 88 | %Block Jacobi Preconditioner 89 | %blockSize = 2; % size of blocks 90 | %cond = zeros(n^2); 91 | %Iinds = ceil( (1:(blockSize*n^2))/blockSize); 92 | %Jinds = blockSize*ceil( (1:(blockSize*n^2))/blockSize^2)-(blockSize-1) ... 93 | % + repmat%(0:blockSize-1,1,n^2); 94 | %vecInds = sub2ind(size(S),Iinds, Jinds); 95 | %cond(vecInds) = S(vecInds); 96 | 97 | %initialize parameters in the method 98 | err = 1; x=0; r0= h^2*b; p=cond*h^2*b; y0=cond*r0; tol=10^(-9); 99 | k=0; 100 | 101 | while(err>tol) 102 | z = S*p; 103 | nu = (y0'*r0)/(p'*z); 104 | x = x + nu*p; 105 | r1 = r0 - nu*z; 106 | y1 = cond*r1; 107 | mu = (y1'*r1)/(y0'*r0); 108 | p = y1 + mu*p; 109 | r0=r1; 110 | y0=y1; 111 | err = norm(r0); 112 | k=k+1; 113 | end 114 | 115 | disp('-- Number of iterations in Preconditioned conjugate gradient method (PCGM) ') 116 | k 117 | 118 | % ---------------------------------------- 119 | % Plots and figures. 120 | % ---------------------------------------- 121 | 122 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 123 | Z = zeros(n+2,n+2); 124 | for i=1:n 125 | for j=1:n 126 | Z(i+1,j+1) = x(j+n*(i-1)); 127 | end 128 | end 129 | 130 | %% plotting 131 | x1=0:h:1; 132 | y1=0:h:1; 133 | 134 | subplot(2,2,1) 135 | surf(x1,y1,Z) % same plot as above, (x1, y1 are vectors) 136 | view(2) 137 | colorbar 138 | xlabel('x_1') 139 | ylabel('x_2') 140 | zlabel('u(x_1,x_2)') 141 | title( ['u(x_1,x_2) in PCGM',... 142 | ', N = ',num2str(n)]) 143 | 144 | subplot(2,2,2) 145 | surf(x1,y1,Z) % same plot as above 146 | colorbar 147 | xlabel('x_1') 148 | ylabel('x_2') 149 | zlabel('u(x_1,x_2)') 150 | title( ['u(x_1,x_2) in PCGM ',... 151 | ', N = ',num2str(n)]) 152 | 153 | % Plotting a(x,y) 154 | Z_a= zeros(n+2); 155 | for i=1:(n+2) 156 | for j=1:(n+2) 157 | Z_a(i,j)= 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 158 | +(j*h-y_0)^2/(2*c_y^2))); 159 | end 160 | end 161 | 162 | subplot(2,2,3) 163 | surf(x1,y1,Z_a) 164 | xlabel('x_1') 165 | ylabel('x_2') 166 | zlabel('a(x_1,x_2)') 167 | title( ['a(x_1,x_2) with A = ',num2str(a_amp)]) 168 | 169 | % plott the function f(x,y) 170 | Z_f= zeros(n+2); 171 | for i=1:(n+2) 172 | for j=1:(n+2) 173 | Z_f(i,j)=f_amp*exp(-((x1(i)-x_0)^2/(2*c_x^2)... 
174 | +(y1(j)-y_0)^2/(2*c_y^2))); 175 | end 176 | end 177 | 178 | subplot(2,2,4) 179 | surf(x1,y1,Z_f) 180 | xlabel('x_1') 181 | ylabel('x_2') 182 | zlabel('f(x_1,x_2)') 183 | title( ['f(x_1,x_2) with A_f = ',num2str(f_amp)]) 184 | 185 | -------------------------------------------------------------------------------- /Matlab_code/DivideandConq.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Computes algorithm of Divide-and-Conquer: 3 | % eigenvalues will be roots of the secular equation and will lie 4 | % on the diagonal of the output matrix L. 5 | % In the output matrix Q will be corresponding eigenvectors. 6 | % ---------------------------------------- 7 | 8 | function [Q,L] = DivideandConq(T) 9 | % Compute size of input matrix T: 10 | [m,n] = size(T); 11 | 12 | % here we will divide the matrix 13 | m2 = floor(m/2); 14 | 15 | %if m=0 we shall return 16 | if m2 == 0 %1 by 1 17 | Q = 1; L = T; 18 | return; 19 | %else we perform recursive computations 20 | else 21 | [T,T1,T2,bm,v] = formT(T,m2); 22 | 23 | %recursive computations 24 | [Q1,L1] = DivideandConq(T1); 25 | [Q2,L2] = DivideandConq(T2); 26 | 27 | %pick out the last and first columns of the transposes: 28 | Q1T = Q1'; 29 | Q2T = Q2'; 30 | u = [Q1T(:,end); Q2T(:,1)]; 31 | 32 | %Creating the D-matrix: 33 | D = zeros(n); 34 | D(1:m2,1:m2) = L1; 35 | D((m2+1):end,(m2+1):end) = L2; 36 | 37 | % The Q matrix (with Q1 and Q2 on the "diagonals") 38 | Q = zeros(n); 39 | Q(1:m2,1:m2) = Q1; 40 | Q((m2+1):end,(m2+1):end) = Q2; 41 | 42 | %Creating the matrix B, which determinant is the secular equation: 43 | % det B = f(\lambda)=0 44 | B = D+bm*u*u'; 45 | 46 | % Compute eigenvalues as roots of the secular equation 47 | % f(\lambda)=0 using Newton's method 48 | eigs = NewtonMethod(D,bm,u); 49 | Q3 = zeros(m,n); 50 | 51 | % compute eigenvectors for corresponding eigenvalues 52 | for i = 1:length(eigs) 53 | Q3(:,i) = (D-eigs(i)*eye(m))\u; 54 | Q3(:,i) = Q3(:,i)/norm(Q3(:,i)); 55 | end 56 | 57 | %Compute eigenvectors of the original input matrix T 58 | Q = Q*Q3; 59 | 60 | % Present eigenvalues of the original matrix input T 61 | %(they will be on diagonal) 62 | L = zeros(m,n); 63 | L(1:(m+1):end) = eigs; 64 | 65 | return; 66 | end 67 | 68 | end 69 | 70 | % Compute T1, T2 constant bm and the vector v 71 | %from the input matrix A. 
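% The splitting used below is the standard rank-one tearing of a symmetric
% tridiagonal matrix:
%     A = [T1 0; 0 T2] + bm*v*v',
% where bm = A(m,m+1) is the off-diagonal entry at the split point, v has ones
% in positions m and m+1 and zeros elsewhere, and the last diagonal entry of T1
% and the first diagonal entry of T2 are reduced by bm to compensate.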
72 | 73 | function [T,T1,T2,bm,v] = formT(A,m) 74 | 75 | T1 = A(1:m,1:m); 76 | T2 = A((m+1):end,(m+1):end); 77 | bm = A(m,m+1); 78 | 79 | T1(end) = T1(end)-bm; 80 | T2(1) = T2(1)-bm; 81 | 82 | v = zeros(size(A,1),1); 83 | v(m:m+1) = 1; 84 | 85 | T = zeros(size(A)); 86 | T(1:m,1:m) = T1; 87 | T((m+1):end,(m+1):end) = T2; 88 | 89 | end 90 | 91 | % compute eigenvalues in the secular equation 92 | % using the Newton's method 93 | 94 | function eigs = NewtonMethod(D,p,u) 95 | [m,n] = size(D); 96 | 97 | %The initial guess in the Newton's method 98 | % will be the numbers d_i 99 | startingPoints = sort(diag(D)); 100 | 101 | %if p > 0 we have an eigenvalue on the right, else on the left 102 | if p >= 0 103 | startingPoints = [startingPoints; startingPoints(end)+10000]; 104 | elseif p < 0 105 | startingPoints = [startingPoints(1)-10000; startingPoints]; 106 | end 107 | 108 | eigs = zeros(m,1); 109 | 110 | % tolerance in Newton's method 111 | convCriteria = 1e-05; 112 | 113 | % step in the approximation of the derrivative 114 | % in Newton's method 115 | dx = 0.00001; 116 | 117 | %plot the secular equation 118 | X = linspace(-3,3,1000); 119 | for t = 1:1000 120 | y(t) =SecularEqEval(D,p,u,X(t),m,n); 121 | end 122 | plot(X,y, 'LineWidth',2) 123 | axis([-3 3 -5 5]) 124 | legend('graph of the secular equation $f(\lambda)=0$') 125 | 126 | %Start Newton's method 127 | for i = 1:m 128 | %the starting value of lambda 129 | currentVal = (startingPoints(i)+startingPoints(i+1) )/ 2; 130 | 131 | % this value is used inthe stoppimg criterion below 132 | currentVal2 = inf; 133 | % computed secular equation for \lambda=currentVal 134 | fCurr = SecularEqEval(D,p,u,currentVal,m,n); 135 | 136 | rands = 0; 137 | k =0; 138 | j = 0; 139 | 140 | if ~((startingPoints(i+1)-startingPoints(i)) < 0.0001) 141 | while ~(abs(fCurr) < convCriteria) 142 | 143 | %compute value of the function dfApprox with small step dx to 144 | %approximate derivative 145 | fval2 = SecularEqEval(D,p,u,currentVal+dx,m,n); 146 | fval1 = SecularEqEval(D,p,u,currentVal,m,n); 147 | dfApprox = (fval2-fval1)/dx; 148 | 149 | % compute new value of currentVal in Newton's method, 150 | % or perform one iteration in Newton's method 151 | currentVal = currentVal - fCurr/dfApprox; 152 | 153 | % check: if we are outside of the current range, reinput inside: 154 | if currentVal <= startingPoints(i) 155 | currentVal= startingPoints(i)+0.0001; 156 | k=k+1; 157 | elseif currentVal >= startingPoints(i+1); 158 | currentVal= startingPoints(i+1)-0.0001; 159 | k=k+1; 160 | elseif dfApprox == Inf || dfApprox == -Inf 161 | currentVal= startingPoints(i) + ... 
162 | rand*(startingPoints(i+1)-startingPoints(i)); 163 | rands = rands+1; 164 | end 165 | 166 | j=j+1; 167 | 168 | fCurr = SecularEqEval(D,p,u,currentVal,m,n); 169 | 170 | if k > 10 || j > 50; 171 | tempVec = [startingPoints(i),startingPoints(i+1)]; 172 | [val,ind] = min(abs([startingPoints(i),startingPoints(i+1)]-currentVal)); 173 | if ind == 1 174 | currentVal = tempVec(ind)+0.00001; 175 | else 176 | currentVal = tempVec(ind)-0.00001; 177 | end 178 | break; 179 | elseif currentVal2 == currentVal || rands > 5 || isnan(currentVal) || isnan(fCurr) 180 | currentVal = currentVal2; 181 | break; 182 | end 183 | %save last value: 184 | currentVal2 = currentVal; 185 | end 186 | end 187 | 188 | %assigning eigenvalue in the right order 189 | eigs(i) = currentVal; 190 | 191 | end 192 | 193 | end 194 | 195 | % evaluate the secular equation in Newton's method for the computed 196 | % eigenvalue x 197 | function fVal = SecularEqEval(D,p,u,x,m,n) 198 | 199 | fVal = 1+p*u'*inv((D-x*eye(m,n)))*u; 200 | 201 | end 202 | 203 | -------------------------------------------------------------------------------- /Matlab_code/Poisson2D_Gauss_SeidelRedBlack.m: -------------------------------------------------------------------------------- 1 | 2 | % ---------------------------------------- 3 | % Main program for the solution of Poisson's equation 4 | % - a laplace = f in 2D using iterative Gauss-Seidel method 5 | % with Red-Black ordering 6 | % ---------------------------------------- 7 | 8 | close all 9 | clc 10 | clear 11 | clf 12 | %Define input parameters 13 | n=20; % number of inner nodes in one direction. 14 | a_amp = 12; % amplitude for the function a(x_1,x_2) 15 | f_amp = 1; % we can choose f=1, 50, 100 16 | x_0=0.5; 17 | y_0=0.5; 18 | c_x=1; 19 | c_y=1; 20 | 21 | h = 1/(n+1); % define step length 22 | 23 | % ---------------------------------------- 24 | % Computing all matrices and vectors 25 | % ---------------------------------------- 26 | % Generate a n*n by n*n stiffness matrix 27 | S = DiscretePoisson2D(n); 28 | 29 | %% generate coefficient matrix of a((x_1)_i,(x_2)_j) = a(i*h,j*h) 30 | C = zeros(n,n); 31 | for i=1:n 32 | for j=1:n 33 | C(i,j) = 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 34 | +(j*h-y_0)^2/(2*c_y^2))); 35 | end 36 | end 37 | % create diagonal matrix from C 38 | D = zeros(n^2,n^2); 39 | for i=1:n 40 | for j=1:n 41 | D(j+n*(i-1),j+n*(i-1)) = C(i,j); 42 | end 43 | end 44 | 45 | % If f is constant. 46 | % f = f_amp*ones(n^2,1); 47 | 48 | % If f is Gaussian function. 49 | f=zeros(n^2,1); 50 | for i=1:n 51 | for j=1:n 52 | f(n*(i-1)+j)=f_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 
53 | +(j*h-y_0)^2/(2*c_y^2))); 54 | end 55 | end 56 | 57 | % Compute vector of right hand side 58 | % b = D^(-1)*f computed as b(i,j)=f(i,j)/a(i,j) 59 | 60 | b=zeros(n^2,1); 61 | for i=1:n 62 | for j=1:n 63 | b(n*(i-1)+j)=f(n*(i-1)+j)/C(i,j); % Use coefficient matrix C or 64 | % diagonal matrix D to get a(i,j) 65 | end 66 | end 67 | 68 | % ---------------------------------------- 69 | % Solution of 1/h^2 S u = b using iterative Gauss-Seidel method 70 | % with red-black ordering, version I 71 | % ---------------------------------------- 72 | 73 | err = 1; k=0; tol=10^(-9); 74 | V = zeros(n,n); 75 | V_old = zeros(n,n); 76 | F=vec2mat(b,n)'; 77 | X=diag(ones(1,n-1),-1); 78 | X=X+X'; 79 | 80 | blackindex = invhilb(n) < 0; 81 | redindex = fliplr(blackindex); 82 | B=V; 83 | V(redindex)=0; 84 | 85 | R=V; 86 | V(blackindex)=0; 87 | 88 | redF = F; redF(blackindex)=0; 89 | blackF = F; blackF(redindex)=0; 90 | 91 | while(err>tol) 92 | R = (X*B + B*X + h^2*redF)/4; 93 | B = (X*R + R*X + h^2*blackF)/4; 94 | k=k+1; 95 | 96 | V_new =R+B; 97 | err = norm(V_new - V_old); 98 | V_old = V_new; 99 | end 100 | 101 | V_new = [zeros(1,n+2); zeros(n,1) V_new zeros(n,1);zeros(1,n+2)] 102 | 103 | disp('-- Number of iterations in Gauss-Seidel method ----------') 104 | k 105 | 106 | % ---------------------------------------- 107 | % Plots and figures for Gauss-Seidel method 108 | % ---------------------------------------- 109 | 110 | figure(1) 111 | %% plotting 112 | x1=0:h:1; 113 | y1=0:h:1; 114 | 115 | subplot (2,2,1) 116 | surf(x1,y1,V_new) % same plot as above, (x1, y1 are vectors) 117 | view(2) 118 | colorbar 119 | xlabel('x_1') 120 | ylabel('x_2') 121 | zlabel('u(x_1,x_2)') 122 | title( ['solution u(x_1,x_2) in Gauss-Seidel Red-Black ordering',... 123 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 124 | 125 | subplot (2,2,2) 126 | 127 | surf(x1,y1,V_new) % same plot as above 128 | colorbar 129 | xlabel('x_1') 130 | ylabel('x_2') 131 | zlabel('u(x_1,x_2)') 132 | 133 | title( ['solution u(x_1,x_2) in Gauss-Seidel Red-Black ordering',... 134 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 135 | 136 | % Plotting a(x,y) 137 | Z_a= zeros(n+2); 138 | for i=1:(n+2) 139 | for j=1:(n+2) 140 | Z_a(i,j)= 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 141 | +(j*h-y_0)^2/(2*c_y^2))); 142 | end 143 | end 144 | 145 | subplot (2,2,3) 146 | surf(x1,y1,Z_a) 147 | xlabel('x_1') 148 | ylabel('x_2') 149 | zlabel('a(x_1,x_2)') 150 | title( ['coefficient a(x_1,x_2) with A = ',num2str(a_amp)]) 151 | 152 | % plott the function f(x,y) 153 | Z_f= zeros(n+2); 154 | for i=1:(n+2) 155 | for j=1:(n+2) 156 | Z_f(i,j)=f_amp*exp(-((x1(i)-x_0)^2/(2*c_x^2)... 
157 | +(y1(j)-y_0)^2/(2*c_y^2))); 158 | end 159 | end 160 | 161 | subplot (2,2,4) 162 | surf(x1,y1,Z_f) 163 | xlabel('x_1') 164 | ylabel('x_2') 165 | zlabel('f(x_1,x_2)') 166 | title( ['f(x_1,x_2) with A_f = ',num2str(f_amp)]) 167 | 168 | % ---------------------------------------- 169 | % Solution of 1/h^2 S u = b using iterative Gauss-Seidel method 170 | % with red-black ordering, version II 171 | % ---------------------------------------- 172 | 173 | err = 1; k=0; tol=10^(-9); 174 | % Initial guess 175 | uold = zeros(n+2, n+2); 176 | unew= uold; 177 | 178 | while(err > tol) 179 | % Red nodes 180 | for i = 2:n+1 181 | for j = 2:n+1 182 | if(mod(i+j,2) == 0) 183 | unew(i, j) = (uold(i-1, j) + uold(i+1, j) + uold(i, j-1) + uold(i, j+1) + h^2*b(n*(i-2)+j-1))/4.0; 184 | % for computation of residual 185 | u(j-1 + n*(i-2)) = unew(i,j); 186 | end 187 | end 188 | end 189 | 190 | % Black nodes 191 | for i = 2:n+1 192 | for j = 2:n+1 193 | if(mod(i+j,2) == 1) 194 | unew(i,j) = 0.25*(unew(i-1,j) + unew(i+1,j) ... 195 | + unew(i,j-1) + unew(i,j+1) + h^2*b(n*(i-2)+j-1)); 196 | % for computation of residual 197 | u(j-1 + n*(i-2)) = unew(i,j); 198 | end 199 | end 200 | end 201 | 202 | k = k+1; 203 | 204 | % different stopping rules 205 | err = norm(unew-uold); 206 | %computation of residual 207 | % err = norm(S*u' - h^2*b); 208 | uold = unew; 209 | end 210 | 211 | u = reshape(unew(2:end-1, 2:end-1)', n*n, 1); 212 | 213 | disp('-- Number of iterations in the version II of Gauss-Seidel method----------') 214 | 215 | k 216 | 217 | % ---------------------------------------- 218 | % Plots and figures for version II 219 | % ---------------------------------------- 220 | 221 | figure(2) 222 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 223 | V_new = zeros(n+2,n+2); 224 | for i=1:n 225 | for j=1:n 226 | V_new(i+1,j+1) = u(j+n*(i-1)); 227 | end 228 | end 229 | 230 | %% plotting 231 | x1=0:h:1; 232 | y1=0:h:1; 233 | 234 | subplot (1,2,1) 235 | surf(x1,y1,V_new) % same plot as above, (x1, y1 are vectors) 236 | view(2) 237 | colorbar 238 | xlabel('x_1') 239 | ylabel('x_2') 240 | zlabel('u(x_1,x_2)') 241 | title( ['solution u(x_1,x_2) in Gauss-Seidel Red-Black ordering, version II',... 242 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 243 | 244 | subplot (1,2,2) 245 | 246 | surf(x1,y1,V_new) % same plot as above 247 | colorbar 248 | xlabel('x_1') 249 | ylabel('x_2') 250 | zlabel('u(x_1,x_2)') 251 | 252 | title( ['solution u(x_1,x_2) in Gauss-Seidel Red-Black ordering, version II',... 253 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 254 | 255 | -------------------------------------------------------------------------------- /Matlab_code/Poisson2D_Jacobi.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Main program for the solution of Poisson's equation 3 | % - a laplace = f in 2D using iterative Jacobi method 4 | % ---------------------------------------- 5 | 6 | close all 7 | clc 8 | clear 9 | clf 10 | %Define input parameters 11 | n=20; % number of inner nodes in one direction. 
12 | a_amp = 12; % amplitude for the function a(x_1,x_2) 13 | f_amp = 1; % we can choose f=1, 50, 100 14 | x_0=0.5; 15 | y_0=0.5; 16 | c_x=1; 17 | c_y=1; 18 | 19 | h = 1/(n+1); % define step length 20 | 21 | % ---------------------------------------- 22 | % Computing all matrices and vectors 23 | % ---------------------------------------- 24 | % Generate a n*n by n*n stiffness matrix 25 | S = DiscretePoisson2D(n); 26 | 27 | %% generate coefficient matrix of a((x_1)_i,(x_2)_j) = a(i*h,j*h) 28 | C = zeros(n,n); 29 | for i=1:n 30 | for j=1:n 31 | C(i,j) = 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 32 | +(j*h-y_0)^2/(2*c_y^2))); 33 | end 34 | end 35 | % create diagonal matrix from C 36 | D = zeros(n^2,n^2); 37 | for i=1:n 38 | for j=1:n 39 | D(j+n*(i-1),j+n*(i-1)) = C(i,j); 40 | end 41 | end 42 | 43 | % If f is constant. 44 | % f = f_amp*ones(n^2,1); 45 | 46 | % If f is Gaussian function. 47 | f=zeros(n^2,1); 48 | for i=1:n 49 | for j=1:n 50 | f(n*(i-1)+j)= f_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 51 | +(j*h-y_0)^2/(2*c_y^2))); 52 | end 53 | end 54 | 55 | % Compute vector of right hand side 56 | % b = D^(-1)*f computed as b(i,j)=f(i,j)/a(i,j) 57 | 58 | b=zeros(n^2,1); 59 | for i=1:n 60 | for j=1:n 61 | b(n*(i-1)+j)= f(n*(i-1)+j)/C(i,j); % Use coefficient matrix C or 62 | % diagonal matrix D to get a(i,j) 63 | end 64 | end 65 | 66 | % ---------------------------------------- 67 | % --- Solution of 1/h^2 S*u = b using Jacobi's method, version I 68 | % ---------------------------------------- 69 | 70 | err = 1; k=0; tol=10^(-9); 71 | 72 | w_old = ones(length(S),1); 73 | L=tril(S,-1); 74 | U=L'; 75 | Dinv=diag(diag(S).^(-1)); 76 | R=Dinv*(-L-U); 77 | c=Dinv*h^2*b; 78 | 79 | while(err>tol) 80 | w_new = R*w_old +c; 81 | k=k+1; 82 | 83 | % stopping criterion: choose one of two 84 | err = norm(w_new-w_old); 85 | % err = norm(S*w_new - h^2*b); 86 | w_old = w_new; 87 | end 88 | 89 | disp('-- Number of iterations in the version I of Jacobi method ----------') 90 | k 91 | 92 | % ---------------------------------------- 93 | % Plots and figures for version I 94 | % ---------------------------------------- 95 | 96 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 97 | V_new = zeros(n+2,n+2); 98 | for i=1:n 99 | for j=1:n 100 | V_new(i+1,j+1) = w_new(j+n*(i-1)); 101 | end 102 | end 103 | 104 | %% plotting 105 | x1=0:h:1; 106 | y1=0:h:1; 107 | 108 | figure(1) 109 | 110 | subplot (2,2,1) 111 | surf(x1,y1,V_new) % same plot as above, (x1, y1 are vectors) 112 | view(2) 113 | colorbar 114 | xlabel('x_1') 115 | ylabel('x_2') 116 | zlabel('u(x_1,x_2)') 117 | title( ['solution u(x_1,x_2) Jacobi version I ',... 118 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 119 | 120 | subplot (2,2,2) 121 | 122 | surf(x1,y1,V_new) % same plot as above 123 | colorbar 124 | xlabel('x_1') 125 | ylabel('x_2') 126 | zlabel('u(x_1,x_2)') 127 | 128 | title( ['solution u(x_1,x_2) Jacobi version I',... 129 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 130 | 131 | % Plotting a(x,y) 132 | Z_a= zeros(n+2); 133 | for i=1:(n+2) 134 | for j=1:(n+2) 135 | Z_a(i,j)= 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 136 | +(j*h-y_0)^2/(2*c_y^2))); 137 | end 138 | end 139 | 140 | subplot (2,2,3) 141 | surf(x1,y1,Z_a) 142 | xlabel('x_1') 143 | ylabel('x_2') 144 | zlabel('a(x_1,x_2)') 145 | title( ['coefficient a(x_1,x_2) with A = ',num2str(a_amp)]) 146 | 147 | % plott the function f(x,y) 148 | Z_f= zeros(n+2); 149 | for i=1:(n+2) 150 | for j=1:(n+2) 151 | Z_f(i,j)=f_amp*exp(-((x1(i)-x_0)^2/(2*c_x^2)... 
152 | +(y1(j)-y_0)^2/(2*c_y^2))); 153 | end 154 | end 155 | 156 | subplot (2,2,4) 157 | surf(x1,y1,Z_f) 158 | xlabel('x_1') 159 | ylabel('x_2') 160 | zlabel('f(x_1,x_2)') 161 | title( ['f(x_1,x_2) with A_f = ',num2str(f_amp)]) 162 | 163 | % ---------------------------------------- 164 | % --- Jacobi's method, version II -------------------------- 165 | % ---------------------------------------- 166 | 167 | k=0; err = 1; 168 | V_old = zeros(n,n); 169 | V_new = zeros(n,n); 170 | F=vec2mat(b,n)'; 171 | X=diag(ones(1,n-1),-1); 172 | X=X+X'; 173 | 174 | while(err>tol) 175 | V_new = (X*V_old + V_old*X' + h^2*F)/4; 176 | k=k+1; 177 | err = norm(V_new-V_old); 178 | V_old = V_new; 179 | end 180 | 181 | %apply boundary conditions 182 | V_new = [zeros(1,n+2); zeros(n,1) V_new zeros(n,1);zeros(1,n+2)] 183 | 184 | disp('-- Number of iterations in the version II of Jacobi method ----------') 185 | k 186 | 187 | figure(2) 188 | % ---------------------------------------- 189 | % Plots and figures for version II 190 | % ---------------------------------------- 191 | 192 | %% plotting 193 | x1=0:h:1; 194 | y1=0:h:1; 195 | 196 | subplot (1,2,1) 197 | surf(x1,y1,V_new) % same plot as above, (x1, y1 are vectors) 198 | view(2) 199 | colorbar 200 | xlabel('x_1') 201 | ylabel('x_2') 202 | zlabel('u(x_1,x_2)') 203 | title( ['solution u(x_1,x_2) Jacobi version II ',... 204 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 205 | 206 | subplot (1,2,2) 207 | 208 | surf(x1,y1,V_new) % same plot as above 209 | colorbar 210 | xlabel('x_1') 211 | ylabel('x_2') 212 | zlabel('u(x_1,x_2)') 213 | 214 | title( ['solution u(x_1,x_2) Jacobi version II',... 215 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 216 | 217 | % ---------------------------------------- 218 | % --- Jacobi's method, version III -------------------------- 219 | % ---------------------------------------- 220 | 221 | err = 1; k=0; tol=10^(-9); 222 | % Initial guess 223 | uold = zeros(n+2, n+2); 224 | unew= uold; 225 | 226 | % counter for iterations 227 | k = 0; 228 | 229 | while(err > tol) 230 | for i = 2:n+1 231 | for j = 2:n+1 232 | unew(i, j) = (uold(i-1, j) + uold(i+1, j) + uold(i, j-1) + uold(i, j+1) + h^2*b(n*(i-2)+j-1))/4.0; 233 | end 234 | end 235 | k = k+1; 236 | err = norm(unew-uold); 237 | uold = unew; 238 | end 239 | 240 | u = reshape(unew(2:end-1, 2:end-1)', n*n, 1); 241 | 242 | disp('-- Number of iterations in the version III of Jacobi method ----------') 243 | 244 | k 245 | 246 | figure(3) 247 | % ---------------------------------------- 248 | % Plots and figures for version III 249 | % ---------------------------------------- 250 | 251 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 252 | V_new = zeros(n+2,n+2); 253 | for i=1:n 254 | for j=1:n 255 | V_new(i+1,j+1) = u(j+n*(i-1)); 256 | end 257 | end 258 | 259 | %% plotting 260 | x1=0:h:1; 261 | y1=0:h:1; 262 | 263 | subplot (1,2,1) 264 | surf(x1,y1,V_new) % same plot as above, (x1, y1 are vectors) 265 | view(2) 266 | colorbar 267 | xlabel('x_1') 268 | ylabel('x_2') 269 | zlabel('u(x_1,x_2)') 270 | title( ['solution u(x_1,x_2) Jacobi version III ',... 271 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 272 | 273 | subplot (1,2,2) 274 | 275 | surf(x1,y1,V_new) % same plot as above 276 | colorbar 277 | xlabel('x_1') 278 | ylabel('x_2') 279 | zlabel('u(x_1,x_2)') 280 | 281 | title( ['solution u(x_1,x_2) Jacobi version III',... 282 | ', N = ',num2str(n),', iter. 
= ',num2str(k)]) 283 | 284 | -------------------------------------------------------------------------------- /README_programs.txt: -------------------------------------------------------------------------------- 1 | 2 | *********************** Matlab programs *************************** 3 | 4 | All programs in the file Matlab_code.zip 5 | are implemented in Matlab and correspond to the following sections 6 | of the Appendix: 7 |
8 | A.1: Matlab Programs for Gaussian Elimination using LU 9 | Factorization: the main program is 10 | 11 | Poisson2D_LU.m (Example 8.2) 12 | 13 | and the functions used by this program are in the files: 14 | 15 | DiscretePoisson2D.m (Example 8.2) 16 | LU_PP.m (Algorithm 8.1) 17 | ForwSub.m (Algorithm 8.2) 18 | BackSub.m (Algorithm 8.3) 19 | 20 |
21 | A.2: Matlab Programs for Cholesky decomposition: the main program is 22 | 23 | Poisson2D_Chol.m (Example 8.4.4) 24 | 25 | and the functions used by this program are in the files: 26 | 27 | DiscretePoisson2D.m (Example 8.2) 28 | Cholesky.m (Algorithm 8.10) 29 | ForwSub.m (Algorithm 8.2) 30 | BackSub.m (Algorithm 8.3) 31 |
32 | A.3: Matlab Programs testing Hager’s condition estimator: 33 | the main program is 34 | 35 | TestHagersCondAlg.m (Example 8.4) 36 | 37 | and the function used by this program is in the file: 38 | 39 | HagersAlg.m (Algorithm 8.7) 40 | 41 |
42 | A.4: Matlab Program FitFunctionNormaleq.m (Example 9.2) 43 | to test fitting 44 | to a polynomial using the method of normal equations. 45 |
46 | A.5: Matlab Program FitFunctionQRCGS.m to test fitting to a 47 | polynomial using QR decomposition via the classical Gram-Schmidt (CGS) 48 | orthogonalization procedure (Algorithm 9.4). 49 |
50 | A.6: Matlab Program CGS.m performing QR decomposition via the 51 | classical Gram-Schmidt (CGS) orthogonalization procedure (Algorithm 9.4). 52 |
53 | A.7: Matlab Programs to fit a function using linear splines: the 54 | main program is 55 | 56 | MainHatFit.m (Example 9.3) 57 | 58 | and the functions used by this program are in the files: 59 | 60 | fihatt.m (Example 9.3) 61 | LLSChol.m (Algorithm 8.10) 62 | LLSQR.m (Algorithm 9.4) 63 | LLSSVD.m 64 | newtonIR.m (Algorithm 8.8) 65 | 66 |
67 | A.8: Matlab Programs to fit a function using bellsplines. The 68 | main program is MainBellspline.m (Example 9.4). 69 | 70 | 71 | The functions used by this program are in the files: 72 | 73 | LLSChol.m (Algorithm 8.10) 74 | LLSQR.m (Algorithm 9.4) 75 | LLSSVD.m 76 | newtonIR.m (Algorithm 8.8) 77 |
78 | A.9: Matlab Program PowerM.m (Example 10.1) 79 | to test the Power Method (Algorithm 10.1). 80 |
81 | A.10: Matlab Program InverseIteration.m (Examples 10.5-10.8) 82 | to test the Inverse 83 | Iteration Method (Algorithm 10.2). 84 |
85 | A.11: Matlab Program MethodOrtIter.m (Examples 10.9-10.14) to test the Method of 86 | Orthogonal Iteration (Algorithm 10.3). 87 | 88 |
89 | A.12: Matlab Program MethodQR_iter.m (Example 10.15) to test the Method of 90 | QR Iteration (Algorithm 10.4). 91 |
92 | A.13: Matlab Program MethodQR_shift.m (Example 10.16) to test the Method of 93 | QR Iteration with Shifts (Algorithm 10.5). 94 |
95 | A.14: Matlab Program MethodQR_Wshift.m (Example 10.16) to test the Method of 96 | QR Iteration with Shifts (Algorithm 10.5) using Wilkinson’s Shift. 97 | 98 |
99 | A.15: Matlab Program HessenbergQR.m (Example 10.17): first Hessenberg 100 | Reduction (Algorithm 10.6) is used, and then the Method of QR 101 | Iteration (Algorithm 10.4).
102 | 103 | A.16: Matlab Program testRayleigh.m (Example 11.1) 104 | for computation of 105 | the Rayleigh Quotient (Algorithm 11.1). 106 | 107 | The function used by the main program testRayleigh.m 108 | is in the file: 109 | 110 | RayleighQuotient.m (Algorithm 11.1) 111 |
112 | A.17: Matlab Programs for the 113 | algorithm of Divide-and-Conquer: the main program is 114 | testDC.m (Example 11.2) 115 | 116 | and the function used by this program is in the file: 117 | 118 | DivideandConq.m (Algorithm 11.2) 119 |
120 | A.18: Matlab Program Bisection.m (Example 11.3, Algorithm 11.4), 121 | which finds all eigenvalues of the matrix A on the input interval [a,b). 122 | 123 | The function used by the main program Bisection.m is in the file: 124 | 125 | Negcount.m 126 |
127 | A.19: Matlab Program testClassicalJacobi.m (Example 11.4). 128 | 129 | The function used by the main program testClassicalJacobi.m 130 | is in the file: 131 | RunJacobi.m (Algorithm 11.7) 132 |
133 | A.20: Matlab Program testSVDJacobi.m (Example 11.5). 134 | The function used by the main program testSVDJacobi.m 135 | is in the file: 136 | 137 | RunSVDJacobi.m (Algorithm 11.14) 138 |
139 | A.21: Matlab Programs for the solution of the Dirichlet problem 140 | for the Poisson's equation in 2D on a square using the iterative Jacobi method: 141 | the main program is 142 | 143 | Poisson2D_Jacobi.m (Example 12.1, Algorithms 12.1, 12.2) 144 | 145 | and the function used by this program is in the file: 146 | 147 | DiscretePoisson2D.m 148 |
149 | A.22: Matlab Programs for the solution of the Dirichlet problem 150 | for the Poisson's equation in 2D on a square using the iterative 151 | Gauss-Seidel method: 152 | the main program is 153 | 154 | Poisson2D_Gauss_Seidel.m (Example 12.2, Algorithm 12.3) 155 | 156 | and the function used by this program is in the file: 157 | 158 | DiscretePoisson2D.m 159 | 160 |
161 | A.23: Matlab Programs for the solution of the Dirichlet problem 162 | for the Poisson's equation in 2D on a square using the iterative 163 | Gauss-Seidel method with red-black ordering: 164 | the main program is 165 | 166 | Poisson2D_Gauss_SeidelRedBlack.m (Example 12.2, Algorithm 12.4) 167 | 168 | 169 | and the function used by this program is in the file: 170 | 171 | DiscretePoisson2D.m 172 |
173 | A.24: Matlab Programs for the solution of the Dirichlet problem 174 | for the Poisson's equation in 2D on a square using the iterative 175 | SOR method: the main program is 176 | 177 | Poisson2D_SOR.m (Example 12.3, Algorithms 12.5, 12.6) 178 | 179 | and the function used by this program is in the file: 180 | 181 | DiscretePoisson2D.m 182 | 183 |
184 | A.25: Matlab Programs for the solution of the Dirichlet problem for the 185 | Poisson's equation in 2D on a square using the Conjugate Gradient method: 186 | the main program is 187 | 188 | Poisson2D_ConjugateGrad.m (Example 12.4, Algorithm 12.13) 189 | 190 | and the function used by this program is in the file: 191 | 192 | DiscretePoisson2D.m 193 | 194 |
195 | A.26: Matlab Programs for the solution of the Dirichlet problem for the 196 | Poisson's equation in 2D on a square using the Preconditioned Conjugate 197 | Gradient method: the main program is 198 | 199 | Poisson2D_PrecConjugateGrad.m (Example 12.6, Algorithm 12.14) 200 | 201 | and the function used by this program is in the file: 202 | 203 | DiscretePoisson2D.m 204 | 205 |
206 | ********************* C++ and PETSc programs *************************** 207 | 208 | A.27: PETSc programs for the solution of the Poisson’s
equation 209 | in two dimensions on a square using different iterative methods. 210 | 211 | All programs in the file PETSc_code.zip are implemented in C++ and the 212 | software package PETSc (http://www.mcs.anl.gov/petsc/). 213 | 214 | These 215 | programs illustrate Example 12.5: solution of the Dirichlet problem 216 | for the Poisson's equation in 2D on a square using different iterative methods. 217 | 218 | The different iterative methods 219 | are encoded by numbers 1-7 in the main program Main.cpp 220 | in the 221 | following order: 222 | 1 - Jacobi’s method, 223 | 2 - Gauss-Seidel method, 224 | 3 - Successive Overrelaxation method (SOR), 225 | 4 - Conjugate Gradient method, 226 | 5 - Conjugate Gradient method (Algorithm 12.13), 227 | 6 - Preconditioned Conjugate Gradient method, 228 | 7 - Preconditioned Conjugate Gradient method (Algorithm 12.14). 229 | 230 | Methods 1-5 use inbuilt PETSc functions, and 231 | methods 6,7 implement algorithms 12.13, 12.14, respectively. For 232 | example, we can run the program Main.cpp using SOR method as follows: 233 | 234 | > nohup Main 3 > result.m 235 | 236 | After running the results will 237 | be printed in the file result.m and can be viewed in Matlab using the 238 | command surf(result). 239 | 240 | -------------------------------------------------------------------------------- /Matlab_code/Poisson2D_SOR.m: -------------------------------------------------------------------------------- 1 | % ---------------------------------------- 2 | % Main program for the solution of Poisson's equation 3 | % - a laplace = f in 2D using iterative SOR method 4 | % ---------------------------------------- 5 | 6 | close all 7 | clc 8 | clear 9 | clf 10 | %Define input parameters 11 | n=20; % number of inner nodes in one direction. 12 | a_amp = 12; % amplitude for the function a(x_1,x_2) 13 | f_amp = 1; % we can choose f=1, 50, 100 14 | x_0=0.5; 15 | y_0=0.5; 16 | c_x=1; 17 | c_y=1; 18 | 19 | h = 1/(n+1); % define step length 20 | 21 | % ---------------------------------------- 22 | % Computing all matrices and vectors 23 | % ---------------------------------------- 24 | % Generate a n*n by n*n stiffness matrix 25 | S = DiscretePoisson2D(n); 26 | 27 | %% generate coefficient matrix of a((x_1)_i,(x_2)_j) = a(i*h,j*h) 28 | C = zeros(n,n); 29 | for i=1:n 30 | for j=1:n 31 | C(i,j) = 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 32 | +(j*h-y_0)^2/(2*c_y^2))); 33 | end 34 | end 35 | % create diagonal matrix from C 36 | D = zeros(n^2,n^2); 37 | for i=1:n 38 | for j=1:n 39 | D(j+n*(i-1),j+n*(i-1)) = C(i,j); 40 | end 41 | end 42 | 43 | % If f is constant. 44 | % f = f_amp*ones(n^2,1); 45 | 46 | % If f is Gaussian function. 47 | f=zeros(n^2,1); 48 | for i=1:n 49 | for j=1:n 50 | f(n*(i-1)+j)=f_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 
51 | +(j*h-y_0)^2/(2*c_y^2))); 52 | end 53 | end 54 | 55 | % Compute vector of right hand side 56 | % b = D^(-1)*f computed as b(i,j)=f(i,j)/a(i,j) 57 | 58 | b=zeros(n^2,1); 59 | for i=1:n 60 | for j=1:n 61 | b(n*(i-1)+j)=f(n*(i-1)+j)/C(i,j); % Use coefficient matrix C or 62 | % diagonal matrix D to get a(i,j) 63 | end 64 | end 65 | 66 | % ---------------------------------------- 67 | % Solution of 1/h^2 S u = b using SOR method 68 | % with red-black ordering, version I 69 | % ---------------------------------------- 70 | 71 | err = 1; k=0; sch = 0; tol=10^(-9); 72 | V = zeros(n,n); 73 | V_old = zeros(n,n); 74 | F=vec2mat(b,n)'; 75 | X=diag(ones(1,n-1),-1); 76 | X=X+X'; 77 | 78 | %arrange red-black indexing 79 | 80 | blackindex = invhilb(n) < 0; 81 | redindex = fliplr(blackindex); 82 | B=V; 83 | V(redindex)=0; 84 | 85 | R=V; 86 | V(blackindex)=0; 87 | 88 | redF = F; redF(blackindex)=0; 89 | blackF = F; blackF(redindex)=0; 90 | 91 | % extract matrices L and U for matrix RSOR 92 | 93 | L=tril(S,-1); 94 | U=L'; 95 | Dinv=diag(diag(S).^(-1)); 96 | L = Dinv*(-L); 97 | U = Dinv*(-U); 98 | D=diag(ones(1,n*n)); 99 | 100 | omegas = 1.05:0.05:1.95; 101 | for omega = omegas 102 | k=0; 103 | err =1; 104 | B=V; 105 | V(redindex)=0; 106 | 107 | R=V; 108 | V(blackindex)=0; 109 | 110 | % counter for omega 111 | sch = sch+1; 112 | 113 | while(err>tol) 114 | R = (1 - omega)*R + omega*(X*B + B*X + h^2*redF)/4; 115 | B = (1- omega)*B + omega*(X*R + R*X + h^2*blackF)/4; 116 | k=k+1; 117 | 118 | V_new =R+B; 119 | err = norm(V_new - V_old); 120 | V_old = V_new; 121 | end 122 | 123 | % the matrix RSOR in the method SOR: x_m+1 = RSOR*x_m + c_SOR 124 | RSOR = inv(D - omega*L)*((1-omega)*D + omega*U); 125 | 126 | lambda = max(abs(eig(RSOR))); 127 | 128 | mu = (lambda + omega -1)/(sqrt(lambda)*omega); 129 | 130 | disp('-- Relaxation parameter in SOR method ----------') 131 | omega 132 | disp('-- Computed optimal relaxation parameter ----------') 133 | 134 | omega_opt = 2/(1 + sqrt(1 - mu^2)) 135 | 136 | if (omega <= 2.0 && omega >=omega_opt ) 137 | disp('-- omega_opt < omega < 2.0 ----------') 138 | radius = omega -1 139 | elseif(omega <= omega_opt && omega > 0) 140 | disp('-- omega < omega_opt ----------') 141 | omega_tail = -omega +0.5*omega^2*mu^2 ... 142 | + omega*mu*sqrt(1 - omega + 0.25*omega^2*mu^2) 143 | radius = 1 + omega_tail 144 | end 145 | 146 | disp('-- Number of iterations in SOR method ----------') 147 | k 148 | 149 | iterations(sch) = k; 150 | spectral_radius(sch)= radius; 151 | omega_optimal(sch) = omega_opt; 152 | end 153 | 154 | % apply zero boundary conditions 155 | V_new = [zeros(1,n+2); zeros(n,1) V_new zeros(n,1);zeros(1,n+2)]; 156 | 157 | % ---------------------------------------- 158 | % Plots and figures for SOR method, version I 159 | % ---------------------------------------- 160 | 161 | figure(1) 162 | %% plotting 163 | x1=0:h:1; 164 | y1=0:h:1; 165 | 166 | subplot (2,2,1) 167 | surf(x1,y1,V_new) % same plot as above, (x1, y1 are vectors) 168 | view(2) 169 | colorbar 170 | xlabel('x_1') 171 | ylabel('x_2') 172 | zlabel('u(x_1,x_2)') 173 | title( ['solution u(x_1,x_2) in SOR method ',... 174 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 175 | 176 | subplot (2,2,2) 177 | 178 | surf(x1,y1,V_new) % same plot as above 179 | colorbar 180 | xlabel('x_1') 181 | ylabel('x_2') 182 | zlabel('u(x_1,x_2)') 183 | 184 | title( ['solution u(x_1,x_2) in SOR method',... 185 | ', N = ',num2str(n),', iter. 
= ',num2str(k)]) 186 | 187 | % Plotting a(x,y) 188 | Z_a= zeros(n+2); 189 | for i=1:(n+2) 190 | for j=1:(n+2) 191 | Z_a(i,j)= 1 + a_amp*exp(-((i*h-x_0)^2/(2*c_x^2)... 192 | +(j*h-y_0)^2/(2*c_y^2))); 193 | end 194 | end 195 | 196 | subplot (2,2,3) 197 | surf(x1,y1,Z_a) 198 | xlabel('x_1') 199 | ylabel('x_2') 200 | zlabel('a(x_1,x_2)') 201 | title( ['coefficient a(x_1,x_2) with A = ',num2str(a_amp)]) 202 | 203 | % plott the function f(x,y) 204 | Z_f= zeros(n+2); 205 | for i=1:(n+2) 206 | for j=1:(n+2) 207 | Z_f(i,j)=f_amp*exp(-((x1(i)-x_0)^2/(2*c_x^2)... 208 | +(y1(j)-y_0)^2/(2*c_y^2))); 209 | end 210 | end 211 | 212 | subplot (2,2,4) 213 | surf(x1,y1,Z_f) 214 | xlabel('x_1') 215 | ylabel('x_2') 216 | zlabel('f(x_1,x_2)') 217 | title( ['f(x_1,x_2) with A_f = ',num2str(f_amp)]) 218 | 219 | % plot convergence of SOR depending on omega 220 | figure(2) 221 | 222 | plot(omegas, iterations,'b o-', 'LineWidth',2) 223 | hold on 224 | plot(omega_optimal, iterations,'r o ', 'LineWidth',2) 225 | 226 | xlabel('Relaxation parameter \omega') 227 | ylabel('Number of iterations in SOR') 228 | legend('SOR(\omega)','Computed optimal \omega') 229 | title(['Mesh: ',num2str(n),' by ',num2str(n),' points']) 230 | 231 | % plot convergence of SOR depending on omega 232 | figure(3) 233 | plot(omegas, spectral_radius,'b o-', 'LineWidth',2) 234 | 235 | xlabel('Relaxation parameter \omega') 236 | ylabel(' Spectral radius \rho(R_{SOR(\omega)})') 237 | legend('\rho(R_{SOR(\omega)})') 238 | title(['Mesh: ',num2str(n),' by ',num2str(n),' points']) 239 | 240 | % ---------------------------------------- 241 | % Solution of 1/h^2 S u = b using iterative SOR 242 | % with red-black ordering, version II 243 | % ---------------------------------------- 244 | 245 | disp('-- Works SOR method, version II ----------') 246 | 247 | err = 1; k=0; tol=10^(-9); 248 | 249 | % choose relaxation parameter 0 < omega < 2 250 | % optimal omega can be computed as 251 | omega_opt = 2/(1 + sin(pi/(n+1))) 252 | % Initial guess 253 | uold = zeros(n+2, n+2); 254 | unew= uold; 255 | 256 | while(err > tol) 257 | % Red nodes 258 | for i = 2:n+1 259 | for j = 2:n+1 260 | if(mod(i+j,2) == 0) 261 | unew(i, j) = (1-omega)*unew(i,j) + ... 262 | omega*(uold(i-1, j) + uold(i+1, j) + uold(i, j-1) + uold(i, j+1) ... 263 | + h^2*b(n*(i-2)+j-1))/4.0; 264 | % for computation of residual 265 | u(j-1 + n*(i-2)) = unew(i,j); 266 | end 267 | end 268 | end 269 | 270 | % Black nodes 271 | for i = 2:n+1 272 | for j = 2:n+1 273 | if(mod(i+j,2) == 1) 274 | unew(i,j) = (1-omega)*unew(i,j) + ... 275 | omega*0.25*(unew(i-1,j) + unew(i+1,j) + unew(i,j-1) + unew(i,j+1) + ... 276 | h^2*b(n*(i-2)+j-1)); 277 | % for computation of residual 278 | u(j-1 + n*(i-2)) = unew(i,j); 279 | end 280 | end 281 | end 282 | 283 | k = k+1; 284 | 285 | % different stopping rules 286 | err = norm(unew-uold); 287 | %computation of residual 288 | % err = norm(S*u' - h^2*b); 289 | uold = unew; 290 | end 291 | 292 | u = reshape(unew(2:end-1, 2:end-1)', n*n, 1); 293 | 294 | disp('-- Number of iterations in the version II of SOR ----------') 295 | 296 | k 297 | 298 | % ---------------------------------------- 299 | % Plots and figures for version II 300 | % ---------------------------------------- 301 | 302 | figure(4) 303 | % sort the data in u into the mesh-grid, the boundary nodes are zero. 
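% The interior value u(j+n*(i-1)) is placed at V_new(i+1,j+1), while the
% first and last rows and columns of V_new stay zero, matching the
% homogeneous Dirichlet boundary condition of the model problem.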
304 | V_new = zeros(n+2,n+2); 305 | for i=1:n 306 | for j=1:n 307 | V_new(i+1,j+1) = u(j+n*(i-1)); 308 | end 309 | end 310 | 311 | %% plotting 312 | x1=0:h:1; 313 | y1=0:h:1; 314 | 315 | subplot (1,2,1) 316 | surf(x1,y1,V_new) % same plot as above, (x1, y1 are vectors) 317 | view(2) 318 | colorbar 319 | xlabel('x_1') 320 | ylabel('x_2') 321 | zlabel('u(x_1,x_2)') 322 | title( ['solution u(x_1,x_2) in SOR with Red-Black ordering, version II',... 323 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 324 | 325 | subplot (1,2,2) 326 | 327 | surf(x1,y1,V_new) % same plot as above 328 | colorbar 329 | xlabel('x_1') 330 | ylabel('x_2') 331 | zlabel('u(x_1,x_2)') 332 | 333 | title( ['solution u(x_1,x_2) in SOR with Red-Black ordering, version II',... 334 | ', N = ',num2str(n),', iter. = ',num2str(k)]) 335 | 336 | --------------------------------------------------------------------------------