uni-m.cds-num-met/week3/ex1.py

#!/usr/bin/env python3
import numpy as np
from matplotlib import pyplot
# explicit import instead of wildcard: makes the origin of diff() visible
from solvers import jacobi, steepest_descent, conjugate_gradient, diff
def linear_equation_system_1():
    A = [[ 10, -1,  2,  0 ],
         [ -1, 11, -1,  3 ],
         [  2, -1, 10, -1 ],
         [  0,  3, -1,  8 ]]
    b = [ 6, 25, -11, 15 ]
    return A, b
def linear_equation_system_2():
    A = [[ 0.2,  0.1,  1.0, 1.0,   0.0],
         [ 0.1,  4.0, -1.0, 1.0,  -1.0],
         [ 1.0, -1.0, 60.0, 0.0,  -2.0],
         [ 1.0,  1.0,  0.0, 8.0,   4.0],
         [ 0.0, -1.0, -2.0, 4.0, 700.0]]
    b = [ 1, 2, 3, 4, 5 ]
    return A, b
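
# The solvers module imported above is not part of this file. As a rough,
# hedged sketch only: each solver is assumed to take (A, b, eps) and return
# (x, k), where k is the number of iterations performed, and diff() is assumed
# to compute an error norm. The Jacobi iteration below illustrates that
# assumed interface; it is NOT the course's actual implementation, and the
# "_sketch" name is hypothetical.
def _jacobi_sketch(A, b, eps, max_iter=10000):
    A = np.asarray(A, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    x = np.zeros_like(b)
    D = np.diag(A)              # diagonal entries of A
    R = A - np.diagflat(D)      # off-diagonal remainder
    for k in range(1, max_iter + 1):
        x_new = (b - R @ x) / D     # x_{k+1} = D^{-1} (b - R x_k)
        if np.linalg.norm(x_new - x) < eps:
            return x_new, k
        x = x_new
    return x, max_iter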
def get_solver_stats(A=None, b=None, epsilons=None, solvers=None):
    import timeit
    if A is None:
        A, b = linear_equation_system_1()
    if epsilons is None:
        epsilons = np.array([0.1, 0.01, 0.001, 0.0001])
    if solvers is None:
        solvers = [jacobi, steepest_descent, conjugate_gradient]
    x_exact = np.linalg.solve(A, b)
    N = len(solvers)
    M = len(epsilons)
    steps = np.zeros((N, M), dtype=int)        # np.int was removed in NumPy >= 1.24
    diffs = np.zeros((N, M), dtype=np.float64)
    times = np.zeros((N, M), dtype=np.float64)
    for i, func in enumerate(solvers):
        for j, eps in enumerate(epsilons):
            # timeit.timeit() benchmarks a statement; for a plain elapsed-time
            # measurement use timeit.default_timer() before and after the call.
            start = timeit.default_timer()
            x, k = func(A, b, eps)
            times[i, j] = timeit.default_timer() - start
            diffs[i, j] = diff(x_exact, x)
            steps[i, j] = k
    return steps, diffs, times
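
# Example usage (hedged; relies on the solvers module being importable):
#   steps, diffs, times = get_solver_stats()                    # defaults to system 1
#   steps, diffs, times = get_solver_stats(A, b, epsilons, [jacobi])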
def plot_steps(epsilons, steps, names, fmt=None, show=True):
    pyplot.subplots()
    pyplot.title("Performance for different Iterative Methods")
    pyplot.xlabel(r"Accuracy $\epsilon$")   # raw string, so "\e" is not parsed as an escape
    pyplot.ylabel("Steps $N$")
    pyplot.yscale('log')
    pyplot.xscale('log')
    if fmt is None:
        fmt = '-d'
    for i, name in enumerate(names):
        if isinstance(fmt, list):
            pyplot.plot(epsilons, steps[i], fmt[i], label=name)
        else:
            pyplot.plot(epsilons, steps[i], fmt, label=name)
    pyplot.legend()
    if show:
        pyplot.show()
# Main functions
def main1():
    solvers = [jacobi, steepest_descent, conjugate_gradient]

    ## System 1
    print("Linear Equation System 1")
    print(24 * "-")
    A, b = linear_equation_system_1()
    epsilons = np.logspace(-1, -4, 4)
    steps, diffs, times = get_solver_stats(A, b, epsilons, solvers)
    for i, func in enumerate(solvers):
        print("Name: ", func.__qualname__)
        print("Eps:  ", epsilons)
        print("Diffs:", diffs[i])
        print("Steps:", steps[i])
        print("Times:", times[i])
        print()
    plot_steps(epsilons, steps, [f.__name__ for f in solvers])
def main2():
    solvers = [jacobi, steepest_descent, conjugate_gradient]

    # System 2
    print("Linear Equation System 2")
    print("------------------------")
    A, b = linear_equation_system_2()
    epsilons = np.logspace(-1, -8, 8)
    steps, diffs, times = get_solver_stats(A, b, epsilons, solvers)
    print("Condition Number:", np.linalg.cond(A))
    print("""
Both Steepest Descent and Conjugate Gradient are fast at low accuracies
(large epsilon). However, they take many more steps than the Jacobi method
once we require higher accuracies (smaller epsilon). This is explained by
the large condition number of the system.
""")

    # Preconditioning of System 2
    print("\nConditioned Linear Equation System 2")
    print("------------------------------------")
    D = np.diag(np.array(A, dtype=np.float64))   # diagonal entries of A
    C = np.diagflat(np.sqrt(np.reciprocal(D)))   # C = D^(-1/2)
    A_tilde = np.dot(np.dot(C, A), C)
    b_tilde = np.dot(C, b)
    cond_steps, cond_diffs, cond_times = get_solver_stats(A_tilde, b_tilde, epsilons, solvers)
    print("Condition Number:", np.linalg.cond(A_tilde))
    print("""
With preconditioning, the gradient methods become much faster. The effect is
especially visible for the Conjugate Gradient method at low accuracies.
""")
    all_steps = np.concatenate((steps, cond_steps), axis=0)
    fmts = ['b-d', 'g-*', 'y-^', 'b--d', 'g--*', 'y--^']
    names = [x.__name__ for x in solvers] + ["Conditioned " + x.__name__ for x in solvers]
    plot_steps(epsilons, all_steps, names, fmts)
if __name__ == "__main__":
    main1()
    main2()