
Week3: Finished Exercises

This commit is contained in:
Eric Teunis de Boone 2020-02-20 18:21:06 +01:00
parent 667490a069
commit 0b5dc4864e
2 changed files with 264 additions and 0 deletions

week3/ex1.py Executable file, 151 additions

@@ -0,0 +1,151 @@
#!/usr/bin/env python3
import numpy as np
from matplotlib import pyplot
from solvers import *

def linear_equation_system_1():
    A = [[ 10, -1,  2,  0 ],
         [ -1, 11, -1,  3 ],
         [  2, -1, 10, -1 ],
         [  0,  3, -1,  8 ]]
    b = [ 6, 25, -11, 15 ]
    return A, b

def linear_equation_system_2():
    A = [[ 0.2,  0.1,  1.0, 1.0,   0.0],
         [ 0.1,  4.0, -1.0, 1.0,  -1.0],
         [ 1.0, -1.0, 60.0, 0.0,  -2.0],
         [ 1.0,  1.0,  0.0, 8.0,   4.0],
         [ 0.0, -1.0, -2.0, 4.0, 700.0]]
    b = [ 1, 2, 3, 4, 5 ]
    return A, b

def get_solver_stats(A = None, b = None, epsilons = None, solvers = None):
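    """Run every solver in `solvers` on the system (A, b), once per tolerance.

    Returns three (len(solvers), len(epsilons)) arrays: iteration counts,
    maximum-norm errors against numpy.linalg.solve, and wall-clock times.
    """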
    import timeit

    if A is None:
        A, b = linear_equation_system_1()
    if epsilons is None:
        epsilons = np.array([ 0.1, 0.01, 0.001, 0.0001 ])
    if solvers is None:
        solvers = [ jacobi, steepest_descent, conjugate_gradient ]

    x_exact = np.linalg.solve(A, b)

    N = len(solvers)
    M = len(epsilons)
    steps = np.zeros((N, M), dtype=int)   # np.int is deprecated/removed in NumPy
    diffs = np.zeros((N, M), dtype=np.float64)
    times = np.zeros((N, M), dtype=np.float64)

    for i, func in enumerate(solvers):
        for j, eps in enumerate(epsilons):
            # timeit.timeit() would time a dummy statement; use the default
            # timer to measure the elapsed wall-clock time of one solve.
            start = timeit.default_timer()
            x, k = func(A, b, eps)
            times[i][j] = timeit.default_timer() - start

            diffs[i][j] = diff(x_exact, x)
            steps[i][j] = k

    return steps, diffs, times

def plot_steps(epsilons, steps, names, fmt = None, show = True):
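    """Plot iteration counts versus tolerance on a log-log scale.

    `fmt` may be a single matplotlib format string or a list with one format
    per solver; `names` provides the legend labels.
    """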
    pyplot.subplots()
    pyplot.title("Performance for different Iterative Methods")
    pyplot.xlabel(r"Accuracy $\epsilon$")   # raw string keeps the TeX backslash intact
    pyplot.ylabel("Steps $N$")
    pyplot.yscale('log')
    pyplot.xscale('log')

    if fmt is None:
        fmt = '-d'

    for i, name in enumerate(names):
        if isinstance(fmt, list):
            pyplot.plot(epsilons, steps[i], fmt[i], label=name)
        else:
            pyplot.plot(epsilons, steps[i], fmt, label=name)

    pyplot.legend()
    if show:
        pyplot.show()

# Main functions
def main1():
    solvers = [ jacobi, steepest_descent, conjugate_gradient ]

    ## System 1
    print("Linear Equation System 1")
    print(24*"-")

    A, b = linear_equation_system_1()
    epsilons = np.logspace(-1, -4, 4)
    steps, diffs, times = get_solver_stats(A, b, epsilons, solvers)

    for i, func in enumerate(solvers):
        print("Name: ", func.__qualname__)
        print("Eps: ", epsilons)
        print("Diffs:", diffs[i])
        print("Steps:", steps[i])
        print("Times:", times[i])
        print()

    plot_steps(epsilons, steps, [x.__name__ for x in solvers])

def main2():
    solvers = [ jacobi, steepest_descent, conjugate_gradient ]

    # System 2
    print("Linear Equation System 2")
    print("------------------------")

    A, b = linear_equation_system_2()
    epsilons = np.logspace(-1, -8, 8)
    steps, diffs, times = get_solver_stats(A, b, epsilons, solvers)

    print("Condition Number:", np.linalg.cond(A))
    print("""
Both Steepest Descent and Conjugate Gradient are fast at loose tolerances.
However, they take many more steps than the Jacobi method once we require
higher accuracy. This is explained by the large condition number of A.
""")

    # Preconditioning of System 2
    print("\nConditioned Linear Equation System 2")
    print("------------------------------------")
    D = np.diag(np.array(A, dtype=np.float64))
    C = np.diagflat(np.sqrt(np.reciprocal(D)))

    A_tilde = np.dot(np.dot(C, A), C)
    b_tilde = np.dot(C, b)

    cond_steps, cond_diffs, cond_times = get_solver_stats(A_tilde, b_tilde, epsilons, solvers)

    print("Condition Number:", np.linalg.cond(A_tilde))
    print("""
With preconditioning, the gradient methods become much faster. This is
especially visible at loose tolerances for the conjugate gradient method.
""")

    all_steps = np.concatenate((steps, cond_steps), axis=0)
    fmts = ['b-d', 'g-*', 'y-^', 'b--d', 'g--*', 'y--^']
    names = [x.__name__ for x in solvers] + ["Conditioned " + x.__name__ for x in solvers]
    plot_steps(epsilons, all_steps, names, fmts)

if __name__ == "__main__":
    main1()
    main2()

week3/solvers.py Normal file, 113 additions

@@ -0,0 +1,113 @@
#!/usr/bin/env python3
import numpy as np
def diff(a, b):
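    """Return the maximum-norm (infinity norm) difference between a and b."""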
    return np.amax(np.abs(a - b))

def jacobi(A, b, eps):
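    """Solve A x = b with the Jacobi method.

    A is split as A = D - L - U (diagonal, strictly lower and strictly
    upper triangular parts); the fixed-point iteration is
    x_(k+1) = D^(-1) (L + U) x_k + D^(-1) b, stopped once successive
    iterates differ by less than eps in the maximum norm.
    """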
    A = np.array(A, dtype=np.float64)
    b = np.array(b, dtype=np.float64)

    D = np.diag(A)
    L = -np.tril(A, -1)
    U = -np.triu(A, 1)
    D_inv = np.diagflat(np.reciprocal(D))

    # initially x_f = x_(i-1); this changes once in the loop
    x_i = np.dot(D_inv, b)
    x_f = np.zeros(len(A))

    k = 1
    while diff(x_i, x_f) >= eps:
        k += 1

        # Save the previous solution vector as x_f
        x_f = x_i

        # Create the new solution vector
        x_i = np.dot(np.dot(D_inv, (L + U)), x_f) + np.dot(D_inv, b)

    return x_i, k

def steepest_descent(A, b, eps):
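    """Solve A x = b with steepest descent (A symmetric positive definite).

    Each step walks along the current residual v = b - A x with the exact
    line-search step size t = (v . v) / (v . A v).
    """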
    A = np.array(A, dtype=np.float64)
    b = np.array(b, dtype=np.float64)

    # initially x_f = x_(i-1); this changes once in the loop
    x_f = np.zeros(len(A), dtype=np.float64)

    k = 1
    v_f = b
    t = np.dot(v_f, v_f) / np.dot(v_f, np.dot(A, v_f))
    x_i = x_f + t * v_f

    while diff(x_i, x_f) >= eps:
        k += 1

        # Precalculate the residual v_f and step size t
        v_f = b - np.dot(A, x_i)
        t = np.dot(v_f, v_f) / np.dot(v_f, np.dot(A, v_f))

        # Save the previous solution vector as x_f
        x_f = x_i

        # Create the new solution vector
        x_i = x_f + t * v_f

    return x_i, k

def conjugate_gradient(A, b, eps):
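    """Solve A x = b with the conjugate gradient method (A symmetric
    positive definite).

    The search directions v are kept A-conjugate through the update
    v_(k+1) = r_(k+1) + s v_k with s = (r_(k+1) . r_(k+1)) / (r_k . r_k).
    """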
    A = np.array(A, dtype=np.float64)
    b = np.array(b, dtype=np.float64)

    # initially x_f = x_(i-1); this changes once in the loop
    x_f = np.zeros(len(A), dtype=np.float64)
    r_f = b - np.dot(A, x_f)
    v_f = r_f

    k = 1

    # Calculate the first iteration
    t = np.dot(r_f, r_f) / np.dot(v_f, np.dot(A, v_f))
    x_i = x_f + t * v_f
    r_i = r_f - t * np.dot(A, v_f)
    s = np.dot(r_i, r_i) / np.dot(r_f, r_f)
    # The direction update is r + s*v; a minus sign here would destroy
    # the A-conjugacy of the search directions
    v_i = r_i + s * v_f

    # Set the r and v vectors for the next loop
    r_f = r_i
    v_f = v_i

    while diff(x_i, x_f) >= eps:
        k += 1
        t = np.dot(r_f, r_f) / np.dot(v_f, np.dot(A, v_f))

        # Save the previous solution vector as x_f
        x_f = x_i

        # Create the new solution vector
        x_i = x_f + t * v_f

        # Calculate the new r and v vectors
        r_i = r_f - t * np.dot(A, v_f)
        s = np.dot(r_i, r_i) / np.dot(r_f, r_f)
        v_i = r_i + s * v_f

        # Save the r and v vectors for the next loop
        r_f = r_i
        v_f = v_i

    return x_i, k
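

# Illustrative usage, a minimal self-test sketch (the real driver lives in
# week3/ex1.py): solve a small symmetric positive-definite system with each
# method and compare against numpy.linalg.solve.
if __name__ == "__main__":
    A = [[ 4.0, 1.0 ],
         [ 1.0, 3.0 ]]
    b = [ 1.0, 2.0 ]

    x_ref = np.linalg.solve(A, b)
    for solver in (jacobi, steepest_descent, conjugate_gradient):
        x, k = solver(A, b, 1e-10)
        print(f"{solver.__name__:20s} steps={k:3d} error={diff(x, x_ref):.2e}")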