2020-02-20 18:21:06 +01:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
|
|
|
|
import numpy as np
|
2020-02-27 16:02:04 +01:00
|
|
|
from itertools import count as count
|
2020-02-20 18:21:06 +01:00
|
|
|
|
|
|
|
def diff(a, b):
    """Return the largest absolute element-wise difference between a and b."""
    delta = np.abs(a - b)
    return np.amax(delta)
|
|
|
|
|
2020-02-27 16:02:04 +01:00
|
|
|
def jacobi(A, b, eps, max_iter = None):
    """ Use the Jacobi Method to solve a Linear System.

    Parameters
    ----------
    A : array_like
        Square coefficient matrix with a non-zero diagonal.  Convergence
        is guaranteed e.g. for strictly diagonally dominant matrices.
    b : array_like
        Right-hand-side vector.
    eps : float
        Stop once the max-norm difference between successive iterates
        falls below this threshold.
    max_iter : int, optional
        Maximum number of iterations; ``None`` (default) iterates until
        convergence.

    Returns
    -------
    (x, i) : (numpy.ndarray, int)
        Approximate solution and the iteration index at convergence.

    Raises
    ------
    RuntimeError
        If ``max_iter`` iterations pass without convergence.
    """
    A = np.array(A, dtype=np.float64)
    b = np.array(b, dtype=np.float64)

    # Split A = D - L - U into its diagonal, strictly lower and strictly
    # upper triangular parts (note the sign convention: L and U are negated).
    D = np.diag(A)
    L = -np.tril(A, -1)
    U = -np.triu(A, 1)

    # D is diagonal, so inverting it is an element-wise reciprocal.
    D_inv = np.diagflat(np.reciprocal(D))

    x_0 = D_inv @ b

    for i in count():
        # Jacobi update: x_{k+1} = D^{-1} ((L + U) x_k + b).
        # BUG FIX: the "+ b" term was missing, so the iterates decayed
        # towards 0 and the returned "solution" was wrong.
        x_1 = D_inv @ ((L + U) @ x_0 + b)

        # Are we close enough?
        if diff(x_0, x_1) < eps:
            return x_1, i

        # Running out of iterations
        # BUG FIX: the test was `max_iter >= i`, which is already true at
        # i == 0 and aborted on the very first iteration.
        if max_iter is not None and i >= max_iter:
            raise RuntimeError("Did not converge in {} steps".format(max_iter))

        # Set values for next loop
        x_0 = x_1
|
2020-02-20 18:21:06 +01:00
|
|
|
|
2020-02-27 16:02:04 +01:00
|
|
|
def steepest_descent(A, b, eps, max_iter = None):
    """ Use Steepest Descent to solve a Linear System.

    NOTE(review): the exact line search below is the standard form for a
    symmetric positive definite A — confirm callers only pass SPD matrices.

    Parameters
    ----------
    A : array_like
        Square coefficient matrix.
    b : array_like
        Right-hand-side vector.
    eps : float
        Stop once the max-norm difference between successive iterates
        falls below this threshold.
    max_iter : int, optional
        Maximum number of iterations; ``None`` (default) iterates until
        convergence.

    Returns
    -------
    (x, i) : (numpy.ndarray, int)
        Approximate solution and the iteration index at convergence.

    Raises
    ------
    RuntimeError
        If ``max_iter`` iterations pass without convergence.
    """
    A = np.array(A, dtype=np.float64)
    b = np.array(b, dtype=np.float64)

    # Start from the zero vector.
    x_0 = np.zeros(len(A), dtype=np.float64)

    for i in count():
        # The residual r = b - A x is the direction of steepest descent.
        v = b - A @ x_0

        # Optimal step length along v (exact line search).
        t = np.dot(v, v) / np.dot(v, A @ v)

        x_1 = x_0 + t * v

        # Are we close enough?
        if diff(x_0, x_1) < eps:
            return x_1, i

        # Running out of iterations
        # BUG FIX: the test was `max_iter >= i`, which is already true at
        # i == 0 and aborted on the very first iteration.
        if max_iter is not None and i >= max_iter:
            raise RuntimeError("Did not converge in {} steps".format(max_iter))

        # Set values for next loop
        x_0 = x_1
|
2020-02-20 18:21:06 +01:00
|
|
|
|
2020-02-27 16:02:04 +01:00
|
|
|
def conjugate_gradient(A, b, eps, max_iter = None):
    """ Use the Conjugate Gradient Method to solve a Linear System.

    NOTE(review): the recurrences below are the standard CG formulas for a
    symmetric positive definite A — confirm callers only pass SPD matrices.
    If b is the zero vector, r_0_square is 0 and the step length is
    undefined (0/0); callers should not pass a degenerate system.

    Parameters
    ----------
    A : array_like
        Square coefficient matrix.
    b : array_like
        Right-hand-side vector.
    eps : float
        Stop once the max-norm difference between successive iterates
        falls below this threshold.
    max_iter : int, optional
        Maximum number of iterations; ``None`` (default) iterates until
        convergence.

    Returns
    -------
    (x, i) : (numpy.ndarray, int)
        Approximate solution and the iteration index at convergence.

    Raises
    ------
    RuntimeError
        If ``max_iter`` iterations pass without convergence.
    """
    A = np.array(A, dtype=np.float64)
    b = np.array(b, dtype=np.float64)

    # Setup vectors: zero initial guess, residual, first search direction.
    x_0 = np.zeros(len(A), dtype=np.float64)
    r_0 = b - A @ x_0
    v = r_0.copy()

    for i in count():
        # PERF FIX: the original also computed `A @ x_0` here every
        # iteration but never used it; only A @ v is needed.
        Av = A @ v

        r_0_square = np.dot(r_0, r_0)

        # Step length along the current search direction.
        t = r_0_square / np.dot(v, Av)
        x_1 = x_0 + t * v

        # Updated residual, reusing Av instead of recomputing A @ x_1.
        r_1 = r_0 - t * Av

        # Gram-Schmidt coefficient making the new direction A-conjugate
        # to the previous ones.
        s = np.dot(r_1, r_1) / r_0_square
        v = r_1 + s * v

        # Are we close enough?
        if diff(x_0, x_1) < eps:
            return x_1, i

        # Running out of iterations
        # BUG FIX: the test was `max_iter >= i`, which is already true at
        # i == 0 and aborted on the very first iteration.
        if max_iter is not None and i >= max_iter:
            raise RuntimeError("Did not converge in {} steps".format(max_iter))

        # Set values for next loop
        x_0 = x_1
        r_0 = r_1
|