Week3: Rewrote Solvers to include a max_iter parameter
parent 79984000ff
commit 4b90ae8898
1 changed file with 56 additions and 70 deletions
week3/solvers.py
@@ -1,113 +1,99 @@
 #!/usr/bin/env python3
 
 import numpy as np
 
+from itertools import count as count
 
 
 def diff(a, b):
     return np.amax(np.abs(a-b))
 
 
-def jacobi(A, b, eps):
+def jacobi(A, b, eps, max_iter=None):
+    """ Use the Jacobi Method to solve a Linear System. """
+
     A = np.array(A, dtype=np.float64)
     b = np.array(b, dtype=np.float64)
 
+    # Determine Diagonal and Upper and Lower matrices
     D = np.diag(A)
     L = -np.tril(A, -1)
     U = -np.triu(A, 1)
 
     D_inv = np.diagflat(np.reciprocal(D))
 
-    # initially x_f = x_(i-1)
-    # this changes when in the loop
-    x_i = np.dot(D_inv, b)
-    x_f = np.zeros(len(A))
-    k = 1
-
-    while diff(x_i, x_f) >= eps:
-        k += 1
-
-        # Save the previous solution vector as x_f
-        x_f = x_i
-
-        # Create new solution vector
-        x_i = np.dot(np.dot(D_inv, (L + U)), x_f) + np.dot(D_inv, b)
-
-    return x_i, k
+    # Initial guess: one sweep from the zero vector
+    x_0 = D_inv @ b
+
+    for i in count():
+        # Create new solution vector
+        x_1 = D_inv @ (L + U) @ x_0 + D_inv @ b
+
+        # Are we close enough?
+        if diff(x_0, x_1) < eps:
+            return x_1, i
+
+        # Running out of iterations
+        if max_iter is not None and i >= max_iter:
+            raise RuntimeError("Did not converge in {} steps".format(max_iter))
+
+        # Set values for next loop
+        x_0 = x_1
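Not part of the commit: a minimal usage sketch of the new max_iter keyword, assuming the module is importable as solvers and using a made-up, strictly diagonally dominant system (exact solution [1, 1, 1]). RuntimeError is the exception the rewritten solvers raise when the iteration budget is exhausted.

from solvers import jacobi  # hypothetical import of week3/solvers.py

# Made-up, strictly diagonally dominant system; exact solution is [1, 1, 1]
A = [[10.0, 2.0, 1.0],
     [2.0, 10.0, 1.0],
     [1.0, 1.0, 10.0]]
b = [13.0, 13.0, 12.0]

try:
    x, iterations = jacobi(A, b, eps=1e-10, max_iter=500)
    print("converged to", x, "after", iterations, "iterations")
except RuntimeError as err:
    # Raised when the tolerance is not reached within max_iter sweeps
    print("no convergence:", err)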
 
 
-def steepest_descent(A, b, eps):
+def steepest_descent(A, b, eps, max_iter=None):
+    """ Use Steepest Descent to solve a Linear System. """
+
     A = np.array(A, dtype=np.float64)
     b = np.array(b, dtype=np.float64)
 
-    # initially x_f = x_(i-1)
-    # this changes when in the loop
-    x_f = np.zeros(len(A), dtype=np.float64)
-    k = 1
-
-    v_f = b
-    t = np.dot(v_f, v_f) / np.dot(v_f, np.dot(A, v_f))
-    x_i = x_f + t*v_f
-
-    while diff(x_i, x_f) >= eps:
-        k += 1
-
-        # Pre calculate v_f and t
-        v_f = b - np.dot(A, x_i)
-        t = np.dot(v_f, v_f) / np.dot(v_f, np.dot(A, v_f))
-
-        # Save the previous solution vector as x_f
-        x_f = x_i
-
-        # Create new solution vector
-        x_i = x_f + t * v_f
-
-    return x_i, k
+    x_0 = np.zeros(len(A), dtype=np.float64)
+
+    for i in count():
+        # The residual is the steepest-descent search direction
+        Ax = A @ x_0
+        v = b - Ax
+        t = np.dot(v, v) / np.dot(v, A @ v)
+
+        # Create new solution vector
+        x_1 = x_0 + t*v
+
+        # Are we close enough?
+        if diff(x_0, x_1) < eps:
+            return x_1, i
+
+        # Running out of iterations
+        if max_iter is not None and i >= max_iter:
+            raise RuntimeError("Did not converge in {} steps".format(max_iter))
+
+        # Set values for next loop
+        x_0 = x_1
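A caveat the diff does not spell out: steepest descent and the conjugate gradient routine below are only guaranteed to converge when A is symmetric positive definite (Jacobi needs a condition such as strict diagonal dominance), which is why a finite max_iter is a useful safety net. A small, hypothetical helper a caller could run before choosing max_iter:

import numpy as np

def is_spd(A, tol=0.0):
    # Rough dense check: symmetric and all eigenvalues strictly positive
    A = np.asarray(A, dtype=np.float64)
    return np.allclose(A, A.T) and np.all(np.linalg.eigvalsh(A) > tol)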
 
 
-def conjugate_gradient(A, b, eps):
+def conjugate_gradient(A, b, eps, max_iter=None):
+    """ Use the Conjugate Gradient Method to solve a Linear System. """
+
     A = np.array(A, dtype=np.float64)
     b = np.array(b, dtype=np.float64)
 
-    # initially x_f = x_(i-1)
-    # this changes when in the loop
-    x_f = np.zeros(len(A), dtype=np.float64)
-    r_f = b - np.dot(A, x_f)
-    v_f = r_f
-
-    k = 1
-
-    # Calculate first iteration
-    t = np.dot(r_f, r_f) / np.dot(v_f, np.dot(A, v_f))
-    x_i = x_f + t*v_f
-    r_i = r_f - t * np.dot(A, v_f)
-    s = np.dot(r_i, r_i) / np.dot(r_f, r_f)
-    v_i = r_i + s*v_f
-
-    # Set r and v vectors for next loop
-    r_f = r_i
-    v_f = v_i
-
-    while diff(x_i, x_f) >= eps:
-        k += 1
-
-        t = np.dot(r_f, r_f) / np.dot(v_f, np.dot(A, v_f))
-
-        # Save the previous solution vector as x_f
-        x_f = x_i
-
-        # Create new solution vector
-        x_i = x_f + t*v_f
-
-        # Calculate r and v vectors
-        r_i = r_f - t * np.dot(A, v_f)
-        s = np.dot(r_i, r_i) / np.dot(r_f, r_f)
-        v_i = r_i + s*v_f
-
-        # Save r and v vectors for next loop
-        r_f = r_i
-        v_f = v_i
-
-    return x_i, k
+    # Setup vectors
+    x_0 = np.zeros(len(A), dtype=np.float64)
+    r_0 = b - A @ x_0
+    v = r_0.copy()
+
+    for i in count():
+        Av = A @ v
+
+        r_0_square = np.dot(r_0, r_0)
+
+        # Step length along the current search direction
+        t = r_0_square / np.dot(v, Av)
+        x_1 = x_0 + t*v
+
+        # Calculate r and v vectors for the next search direction
+        r_1 = r_0 - t * Av
+        s = np.dot(r_1, r_1) / r_0_square
+        v = r_1 + s*v
+
+        # Are we close enough?
+        if diff(x_0, x_1) < eps:
+            return x_1, i
+
+        # Running out of iterations
+        if max_iter is not None and i >= max_iter:
+            raise RuntimeError("Did not converge in {} steps".format(max_iter))
+
+        # Set values for next loop
+        x_0 = x_1
+        r_0 = r_1
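For completeness, a hedged smoke test (not in the repository) exercising all three rewritten solvers against numpy's direct solver on a small symmetric, diagonally dominant system; the matrix, tolerance, and iteration budget are illustrative choices only.

from solvers import jacobi, steepest_descent, conjugate_gradient  # hypothetical import
import numpy as np

A = [[4.0, 1.0, 0.0],
     [1.0, 4.0, 1.0],
     [0.0, 1.0, 4.0]]
b = [1.0, 2.0, 3.0]
expected = np.linalg.solve(A, b)

for solver in (jacobi, steepest_descent, conjugate_gradient):
    x, iterations = solver(A, b, 1e-10, max_iter=10000)
    assert np.allclose(x, expected, atol=1e-8), solver.__name__
    print("{}: {} iterations".format(solver.__name__, iterations))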