Week3: Rewrote Solvers to include a max_iter parameter
parent 79984000ff
commit 4b90ae8898
1 changed file with 56 additions and 70 deletions
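
The point of the change is that each solver now takes an optional max_iter and raises a RuntimeError instead of looping forever when the tolerance eps is never reached. A minimal usage sketch (the 3x3 test system and the import path are illustrative, not taken from the repository):

import numpy as np
from solvers import jacobi   # assumes week3/solvers.py is on the import path

# Small, strictly diagonally dominant system, so the Jacobi method converges.
A = [[4.0, 1.0, 0.0],
     [1.0, 5.0, 2.0],
     [0.0, 2.0, 6.0]]
b = [1.0, 2.0, 3.0]

try:
    x, steps = jacobi(A, b, eps=1e-10, max_iter=1000)
    print(x, steps)
except RuntimeError as err:
    # Raised when the tolerance was not reached within max_iter iterations.
    print(err)
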
week3/solvers.py
@@ -1,113 +1,99 @@
 #!/usr/bin/env python3

 import numpy as np
 from itertools import count as count


 def diff(a, b):
     return np.amax(np.abs(a-b))
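
The diff helper above is the max norm of the difference between two vectors, i.e. the largest componentwise gap, which all three solvers use as their stopping test. An illustrative value (numbers made up):

import numpy as np
print(np.amax(np.abs(np.array([1.0, 2.0]) - np.array([1.5, 2.0]))))   # 0.5, the same value diff(a, b) returns here
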
-def jacobi(A, b, eps):
+def jacobi(A, b, eps, max_iter=None):
     """ Use the Jacobi Method to solve a Linear System. """

     A = np.array(A, dtype=np.float64)
     b = np.array(b, dtype=np.float64)

     # Determine Diagonal and Upper and Lower matrices
     D = np.diag(A)
     L = -np.tril(A, -1)
     U = -np.triu(A, 1)

     D_inv = np.diagflat(np.reciprocal(D))

-    # initially x_f = x_(i-1)
-    # this changes when in the loop
-    x_i = np.dot(D_inv, b)
-    x_f = np.zeros(len(A))
-    k = 1
+    x_0 = D_inv @ b
+    for i in count():
+        x_1 = D_inv @ (L + U) @ x_0 + D_inv @ b

-    while diff(x_i, x_f) >= eps:
-        k += 1
+        # Are we close enough?
+        if diff(x_0, x_1) < eps:
+            return x_1, i

-        # Save the previous solution vector as x_f
-        x_f = x_i
+        # Running out of iterations
+        if max_iter is not None and i >= max_iter:
+            raise RuntimeError("Did not converge in {} steps".format(max_iter))

-        # Create new solution vector
-        x_i = np.dot(np.dot(D_inv, ( L + U )), x_f ) + np.dot(D_inv, b)
+        # Set values for next loop
+        x_0 = x_1

-    return x_i, k
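
For reference, the loop above is the standard Jacobi splitting A = D - L - U (with L and U as defined at the top of the function), whose fixed-point update is

    x_{k+1} = D^{-1}(L + U) x_k + D^{-1} b

so the D^{-1} b term is part of every iterate, not only of the starting guess; convergence is guaranteed when A is strictly diagonally dominant.
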
-def steepest_descent(A, b, eps):
+def steepest_descent(A, b, eps, max_iter=None):
     """ Use Steepest Descent to solve a Linear System. """
     A = np.array(A, dtype=np.float64)
     b = np.array(b, dtype=np.float64)

-    # initially x_f = x_(i-1)
-    # this changes when in the loop
-    x_f = np.zeros(len(A), dtype=np.float64)
-    k = 1
-
-    v_f = b
-    t = np.dot(v_f, v_f) / np.dot(v_f, np.dot(A, v_f))
-    x_i = x_f + t*v_f
+    x_0 = np.zeros(len(A), dtype=np.float64)

-    while diff(x_i, x_f) >= eps:
-        k += 1
+    for i in count():
+        Ax = A @ x_0
+        v = b - Ax
+        t = np.dot(v, v) / np.dot(v, A @ v)

-        # Pre calculate v_f and t
-        v_f = b - np.dot(A, x_i)
-        t = np.dot(v_f, v_f) / np.dot(v_f, np.dot(A, v_f))
+        x_1 = x_0 + t*v

-        # Save the previous solution vector as x_f
-        x_f = x_i
+        # Are we close enough?
+        if diff(x_0, x_1) < eps:
+            return x_1, i

-        # Create new solution vector
-        x_i = x_f + t * v_f
+        # Running out of iterations
+        if max_iter is not None and i >= max_iter:
+            raise RuntimeError("Did not converge in {} steps".format(max_iter))

-    return x_i, k
+        # Set values for next loop
+        x_0 = x_1
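
For reference, steepest descent on a symmetric positive definite A minimises f(x) = (1/2) x^T A x - b^T x, and the step length computed in the loop is the exact line search along the residual direction:

    r_k = b - A x_k,    t_k = (r_k^T r_k) / (r_k^T A r_k),    x_{k+1} = x_k + t_k r_k

which is why v and t are recomputed from the current iterate on every pass.
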
-def conjugate_gradient(A, b, eps):
+def conjugate_gradient(A, b, eps, max_iter=None):
     """ Use the Conjugate Gradient Method to solve a Linear System. """
     A = np.array(A, dtype=np.float64)
     b = np.array(b, dtype=np.float64)

-    # initially x_f = x_(i-1)
-    # this changes when in the loop
-    x_f = np.zeros(len(A), dtype=np.float64)
-    r_f = b - np.dot(A, x_f)
-    v_f = r_f
-
-    k = 1
-
-    # Calculate first iteration
-    t = np.dot(r_f, r_f) / np.dot(v_f, np.dot(A, v_f))
-    x_i = x_f + t*v_f
+    # Setup vectors
+    x_0 = np.zeros(len(A), dtype=np.float64)
+    r_0 = b - A @ x_0
+    v = r_0.copy()

-    r_i = r_f - t * np.dot(A, v_f)
-    s = np.dot(r_i, r_i) / np.dot(r_f, r_f)
-    v_i = r_i + s*v_f
-
-    # Set r and v vectors for next loop
-    r_f = r_i
-    v_f = v_i

-    while diff(x_i, x_f) >= eps:
-        k += 1
+    for i in count():
+        Ax = A @ x_0
+        Av = A @ v

-        t = np.dot(r_f, r_f) / np.dot(v_f, np.dot(A, v_f))
-        # Save the previous solution vector as x_f
-        x_f = x_i
+        r_0_square = np.dot(r_0, r_0)

-        # Create new solution vector
-        x_i = x_f + t*v_f
+        t = r_0_square / np.dot(v, Av)
+        x_1 = x_0 + t*v

+        # Calculate r and v vectors
+        r_1 = r_0 - t * Av

-        r_i = r_f - t * np.dot(A, v_f)
-        s = np.dot(r_i, r_i) / np.dot(r_f, r_f)
-        v_i = r_i + s*v_f
-
-        # Save r and v vectors for next loop
-        r_f = r_i
-        v_f = v_i
+        s = np.dot(r_1, r_1) / r_0_square
+        v = r_1 + s*v

-    return x_i, k
+        # Are we close enough?
+        if diff(x_0, x_1) < eps:
+            return x_1, i

+        # Running out of iterations
+        if max_iter is not None and i >= max_iter:
+            raise RuntimeError("Did not converge in {} steps".format(max_iter))

+        # Set values for next loop
+        x_0 = x_1
+        r_0 = r_1
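
As an illustrative cross-check (the test matrix below is made up for this sketch and the import path is assumed, not taken from the repository), all three routines should agree with numpy's direct solver; steepest descent and conjugate gradient additionally require A to be symmetric positive definite:

import numpy as np
from solvers import jacobi, steepest_descent, conjugate_gradient  # assumed import path

# Symmetric, strictly diagonally dominant with positive diagonal, hence positive definite.
A = np.array([[6.0, 2.0, 1.0],
              [2.0, 5.0, 2.0],
              [1.0, 2.0, 4.0]])
b = np.array([1.0, 2.0, 3.0])

expected = np.linalg.solve(A, b)
for solver in (jacobi, steepest_descent, conjugate_gradient):
    x, steps = solver(A, b, 1e-10, max_iter=10000)
    print(solver.__name__, steps, np.allclose(x, expected))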