demo + utils venv
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
+254
@@ -0,0 +1,254 @@
"""Test functions for the sparse.linalg._expm_multiply module
"""

from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.testing import assert_allclose, assert_, assert_equal
from scipy._lib._numpy_compat import suppress_warnings

from scipy.sparse import SparseEfficiencyWarning
import scipy.linalg
from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
        _onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
        _expm_multiply_interval)


def less_than_or_close(a, b):
    return np.allclose(a, b) or (a < b)


class TestExpmActionSimple(object):
    """
    These tests do not consider the case of multiple time steps in one call.
    """

    def test_theta_monotonicity(self):
        pairs = sorted(_theta.items())
        for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]):
            assert_(theta_a < theta_b)

    def test_p_max_default(self):
        m_max = 55
        expected_p_max = 8
        observed_p_max = _compute_p_max(m_max)
        assert_equal(observed_p_max, expected_p_max)

    def test_p_max_range(self):
        for m_max in range(1, 55+1):
            p_max = _compute_p_max(m_max)
            assert_(p_max*(p_max - 1) <= m_max + 1)
            p_too_big = p_max + 1
            assert_(p_too_big*(p_too_big - 1) > m_max + 1)

    def test_onenormest_matrix_power(self):
        np.random.seed(1234)
        n = 40
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            for p in range(4):
                if not p:
                    M = np.identity(n)
                else:
                    M = np.dot(M, A)
                estimated = _onenormest_matrix_power(A, p)
                exact = np.linalg.norm(M, 1)
                assert_(less_than_or_close(estimated, exact))
                assert_(less_than_or_close(exact, 3*estimated))

    def test_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            B = np.random.randn(n, k)
            observed = expm_multiply(A, B)
            expected = np.dot(scipy.linalg.expm(A), B)
            assert_allclose(observed, expected)

    def test_matrix_vector_multiply(self):
        np.random.seed(1234)
        n = 40
        nsamples = 10
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            v = np.random.randn(n)
            observed = expm_multiply(A, v)
            expected = np.dot(scipy.linalg.expm(A), v)
            assert_allclose(observed, expected)

    def test_scaled_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i in range(nsamples):
            for t in (0.2, 1.0, 1.5):
                with np.errstate(invalid='ignore'):
                    A = scipy.linalg.inv(np.random.randn(n, n))
                    B = np.random.randn(n, k)
                    observed = _expm_multiply_simple(A, B, t=t)
                    expected = np.dot(scipy.linalg.expm(t*A), B)
                    assert_allclose(observed, expected)

    def test_scaled_expm_multiply_single_timepoint(self):
        np.random.seed(1234)
        t = 0.1
        n = 5
        k = 2
        A = np.random.randn(n, n)
        B = np.random.randn(n, k)
        observed = _expm_multiply_simple(A, B, t=t)
        expected = scipy.linalg.expm(t*A).dot(B)
        assert_allclose(observed, expected)

    def test_sparse_expm_multiply(self):
        np.random.seed(1234)
        n = 40
        k = 3
        nsamples = 10
        for i in range(nsamples):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            observed = expm_multiply(A, B)
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "splu requires CSC matrix format")
                sup.filter(SparseEfficiencyWarning,
                           "spsolve is more efficient when sparse b is in the CSC matrix format")
                expected = scipy.linalg.expm(A).dot(B)
            assert_allclose(observed, expected)

    def test_complex(self):
        A = np.array([
            [1j, 1j],
            [0, 1j]], dtype=complex)
        B = np.array([1j, 1j])
        observed = expm_multiply(A, B)
        expected = np.array([
            1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)),
            1j * np.exp(1j)], dtype=complex)
        assert_allclose(observed, expected)


class TestExpmActionInterval(object):

    def test_sparse_expm_multiply_interval(self):
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        n = 40
        k = 3
        endpoint = True
        for num in (14, 13, 2):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            v = np.random.randn(n)
            for target in (B, v):
                X = expm_multiply(A, target,
                        start=start, stop=stop, num=num, endpoint=endpoint)
                samples = np.linspace(start=start, stop=stop,
                        num=num, endpoint=endpoint)
                with suppress_warnings() as sup:
                    sup.filter(SparseEfficiencyWarning,
                               "splu requires CSC matrix format")
                    sup.filter(SparseEfficiencyWarning,
                               "spsolve is more efficient when sparse b is in the CSC matrix format")
                    for solution, t in zip(X, samples):
                        assert_allclose(solution,
                                scipy.linalg.expm(t*A).dot(target))

    def test_expm_multiply_interval_vector(self):
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        endpoint = True
        for num in (14, 13, 2):
            for n in (1, 2, 5, 20, 40):
                A = scipy.linalg.inv(np.random.randn(n, n))
                v = np.random.randn(n)
                X = expm_multiply(A, v,
                        start=start, stop=stop, num=num, endpoint=endpoint)
                samples = np.linspace(start=start, stop=stop,
                        num=num, endpoint=endpoint)
                for solution, t in zip(X, samples):
                    assert_allclose(solution, scipy.linalg.expm(t*A).dot(v))

    def test_expm_multiply_interval_matrix(self):
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        endpoint = True
        for num in (14, 13, 2):
            for n in (1, 2, 5, 20, 40):
                for k in (1, 2):
                    A = scipy.linalg.inv(np.random.randn(n, n))
                    B = np.random.randn(n, k)
                    X = expm_multiply(A, B,
                            start=start, stop=stop, num=num, endpoint=endpoint)
                    samples = np.linspace(start=start, stop=stop,
                            num=num, endpoint=endpoint)
                    for solution, t in zip(X, samples):
                        assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))

    def test_sparse_expm_multiply_interval_dtypes(self):
        # Test A & B int
        A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
        B = np.ones(5, dtype=int)
        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))

        # Test A complex, B int
        A = scipy.sparse.diags(-1j*np.arange(5),format='csr', dtype=complex)
        B = np.ones(5, dtype=int)
        Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)),format='csr')
        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))

        # Test A int, B complex
        A = scipy.sparse.diags(np.arange(5),format='csr', dtype=int)
        B = 1j*np.ones(5, dtype=complex)
        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)),format='csr')
        assert_allclose(expm_multiply(A,B,0,1)[-1], Aexpm.dot(B))

    def test_expm_multiply_interval_status_0(self):
        self._help_test_specific_expm_interval_status(0)

    def test_expm_multiply_interval_status_1(self):
        self._help_test_specific_expm_interval_status(1)

    def test_expm_multiply_interval_status_2(self):
        self._help_test_specific_expm_interval_status(2)

    def _help_test_specific_expm_interval_status(self, target_status):
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        num = 13
        endpoint = True
        n = 5
        k = 2
        nrepeats = 10
        nsuccesses = 0
        for num in [14, 13, 2] * nrepeats:
            A = np.random.randn(n, n)
            B = np.random.randn(n, k)
            status = _expm_multiply_interval(A, B,
                    start=start, stop=stop, num=num, endpoint=endpoint,
                    status_only=True)
            if status == target_status:
                X, status = _expm_multiply_interval(A, B,
                        start=start, stop=stop, num=num, endpoint=endpoint,
                        status_only=False)
                assert_equal(X.shape, (num, n, k))
                samples = np.linspace(start=start, stop=stop,
                        num=num, endpoint=endpoint)
                for solution, t in zip(X, samples):
                    assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
                nsuccesses += 1
        if not nsuccesses:
            msg = 'failed to find a status-' + str(target_status) + ' interval'
            raise Exception(msg)

@@ -0,0 +1,367 @@
"""Test functions for the sparse.linalg.interface module
"""

from __future__ import division, print_function, absolute_import

from functools import partial
from itertools import product
import operator
import pytest
from pytest import raises as assert_raises, warns
from numpy.testing import assert_, assert_equal

import numpy as np
import scipy.sparse as sparse

from scipy.sparse.linalg import interface


# Only test matmul operator (A @ B) when available (Python 3.5+)
TEST_MATMUL = hasattr(operator, 'matmul')


class TestLinearOperator(object):
    def setup_method(self):
        self.A = np.array([[1,2,3],
                           [4,5,6]])
        self.B = np.array([[1,2],
                           [3,4],
                           [5,6]])
        self.C = np.array([[1,2],
                           [3,4]])

    def test_matvec(self):
        def get_matvecs(A):
            return [{
                        'shape': A.shape,
                        'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),
                        'rmatvec': lambda x: np.dot(A.T.conj(),
                                                    x).reshape(A.shape[1])
                    },
                    {
                        'shape': A.shape,
                        'matvec': lambda x: np.dot(A, x),
                        'rmatvec': lambda x: np.dot(A.T.conj(), x),
                        'matmat': lambda x: np.dot(A, x)
                    }]

        for matvecs in get_matvecs(self.A):
            A = interface.LinearOperator(**matvecs)

            assert_(A.args == ())

            assert_equal(A.matvec(np.array([1,2,3])), [14,32])
            assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
            assert_equal(A * np.array([1,2,3]), [14,32])
            assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
            assert_equal(A.dot(np.array([1,2,3])), [14,32])
            assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])

            assert_equal(A.matvec(np.matrix([[1],[2],[3]])), [[14],[32]])
            assert_equal(A * np.matrix([[1],[2],[3]]), [[14],[32]])
            assert_equal(A.dot(np.matrix([[1],[2],[3]])), [[14],[32]])

            assert_equal((2*A)*[1,1,1], [12,30])
            assert_equal((2*A).rmatvec([1,1]), [10, 14, 18])
            assert_equal((2*A).H.matvec([1,1]), [10, 14, 18])
            assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])
            assert_equal((2*A).matmat([[1],[1],[1]]), [[12],[30]])
            assert_equal((A*2)*[1,1,1], [12,30])
            assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])
            assert_equal((2j*A)*[1,1,1], [12j,30j])
            assert_equal((A+A)*[1,1,1], [12, 30])
            assert_equal((A+A).rmatvec([1,1]), [10, 14, 18])
            assert_equal((A+A).H.matvec([1,1]), [10, 14, 18])
            assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])
            assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])
            assert_equal((-A)*[1,1,1], [-6,-15])
            assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])
            assert_equal((A-A)*[1,1,1], [0,0])
            assert_equal((A-A)*[[1],[1],[1]], [[0],[0]])

            z = A+A
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)
            z = 2*A
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)

            assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))
            assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))
            assert_(isinstance(A * np.array([1,2,3]), np.ndarray))
            assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))
            assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))
            assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))

            assert_(isinstance(A.matvec(np.matrix([[1],[2],[3]])), np.ndarray))
            assert_(isinstance(A * np.matrix([[1],[2],[3]]), np.ndarray))
            assert_(isinstance(A.dot(np.matrix([[1],[2],[3]])), np.ndarray))

            assert_(isinstance(2*A, interface._ScaledLinearOperator))
            assert_(isinstance(2j*A, interface._ScaledLinearOperator))
            assert_(isinstance(A+A, interface._SumLinearOperator))
            assert_(isinstance(-A, interface._ScaledLinearOperator))
            assert_(isinstance(A-A, interface._SumLinearOperator))

            assert_((2j*A).dtype == np.complex_)

            assert_raises(ValueError, A.matvec, np.array([1,2]))
            assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))
            assert_raises(ValueError, A.matvec, np.array([[1],[2]]))
            assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))

            assert_raises(ValueError, lambda: A*A)
            assert_raises(ValueError, lambda: A**2)

        for matvecsA, matvecsB in product(get_matvecs(self.A),
                                          get_matvecs(self.B)):
            A = interface.LinearOperator(**matvecsA)
            B = interface.LinearOperator(**matvecsB)

            assert_equal((A*B)*[1,1], [50,113])
            assert_equal((A*B)*[[1],[1]], [[50],[113]])
            assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])

            assert_equal((A*B).rmatvec([1,1]), [71,92])
            assert_equal((A*B).H.matvec([1,1]), [71,92])

            assert_(isinstance(A*B, interface._ProductLinearOperator))

            assert_raises(ValueError, lambda: A+B)
            assert_raises(ValueError, lambda: A**2)

            z = A*B
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)

        for matvecsC in get_matvecs(self.C):
            C = interface.LinearOperator(**matvecsC)

            assert_equal((C**2)*[1,1], [17,37])
            assert_equal((C**2).rmatvec([1,1]), [22,32])
            assert_equal((C**2).H.matvec([1,1]), [22,32])
            assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])

            assert_(isinstance(C**2, interface._PowerLinearOperator))

    def test_matmul(self):
        if not TEST_MATMUL:
            pytest.skip("matmul is only tested in Python 3.5+")

        D = {'shape': self.A.shape,
             'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
             'rmatvec': lambda x: np.dot(self.A.T.conj(),
                                         x).reshape(self.A.shape[1]),
             'matmat': lambda x: np.dot(self.A, x)}
        A = interface.LinearOperator(**D)
        B = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])
        b = B[0]

        assert_equal(operator.matmul(A, b), A * b)
        assert_equal(operator.matmul(A, B), A * B)
        assert_raises(ValueError, operator.matmul, A, 2)
        assert_raises(ValueError, operator.matmul, 2, A)


class TestAsLinearOperator(object):
    def setup_method(self):
        self.cases = []

        def make_cases(dtype):
            self.cases.append(np.matrix([[1,2,3],[4,5,6]], dtype=dtype))
            self.cases.append(np.array([[1,2,3],[4,5,6]], dtype=dtype))
            self.cases.append(sparse.csr_matrix([[1,2,3],[4,5,6]], dtype=dtype))

            # Test default implementations of _adjoint and _rmatvec, which
            # refer to each other.
            def mv(x, dtype):
                y = np.array([1 * x[0] + 2 * x[1] + 3 * x[2],
                              4 * x[0] + 5 * x[1] + 6 * x[2]], dtype=dtype)
                if len(x.shape) == 2:
                    y = y.reshape(-1, 1)
                return y

            def rmv(x, dtype):
                return np.array([1 * x[0] + 4 * x[1],
                                 2 * x[0] + 5 * x[1],
                                 3 * x[0] + 6 * x[1]], dtype=dtype)

            class BaseMatlike(interface.LinearOperator):
                def __init__(self, dtype):
                    self.dtype = np.dtype(dtype)
                    self.shape = (2,3)

                def _matvec(self, x):
                    return mv(x, self.dtype)

            class HasRmatvec(BaseMatlike):
                def _rmatvec(self,x):
                    return rmv(x, self.dtype)

            class HasAdjoint(BaseMatlike):
                def _adjoint(self):
                    shape = self.shape[1], self.shape[0]
                    matvec = partial(rmv, dtype=self.dtype)
                    rmatvec = partial(mv, dtype=self.dtype)
                    return interface.LinearOperator(matvec=matvec,
                                                    rmatvec=rmatvec,
                                                    dtype=self.dtype,
                                                    shape=shape)

            self.cases.append(HasRmatvec(dtype))
            self.cases.append(HasAdjoint(dtype))

        make_cases('int32')
        make_cases('float32')
        make_cases('float64')

    def test_basic(self):

        for M in self.cases:
            A = interface.aslinearoperator(M)
            M,N = A.shape

            assert_equal(A.matvec(np.array([1,2,3])), [14,32])
            assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])

            assert_equal(A * np.array([1,2,3]), [14,32])
            assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])

            assert_equal(A.rmatvec(np.array([1,2])), [9,12,15])
            assert_equal(A.rmatvec(np.array([[1],[2]])), [[9],[12],[15]])
            assert_equal(A.H.matvec(np.array([1,2])), [9,12,15])
            assert_equal(A.H.matvec(np.array([[1],[2]])), [[9],[12],[15]])

            assert_equal(
                    A.matmat(np.array([[1,4],[2,5],[3,6]])),
                    [[14,32],[32,77]])

            assert_equal(A * np.array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]])

            if hasattr(M,'dtype'):
                assert_equal(A.dtype, M.dtype)

    def test_dot(self):

        for M in self.cases:
            A = interface.aslinearoperator(M)
            M,N = A.shape

            assert_equal(A.dot(np.array([1,2,3])), [14,32])
            assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])

            assert_equal(
                    A.dot(np.array([[1,4],[2,5],[3,6]])),
                    [[14,32],[32,77]])


def test_repr():
    A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)
    repr_A = repr(A)
    assert_('unspecified dtype' not in repr_A, repr_A)


def test_identity():
    ident = interface.IdentityOperator((3, 3))
    assert_equal(ident * [1, 2, 3], [1, 2, 3])
    assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9))

    assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])


def test_attributes():
    A = interface.aslinearoperator(np.arange(16).reshape(4, 4))

    def always_four_ones(x):
        x = np.asarray(x)
        assert_(x.shape == (3,) or x.shape == (3, 1))
        return np.ones(4)

    B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)

    for op in [A, B, A * B, A.H, A + A, B + B, A ** 4]:
        assert_(hasattr(op, "dtype"))
        assert_(hasattr(op, "shape"))
        assert_(hasattr(op, "_matvec"))


def matvec(x):
    """ Needed for test_pickle as local functions are not pickleable """
    return np.zeros(3)


def test_pickle():
    import pickle

    for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
        A = interface.LinearOperator((3, 3), matvec)
        s = pickle.dumps(A, protocol=protocol)
        B = pickle.loads(s)

        for k in A.__dict__:
            assert_equal(getattr(A, k), getattr(B, k))


def test_inheritance():
    class Empty(interface.LinearOperator):
        pass

    with warns(RuntimeWarning, match="should implement at least"):
        assert_raises(TypeError, Empty)

    class Identity(interface.LinearOperator):
        def __init__(self, n):
            super(Identity, self).__init__(dtype=None, shape=(n, n))

        def _matvec(self, x):
            return x

    id3 = Identity(3)
    assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])
    assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])

    class MatmatOnly(interface.LinearOperator):
        def __init__(self, A):
            super(MatmatOnly, self).__init__(A.dtype, A.shape)
            self.A = A

        def _matmat(self, x):
            return self.A.dot(x)

    mm = MatmatOnly(np.random.randn(5, 3))
    assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))


def test_dtypes_of_operator_sum():
    # gh-6078

    mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2)
    mat_real = np.random.rand(2,2)

    complex_operator = interface.aslinearoperator(mat_complex)
    real_operator = interface.aslinearoperator(mat_real)

    sum_complex = complex_operator + complex_operator
    sum_real = real_operator + real_operator

    assert_equal(sum_real.dtype, np.float64)
    assert_equal(sum_complex.dtype, np.complex128)


def test_no_double_init():
    call_count = [0]

    def matvec(v):
        call_count[0] += 1
        return v

    # It should call matvec exactly once (in order to determine the
    # operator dtype)
    A = interface.LinearOperator((2, 2), matvec=matvec)
    assert_equal(call_count[0], 1)


def test_adjoint_conjugate():
    X = np.array([[1j]])
    A = interface.aslinearoperator(X)

    B = 1j * A
    Y = 1j * X

    v = np.array([1])

    assert_equal(B.dot(v), Y.dot(v))
    assert_equal(B.H.dot(v), Y.T.conj().dot(v))

@@ -0,0 +1,556 @@
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for scipy.linalg.matfuncs module

"""
from __future__ import division, print_function, absolute_import

import math

import numpy as np
from numpy import array, eye, exp, random
from numpy.linalg import matrix_power
from numpy.testing import (
        assert_allclose, assert_, assert_array_almost_equal, assert_equal,
        assert_array_almost_equal_nulp)
from scipy._lib._numpy_compat import suppress_warnings

from scipy.sparse import csc_matrix, SparseEfficiencyWarning
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg.matfuncs import (expm, _expm,
        ProductOperator, MatrixPowerOperator,
        _onenorm_matrix_power_nnm)
from scipy.linalg import logm
from scipy.special import factorial, binom
import scipy.sparse
import scipy.sparse.linalg


def _burkardt_13_power(n, p):
    """
    A helper function for testing matrix functions.

    Parameters
    ----------
    n : integer greater than 1
        Order of the square matrix to be returned.
    p : non-negative integer
        Power of the matrix.

    Returns
    -------
    out : ndarray representing a square matrix
        A Forsythe matrix of order n, raised to the power p.

    """
    # Input validation.
    if n != int(n) or n < 2:
        raise ValueError('n must be an integer greater than 1')
    n = int(n)
    if p != int(p) or p < 0:
        raise ValueError('p must be a non-negative integer')
    p = int(p)

    # Construct the matrix explicitly.
    a, b = divmod(p, n)
    large = np.power(10.0, -n*a)
    small = large * np.power(10.0, -n)
    return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)


def test_onenorm_matrix_power_nnm():
    np.random.seed(1234)
    for n in range(1, 5):
        for p in range(5):
            M = np.random.random((n, n))
            Mp = np.linalg.matrix_power(M, p)
            observed = _onenorm_matrix_power_nnm(M, p)
            expected = np.linalg.norm(Mp, 1)
            assert_allclose(observed, expected)


class TestExpM(object):
    def test_zero_ndarray(self):
        a = array([[0.,0],[0,0]])
        assert_array_almost_equal(expm(a),[[1,0],[0,1]])

    def test_zero_sparse(self):
        a = csc_matrix([[0.,0],[0,0]])
        assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])

    def test_zero_matrix(self):
        a = np.matrix([[0.,0],[0,0]])
        assert_array_almost_equal(expm(a),[[1,0],[0,1]])

    def test_misc_types(self):
        A = expm(np.array([[1]]))
        assert_allclose(expm(((1,),)), A)
        assert_allclose(expm([[1]]), A)
        assert_allclose(expm(np.matrix([[1]])), A)
        assert_allclose(expm(np.array([[1]])), A)
        assert_allclose(expm(csc_matrix([[1]])).A, A)
        B = expm(np.array([[1j]]))
        assert_allclose(expm(((1j,),)), B)
        assert_allclose(expm([[1j]]), B)
        assert_allclose(expm(np.matrix([[1j]])), B)
        assert_allclose(expm(csc_matrix([[1j]])).A, B)

    def test_bidiagonal_sparse(self):
        A = csc_matrix([
            [1, 3, 0],
            [0, 1, 5],
            [0, 0, 2]], dtype=float)
        e1 = math.exp(1)
        e2 = math.exp(2)
        expected = np.array([
            [e1, 3*e1, 15*(e2 - 2*e1)],
            [0, e1, 5*(e2 - e1)],
            [0, 0, e2]], dtype=float)
        observed = expm(A).toarray()
        assert_array_almost_equal(observed, expected)

    def test_padecases_dtype_float(self):
        for dtype in [np.float32, np.float64]:
            for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
                A = scale * eye(3, dtype=dtype)
                observed = expm(A)
                expected = exp(scale) * eye(3, dtype=dtype)
                assert_array_almost_equal_nulp(observed, expected, nulp=100)

    def test_padecases_dtype_complex(self):
        for dtype in [np.complex64, np.complex128]:
            for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
                A = scale * eye(3, dtype=dtype)
                observed = expm(A)
                expected = exp(scale) * eye(3, dtype=dtype)
                assert_array_almost_equal_nulp(observed, expected, nulp=100)

    def test_padecases_dtype_sparse_float(self):
        # float32 and complex64 lead to errors in spsolve/UMFpack
        dtype = np.float64
        for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
            a = scale * speye(3, 3, dtype=dtype, format='csc')
            e = exp(scale) * eye(3, dtype=dtype)
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "Changing the sparsity structure of a csc_matrix is expensive.")
                exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()
                inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()
            assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)
            assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)

    def test_padecases_dtype_sparse_complex(self):
        # float32 and complex64 lead to errors in spsolve/UMFpack
        dtype = np.complex128
        for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
            a = scale * speye(3, 3, dtype=dtype, format='csc')
            e = exp(scale) * eye(3, dtype=dtype)
            with suppress_warnings() as sup:
                sup.filter(SparseEfficiencyWarning,
                           "Changing the sparsity structure of a csc_matrix is expensive.")
                assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)

    def test_logm_consistency(self):
        random.seed(1234)
        for dtype in [np.float64, np.complex128]:
            for n in range(1, 10):
                for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
                    # make logm(A) be of a given scale
                    A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
                    if np.iscomplexobj(A):
                        A = A + 1j * random.rand(n, n) * scale
                    assert_array_almost_equal(expm(logm(A)), A)

    def test_integer_matrix(self):
        Q = np.array([
            [-3, 1, 1, 1],
            [1, -3, 1, 1],
            [1, 1, -3, 1],
            [1, 1, 1, -3]])
        assert_allclose(expm(Q), expm(1.0 * Q))

    def test_integer_matrix_2(self):
        # Check for integer overflows
        Q = np.array([[-500, 500, 0, 0],
                      [0, -550, 360, 190],
                      [0, 630, -630, 0],
                      [0, 0, 0, 0]], dtype=np.int16)
        assert_allclose(expm(Q), expm(1.0 * Q))

        Q = csc_matrix(Q)
        assert_allclose(expm(Q).A, expm(1.0 * Q).A)

    def test_triangularity_perturbation(self):
        # Experiment (1) of
        # Awad H. Al-Mohy and Nicholas J. Higham (2012)
        # Improved Inverse Scaling and Squaring Algorithms
        # for the Matrix Logarithm.
        A = np.array([
            [3.2346e-1, 3e4, 3e4, 3e4],
            [0, 3.0089e-1, 3e4, 3e4],
            [0, 0, 3.221e-1, 3e4],
            [0, 0, 0, 3.0744e-1]],
            dtype=float)
        A_logm = np.array([
            [-1.12867982029050462e+00, 9.61418377142025565e+04,
             -4.52485573953179264e+09, 2.92496941103871812e+14],
            [0.00000000000000000e+00, -1.20101052953082288e+00,
             9.63469687211303099e+04, -4.68104828911105442e+09],
            [0.00000000000000000e+00, 0.00000000000000000e+00,
             -1.13289322264498393e+00, 9.53249183094775653e+04],
            [0.00000000000000000e+00, 0.00000000000000000e+00,
             0.00000000000000000e+00, -1.17947533272554850e+00]],
            dtype=float)
        assert_allclose(expm(A_logm), A, rtol=1e-4)

        # Perturb the upper triangular matrix by tiny amounts,
        # so that it becomes technically not upper triangular.
        random.seed(1234)
        tiny = 1e-17
        A_logm_perturbed = A_logm.copy()
        A_logm_perturbed[1, 0] = tiny
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "Ill-conditioned.*")
            A_expm_logm_perturbed = expm(A_logm_perturbed)
        rtol = 1e-4
        atol = 100 * tiny
        assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))

    def test_burkardt_1(self):
        # This matrix is diagonal.
        # The calculation of the matrix exponential is simple.
        #
        # This is the first of a series of matrix exponential tests
        # collected by John Burkardt from the following sources.
        #
        # Alan Laub,
        # Review of "Linear System Theory" by Joao Hespanha,
        # SIAM Review,
        # Volume 52, Number 4, December 2010, pages 779--781.
        #
        # Cleve Moler and Charles Van Loan,
        # Nineteen Dubious Ways to Compute the Exponential of a Matrix,
        # Twenty-Five Years Later,
        # SIAM Review,
        # Volume 45, Number 1, March 2003, pages 3--49.
        #
        # Cleve Moler,
        # Cleve's Corner: A Balancing Act for the Matrix Exponential,
        # 23 July 2012.
        #
        # Robert Ward,
        # Numerical computation of the matrix exponential
        # with accuracy estimate,
        # SIAM Journal on Numerical Analysis,
        # Volume 14, Number 4, September 1977, pages 600--610.
        exp1 = np.exp(1)
        exp2 = np.exp(2)
        A = np.array([
            [1, 0],
            [0, 2],
            ], dtype=float)
        desired = np.array([
            [exp1, 0],
            [0, exp2],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_2(self):
        # This matrix is symmetric.
        # The calculation of the matrix exponential is straightforward.
        A = np.array([
            [1, 3],
            [3, 2],
            ], dtype=float)
        desired = np.array([
            [39.322809708033859, 46.166301438885753],
            [46.166301438885768, 54.711576854329110],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_3(self):
        # This example is due to Laub.
        # This matrix is ill-suited for the Taylor series approach.
        # As powers of A are computed, the entries blow up too quickly.
        exp1 = np.exp(1)
        exp39 = np.exp(39)
        A = np.array([
            [0, 1],
            [-39, -40],
            ], dtype=float)
        desired = np.array([
            [
                39/(38*exp1) - 1/(38*exp39),
                -np.expm1(-38) / (38*exp1)],
            [
                39*np.expm1(-38) / (38*exp1),
                -1/(38*exp1) + 39/(38*exp39)],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_4(self):
        # This example is due to Moler and Van Loan.
        # The example will cause problems for the series summation approach,
        # as well as for diagonal Pade approximations.
        A = np.array([
            [-49, 24],
            [-64, 31],
            ], dtype=float)
        U = np.array([[3, 1], [4, 2]], dtype=float)
        V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)
        w = np.array([-17, -1], dtype=float)
        desired = np.dot(U * np.exp(w), V)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_5(self):
        # This example is due to Moler and Van Loan.
        # This matrix is strictly upper triangular.
        # All powers of A are zero beyond some (low) limit.
        # This example will cause problems for Pade approximations.
        A = np.array([
            [0, 6, 0, 0],
            [0, 0, 6, 0],
            [0, 0, 0, 6],
            [0, 0, 0, 0],
            ], dtype=float)
        desired = np.array([
            [1, 6, 18, 36],
            [0, 1, 6, 18],
            [0, 0, 1, 6],
            [0, 0, 0, 1],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_6(self):
        # This example is due to Moler and Van Loan.
        # This matrix does not have a complete set of eigenvectors.
        # That means the eigenvector approach will fail.
        exp1 = np.exp(1)
        A = np.array([
            [1, 1],
            [0, 1],
            ], dtype=float)
        desired = np.array([
            [exp1, exp1],
            [0, exp1],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_7(self):
        # This example is due to Moler and Van Loan.
        # This matrix is very close to example 5.
        # Mathematically, it has a complete set of eigenvectors.
        # Numerically, however, the calculation will be suspect.
        exp1 = np.exp(1)
        eps = np.spacing(1)
        A = np.array([
            [1 + eps, 1],
            [0, 1 - eps],
            ], dtype=float)
        desired = np.array([
            [exp1, exp1],
            [0, exp1],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_8(self):
        # This matrix was an example in Wikipedia.
        exp4 = np.exp(4)
        exp16 = np.exp(16)
        A = np.array([
            [21, 17, 6],
            [-5, -1, -6],
            [4, 4, 16],
            ], dtype=float)
        desired = np.array([
            [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],
            [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],
            [16*exp16, 16*exp16, 4*exp16],
            ], dtype=float) * 0.25
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_9(self):
        # This matrix is due to the NAG Library.
        # It is an example for function F01ECF.
        A = np.array([
            [1, 2, 2, 2],
            [3, 1, 1, 2],
            [3, 2, 1, 2],
            [3, 3, 3, 1],
            ], dtype=float)
        desired = np.array([
            [740.7038, 610.8500, 542.2743, 549.1753],
            [731.2510, 603.5524, 535.0884, 542.2743],
            [823.7630, 679.4257, 603.5524, 610.8500],
            [998.4355, 823.7630, 731.2510, 740.7038],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_10(self):
        # This is Ward's example #1.
        # It is defective and nonderogatory.
        A = np.array([
            [4, 2, 0],
            [1, 4, 1],
            [1, 1, 4],
            ], dtype=float)
        assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))
        desired = np.array([
            [147.8666224463699, 183.7651386463682, 71.79703239999647],
            [127.7810855231823, 183.7651386463682, 91.88256932318415],
            [127.7810855231824, 163.6796017231806, 111.9681062463718],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_11(self):
        # This is Ward's example #2.
        # It is a symmetric matrix.
        A = np.array([
            [29.87942128909879, 0.7815750847907159, -2.289519314033932],
            [0.7815750847907159, 25.72656945571064, 8.680737820540137],
            [-2.289519314033932, 8.680737820540137, 34.39400925519054],
            ], dtype=float)
        assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))
        desired = np.array([
            [
                5.496313853692378E+15,
                -1.823188097200898E+16,
                -3.047577080858001E+16],
            [
                -1.823188097200899E+16,
                6.060522870222108E+16,
                1.012918429302482E+17],
            [
                -3.047577080858001E+16,
                1.012918429302482E+17,
                1.692944112408493E+17],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_12(self):
        # This is Ward's example #3.
        # Ward's algorithm has difficulty estimating the accuracy
        # of its results.
        A = np.array([
            [-131, 19, 18],
            [-390, 56, 54],
            [-387, 57, 52],
            ], dtype=float)
        assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))
        desired = np.array([
            [-1.509644158793135, 0.3678794391096522, 0.1353352811751005],
            [-5.632570799891469, 1.471517758499875, 0.4060058435250609],
            [-4.934938326088363, 1.103638317328798, 0.5413411267617766],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_burkardt_13(self):
        # This is Ward's example #4.
        # This is a version of the Forsythe matrix.
        # The eigenvector problem is badly conditioned.
        # Ward's algorithm has difficulty estimating the accuracy
        # of its results for this problem.
        #
        # Check the construction of one instance of this family of matrices.
        A4_actual = _burkardt_13_power(4, 1)
        A4_desired = [[0, 1, 0, 0],
                      [0, 0, 1, 0],
                      [0, 0, 0, 1],
                      [1e-4, 0, 0, 0]]
        assert_allclose(A4_actual, A4_desired)
        # Check the expm for a few instances.
        for n in (2, 3, 4, 10):
            # Approximate expm using Taylor series.
            # This works well for this matrix family
            # because each matrix in the summation,
            # even before dividing by the factorial,
            # is entrywise positive with max entry 10**(-floor(p/n)*n).
            k = max(1, int(np.ceil(16/n)))
            desired = np.zeros((n, n), dtype=float)
            for p in range(n*k):
                Ap = _burkardt_13_power(n, p)
                assert_equal(np.min(Ap), 0)
                assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))
                desired += Ap / factorial(p)
            actual = expm(_burkardt_13_power(n, 1))
            assert_allclose(actual, desired)

    def test_burkardt_14(self):
        # This is Moler's example.
        # This badly scaled matrix caused problems for MATLAB's expm().
        A = np.array([
            [0, 1e-8, 0],
            [-(2e10 + 4e8/6.), -3, 2e10],
            [200./3., 0, -200./3.],
            ], dtype=float)
        desired = np.array([
            [0.446849468283175, 1.54044157383952e-09, 0.462811453558774],
            [-5743067.77947947, -0.0152830038686819, -4526542.71278401],
            [0.447722977849494, 1.54270484519591e-09, 0.463480648837651],
            ], dtype=float)
        actual = expm(A)
        assert_allclose(actual, desired)

    def test_pascal(self):
        # Test pascal triangle.
        # Nilpotent exponential, used to trigger a failure (gh-8029)

        for scale in [1.0, 1e-3, 1e-6]:
            for n in range(120):
                A = np.diag(np.arange(1, n + 1), -1) * scale
                B = expm(A)

                sc = scale**np.arange(n, -1, -1)
                if np.any(sc < 1e-300):
                    continue

                got = B
                expected = binom(np.arange(n + 1)[:,None],
                                 np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None]
                err = abs(expected - got).max()
                atol = 1e-13 * abs(expected).max()
                assert_allclose(got, expected, atol=atol)


class TestOperators(object):

    def test_product_operator(self):
        random.seed(1234)
        n = 5
        k = 2
        nsamples = 10
        for i in range(nsamples):
            A = np.random.randn(n, n)
            B = np.random.randn(n, n)
            C = np.random.randn(n, n)
            D = np.random.randn(n, k)
            op = ProductOperator(A, B, C)
            assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))
            assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))

    def test_matrix_power_operator(self):
        random.seed(1234)
        n = 5
        k = 2
        p = 3
        nsamples = 10
        for i in range(nsamples):
            A = np.random.randn(n, n)
            B = np.random.randn(n, k)
            op = MatrixPowerOperator(A, p)
            assert_allclose(op.matmat(B), matrix_power(A, p).dot(B))
            assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))

@@ -0,0 +1,127 @@
"""Test functions for the sparse.linalg.norm module
"""

from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.linalg import norm as npnorm
from numpy.testing import assert_equal, assert_allclose
from pytest import raises as assert_raises

from scipy._lib._version import NumpyVersion
import scipy.sparse
from scipy.sparse.linalg import norm as spnorm


class TestNorm(object):
    def setup_method(self):
        a = np.arange(9) - 4
        b = a.reshape((3, 3))
        self.b = scipy.sparse.csr_matrix(b)

    def test_matrix_norm(self):

        # Frobenius norm is the default
        assert_allclose(spnorm(self.b), 7.745966692414834)
        assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834)

        assert_allclose(spnorm(self.b, np.inf), 9)
        assert_allclose(spnorm(self.b, -np.inf), 2)
        assert_allclose(spnorm(self.b, 1), 7)
        assert_allclose(spnorm(self.b, -1), 6)

        # _multi_svd_norm is not implemented for sparse matrix
        assert_raises(NotImplementedError, spnorm, self.b, 2)
        assert_raises(NotImplementedError, spnorm, self.b, -2)

    def test_matrix_norm_axis(self):
        for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 0))):
            assert_allclose(spnorm(m, axis=axis), 7.745966692414834)
            assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834)
            assert_allclose(spnorm(m, np.inf, axis=axis), 9)
            assert_allclose(spnorm(m, -np.inf, axis=axis), 2)
            assert_allclose(spnorm(m, 1, axis=axis), 7)
            assert_allclose(spnorm(m, -1, axis=axis), 6)

    def test_vector_norm(self):
        v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398]
        for m, a in (self.b, 0), (self.b.T, 1):
            for axis in a, (a, ), a-2, (a-2, ):
                assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7])
                assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4])
                assert_allclose(spnorm(m, axis=axis), v)
                assert_allclose(spnorm(m, ord=2, axis=axis), v)
                assert_allclose(spnorm(m, ord=None, axis=axis), v)

    def test_norm_exceptions(self):
        m = self.b
        assert_raises(TypeError, spnorm, m, None, 1.5)
        assert_raises(TypeError, spnorm, m, None, [2])
        assert_raises(ValueError, spnorm, m, None, ())
        assert_raises(ValueError, spnorm, m, None, (0, 1, 2))
        assert_raises(ValueError, spnorm, m, None, (0, 0))
        assert_raises(ValueError, spnorm, m, None, (0, 2))
        assert_raises(ValueError, spnorm, m, None, (-3, 0))
        assert_raises(ValueError, spnorm, m, None, 2)
        assert_raises(ValueError, spnorm, m, None, -3)
        assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0)
        assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1))


class TestVsNumpyNorm(object):
    _sparse_types = (
            scipy.sparse.bsr_matrix,
            scipy.sparse.coo_matrix,
            scipy.sparse.csc_matrix,
            scipy.sparse.csr_matrix,
            scipy.sparse.dia_matrix,
            scipy.sparse.dok_matrix,
            scipy.sparse.lil_matrix,
            )
    _test_matrices = (
            (np.arange(9) - 4).reshape((3, 3)),
            [
                [1, 2, 3],
                [-1, 1, 4]],
            [
                [1, 0, 3],
                [-1, 1, 4j]],
            )

    def test_sparse_matrix_norms(self):
        for sparse_type in self._sparse_types:
            for M in self._test_matrices:
                S = sparse_type(M)
                assert_allclose(spnorm(S), npnorm(M))
                assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro'))
                assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf))
                assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf))
                assert_allclose(spnorm(S, 1), npnorm(M, 1))
                assert_allclose(spnorm(S, -1), npnorm(M, -1))

    def test_sparse_matrix_norms_with_axis(self):
        for sparse_type in self._sparse_types:
            for M in self._test_matrices:
                S = sparse_type(M)
                for axis in None, (0, 1), (1, 0):
                    assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
                    for ord in 'fro', np.inf, -np.inf, 1, -1:
                        assert_allclose(spnorm(S, ord, axis=axis),
                                        npnorm(M, ord, axis=axis))
                # Some numpy matrix norms are allergic to negative axes.
                for axis in (-2, -1), (-1, -2), (1, -2):
                    assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
                    assert_allclose(spnorm(S, 'f', axis=axis),
                                    npnorm(M, 'f', axis=axis))
                    assert_allclose(spnorm(S, 'fro', axis=axis),
                                    npnorm(M, 'fro', axis=axis))

    def test_sparse_vector_norms(self):
        for sparse_type in self._sparse_types:
            for M in self._test_matrices:
                S = sparse_type(M)
                for axis in (0, 1, -1, -2, (0, ), (1, ), (-1, ), (-2, )):
                    assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
                    for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42:
                        assert_allclose(spnorm(S, ord, axis=axis),
                                        npnorm(M, ord, axis=axis))

@@ -0,0 +1,254 @@
"""Test functions for the sparse.linalg._onenormest module
"""

from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
import pytest
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg._onenormest import _onenormest_core, _algorithm_2_2


class MatrixProductOperator(scipy.sparse.linalg.LinearOperator):
    """
    This is purely for onenormest testing.
    """

    def __init__(self, A, B):
        if A.ndim != 2 or B.ndim != 2:
            raise ValueError('expected ndarrays representing matrices')
        if A.shape[1] != B.shape[0]:
            raise ValueError('incompatible shapes')
        self.A = A
        self.B = B
        self.ndim = 2
        self.shape = (A.shape[0], B.shape[1])

    def _matvec(self, x):
        return np.dot(self.A, np.dot(self.B, x))

    def _rmatvec(self, x):
        return np.dot(np.dot(x, self.A), self.B)

    def _matmat(self, X):
        return np.dot(self.A, np.dot(self.B, X))

    @property
    def T(self):
        return MatrixProductOperator(self.B.T, self.A.T)


class TestOnenormest(object):

    @pytest.mark.xslow
    def test_onenormest_table_3_t_2(self):
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 2
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            A = scipy.linalg.inv(np.random.randn(n, n))
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        assert_(0.99 < np.mean(underestimation_ratio) < 1.0)

        # check the max and mean required column resamples
        assert_equal(np.max(nresample_list), 2)
        assert_(0.05 < np.mean(nresample_list) < 0.2)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.9 < proportion_exact < 0.95)

        # check the average number of matrix*vector multiplications
        assert_(3.5 < np.mean(nmult_list) < 4.5)

    @pytest.mark.xslow
    def test_onenormest_table_4_t_7(self):
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 7
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            A = np.random.randint(-1, 2, size=(n, n))
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        assert_(0.90 < np.mean(underestimation_ratio) < 0.99)

        # check the required column resamples
        assert_equal(np.max(nresample_list), 0)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.15 < proportion_exact < 0.25)

        # check the average number of matrix*vector multiplications
        assert_(3.5 < np.mean(nmult_list) < 4.5)

    def test_onenormest_table_5_t_1(self):
        # "note that there is no randomness and hence only one estimate for t=1"
        t = 1
        n = 100
        itmax = 5
        alpha = 1 - 1e-6
        A = -scipy.linalg.inv(np.identity(n) + alpha*np.eye(n, k=1))
        first_col = np.array([1] + [0]*(n-1))
        first_row = np.array([(-alpha)**i for i in range(n)])
        B = -scipy.linalg.toeplitz(first_col, first_row)
        assert_allclose(A, B)
        est, v, w, nmults, nresamples = _onenormest_core(B, B.T, t, itmax)
        exact_value = scipy.linalg.norm(B, 1)
        underest_ratio = est / exact_value
        assert_allclose(underest_ratio, 0.05, rtol=1e-4)
        assert_equal(nmults, 11)
        assert_equal(nresamples, 0)
        # check the non-underscored version of onenormest
        est_plain = scipy.sparse.linalg.onenormest(B, t=t, itmax=itmax)
        assert_allclose(est, est_plain)

    @pytest.mark.xslow
    def test_onenormest_table_6_t_1(self):
        #TODO this test seems to give estimates that match the table,
        #TODO even though no attempt has been made to deal with
        #TODO complex numbers in the one-norm estimation.
        # This will take multiple seconds if your computer is slow like mine.
        # It is stochastic, so the tolerance could be too strict.
        np.random.seed(1234)
        t = 1
        n = 100
        itmax = 5
        nsamples = 5000
        observed = []
        expected = []
        nmult_list = []
        nresample_list = []
        for i in range(nsamples):
            A_inv = np.random.rand(n, n) + 1j * np.random.rand(n, n)
            A = scipy.linalg.inv(A_inv)
            est, v, w, nmults, nresamples = _onenormest_core(A, A.T, t, itmax)
            observed.append(est)
            expected.append(scipy.linalg.norm(A, 1))
            nmult_list.append(nmults)
            nresample_list.append(nresamples)
        observed = np.array(observed, dtype=float)
        expected = np.array(expected, dtype=float)
        relative_errors = np.abs(observed - expected) / expected

        # check the mean underestimation ratio
        underestimation_ratio = observed / expected
        underestimation_ratio_mean = np.mean(underestimation_ratio)
        assert_(0.90 < underestimation_ratio_mean < 0.99)

        # check the required column resamples
        max_nresamples = np.max(nresample_list)
        assert_equal(max_nresamples, 0)

        # check the proportion of norms computed exactly correctly
        nexact = np.count_nonzero(relative_errors < 1e-14)
        proportion_exact = nexact / float(nsamples)
        assert_(0.7 < proportion_exact < 0.8)

        # check the average number of matrix*vector multiplications
        mean_nmult = np.mean(nmult_list)
        assert_(4 < mean_nmult < 5)

    def _help_product_norm_slow(self, A, B):
        # for profiling
        C = np.dot(A, B)
        return scipy.linalg.norm(C, 1)

    def _help_product_norm_fast(self, A, B):
        # for profiling
        t = 2
        itmax = 5
        D = MatrixProductOperator(A, B)
        est, v, w, nmults, nresamples = _onenormest_core(D, D.T, t, itmax)
        return est

    @pytest.mark.slow
    def test_onenormest_linear_operator(self):
        # Define a matrix through its product A B.
        # Depending on the shapes of A and B,
        # it could be easy to multiply this product by a small matrix,
        # but it could be annoying to look at all of
        # the entries of the product explicitly.
        np.random.seed(1234)
        n = 6000
        k = 3
        A = np.random.randn(n, k)
        B = np.random.randn(k, n)
        fast_estimate = self._help_product_norm_fast(A, B)
        exact_value = self._help_product_norm_slow(A, B)
        assert_(fast_estimate <= exact_value <= 3*fast_estimate,
                'fast: %g\nexact:%g' % (fast_estimate, exact_value))

    def test_returns(self):
        np.random.seed(1234)
        A = scipy.sparse.rand(50, 50, 0.1)

        s0 = scipy.linalg.norm(A.todense(), 1)
        s1, v = scipy.sparse.linalg.onenormest(A, compute_v=True)
        s2, w = scipy.sparse.linalg.onenormest(A, compute_w=True)
        s3, v2, w2 = scipy.sparse.linalg.onenormest(A, compute_w=True, compute_v=True)

        assert_allclose(s1, s0, rtol=1e-9)
        assert_allclose(np.linalg.norm(A.dot(v), 1), s0*np.linalg.norm(v, 1), rtol=1e-9)
        assert_allclose(A.dot(v), w, rtol=1e-9)


class TestAlgorithm_2_2(object):

    def test_randn_inv(self):
        np.random.seed(1234)
        n = 20
        nsamples = 100
        for i in range(nsamples):

            # Choose integer t uniformly between 1 and 3 inclusive.
            t = np.random.randint(1, 4)

            # Choose n uniformly between 10 and 40 inclusive.
            n = np.random.randint(10, 41)

            # Sample the inverse of a matrix with random normal entries.
            A = scipy.linalg.inv(np.random.randn(n, n))

            # Compute the 1-norm bounds.
            g, ind = _algorithm_2_2(A, A.T, t)