Static code analysis and corrections

This commit is contained in:
Kristjan Komlosi
2019-07-17 16:06:09 +02:00
parent 674692c2fc
commit 21bfae9fbc
10086 changed files with 2102103 additions and 51 deletions
@@ -0,0 +1,140 @@
#include <Python.h>
#include "math.h"
const double PI = 3.141592653589793238462643383279502884;
static double
_multivariate_typical(int n, double *args)
{
return cos(args[1] * args[0] - args[2] * sin(args[0])) / PI;
}
static double
_multivariate_indefinite(int n, double *args)
{
return -exp(-args[0]) * log(args[0]);
}
static double
_multivariate_sin(int n, double *args)
{
return sin(args[0]);
}
static double
_sin_0(double x, void *user_data)
{
return sin(x);
}
static double
_sin_1(int ndim, double *x, void *user_data)
{
return sin(x[0]);
}
static double
_sin_2(double x)
{
return sin(x);
}
static double
_sin_3(int ndim, double *x)
{
return sin(x[0]);
}
/* Name/address pair describing one routine exported to Python. */
typedef struct {
    char *name;
    void *ptr;
} routine_t;

/* Table of routines whose addresses are published in the module dict as
 * Python integers (see create_pointers below).
 * NOTE(review): storing a function pointer through void * is not strictly
 * portable ISO C, though it works on every platform CPython supports --
 * confirm this is acceptable for this test-only module. */
static const routine_t routines[] = {
    {"_multivariate_typical", &_multivariate_typical},
    {"_multivariate_indefinite", &_multivariate_indefinite},
    {"_multivariate_sin", &_multivariate_sin},
    {"_sin_0", &_sin_0},
    {"_sin_1", &_sin_1},
    {"_sin_2", &_sin_2},
    {"_sin_3", &_sin_3}
};
/* Publish the address of each entry of `routines` in the module's dict as
 * a Python int (via PyLong_FromVoidPtr).
 * Returns 0 on success, -1 on failure with a Python exception set. */
static int create_pointers(PyObject *module)
{
    PyObject *d, *obj = NULL;
    size_t i;  /* size_t matches sizeof, avoiding a signed/unsigned compare */

    d = PyModule_GetDict(module);  /* borrowed reference */
    if (d == NULL) {
        goto fail;
    }

    for (i = 0; i < sizeof(routines) / sizeof(routines[0]); ++i) {
        obj = PyLong_FromVoidPtr(routines[i].ptr);
        if (obj == NULL) {
            goto fail;
        }
        if (PyDict_SetItemString(d, routines[i].name, obj)) {
            goto fail;
        }
        /* The dict holds its own reference now; drop ours. */
        Py_DECREF(obj);
        obj = NULL;
    }
    return 0;

fail:
    Py_XDECREF(obj);
    return -1;
}
#if PY_MAJOR_VERSION >= 3

/* Module definition for Python 3. */
static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "_test_multivariate",
    NULL,               /* no module docstring */
    -1,                 /* no per-interpreter module state */
    NULL, /* Empty methods section */
    NULL,
    NULL,
    NULL,
    NULL
};

/* Python 3 entry point: create the module and register the pointers. */
PyMODINIT_FUNC
PyInit__test_multivariate(void)
{
    PyObject *m;

    m = PyModule_Create(&moduledef);
    if (m == NULL) {
        return NULL;
    }
    if (create_pointers(m)) {
        Py_DECREF(m);
        return NULL;
    }
    return m;
}

#else

/* Python 2 entry point. */
PyMODINIT_FUNC
init_test_multivariate(void)
{
    PyObject *m;

    m = Py_InitModule("_test_multivariate", NULL);
    if (m == NULL) {
        return;
    }
    /* NOTE(review): a create_pointers failure is silently ignored here
     * (Python 2 init functions cannot report errors by return value);
     * presumably acceptable for a test-only module -- confirm. */
    create_pointers(m);
}

#endif
@@ -0,0 +1,240 @@
c banded5x5.f
c
c This Fortran library contains implementations of the
c differential equation
c dy/dt = A*y
c where A is a 5x5 banded matrix (see below for the actual
c values). These functions will be used to test
c scipy.integrate.odeint.
c
c The idea is to solve the system two ways: pure Fortran, and
c using odeint. The "pure Fortran" solver is implemented in
c the subroutine banded5x5_solve below. It calls LSODA to
c solve the system.
c
c To solve the same system using odeint, the functions in this
c file are given a python wrapper using f2py. Then the code
c in test_odeint_jac.py uses the wrapper to implement the
c equation and Jacobian functions required by odeint. Because
c those functions ultimately call the Fortran routines defined
c in this file, the two methods (pure Fortran and odeint) should
c produce exactly the same results. (That's assuming floating
c point calculations are deterministic, which can be an
c incorrect assumption.) If we simply re-implemented the
c equation and Jacobian functions using just python and numpy,
c the floating point calculations would not be performed in
c the same sequence as in the Fortran code, and we would obtain
c different answers. The answer for either method would be
c numerically "correct", but the errors would be different,
c and the counts of function and Jacobian evaluations would
c likely be different.
c
c Initialize the /jac/ common block: the 5x5 Jacobian of the test
c system, stored in packed banded form (4 rows by 5 columns).
      block data jacobian
      implicit none
      double precision bands
      dimension bands(4,5)
      common /jac/ bands

c The data for a banded Jacobian stored in packed banded
c format.  The full Jacobian is
c
c      -1, 0.25,    0,    0,    0
c    0.25,   -5, 0.25,    0,    0
c    0.10, 0.25,  -25, 0.25,    0
c       0, 0.10, 0.25, -125, 0.25
c       0,    0, 0.10, 0.25, -625
c
c The columns in the following layout of numbers are
c the upper diagonal, main diagonal and two lower diagonals
c (i.e. each row in the layout is a column of the packed
c banded Jacobian).  The values 0.00D0 are in the "don't
c care" positions.
      data bands/
     +    0.00D0, -1.0D0, 0.25D0, 0.10D0,
     +    0.25D0, -5.0D0, 0.25D0, 0.10D0,
     +    0.25D0, -25.0D0, 0.25D0, 0.10D0,
     +    0.25D0, -125.0D0, 0.25D0, 0.00D0,
     +    0.25D0, -625.0D0, 0.00D0, 0.00D0
     +    /
      end
c Copy the packed banded Jacobian from the /jac/ common block into the
c caller-supplied array (exposed to Python through f2py).
      subroutine getbands(jac)
      double precision jac
      dimension jac(4, 5)
cf2py intent(out) jac

      double precision bands
      dimension bands(4,5)
      common /jac/ bands

      integer i, j
c     Both nested loops terminate on the shared statement label 5.
      do 5 i = 1, 4
         do 5 j = 1, 5
            jac(i, j) = bands(i, j)
    5 continue
      return
      end
c
c Differential equations, right-hand-side
c
c Computes f = A*y, where A is the banded 5x5 matrix stored in the
c /jac/ common block; only the nonzero band entries of each row are
c accumulated.
      subroutine banded5x5(n, t, y, f)
      implicit none
      integer n
      double precision t, y, f
      dimension y(n), f(n)
      double precision bands
      dimension bands(4,5)
      common /jac/ bands

      f(1) = bands(2,1)*y(1) + bands(1,2)*y(2)
      f(2) = bands(3,1)*y(1) + bands(2,2)*y(2) + bands(1,3)*y(3)
      f(3) = bands(4,1)*y(1) + bands(3,2)*y(2) + bands(2,3)*y(3)
     +       + bands(1,4)*y(4)
      f(4) = bands(4,2)*y(2) + bands(3,3)*y(3) + bands(2,4)*y(4)
     +       + bands(1,5)*y(5)
      f(5) = bands(4,3)*y(3) + bands(3,4)*y(4) + bands(2,5)*y(5)
      return
      end
c
c Jacobian
c
c The subroutine assumes that the full Jacobian is to be computed.
c ml and mu are ignored, and nrowpd is assumed to be n.
c
c Packed storage convention (see the block data unit): bands(i,j)
c holds the full-matrix entry in row i+j-2 of column j (i=1: upper
c diagonal, i=2: main diagonal, i=3,4: first and second lower
c diagonals).  Entries whose row index i+j-2 falls outside 1..n are
c the "don't care" padding values and must be skipped.  The previous
c code wrote jac(i-j, j) under the condition (i-j) > 0, which dropped
c the upper diagonal and misplaced the remaining entries.
      subroutine banded5x5_jac(n, t, y, ml, mu, jac, nrowpd)
      implicit none
      integer n, ml, mu, nrowpd
      double precision t, y, jac
      dimension y(n), jac(nrowpd, n)
      integer i, j
      double precision bands
      dimension bands(4,5)
      common /jac/ bands

      do 15 i = 1, 4
         do 15 j = 1, 5
            if ((i + j - 2) .ge. 1 .and. (i + j - 2) .le. n) then
               jac(i + j - 2, j) = bands(i, j)
            end if
   15 continue
      return
      end
c
c Banded Jacobian
c
c ml = 2, mu = 1
c
c The /jac/ common block already stores the Jacobian in exactly the
c packed banded layout LSODA expects, so it is copied verbatim.
      subroutine banded5x5_bjac(n, t, y, ml, mu, bjac, nrowpd)
      implicit none
      integer n, ml, mu, nrowpd
      double precision t, y, bjac
c     y is dimensioned with n (instead of a hard-coded 5) for
c     consistency with banded5x5_jac; y is not referenced here.
      dimension y(n), bjac(nrowpd, n)
      integer i, j
      double precision bands
      dimension bands(4,5)
      common /jac/ bands

      do 20 i = 1, 4
         do 20 j = 1, 5
            bjac(i, j) = bands(i, j)
   20 continue
      return
      end
c Pure-Fortran reference solver: advance dy/dt = A*y with LSODA for
c nsteps output steps of length dt, starting from the y passed in.
      subroutine banded5x5_solve(y, nsteps, dt, jt, nst, nfe, nje)

c jt is the Jacobian type:
c     jt = 1  Use the full Jacobian.
c     jt = 4  Use the banded Jacobian.
c nst, nfe and nje are outputs:
c     nst:  Total number of internal steps
c     nfe:  Total number of function (i.e. right-hand-side)
c           evaluations
c     nje:  Total number of Jacobian evaluations

      implicit none

      external banded5x5
      external banded5x5_jac
      external banded5x5_bjac
      external LSODA

c Arguments...
      double precision y, dt
      integer nsteps, jt, nst, nfe, nje
cf2py intent(inout) y
cf2py intent(in) nsteps, dt, jt
cf2py intent(out) nst, nfe, nje

c Local variables...
      double precision atol, rtol, t, tout, rwork
      integer iwork
      dimension y(5), rwork(500), iwork(500)
      integer neq, i
      integer itol, iopt, itask, istate, lrw, liw

c Common block...
      double precision jacband
      dimension jacband(4,5)
      common /jac/ jacband

c --- t range ---
      t = 0.0D0

c --- Solver tolerances ---
      rtol = 1.0D-11
      atol = 1.0D-13
      itol = 1

c --- Other LSODA parameters ---
      neq = 5
      itask = 1
      istate = 1
      iopt = 0
c     iwork(1)/iwork(2) are the lower (ml=2) and upper (mu=1)
c     bandwidths, used by LSODA when jt = 4.
      iwork(1) = 2
      iwork(2) = 1
      lrw = 500
      liw = 500

c --- Call LSODA in a loop to compute the solution ---
      do 40 i = 1, nsteps
          tout = i*dt
          if (jt .eq. 1) then
              call LSODA(banded5x5, neq, y, t, tout,
     &                   itol, rtol, atol, itask, istate, iopt,
     &                   rwork, lrw, iwork, liw,
     &                   banded5x5_jac, jt)
          else
              call LSODA(banded5x5, neq, y, t, tout,
     &                   itol, rtol, atol, itask, istate, iopt,
     &                   rwork, lrw, iwork, liw,
     &                   banded5x5_bjac, jt)
          end if
c     Bail out of the loop on any solver error (istate < 0).
   40 if (istate .lt. 0) goto 80

c     Solver statistics reported by LSODA in its work array.
      nst = iwork(11)
      nfe = iwork(12)
      nje = iwork(13)

      return

   80 write (6,89) istate
   89 format(1X,"Error: istate=",I3)
      return
      end
@@ -0,0 +1,224 @@
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import assert_allclose
from scipy.integrate import ode
def _band_count(a):
"""Returns ml and mu, the lower and upper band sizes of a."""
nrows, ncols = a.shape
ml = 0
for k in range(-nrows+1, 0):
if np.diag(a, k).any():
ml = -k
break
mu = 0
for k in range(nrows-1, 0, -1):
if np.diag(a, k).any():
mu = k
break
return ml, mu
def _linear_func(t, y, a):
"""Linear system dy/dt = a * y"""
return a.dot(y)
def _linear_jac(t, y, a):
"""Jacobian of a * y is a."""
return a
def _linear_banded_jac(t, y, a):
    """Jacobian of a * y in banded (lband/uband packed) storage."""
    ml, mu = _band_count(a)
    # Upper diagonals first (left-padded), then the main diagonal,
    # then the lower diagonals (right-padded).
    rows = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)]
    rows.append(np.diag(a))
    rows.extend(np.r_[np.diag(a, k), [0] * (-k)]
                for k in range(-1, -ml - 1, -1))
    return rows
def _solve_linear_sys(a, y0, tend=1, dt=0.1,
                      solver=None, method='bdf', use_jac=True,
                      with_jacobian=False, banded=False):
    """Use scipy.integrate.ode to solve a linear system of ODEs.

    a : square ndarray
        Matrix of the linear system to be solved.
    y0 : ndarray
        Initial condition
    tend : float
        Stop time.
    dt : float
        Step size of the output.
    solver : str
        If not None, this must be "vode", "lsoda" or "zvode".
    method : str
        Either "bdf" or "adams".
    use_jac : bool
        Determines if the jacobian function is passed to ode().
    with_jacobian : bool
        Passed to ode.set_integrator().
    banded : bool
        Determines whether a banded or full jacobian is used.
        If `banded` is True, `lband` and `uband` are determined by the
        values in `a`.
    """
    lband, uband = _band_count(a) if banded else (None, None)

    # Choose which Jacobian callback (if any) to supply.
    if not use_jac:
        r = ode(_linear_func)
    elif banded:
        r = ode(_linear_func, _linear_banded_jac)
    else:
        r = ode(_linear_func, _linear_jac)

    if solver is None:
        solver = "zvode" if np.iscomplexobj(a) else "vode"

    r.set_integrator(solver,
                     with_jacobian=with_jacobian,
                     method=method,
                     lband=lband, uband=uband,
                     rtol=1e-9, atol=1e-10)

    t0 = 0
    r.set_initial_value(y0, t0)
    r.set_f_params(a)
    r.set_jac_params(a)

    t = [t0]
    y = [y0]
    while r.successful() and r.t < tend:
        r.integrate(r.t + dt)
        t.append(r.t)
        y.append(r.y)

    return np.array(t), np.array(y)
def _analytical_solution(a, y0, t):
"""
Analytical solution to the linear differential equations dy/dt = a*y.
The solution is only valid if `a` is diagonalizable.
Returns a 2-d array with shape (len(t), len(y0)).
"""
lam, v = np.linalg.eig(a)
c = np.linalg.solve(v, y0)
e = c * np.exp(lam * t.reshape(-1, 1))
sol = e.dot(v.T)
return sol
def test_banded_ode_solvers():
    # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
    # with a system that has a banded Jacobian matrix.

    t_exact = np.linspace(0, 1.0, 5)

    # --- Real arrays for testing the "lsoda" and "vode" solvers ---

    # lband = 2, uband = 1:
    a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
                       [0.2, -0.5, 0.9, 0.0, 0.0],
                       [0.1, 0.1, -0.4, 0.1, 0.0],
                       [0.0, 0.3, -0.1, -0.9, -0.3],
                       [0.0, 0.0, 0.1, 0.1, -0.7]])

    # lband = 0, uband = 1:
    a_real_upper = np.triu(a_real)

    # lband = 2, uband = 0:
    a_real_lower = np.tril(a_real)

    # lband = 0, uband = 0:
    a_real_diag = np.triu(a_real_lower)

    real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
    real_solutions = []

    # Precompute the analytical solution for each test matrix.
    for a in real_matrices:
        y0 = np.arange(1, a.shape[0] + 1)
        y_exact = _analytical_solution(a, y0, t_exact)
        real_solutions.append((y0, t_exact, y_exact))

    def check_real(idx, solver, meth, use_jac, with_jac, banded):
        # Compare the numerical solution against the analytical one.
        a = real_matrices[idx]
        y0, t_exact, y_exact = real_solutions[idx]
        t, y = _solve_linear_sys(a, y0,
                                 tend=t_exact[-1],
                                 dt=t_exact[1] - t_exact[0],
                                 solver=solver,
                                 method=meth,
                                 use_jac=use_jac,
                                 with_jacobian=with_jac,
                                 banded=banded)
        assert_allclose(t, t_exact)
        assert_allclose(y, y_exact)

    # Exercise every combination of solver options on each real matrix.
    for idx in range(len(real_matrices)):
        p = [['vode', 'lsoda'],  # solver
             ['bdf', 'adams'],   # method
             [False, True],      # use_jac
             [False, True],      # with_jacobian
             [False, True]]      # banded
        for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
            check_real(idx, solver, meth, use_jac, with_jac, banded)

    # --- Complex arrays for testing the "zvode" solver ---

    # complex, lband = 2, uband = 1:
    a_complex = a_real - 0.5j * a_real

    # complex, lband = 0, uband = 0:
    a_complex_diag = np.diag(np.diag(a_complex))

    complex_matrices = [a_complex, a_complex_diag]
    complex_solutions = []

    for a in complex_matrices:
        y0 = np.arange(1, a.shape[0] + 1) + 1j
        y_exact = _analytical_solution(a, y0, t_exact)
        complex_solutions.append((y0, t_exact, y_exact))

    def check_complex(idx, solver, meth, use_jac, with_jac, banded):
        # Same as check_real, but for the complex test matrices.
        a = complex_matrices[idx]
        y0, t_exact, y_exact = complex_solutions[idx]
        t, y = _solve_linear_sys(a, y0,
                                 tend=t_exact[-1],
                                 dt=t_exact[1] - t_exact[0],
                                 solver=solver,
                                 method=meth,
                                 use_jac=use_jac,
                                 with_jacobian=with_jac,
                                 banded=banded)
        assert_allclose(t, t_exact)
        assert_allclose(y, y_exact)

    for idx in range(len(complex_matrices)):
        p = [['bdf', 'adams'],   # method
             [False, True],      # use_jac
             [False, True],      # with_jacobian
             [False, True]]      # banded
        for meth, use_jac, with_jac, banded in itertools.product(*p):
            check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
@@ -0,0 +1,553 @@
from __future__ import division, print_function, absolute_import
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
from numpy.testing import (assert_, assert_array_equal, assert_allclose,
assert_equal)
from pytest import raises as assert_raises
from scipy.sparse import coo_matrix
from scipy.special import erf
from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
estimate_bc_jac, compute_jac_indices,
construct_global_jac, solve_bvp)
def exp_fun(x, y):
    """RHS of the 'exp' test problem: y0' = y1, y1' = y0."""
    rows = (y[1], y[0])
    return np.vstack(rows)
def exp_fun_jac(x, y):
    """Analytical df/dy for exp_fun: [[0, 1], [1, 0]] at every node."""
    m = x.shape[0]
    df_dy = np.zeros((2, 2, m))
    df_dy[0, 1] = 1
    df_dy[1, 0] = 1
    return df_dy
def exp_bc(ya, yb):
    """Boundary residuals for the exp problem: y0(a) - 1 and y0(b)."""
    residuals = (ya[0] - 1, yb[0])
    return np.hstack(residuals)
def exp_bc_complex(ya, yb):
    """Complex variant of exp_bc: y0(a) - (1 + 1j) and y0(b)."""
    residuals = (ya[0] - 1 - 1j, yb[0])
    return np.hstack(residuals)
def exp_bc_jac(ya, yb):
    """Analytical boundary-condition Jacobians for exp_bc."""
    dbc_dya = np.array([[1, 0], [0, 0]])
    dbc_dyb = np.array([[0, 0], [1, 0]])
    return dbc_dya, dbc_dyb
def exp_sol(x):
    """Exact solution of the exp problem; equals 1 at x = 0 and 0 at x = 1."""
    numer = np.exp(-x) - np.exp(x - 2)
    return numer / (1 - np.exp(-2))
def sl_fun(x, y, p):
    """Sturm-Liouville RHS: y0' = y1, y1' = -p0**2 * y0."""
    omega = p[0]
    return np.vstack((y[1], -omega**2 * y[0]))
def sl_fun_jac(x, y, p):
    """Analytical df/dy and df/dp for sl_fun."""
    n, m = y.shape
    df_dy = np.zeros((n, 2, m))
    df_dy[0, 1] = 1
    df_dy[1, 0] = -p[0]**2

    df_dp = np.zeros((n, 1, m))
    df_dp[1, 0] = -2 * p[0] * y[0]
    return df_dy, df_dp
def sl_bc(ya, yb, p):
    """Sturm-Liouville boundary residuals: y0(a), y0(b) and y1(a) - p0."""
    residuals = (ya[0], yb[0], ya[1] - p[0])
    return np.hstack(residuals)
def sl_bc_jac(ya, yb, p):
    """Analytical boundary-condition Jacobians for sl_bc."""
    dbc_dya = np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 1.0]])
    dbc_dyb = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
    dbc_dp = np.array([[0.0], [0.0], [-1.0]])
    return dbc_dya, dbc_dyb, dbc_dp
def sl_sol(x, p):
    """Exact Sturm-Liouville solution: sin(p0 * x)."""
    omega = p[0]
    return np.sin(omega * x)
def emden_fun(x, y):
    """Emden equation RHS: y0' = y1, y1' = -y0**5."""
    rows = (y[1], -y[0]**5)
    return np.vstack(rows)
def emden_fun_jac(x, y):
    """Analytical df/dy for emden_fun."""
    m = x.shape[0]
    df_dy = np.zeros((2, 2, m))
    df_dy[0, 1] = 1
    df_dy[1, 0] = -5 * y[0]**4
    return df_dy
def emden_bc(ya, yb):
    """Emden boundary residuals: y1(a) and y0(b) - sqrt(3/4)."""
    residuals = [ya[1], yb[0] - (3/4)**0.5]
    return np.array(residuals)
def emden_bc_jac(ya, yb):
    """Analytical boundary-condition Jacobians for emden_bc."""
    dbc_dya = np.array([[0, 1], [0, 0]])
    dbc_dyb = np.array([[0, 0], [1, 0]])
    return dbc_dya, dbc_dyb
def emden_sol(x):
    """Exact Emden solution: (1 + x**2 / 3) ** -0.5."""
    base = 1 + x**2/3
    return base**-0.5
def undefined_fun(x, y):
    """Degenerate RHS that is identically zero (used to make solve_bvp fail)."""
    return np.zeros_like(y)
def undefined_bc(ya, yb):
    """Boundary residuals paired with undefined_fun: y0(a) and y0(b) - 1."""
    residuals = [ya[0], yb[0] - 1]
    return np.array(residuals)
def big_fun(x, y):
    """RHS of the big decoupled problem: even components' derivatives are
    the following odd components; odd components have zero derivative."""
    f = np.zeros_like(y)
    f[::2] = y[1::2]
    return f
def big_bc(ya, yb):
    """Boundary residuals for the big problem: even components are 0 at a
    and 1 at b."""
    residuals = [ya[::2], yb[::2] - 1]
    return np.hstack(residuals)
def big_sol(x, n):
    """Reference value for the first solution component of the big problem.

    The caller (test_big_problem) compares the solver's first component
    against this, and the expected value is simply ``x`` itself.  The
    previous body also built a (2*n, x.size) array that was never
    returned -- dead code removed.  ``n`` is kept for interface
    compatibility with existing callers.
    """
    return x
def shock_fun(x, y):
    """RHS of the singularly perturbed shock-layer problem (eps = 1e-3)."""
    eps = 1e-3
    # Keep the bracketed sum in the original order to preserve the exact
    # floating-point result.
    second = -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) +
               np.pi * x * np.sin(np.pi * x)) / eps
    return np.vstack((y[1], second))
def shock_bc(ya, yb):
    """Shock-layer boundary residuals: y0(a) + 2 and y0(b)."""
    residuals = [ya[0] + 2, yb[0]]
    return np.array(residuals)
def shock_sol(x):
    """Exact shock-layer solution: cos(pi x) + erf(x/k)/erf(1/k),
    with k = sqrt(2 * eps) and eps = 1e-3."""
    eps = 1e-3
    k = np.sqrt(2 * eps)
    boundary_layer = erf(x / k) / erf(1 / k)
    return np.cos(np.pi * x) + boundary_layer
def test_modify_mesh():
    """modify_mesh inserts nodes into the requested intervals."""
    # One extra node in interval 0, interval 2 split into three parts.
    x = np.array([0, 1, 3, 9], dtype=float)
    x_new = modify_mesh(x, np.array([0]), np.array([2]))
    assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9]))

    x = np.array([-6, -3, 0, 3, 6], dtype=float)
    x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3]))
    assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6])
def test_compute_fun_jac():
    """estimate_fun_jac matches the analytical Jacobians of the test ODEs."""
    # Problem without unknown parameters: df_dp must be None.
    x = np.linspace(0, 1, 5)
    y = np.empty((2, x.shape[0]))
    y[0] = 0.01
    y[1] = 0.02
    p = np.array([])
    df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p)
    df_dy_an = exp_fun_jac(x, y)
    assert_allclose(df_dy, df_dy_an)
    assert_(df_dp is None)

    # Sturm-Liouville problem with one unknown parameter.
    x = np.linspace(0, np.pi, 5)
    y = np.empty((2, x.shape[0]))
    y[0] = np.sin(x)
    y[1] = np.cos(x)
    p = np.array([1.0])
    df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
    df_dy_an, df_dp_an = sl_fun_jac(x, y, p)
    assert_allclose(df_dy, df_dy_an)
    assert_allclose(df_dp, df_dp_an)

    # Emden problem (no parameters).
    x = np.linspace(0, 1, 10)
    y = np.empty((2, x.shape[0]))
    y[0] = (3/4)**0.5
    y[1] = 1e-4
    p = np.array([])
    df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p)
    df_dy_an = emden_fun_jac(x, y)
    assert_allclose(df_dy, df_dy_an)
    assert_(df_dp is None)
def test_compute_bc_jac():
    """estimate_bc_jac matches the analytical BC Jacobians of the test ODEs."""
    # Exp problem: no parameters, so dbc_dp must be None.
    ya = np.array([-1.0, 2])
    yb = np.array([0.5, 3])
    p = np.array([])
    dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
        lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p)
    dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb)
    assert_allclose(dbc_dya, dbc_dya_an)
    assert_allclose(dbc_dyb, dbc_dyb_an)
    assert_(dbc_dp is None)

    # Sturm-Liouville problem with one unknown parameter.
    ya = np.array([0.0, 1])
    yb = np.array([0.0, -1])
    p = np.array([0.5])
    dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p)
    dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p)
    assert_allclose(dbc_dya, dbc_dya_an)
    assert_allclose(dbc_dyb, dbc_dyb_an)
    assert_allclose(dbc_dp, dbc_dp_an)

    # Emden problem (no parameters).
    ya = np.array([0.5, 100])
    yb = np.array([-1000, 10.5])
    p = np.array([])
    dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
        lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p)
    dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb)
    assert_allclose(dbc_dya, dbc_dya_an)
    assert_allclose(dbc_dyb, dbc_dyb_an)
    assert_(dbc_dp is None)
def test_compute_jac_indices():
    """Sparsity pattern of the global Jacobian for n=2, m=4, k=2."""
    n = 2
    m = 4
    k = 2
    i, j = compute_jac_indices(n, m, k)
    # Rebuild the dense 0/1 occupancy matrix from the index arrays.
    s = coo_matrix((np.ones_like(i), (i, j))).toarray()
    s_true = np.array([
        [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
        [1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
        [0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
        [0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
        [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
        [0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
        [1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
    ])
    assert_array_equal(s, s_true)
def test_compute_global_jac():
    """construct_global_jac agrees with a hand-derived collocation Jacobian."""
    n = 2
    m = 5
    k = 1
    i_jac, j_jac = compute_jac_indices(2, 5, 1)
    x = np.linspace(0, 1, 5)
    h = np.diff(x)
    y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x)))
    p = np.array([3.0])

    f = sl_fun(x, y, p)

    # Midpoint values used by the collocation scheme.
    x_middle = x[:-1] + 0.5 * h
    y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1])

    df_dy, df_dp = sl_fun_jac(x, y, p)
    df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p)
    dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p)

    J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
                             df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
    J = J.toarray()

    def J_block(h, p):
        # 2x4 block coupling one mesh interval in the collocation system.
        return np.array([
            [h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h],
            [0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12]
        ])

    J_true = np.zeros((m * n + k, m * n + k))
    for i in range(m - 1):
        J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p)

    # Derivatives of the collocation residuals with respect to p.
    J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:])
    J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) +
                                       h**2/6 * (y[1, :-1] - y[1, 1:]))

    # Boundary-condition rows.
    J_true[8, 0] = 1
    J_true[9, 8] = 1
    J_true[10, 1] = 1
    J_true[10, 10] = -1

    assert_allclose(J, J_true, rtol=1e-10)

    # Repeat with numerically estimated Jacobians (looser tolerances).
    df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
    df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p)
    dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p)
    J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
                             df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
    J = J.toarray()
    assert_allclose(J, J_true, rtol=1e-8, atol=1e-9)
def test_parameter_validation():
    """solve_bvp rejects malformed x, y, p and fun/S shapes."""
    x = [0, 1, 0.5]          # x is not sorted
    y = np.zeros((2, 3))
    assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)

    x = np.linspace(0, 1, 5)
    y = np.zeros((2, 4))     # wrong number of columns for this x
    assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)

    fun = lambda x, y, p: exp_fun(x, y)
    bc = lambda ya, yb, p: exp_bc(ya, yb)

    y = np.zeros((2, x.shape[0]))
    # p given, but bc returns too few residuals for a parameter problem.
    assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1])

    def wrong_shape_fun(x, y):
        return np.zeros(3)

    assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y)

    S = np.array([[0, 0]])   # S must be square (n x n)
    assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S)
def test_no_params():
    """solve_bvp on the exp problem, with and without analytical Jacobians."""
    x = np.linspace(0, 1, 5)
    x_test = np.linspace(0, 1, 100)
    y = np.zeros((2, x.shape[0]))
    for fun_jac in [None, exp_fun_jac]:
        for bc_jac in [None, exp_bc_jac]:
            sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac,
                            bc_jac=bc_jac)

            assert_equal(sol.status, 0)
            assert_(sol.success)

            # No mesh refinement expected for this smooth problem.
            assert_equal(sol.x.size, 5)

            sol_test = sol.sol(x_test)

            assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5)

            # Relative residual of the interpolant's derivative.
            f_test = exp_fun(x_test, sol_test)
            r = sol.sol(x_test, 1) - f_test
            rel_res = r / (1 + np.abs(f_test))
            norm_res = np.sum(rel_res**2, axis=0)**0.5
            assert_(np.all(norm_res < 1e-3))

            assert_(np.all(sol.rms_residuals < 1e-3))
            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_with_params():
    """Sturm-Liouville problem: the unknown parameter converges to p = 1."""
    x = np.linspace(0, np.pi, 5)
    x_test = np.linspace(0, np.pi, 100)
    y = np.ones((2, x.shape[0]))
    for fun_jac in [None, sl_fun_jac]:
        for bc_jac in [None, sl_bc_jac]:
            sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac,
                            bc_jac=bc_jac)

            assert_equal(sol.status, 0)
            assert_(sol.success)

            assert_(sol.x.size < 10)

            # Started from p = 0.5, should converge to p = 1.
            assert_allclose(sol.p, [1], rtol=1e-4)

            sol_test = sol.sol(x_test)

            assert_allclose(sol_test[0], sl_sol(x_test, [1]),
                            rtol=1e-4, atol=1e-4)

            f_test = sl_fun(x_test, sol_test, [1])
            r = sol.sol(x_test, 1) - f_test
            rel_res = r / (1 + np.abs(f_test))
            norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
            assert_(np.all(norm_res < 1e-3))

            assert_(np.all(sol.rms_residuals < 1e-3))
            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_singular_term():
    """Emden problem with the singular term handled via the S matrix."""
    x = np.linspace(0, 1, 10)
    x_test = np.linspace(0.05, 1, 100)
    y = np.empty((2, 10))
    y[0] = (3/4)**0.5
    y[1] = 1e-4
    S = np.array([[0, 0], [0, -2]])

    for fun_jac in [None, emden_fun_jac]:
        for bc_jac in [None, emden_bc_jac]:
            sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac,
                            bc_jac=bc_jac)

            assert_equal(sol.status, 0)
            assert_(sol.success)

            assert_equal(sol.x.size, 10)

            sol_test = sol.sol(x_test)
            assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5)

            # The full RHS includes the singular contribution S*y/x.
            f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test
            r = sol.sol(x_test, 1) - f_test
            rel_res = r / (1 + np.abs(f_test))
            norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
            assert_(np.all(norm_res < 1e-3))

            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_complex():
    # The test is essentially the same as test_no_params, but boundary
    # conditions are turned into complex.
    x = np.linspace(0, 1, 5)
    x_test = np.linspace(0, 1, 100)
    y = np.zeros((2, x.shape[0]), dtype=complex)
    for fun_jac in [None, exp_fun_jac]:
        for bc_jac in [None, exp_bc_jac]:
            sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac,
                            bc_jac=bc_jac)

            assert_equal(sol.status, 0)
            assert_(sol.success)

            sol_test = sol.sol(x_test)

            # Real and imaginary parts each solve the real exp problem.
            assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5)
            assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5)

            f_test = exp_fun(x_test, sol_test)
            r = sol.sol(x_test, 1) - f_test
            rel_res = r / (1 + np.abs(f_test))
            norm_res = np.sum(np.real(rel_res * np.conj(rel_res)),
                              axis=0) ** 0.5

            assert_(np.all(norm_res < 1e-3))

            assert_(np.all(sol.rms_residuals < 1e-3))
            assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
            assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_failures():
    """Failure statuses of solve_bvp on small/degenerate problems."""
    # Too few allowed nodes for the requested tolerance -> status 1.
    x = np.linspace(0, 1, 2)
    y = np.zeros((2, x.size))
    res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5)
    assert_equal(res.status, 1)
    assert_(not res.success)

    # Degenerate (all-zero RHS) problem -> status 2.
    x = np.linspace(0, 1, 5)
    y = np.zeros((2, x.size))
    res = solve_bvp(undefined_fun, undefined_bc, x, y)
    assert_equal(res.status, 2)
    assert_(not res.success)
def test_big_problem():
    """solve_bvp on a 60-equation decoupled system."""
    n = 30
    x = np.linspace(0, 1, 5)
    y = np.zeros((2 * n, x.size))
    sol = solve_bvp(big_fun, big_bc, x, y)

    assert_equal(sol.status, 0)
    assert_(sol.success)

    sol_test = sol.sol(x)

    assert_allclose(sol_test[0], big_sol(x, n))

    f_test = big_fun(x, sol_test)
    r = sol.sol(x, 1) - f_test
    rel_res = r / (1 + np.abs(f_test))
    norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5
    assert_(np.all(norm_res < 1e-3))

    assert_(np.all(sol.rms_residuals < 1e-3))
    assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
    assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_shock_layer():
    """Shock-layer problem: mesh gets refined but stays under 110 nodes."""
    x = np.linspace(-1, 1, 5)
    x_test = np.linspace(-1, 1, 100)
    y = np.zeros((2, x.size))
    sol = solve_bvp(shock_fun, shock_bc, x, y)

    assert_equal(sol.status, 0)
    assert_(sol.success)

    assert_(sol.x.size < 110)

    sol_test = sol.sol(x_test)
    assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5)

    f_test = shock_fun(x_test, sol_test)
    r = sol.sol(x_test, 1) - f_test
    rel_res = r / (1 + np.abs(f_test))
    norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
    assert_(np.all(norm_res < 1e-3))

    assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
    assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_verbose():
    # Smoke test that checks the printing does something and does not crash
    x = np.linspace(0, 1, 5)
    y = np.zeros((2, x.shape[0]))
    for verbose in [0, 1, 2]:
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose)
            text = sys.stdout.getvalue()
        finally:
            # Always restore stdout, even if the solver raises.
            sys.stdout = old_stdout

        assert_(sol.success)
        if verbose == 0:
            assert_(not text, text)
        if verbose >= 1:
            assert_("Solved in" in text, text)
        if verbose >= 2:
            assert_("Max residual" in text, text)
@@ -0,0 +1,835 @@
# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
"""
Tests for numerical integration.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
allclose)
from scipy._lib._numpy_compat import _assert_warns
from scipy._lib.six import xrange
from numpy.testing import (
assert_, assert_array_almost_equal,
assert_allclose, assert_array_equal, assert_equal)
from pytest import raises as assert_raises
from scipy.integrate import odeint, ode, complex_ode
#------------------------------------------------------------------------------
# Test ODE integrators
#------------------------------------------------------------------------------
class TestOdeint(object):
    # Check integrate.odeint

    def _do_problem(self, problem):
        # Integrate one test problem over a fixed grid and verify the
        # result, exercising the tfirst and Dfun variants where available.
        t = arange(0.0, problem.stop_t, 0.05)

        # Basic case
        z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
        assert_(problem.verify(z, t))

        # Use tfirst=True
        z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
                             full_output=True, tfirst=True)
        assert_(problem.verify(z, t))

        if hasattr(problem, 'jac'):
            # Use Dfun
            z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
                                 full_output=True)
            assert_(problem.verify(z, t))

            # Use Dfun and tfirst=True
            z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
                                 Dfun=lambda t, y: problem.jac(y, t),
                                 full_output=True, tfirst=True)
            assert_(problem.verify(z, t))

    def test_odeint(self):
        # odeint only supports real-valued problems; skip the complex ones.
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            self._do_problem(problem)
class TestODEClass(object):
    # Base class shared by the `ode` and `complex_ode` test suites.

    ode_class = None   # Set in subclass.

    def _do_problem(self, problem, integrator, method='adams'):
        # Run one problem through the given integrator and verify it.

        # ode has callback arguments in different order than odeint
        f = lambda t, z: problem.f(z, t)
        jac = None
        if hasattr(problem, 'jac'):
            jac = lambda t, z: problem.jac(z, t)

        integrator_params = {}
        if problem.lband is not None or problem.uband is not None:
            integrator_params['uband'] = problem.uband
            integrator_params['lband'] = problem.lband

        ig = self.ode_class(f, jac)
        # Tighter tolerances than the problem's own, so verify() passes.
        ig.set_integrator(integrator,
                          atol=problem.atol/10,
                          rtol=problem.rtol/10,
                          method=method,
                          **integrator_params)

        ig.set_initial_value(problem.z0, t=0.0)
        z = ig.integrate(problem.stop_t)

        assert_array_equal(z, ig.y)
        assert_(ig.successful(), (problem, method))
        assert_(ig.get_return_code() > 0, (problem, method))
        assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
class TestOde(TestODEClass):
    # Tests of scipy.integrate.ode on real-valued problems.

    ode_class = ode

    def test_vode(self):
        # Check the vode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            # 'adams' is only appropriate for non-stiff problems.
            if not problem.stiff:
                self._do_problem(problem, 'vode', 'adams')
            self._do_problem(problem, 'vode', 'bdf')

    def test_zvode(self):
        # Check the zvode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if not problem.stiff:
                self._do_problem(problem, 'zvode', 'adams')
            self._do_problem(problem, 'zvode', 'bdf')

    def test_lsoda(self):
        # Check the lsoda solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            self._do_problem(problem, 'lsoda')

    def test_dopri5(self):
        # Check the dopri5 solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dopri5')

    def test_dop853(self):
        # Check the dop853 solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.cmplx:
                continue
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dop853')

    def test_concurrent_fail(self):
        # The Fortran-backed solvers keep global state, so two instances
        # of the same solver cannot be stepped in an interleaved fashion.
        for sol in ('vode', 'zvode', 'lsoda'):
            f = lambda t, y: 1.0

            r = ode(f).set_integrator(sol)
            r.set_initial_value(0, 0)

            r2 = ode(f).set_integrator(sol)
            r2.set_initial_value(0, 0)

            r.integrate(r.t + 0.1)
            r2.integrate(r2.t + 0.1)

            assert_raises(RuntimeError, r.integrate, r.t + 0.1)

    def test_concurrent_ok(self):
        # Sequential (non-interleaved) use of two instances must work.
        f = lambda t, y: 1.0

        for k in xrange(3):
            for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.1)
                assert_allclose(r2.y, 0.2)

            # dopri5/dop853 keep no global state, so interleaving is fine.
            for sol in ('dopri5', 'dop853'):
                r = ode(f).set_integrator(sol)
                r.set_initial_value(0, 0)

                r2 = ode(f).set_integrator(sol)
                r2.set_initial_value(0, 0)

                r.integrate(r.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)
                r.integrate(r.t + 0.1)
                r2.integrate(r2.t + 0.1)

                assert_allclose(r.y, 0.3)
                assert_allclose(r2.y, 0.2)
class TestComplexOde(TestODEClass):
    # Tests of scipy.integrate.complex_ode (wraps real solvers for
    # complex-valued problems).

    ode_class = complex_ode

    def test_vode(self):
        # Check the vode solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if not problem.stiff:
                self._do_problem(problem, 'vode', 'adams')
            else:
                self._do_problem(problem, 'vode', 'bdf')

    def test_lsoda(self):
        # Check the lsoda solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            self._do_problem(problem, 'lsoda')

    def test_dopri5(self):
        # Check the dopri5 solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dopri5')

    def test_dop853(self):
        # Check the dop853 solver
        for problem_cls in PROBLEMS:
            problem = problem_cls()
            if problem.stiff:
                continue
            if hasattr(problem, 'jac'):
                continue
            self._do_problem(problem, 'dop853')
class TestSolout(object):
    # Check integrate.ode correctly handles solout for dopri5 and dop853

    def _run_solout_test(self, integrator):
        # Check correct usage of solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]

        def solout(t, y):
            # Record every accepted step.
            ts.append(t)
            ys.append(y.copy())

        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]

        ig = ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)

    def test_solout(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_test(integrator)

    def _run_solout_after_initial_test(self, integrator):
        # Check if solout works even if it is set after the initial value.
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]

        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())

        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]

        ig = ode(rhs).set_integrator(integrator)
        ig.set_initial_value(y0, t0)
        ig.set_solout(solout)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)

    def test_solout_after_initial(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_after_initial_test(integrator)

    def _run_solout_break_test(self, integrator):
        # Check correct usage of stopping via solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 10.0
        y0 = [1.0, 2.0]

        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())
            # Returning -1 asks the integrator to stop early.
            if t > tend/2.0:
                return -1

        def rhs(t, y):
            return [y[0] + y[1], -y[1]**2]

        ig = ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        # Integration must have stopped after the midpoint, before tend.
        assert_(ts[-1] > tend/2.0)
        assert_(ts[-1] < tend)

    def test_solout_break(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_break_test(integrator)
class TestComplexSolout(object):
    # Check integrate.ode correctly handles solout for dopri5 and dop853
    # when driven through the complex_ode wrapper (complex-valued RHS).
    def _run_solout_test(self, integrator):
        # Check correct usage of solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 20.0
        y0 = [0.0]
        def solout(t, y):
            # Record every accepted step; copy y because the integrator
            # reuses the underlying buffer.
            ts.append(t)
            ys.append(y.copy())
        def rhs(t, y):
            # Pole at t = 10 + 1j, just off the (real) integration path.
            return [1.0/(t - 10.0 - 1j)]
        ig = complex_ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        # solout must see the initial point first and the final point last,
        # the latter matching the value returned by integrate().
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        assert_equal(ts[-1], tend)
    def test_solout(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_test(integrator)
    def _run_solout_break_test(self, integrator):
        # Check correct usage of stopping via solout
        ts = []
        ys = []
        t0 = 0.0
        tend = 20.0
        y0 = [0.0]
        def solout(t, y):
            ts.append(t)
            ys.append(y.copy())
            if t > tend/2.0:
                # Returning -1 asks the integrator to stop early.
                return -1
        def rhs(t, y):
            return [1.0/(t - 10.0 - 1j)]
        ig = complex_ode(rhs).set_integrator(integrator)
        ig.set_solout(solout)
        ig.set_initial_value(y0, t0)
        ret = ig.integrate(tend)
        assert_array_equal(ys[0], y0)
        assert_array_equal(ys[-1], ret)
        assert_equal(ts[0], t0)
        # The stop must happen after the midpoint but before tend.
        assert_(ts[-1] > tend/2.0)
        assert_(ts[-1] < tend)
    def test_solout_break(self):
        for integrator in ('dopri5', 'dop853'):
            self._run_solout_break_test(integrator)
#------------------------------------------------------------------------------
# Test problems
#------------------------------------------------------------------------------
class ODE:
    """Base class describing one ODE test problem.

    Subclasses override the class attributes below and provide ``f`` (the
    right-hand side), optionally ``jac``, and a ``verify`` method comparing
    a computed trajectory against the exact solution.
    """
    stiff = False      # does the problem need an implicit/stiff solver?
    cmplx = False      # is the solution complex-valued?
    stop_t = 1         # integrate up to this time
    z0 = []            # initial condition
    # Banded-Jacobian bandwidths (None means full Jacobian).
    lband = None
    uband = None
    # Tolerances shared by the solvers and by ``verify``.
    atol = 1e-6
    rtol = 1e-5


class SimpleOscillator(ODE):
    r"""
    Free vibration of a simple oscillator::
        m \ddot{u} + k u = 0,  u(0) = u_0,  \dot{u}(0) = \dot{u}_0
    Solution::
        u(t) = u_0 \cos(\omega t) + \dot{u}_0 \sin(\omega t)/\omega,
        \omega = \sqrt{k/m}
    """
    stop_t = 1 + 0.09
    z0 = array([1.0, 0.1], float)
    k = 4.0
    m = 1.0

    def f(self, z, t):
        # State z = [u, u']; write u'' = -(k/m) u as a linear system z' = A z.
        A = zeros((2, 2), float)
        A[0, 1] = 1.0
        A[1, 0] = -self.k / self.m
        return dot(A, z)

    def verify(self, zs, t):
        omega = sqrt(self.k / self.m)
        expected = (self.z0[0] * cos(omega * t)
                    + self.z0[1] * sin(omega * t) / omega)
        return allclose(expected, zs[:, 0], atol=self.atol, rtol=self.rtol)


class ComplexExp(ODE):
    r"""The equation :lm:`\dot u = i u`, solved for five phases at once."""
    stop_t = 1.23*pi
    z0 = exp([1j, 2j, 3j, 4j, 5j])
    cmplx = True

    def f(self, z, t):
        return 1j * z

    def jac(self, z, t):
        return 1j * eye(5)

    def verify(self, zs, t):
        expected = self.z0 * exp(1j * t)
        return allclose(expected, zs, atol=self.atol, rtol=self.rtol)


class Pi(ODE):
    r"""Integrate 1/(t + 1j) from t=-10 to t=10 (path shifted to [0, 20])."""
    stop_t = 20
    z0 = [0]
    cmplx = True

    def f(self, z, t):
        # s = t - 10 traverses [-10, 10].
        return array([1. / (t - 10 + 1j)])

    def verify(self, zs, t):
        # Closed form of the definite integral: -2j*arctan(10).
        expected = -2j * np.arctan(10)
        return allclose(expected, zs[-1, :], atol=self.atol, rtol=self.rtol)
class CoupledDecay(ODE):
    r"""
    3 coupled decays suited for banded treatment
    (banded mode makes it necessary when N>>3)
    """
    stiff = True
    stop_t = 0.5
    z0 = [5.0, 7.0, 13.0]
    # Jacobian is lower-bidiagonal: one subdiagonal, no superdiagonal.
    lband = 1
    uband = 0
    lmbd = [0.17, 0.23, 0.29] # fictitious decay constants
    def f(self, z, t):
        # Decay chain: species i decays and feeds species i+1.
        lmbd = self.lmbd
        return np.array([-lmbd[0]*z[0],
                         -lmbd[1]*z[1] + lmbd[0]*z[0],
                         -lmbd[2]*z[2] + lmbd[1]*z[1]])
    def jac(self, z, t):
        # The full Jacobian is
        #
        #    [-lmbd[0]      0         0   ]
        #    [ lmbd[0]  -lmbd[1]      0   ]
        #    [    0      lmbd[1]  -lmbd[2]]
        #
        # The lower and upper bandwidths are lband=1 and uband=0, resp.
        # The representation of this array in packed format is
        #
        #    [-lmbd[0]  -lmbd[1]  -lmbd[2]]
        #    [ lmbd[0]   lmbd[1]      0   ]
        lmbd = self.lmbd
        j = np.zeros((self.lband + self.uband + 1, 3), order='F')
        def set_j(ri, ci, val):
            # Store full-matrix entry (ri, ci) at its packed position.
            j[self.uband + ri - ci, ci] = val
        set_j(0, 0, -lmbd[0])
        set_j(1, 0, lmbd[0])
        set_j(1, 1, -lmbd[1])
        set_j(2, 1, lmbd[1])
        set_j(2, 2, -lmbd[2])
        return j
    def verify(self, zs, t):
        # Formulae derived by hand
        lmbd = np.array(self.lmbd)
        d10 = lmbd[1] - lmbd[0]
        d21 = lmbd[2] - lmbd[1]
        d20 = lmbd[2] - lmbd[0]
        e0 = np.exp(-lmbd[0] * t)
        e1 = np.exp(-lmbd[1] * t)
        e2 = np.exp(-lmbd[2] * t)
        u = np.vstack((
            self.z0[0] * e0,
            self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
            self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
            lmbd[1] * lmbd[0] * self.z0[0] / d10 *
            (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
        return allclose(u, zs, atol=self.atol, rtol=self.rtol)
# All problems exercised by the solver test classes above.
PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
#------------------------------------------------------------------------------
def f(t, x):
    """Harmonic oscillator x'' = -x written as a first-order system."""
    return [x[1], -x[0]]


def jac(t, x):
    """Jacobian of ``f``."""
    return array([[0.0, 1.0],
                  [-1.0, 0.0]])


def f1(t, x, omega):
    """Oscillator RHS with one scalar frequency parameter."""
    return [omega*x[1], -omega*x[0]]


def jac1(t, x, omega):
    """Jacobian of ``f1``."""
    return array([[0.0, omega],
                  [-omega, 0.0]])


def f2(t, x, omega1, omega2):
    """Oscillator RHS with two scalar frequency parameters."""
    return [omega1*x[1], -omega2*x[0]]


def jac2(t, x, omega1, omega2):
    """Jacobian of ``f2``."""
    return array([[0.0, omega1],
                  [-omega2, 0.0]])


def fv(t, x, omega):
    """Oscillator RHS with a vector parameter ``omega = [w1, w2]``."""
    return [omega[0]*x[1], -omega[1]*x[0]]


def jacv(t, x, omega):
    """Jacobian of ``fv``."""
    return array([[0.0, omega[0]],
                  [-omega[1], 0.0]])
class ODECheckParameterUse(object):
    """Call an ode-class solver with several cases of parameter use."""
    # solver_name must be set before tests can be run with this class.
    # Set these in subclasses.
    solver_name = ''
    solver_uses_jac = False
    def _get_solver(self, f, jac):
        # Build an ode solver around f/jac with tight tolerances.
        solver = ode(f, jac)
        if self.solver_uses_jac:
            solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
                                  with_jacobian=self.solver_uses_jac)
        else:
            # XXX Shouldn't set_integrator *always* accept the keyword arg
            # 'with_jacobian', and perhaps raise an exception if it is set
            # to True if the solver can't actually use it?
            solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
        return solver
    def _check_solver(self, solver):
        # Integrate the oscillator over half a period: [cos, sin] at t=pi
        # must come back as [-1, 0].
        ic = [1.0, 0.0]
        solver.set_initial_value(ic, 0.0)
        solver.integrate(pi)
        assert_array_almost_equal(solver.y, [-1.0, 0.0])
    def test_no_params(self):
        solver = self._get_solver(f, jac)
        self._check_solver(solver)
    def test_one_scalar_param(self):
        solver = self._get_solver(f1, jac1)
        omega = 1.0
        solver.set_f_params(omega)
        if self.solver_uses_jac:
            solver.set_jac_params(omega)
        self._check_solver(solver)
    def test_two_scalar_params(self):
        solver = self._get_solver(f2, jac2)
        omega1 = 1.0
        omega2 = 1.0
        solver.set_f_params(omega1, omega2)
        if self.solver_uses_jac:
            solver.set_jac_params(omega1, omega2)
        self._check_solver(solver)
    def test_vector_param(self):
        solver = self._get_solver(fv, jacv)
        omega = [1.0, 1.0]
        solver.set_f_params(omega)
        if self.solver_uses_jac:
            solver.set_jac_params(omega)
        self._check_solver(solver)
    def test_warns_on_failure(self):
        # Set nsteps small to ensure failure
        solver = self._get_solver(f, jac)
        solver.set_integrator(self.solver_name, nsteps=1)
        ic = [1.0, 0.0]
        solver.set_initial_value(ic, 0.0)
        _assert_warns(UserWarning, solver.integrate, pi)
# Concrete parameter-use checks, one per integrator backend.
class TestDOPRI5CheckParameterUse(ODECheckParameterUse):
    solver_name = 'dopri5'
    solver_uses_jac = False
class TestDOP853CheckParameterUse(ODECheckParameterUse):
    solver_name = 'dop853'
    solver_uses_jac = False
class TestVODECheckParameterUse(ODECheckParameterUse):
    solver_name = 'vode'
    solver_uses_jac = True
class TestZVODECheckParameterUse(ODECheckParameterUse):
    solver_name = 'zvode'
    solver_uses_jac = True
class TestLSODACheckParameterUse(ODECheckParameterUse):
    solver_name = 'lsoda'
    solver_uses_jac = True
def test_odeint_trivial_time():
    # Regression test for gh-4282: odeint must succeed when given a single
    # time point together with full_output=True.
    initial = 1
    times = [0]
    y, info = odeint(lambda y, t: -y, initial, times, full_output=True)
    assert_array_equal(y, np.array([[initial]]))
def test_odeint_banded_jacobian():
    # Test the use of the `Dfun`, `ml` and `mu` options of odeint.
    def func(y, t, c):
        return c.dot(y)

    def jac(y, t, c):
        return c

    def jac_transpose(y, t, c):
        return c.T.copy(order='C')

    def bjac_rows(y, t, c):
        # Banded storage of c for ml=2, mu=1: rows run from the uppermost
        # diagonal down.  np.vstack replaces np.row_stack, which was
        # deprecated and removed in NumPy 2.0 (identical behavior).
        jac = np.vstack((np.r_[0, np.diag(c, 1)],
                         np.diag(c),
                         np.r_[np.diag(c, -1), 0],
                         np.r_[np.diag(c, -2), 0, 0]))
        return jac

    def bjac_cols(y, t, c):
        return bjac_rows(y, t, c).T.copy(order='C')

    c = array([[-205, 0.01, 0.00, 0.0],
               [0.1, -2.50, 0.02, 0.0],
               [1e-3, 0.01, -2.0, 0.01],
               [0.00, 0.00, 0.1, -1.0]])
    y0 = np.ones(4)
    t = np.array([0, 5, 10, 100])

    # Use the full Jacobian.
    sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=jac)
    # Use the transposed full Jacobian, with col_deriv=True.
    sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=jac_transpose, col_deriv=True)
    # Use the banded Jacobian.
    sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=bjac_rows, ml=2, mu=1)
    # Use the transposed banded Jacobian, with col_deriv=True.
    sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True,
                         atol=1e-13, rtol=1e-11, mxstep=10000,
                         Dfun=bjac_cols, ml=2, mu=1, col_deriv=True)

    assert_allclose(sol1, sol2, err_msg="sol1 != sol2")
    assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3")
    assert_allclose(sol3, sol4, err_msg="sol3 != sol4")

    # Verify that the number of jacobian evaluations was the same for the
    # calls of odeint with a full jacobian and with a banded jacobian. This is
    # a regression test--there was a bug in the handling of banded jacobians
    # that resulted in an incorrect jacobian matrix being passed to the LSODA
    # code. That would cause errors or excessive jacobian evaluations.
    assert_array_equal(info1['nje'], info2['nje'])
    assert_array_equal(info3['nje'], info4['nje'])

    # Test the use of tfirst
    sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,),
                             full_output=True, atol=1e-13, rtol=1e-11,
                             mxstep=10000,
                             Dfun=lambda t, y, c: jac(y, t, c), tfirst=True)
    # The code should execute the exact same sequence of floating point
    # calculations, so these should be exactly equal. We'll be safe and use
    # a small tolerance.
    assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty")
def test_odeint_errors():
    # Exceptions raised inside user callbacks (RHS or Dfun) must propagate
    # out of odeint instead of being swallowed by the Fortran wrappers.
    def rhs_ok_1d(x, t):
        return -100*x

    def rhs_zero_div(x, t):
        return 1.0/0

    def rhs_bad_type(x, t):
        return "foo"

    def jac_zero_div(x, t):
        return 1.0/0

    def jac_bad_type(x, t):
        return [["foo"]]

    def rhs_ok_2d(x, t):
        return [-100*x[0], -0.1*x[1]]

    def jac_zero_div_2d(x, t):
        return [[1.0/0, 0], [0, -0.1]]

    assert_raises(ZeroDivisionError, odeint, rhs_zero_div, 1.0, [0, 1])
    assert_raises(ValueError, odeint, rhs_bad_type, 1.0, [0, 1])
    assert_raises(ZeroDivisionError, odeint, rhs_ok_1d, 1.0, [0, 1],
                  Dfun=jac_zero_div)
    assert_raises(ValueError, odeint, rhs_ok_1d, 1.0, [0, 1],
                  Dfun=jac_bad_type)
    assert_raises(ZeroDivisionError, odeint, rhs_ok_2d, [1.0, 1.0], [0, 1],
                  Dfun=jac_zero_div_2d)
def test_odeint_bad_shapes():
    # odeint must reject malformed y0/t, and must detect callbacks whose
    # output shape disagrees with the state vector.
    def two_component_rhs(x, t):
        return [1, -1]

    def scalar_rhs(x, t):
        return -100*x

    def wrong_shape_jac(x, t):
        return [[0, 0, 0]]

    # y0 must be at most 1-d.
    assert_raises(ValueError, odeint, scalar_rhs, [[0, 0], [0, 0]], [0, 1])
    # t must be at most 1-d.
    assert_raises(ValueError, odeint, scalar_rhs, [10.0], [[0, 1], [2, 3]])
    # y0 is scalar 10, but the RHS returns a length-2 list.
    assert_raises(RuntimeError, odeint, two_component_rhs, 10, [0, 1])
    # The Jacobian callback returns an array of the wrong shape.
    assert_raises(RuntimeError, odeint, scalar_rhs, [10, 10], [0, 1],
                  Dfun=wrong_shape_jac)
def test_repeated_t_values():
    """Regression test for gh-8217: repeated (and empty) output times."""
    def decay(x, t):
        return -0.25*x

    # All-zero time vector: the solution stays at the initial condition.
    t = np.zeros(10)
    sol = odeint(decay, [1.], t)
    assert_array_equal(sol, np.ones((len(t), 1)))

    # tau is the half-life, so values halve at each tau step; the
    # duplicated 2*tau entry must simply appear twice in the output.
    tau = 4*np.log(2)
    t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau]
    sol = odeint(decay, [1, 2], t, rtol=1e-12, atol=1e-12)
    expected_sol = np.array([[1.0, 2.0]]*9 +
                            [[0.5, 1.0],
                             [0.25, 0.5],
                             [0.25, 0.5],
                             [0.125, 0.25]])
    assert_allclose(sol, expected_sol)

    # Edge case: empty t sequence.
    sol = odeint(decay, [1.], [])
    assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1)))

    # t values are not monotonic.
    assert_raises(ValueError, odeint, decay, [1.], [0, 1, 0.5, 0])
    assert_raises(ValueError, odeint, decay, [1, 2, 3], [0, -1, -2, 3])
@@ -0,0 +1,793 @@
from __future__ import division, print_function, absolute_import
from itertools import product
from numpy.testing import (assert_, assert_allclose,
assert_equal, assert_no_warnings)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
import numpy as np
from scipy.optimize._numdiff import group_columns
from scipy.integrate import solve_ivp, RK23, RK45, Radau, BDF, LSODA
from scipy.integrate import OdeSolution
from scipy.integrate._ivp.common import num_jac
from scipy.integrate._ivp.base import ConstantDenseOutput
from scipy.sparse import coo_matrix, csc_matrix
def fun_linear(t, y):
    """Linear system y' = A y with A = [[-1, -5], [1, 1]]."""
    return np.array([-y[0] - 5 * y[1], y[0] + y[1]])


def jac_linear():
    """Constant Jacobian of ``fun_linear``."""
    return np.array([[-1, -5], [1, 1]])


def sol_linear(t):
    """Exact solution of ``fun_linear`` for the y0 used in the tests."""
    s, c = np.sin(2 * t), np.cos(2 * t)
    return np.vstack((-5 * s, 2 * c + s))


def fun_rational(t, y):
    """RHS whose exact solution is ``sol_rational``."""
    u, v = y[0], y[1]
    return np.array([v / t, v * (u + 2 * v - 1) / (t * (u - 1))])


def fun_rational_vectorized(t, y):
    """Vectorized variant of ``fun_rational`` (y has shape (2, k))."""
    u, v = y[0], y[1]
    return np.vstack((v / t, v * (u + 2 * v - 1) / (t * (u - 1))))


def jac_rational(t, y):
    """Dense analytic Jacobian of ``fun_rational``."""
    u, v = y[0], y[1]
    return np.array([
        [0, 1 / t],
        [-2 * v ** 2 / (t * (u - 1) ** 2), (u + 4 * v - 1) / (t * (u - 1))]
    ])


def jac_rational_sparse(t, y):
    """Sparse (CSC) analytic Jacobian of ``fun_rational``."""
    u, v = y[0], y[1]
    return csc_matrix([
        [0, 1 / t],
        [-2 * v ** 2 / (t * (u - 1) ** 2), (u + 4 * v - 1) / (t * (u - 1))]
    ])


def sol_rational(t):
    """Exact solution matching ``fun_rational``."""
    return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2))
def fun_medazko(t, y):
    # RHS of the "Medakzo" reaction-diffusion test problem discretized on
    # n cells with two interleaved components per cell.
    # NOTE(review): indexing follows the classic stiff-ODE test-set layout;
    # confirm against the original reference before changing any index.
    n = y.shape[0] // 2
    k = 100
    c = 4
    # Time-dependent left boundary value (switched off after t = 5).
    phi = 2 if t <= 5 else 0
    # Pad the state with boundary cells on both ends.
    y = np.hstack((phi, 0, y, y[-2]))
    d = 1 / n
    j = np.arange(n) + 1
    alpha = 2 * (j * d - 1) ** 3 / c ** 2
    beta = (j * d - 1) ** 4 / c ** 2
    # Index helpers into the padded array (even entries = first component,
    # odd entries = second component of each cell).
    j_2_p1 = 2 * j + 2
    j_2_m3 = 2 * j - 2
    j_2_m1 = 2 * j
    j_2 = 2 * j + 1
    f = np.empty(2 * n)
    # Advection + diffusion + reaction for the first component.
    f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +
              beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -
              k * y[j_2_m1] * y[j_2])
    # Pure reaction for the second component.
    f[1::2] = -k * y[j_2] * y[j_2_m1]
    return f
def medazko_sparsity(n):
    # Build the sparsity pattern of the fun_medazko Jacobian as a COO
    # matrix: even rows couple to neighbouring cells, odd rows couple only
    # to themselves and their partner component.
    cols = []
    rows = []
    i = np.arange(n) * 2
    cols.append(i[1:])
    rows.append(i[1:] - 2)
    cols.append(i)
    rows.append(i)
    cols.append(i)
    rows.append(i + 1)
    cols.append(i[:-1])
    rows.append(i[:-1] + 2)
    i = np.arange(n) * 2 + 1
    cols.append(i)
    rows.append(i)
    cols.append(i)
    rows.append(i - 1)
    cols = np.hstack(cols)
    rows = np.hstack(rows)
    return coo_matrix((np.ones_like(cols), (cols, rows)))
def fun_complex(t, y):
    """Complex decay y' = -y."""
    return -y


def jac_complex(t, y):
    """Jacobian of ``fun_complex``: minus the identity."""
    return -np.eye(y.shape[0])


def jac_complex_sparse(t, y):
    """Sparse (CSC) version of ``jac_complex``."""
    return csc_matrix(jac_complex(t, y))


def sol_complex(t):
    """Exact solution of ``fun_complex`` for y0 = 0.5 + 1j."""
    values = (0.5 + 1j) * np.exp(-t)
    return values.reshape((1, -1))


def compute_error(y, y_true, rtol, atol):
    """RMS of the tolerance-scaled error between ``y`` and ``y_true``."""
    scaled = (y - y_true) / (atol + rtol * np.abs(y_true))
    return np.sqrt(np.sum(np.real(scaled * scaled.conj()), axis=0)
                   / scaled.shape[0])
def test_integration():
    # Exercise solve_ivp over all solvers, both integration directions,
    # scalar/vectorized RHS and every Jacobian variant.
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]
    for vectorized, method, t_span, jac in product(
            [False, True],
            ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA'],
            [[5, 9], [5, 1]],
            [None, jac_rational, jac_rational_sparse]):
        if vectorized:
            fun = fun_rational_vectorized
        else:
            fun = fun_rational
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       "The following arguments have no effect for a chosen solver: `jac`")
            res = solve_ivp(fun, t_span, y0, rtol=rtol,
                            atol=atol, method=method, dense_output=True,
                            jac=jac, vectorized=vectorized)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)
        assert_(res.nfev < 40)
        if method in ['RK23', 'RK45', 'LSODA']:
            # These never call the user Jacobian or do LU factorizations.
            assert_equal(res.njev, 0)
            assert_equal(res.nlu, 0)
        else:
            assert_(0 < res.njev < 3)
            assert_(0 < res.nlu < 10)
        y_true = sol_rational(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 5))
        # Interpolant accuracy on a grid and at the midpoint.
        tc = np.linspace(*t_span)
        yc_true = sol_rational(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))
        tc = (t_span[0] + t_span[-1]) / 2
        yc_true = sol_rational(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))
        # LSODA for some reason doesn't pass the polynomial through the
        # previous points exactly after the order change. It might be some
        # bug in the LSODA implementation or maybe we're missing something.
        if method != 'LSODA':
            assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
def test_integration_complex():
    # Same sweep for a complex-valued system (only solvers supporting
    # complex state: RK23, RK45, BDF).
    rtol = 1e-3
    atol = 1e-6
    y0 = [0.5 + 1j]
    t_span = [0, 1]
    tc = np.linspace(t_span[0], t_span[1])
    for method, jac in product(['RK23', 'RK45', 'BDF'],
                               [None, jac_complex, jac_complex_sparse]):
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       "The following arguments have no effect for a chosen solver: `jac`")
            res = solve_ivp(fun_complex, t_span, y0, method=method,
                            dense_output=True, rtol=rtol, atol=atol, jac=jac)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)
        assert_(res.nfev < 25)
        if method == 'BDF':
            assert_equal(res.njev, 1)
            assert_(res.nlu < 6)
        else:
            assert_equal(res.njev, 0)
            assert_equal(res.nlu, 0)
        y_true = sol_complex(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 5))
        yc_true = sol_complex(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))
def test_integration_sparse_difference():
    # Large Medakzo system solved with a supplied Jacobian sparsity
    # pattern, so implicit solvers use sparse finite differences.
    n = 200
    t_span = [0, 20]
    y0 = np.zeros(2 * n)
    y0[1::2] = 1
    sparsity = medazko_sparsity(n)
    for method in ['BDF', 'Radau']:
        res = solve_ivp(fun_medazko, t_span, y0, method=method,
                        jac_sparsity=sparsity)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)
        # Spot-check a few components against reference values.
        # NOTE(review): presumably from a tight-tolerance reference run --
        # confirm the provenance before tightening these tolerances.
        assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)
        assert_allclose(res.y[79, -1], 0, atol=1e-3)
        assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)
        assert_allclose(res.y[149, -1], 0, atol=1e-3)
        assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)
        assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)
        assert_allclose(res.y[238, -1], 0, atol=1e-3)
        assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)
def test_integration_const_jac():
    # A constant Jacobian may be given directly (dense or sparse) instead
    # of a callable; njev must stay 0 in that case.
    rtol = 1e-3
    atol = 1e-6
    y0 = [0, 2]
    t_span = [0, 2]
    J = jac_linear()
    J_sparse = csc_matrix(J)
    for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):
        res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,
                        method=method, dense_output=True, jac=jac)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)
        assert_(res.nfev < 100)
        assert_equal(res.njev, 0)
        assert_(0 < res.nlu < 15)
        y_true = sol_linear(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 10))
        tc = np.linspace(*t_span)
        yc_true = sol_linear(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 15))
        assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)
def test_events():
    # Event detection for all solvers: counting, direction filtering,
    # termination, and backward integration.
    def event_rational_1(t, y):
        return y[0] - y[1] ** 0.7
    def event_rational_2(t, y):
        return y[1] ** 0.6 - y[0]
    def event_rational_3(t, y):
        return t - 7.4
    # Event 3 terminates the integration when triggered.
    event_rational_3.terminal = True
    for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
        res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 1)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        assert_(7.3 < res.t_events[1][0] < 7.7)
        # direction = 1: only rising zero-crossings count.
        event_rational_1.direction = 1
        event_rational_2.direction = 1
        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 0)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        # direction = -1: only falling zero-crossings count.
        event_rational_1.direction = -1
        event_rational_2.direction = -1
        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 0)
        assert_equal(res.t_events[1].size, 1)
        assert_(7.3 < res.t_events[1][0] < 7.7)
        event_rational_1.direction = 0
        event_rational_2.direction = 0
        # With the terminal event present, status must become 1.
        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
                        events=(event_rational_1, event_rational_2,
                                event_rational_3), dense_output=True)
        assert_equal(res.status, 1)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 0)
        assert_equal(res.t_events[2].size, 1)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        assert_(7.3 < res.t_events[2][0] < 7.5)
        # A single (non-tuple) event callable must be accepted too.
        res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,
                        events=event_rational_1, dense_output=True)
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        # Also test that termination by event doesn't break interpolants.
        tc = np.linspace(res.t[0], res.t[-1])
        yc_true = sol_rational(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, 1e-3, 1e-6)
        assert_(np.all(e < 5))
    # Test in backward direction.
    event_rational_1.direction = 0
    event_rational_2.direction = 0
    for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 1)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        assert_(7.3 < res.t_events[1][0] < 7.7)
        # Going backwards, the crossing signs flip relative to forward.
        event_rational_1.direction = -1
        event_rational_2.direction = -1
        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 1)
        assert_equal(res.t_events[1].size, 0)
        assert_(5.3 < res.t_events[0][0] < 5.7)
        event_rational_1.direction = 1
        event_rational_2.direction = 1
        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
                        events=(event_rational_1, event_rational_2))
        assert_equal(res.status, 0)
        assert_equal(res.t_events[0].size, 0)
        assert_equal(res.t_events[1].size, 1)
        assert_(7.3 < res.t_events[1][0] < 7.7)
        event_rational_1.direction = 0
        event_rational_2.direction = 0
        res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,
                        events=(event_rational_1, event_rational_2,
                                event_rational_3), dense_output=True)
        assert_equal(res.status, 1)
        assert_equal(res.t_events[0].size, 0)
        assert_equal(res.t_events[1].size, 1)
        assert_equal(res.t_events[2].size, 1)
        assert_(7.3 < res.t_events[1][0] < 7.7)
        assert_(7.3 < res.t_events[2][0] < 7.5)
        # Also test that termination by event doesn't break interpolants.
        tc = np.linspace(res.t[-1], res.t[0])
        yc_true = sol_rational(tc)
        yc = res.sol(tc)
        e = compute_error(yc, yc_true, 1e-3, 1e-6)
        assert_(np.all(e < 5))
def test_max_step():
    # max_step must bound every step; invalid or impossibly small values
    # must raise / fail cleanly.
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]
    for method in [RK23, RK45, Radau, BDF, LSODA]:
        for t_span in ([5, 9], [5, 1]):
            res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
                            max_step=0.5, atol=atol, method=method,
                            dense_output=True)
            assert_equal(res.t[0], t_span[0])
            assert_equal(res.t[-1], t_span[-1])
            # No accepted step may exceed max_step (in either direction).
            assert_(np.all(np.abs(np.diff(res.t)) <= 0.5))
            assert_(res.t_events is None)
            assert_(res.success)
            assert_equal(res.status, 0)
            y_true = sol_rational(res.t)
            e = compute_error(res.y, y_true, rtol, atol)
            assert_(np.all(e < 5))
            tc = np.linspace(*t_span)
            yc_true = sol_rational(tc)
            yc = res.sol(tc)
            e = compute_error(yc, yc_true, rtol, atol)
            assert_(np.all(e < 5))
            # See comment in test_integration.
            if method is not LSODA:
                assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
            # A negative max_step is rejected at construction time.
            assert_raises(ValueError, method, fun_rational, t_span[0], y0,
                          t_span[1], max_step=-1)
            if method is not LSODA:
                # A vanishingly small max_step makes the step fail.
                solver = method(fun_rational, t_span[0], y0, t_span[1],
                                rtol=rtol, atol=atol, max_step=1e-20)
                message = solver.step()
                assert_equal(solver.status, 'failed')
                assert_("step size is less" in message)
                assert_raises(RuntimeError, solver.step)
def test_first_step():
    # A user-supplied first_step must be honored exactly; invalid values
    # (negative, or larger than the span) must raise.
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]
    first_step = 0.1
    for method in [RK23, RK45, Radau, BDF, LSODA]:
        for t_span in ([5, 9], [5, 1]):
            res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,
                            max_step=0.5, atol=atol, method=method,
                            dense_output=True, first_step=first_step)
            assert_equal(res.t[0], t_span[0])
            assert_equal(res.t[-1], t_span[-1])
            assert_allclose(first_step, np.abs(res.t[1] - 5))
            assert_(res.t_events is None)
            assert_(res.success)
            assert_equal(res.status, 0)
            y_true = sol_rational(res.t)
            e = compute_error(res.y, y_true, rtol, atol)
            assert_(np.all(e < 5))
            tc = np.linspace(*t_span)
            yc_true = sol_rational(tc)
            yc = res.sol(tc)
            e = compute_error(yc, yc_true, rtol, atol)
            assert_(np.all(e < 5))
            # See comment in test_integration.
            if method is not LSODA:
                assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
            assert_raises(ValueError, method, fun_rational, t_span[0], y0,
                          t_span[1], first_step=-1)
            assert_raises(ValueError, method, fun_rational, t_span[0], y0,
                          t_span[1], first_step=5)
def test_t_eval():
    # t_eval restricts the reported times; it may exclude the endpoints,
    # but must lie inside the integration span.
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]
    for t_span in ([5, 9], [5, 1]):
        t_eval = np.linspace(t_span[0], t_span[1], 10)
        res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
                        t_eval=t_eval)
        assert_equal(res.t, t_eval)
        assert_(res.t_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)
        y_true = sol_rational(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 5))
    # Non-uniform forward grid including both endpoints.
    t_eval = [5, 5.01, 7, 8, 8.01, 9]
    res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
                    t_eval=t_eval)
    assert_equal(res.t, t_eval)
    assert_(res.t_events is None)
    assert_(res.success)
    assert_equal(res.status, 0)
    y_true = sol_rational(res.t)
    e = compute_error(res.y, y_true, rtol, atol)
    assert_(np.all(e < 5))
    # Decreasing grid for backward integration.
    t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]
    res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
                    t_eval=t_eval)
    assert_equal(res.t, t_eval)
    assert_(res.t_events is None)
    assert_(res.success)
    assert_equal(res.status, 0)
    # Grids that exclude the span endpoints are valid too.
    t_eval = [5.01, 7, 8, 8.01]
    res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,
                    t_eval=t_eval)
    assert_equal(res.t, t_eval)
    assert_(res.t_events is None)
    assert_(res.success)
    assert_equal(res.status, 0)
    y_true = sol_rational(res.t)
    e = compute_error(res.y, y_true, rtol, atol)
    assert_(np.all(e < 5))
    t_eval = [4.99, 3, 1.5, 1.1, 1.01]
    res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,
                    t_eval=t_eval)
    assert_equal(res.t, t_eval)
    assert_(res.t_events is None)
    assert_(res.success)
    assert_equal(res.status, 0)
    # Values outside the span must be rejected.
    t_eval = [4, 6]
    assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,
                  rtol=rtol, atol=atol, t_eval=t_eval)
def test_t_eval_dense_output():
    # Supplying both t_eval and dense_output must not change the reported
    # times or values.
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]
    t_span = [5, 9]
    t_eval = np.linspace(t_span[0], t_span[1], 10)
    res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
                    t_eval=t_eval)
    res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,
                      t_eval=t_eval, dense_output=True)
    assert_equal(res.t, t_eval)
    assert_(res.t_events is None)
    assert_(res.success)
    assert_equal(res.status, 0)
    assert_equal(res.t, res_d.t)
    assert_equal(res.y, res_d.y)
    assert_(res_d.t_events is None)
    assert_(res_d.success)
    assert_equal(res_d.status, 0)
    # if t and y are equal only test values for one case
    y_true = sol_rational(res.t)
    e = compute_error(res.y, y_true, rtol, atol)
    assert_(np.all(e < 5))
def test_no_integration():
    # A zero-length span must return the initial state and a constant
    # dense-output interpolant, for every solver.
    def rhs(t, y):
        return -y
    for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
        sol = solve_ivp(rhs, [4, 4], [2, 3], method=method,
                        dense_output=True)
        assert_equal(sol.sol(4), [2, 3])
        assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])
def test_no_integration_class():
    # Solver classes with t0 == t_bound finish after one "step" and must
    # give a constant dense output, including for an empty system.
    for solver_cls in [RK23, RK45, Radau, BDF, LSODA]:
        solver = solver_cls(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)
        solver.step()
        assert_equal(solver.status, 'finished')
        interp = solver.dense_output()
        assert_equal(interp(0.0), [10.0, 0.0])
        assert_equal(interp([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])

        solver = solver_cls(lambda t, y: -y, 0.0, [], np.inf)
        solver.step()
        assert_equal(solver.status, 'finished')
        interp = solver.dense_output()
        assert_equal(interp(100.0), [])
        assert_equal(interp([0, 1, 2]), np.empty((0, 3)))
def test_empty():
    # A zero-dimensional system must integrate without warnings and yield
    # empty interpolants, for finite and infinite end points alike.
    def rhs(t, y):
        return np.zeros((0,))
    y0 = np.zeros((0,))
    for end in (10, np.inf):
        for method in ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA']:
            sol = assert_no_warnings(solve_ivp, rhs, [0, end], y0,
                                     method=method, dense_output=True)
            assert_equal(sol.sol(10), np.zeros((0,)))
            assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))
def test_ConstantDenseOutput():
    # A constant interpolant returns its value for scalar and vector t,
    # and degenerates gracefully to an empty system.
    interp = ConstantDenseOutput(0, 1, np.array([1, 2]))
    assert_allclose(interp(1.5), [1, 2])
    assert_allclose(interp([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])

    interp = ConstantDenseOutput(0, 1, np.array([]))
    assert_allclose(interp(1.5), np.empty(0))
    assert_allclose(interp([1, 1.5, 2]), np.empty((0, 3)))
def test_classes():
    # Check the common OdeSolver attribute protocol before and after the
    # first step, for every solver class.
    y0 = [1 / 3, 2 / 9]
    for cls in [RK23, RK45, Radau, BDF, LSODA]:
        solver = cls(fun_rational, 5, y0, np.inf)
        assert_equal(solver.n, 2)
        assert_equal(solver.status, 'running')
        assert_equal(solver.t_bound, np.inf)
        assert_equal(solver.direction, 1)
        assert_equal(solver.t, 5)
        assert_equal(solver.y, y0)
        assert_(solver.step_size is None)
        if cls is not LSODA:
            assert_(solver.nfev > 0)
            assert_(solver.njev >= 0)
            assert_equal(solver.nlu, 0)
        else:
            assert_equal(solver.nfev, 0)
            assert_equal(solver.njev, 0)
            assert_equal(solver.nlu, 0)
        # dense_output is only valid after at least one successful step.
        assert_raises(RuntimeError, solver.dense_output)
        message = solver.step()
        assert_equal(solver.status, 'running')
        assert_equal(message, None)
        assert_equal(solver.n, 2)
        assert_equal(solver.t_bound, np.inf)
        assert_equal(solver.direction, 1)
        assert_(solver.t > 5)
        assert_(not np.all(np.equal(solver.y, y0)))
        assert_(solver.step_size > 0)
        assert_(solver.nfev > 0)
        assert_(solver.njev >= 0)
        assert_(solver.nlu >= 0)
        sol = solver.dense_output()
        assert_allclose(sol(5), y0, rtol=1e-15, atol=0)
def test_OdeSolution():
    # Piecewise interpolant container: segment lookup for increasing and
    # decreasing time grids, plus the degenerate single-point case.
    ts = np.array([0, 2, 5], dtype=float)
    s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
    s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
    sol = OdeSolution(ts, [s1, s2])
    assert_equal(sol(-1), [-1])
    assert_equal(sol(1), [-1])
    assert_equal(sol(2), [-1])
    assert_equal(sol(3), [1])
    assert_equal(sol(5), [1])
    assert_equal(sol(6), [1])
    assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),
                 np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))
    ts = np.array([10, 4, -3])
    s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))
    s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))
    sol = OdeSolution(ts, [s1, s2])
    assert_equal(sol(11), [-1])
    assert_equal(sol(10), [-1])
    assert_equal(sol(5), [-1])
    assert_equal(sol(4), [-1])
    assert_equal(sol(0), [1])
    assert_equal(sol(-3), [1])
    assert_equal(sol(-4), [1])
    assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),
                 np.array([[-1, 1, -1, 1, -1, 1, -1]]))
    ts = np.array([1, 1])
    s = ConstantDenseOutput(1, 1, np.array([10]))
    sol = OdeSolution(ts, [s])
    assert_equal(sol(0), [10])
    assert_equal(sol(1), [10])
    assert_equal(sol(2), [10])
    assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))
def test_num_jac():
def fun(t, y):
return np.vstack([
-0.04 * y[0] + 1e4 * y[1] * y[2],
0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,
3e7 * y[1] ** 2
])
def jac(t, y):
return np.array([
[-0.04, 1e4 * y[2], 1e4 * y[1]],
[0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],
[0, 6e7 * y[1], 0]
])
t = 1
y = np.array([1, 0, 0])
J_true = jac(t, y)
threshold = 1e-5
f = fun(t, y).ravel()
J_num, factor = num_jac(fun, t, y, f, threshold, None)
assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
J_num, factor = num_jac(fun, t, y, f, threshold, factor)
assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)
def test_num_jac_sparse():
    """num_jac with a (structure, groups) sparsity argument must agree with
    the dense code path, which is assumed correct."""
    def fun(t, y):
        # Vectorized RHS coupling each component only to its neighbours,
        # so the Jacobian is tridiagonal.
        e = y[1:]**3 - y[:-1]**2
        z = np.zeros(y.shape[1])
        return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))

    def structure(n):
        # 0/1 tridiagonal sparsity pattern matching `fun`'s Jacobian.
        A = np.zeros((n, n), dtype=int)
        A[0, 0] = 1
        A[0, 1] = 1
        for i in range(1, n - 1):
            A[i, i - 1: i + 2] = 1
        A[-1, -1] = 1
        A[-1, -2] = 1
        return A

    np.random.seed(0)
    n = 20
    y = np.random.randn(n)
    A = structure(n)
    groups = group_columns(A)

    f = fun(0, y[:, None]).ravel()

    # Compare dense and sparse results, assuming that dense implementation
    # is correct (as it is straightforward).
    J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None,
                                          sparsity=(A, groups))
    J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None)
    assert_allclose(J_num_dense, J_num_sparse.toarray(),
                    rtol=1e-12, atol=1e-14)
    assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)

    # Take small factors to trigger their recomputing inside.
    factor = np.random.uniform(0, 1e-12, size=n)
    J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,
                                          sparsity=(A, groups))
    J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)
    assert_allclose(J_num_dense, J_num_sparse.toarray(),
                    rtol=1e-12, atol=1e-14)
    assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)
@@ -0,0 +1,75 @@
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy.integrate import odeint
import scipy.integrate._test_odeint_banded as banded5x5
def rhs(y, t):
    """Right-hand side of the banded5x5 system, wrapped into the
    (y, t) argument order that odeint expects. The Fortran routine
    fills its output array in place."""
    out = np.zeros_like(y)
    banded5x5.banded5x5(t, y, out)
    return out
def jac(y, t):
    """Full (n x n) Jacobian of the banded5x5 system.  The array is
    Fortran-ordered because the extension routine writes into it."""
    size = len(y)
    full = np.zeros((size, size), order='F')
    banded5x5.banded5x5_jac(t, y, 1, 1, full)
    return full
def bjac(y, t):
    """Banded Jacobian of the banded5x5 system in LSODA packed form
    (ml=2, mu=1 -> ml+mu+1 = 4 rows), Fortran-ordered for the
    extension routine to fill in place."""
    cols = len(y)
    banded = np.zeros((4, cols), order='F')
    banded5x5.banded5x5_bjac(t, y, 1, 1, banded)
    return banded
# LSODA `jt` (Jacobian type) codes passed through to banded5x5_solve:
# 1 = user-supplied full Jacobian, 4 = user-supplied banded Jacobian.
JACTYPE_FULL = 1
JACTYPE_BANDED = 4
def check_odeint(jactype):
    """Integrate the banded5x5 system with odeint and compare the final
    state and the solver statistics against the pure-Fortran LSODA driver
    compiled into the test extension.

    jactype is the LSODA ``jt`` code: JACTYPE_FULL uses the full Jacobian,
    JACTYPE_BANDED the banded one with ml=2, mu=1.
    """
    if jactype == JACTYPE_FULL:
        ml = None
        mu = None
        jacobian = jac
    elif jactype == JACTYPE_BANDED:
        ml = 2
        mu = 1
        jacobian = bjac
    else:
        raise ValueError("invalid jactype: %r" % (jactype,))

    y0 = np.arange(1.0, 6.0)

    # These tolerances must match the tolerances used in banded5x5.f.
    rtol = 1e-11
    atol = 1e-13

    dt = 0.125
    nsteps = 64
    t = dt * np.arange(nsteps+1)

    sol, info = odeint(rhs, y0, t,
                       Dfun=jacobian, ml=ml, mu=mu,
                       atol=atol, rtol=rtol, full_output=True)
    yfinal = sol[-1]
    # Cumulative step / function-eval / Jacobian-eval counts at the end.
    odeint_nst = info['nst'][-1]
    odeint_nfe = info['nfe'][-1]
    odeint_nje = info['nje'][-1]

    y1 = y0.copy()
    # Pure Fortran solution. y1 is modified in-place.
    nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype)

    # It is likely that yfinal and y1 are *exactly* the same, but
    # we'll be cautious and use assert_allclose.
    assert_allclose(yfinal, y1, rtol=1e-12)
    # Statistics must match exactly: same solver, same tolerances.
    assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje))
def test_odeint_full_jac():
    """odeint with a full user-supplied Jacobian reproduces the Fortran run."""
    check_odeint(JACTYPE_FULL)
def test_odeint_banded_jac():
    """odeint with a banded user-supplied Jacobian reproduces the Fortran run."""
    check_odeint(JACTYPE_BANDED)
@@ -0,0 +1,418 @@
from __future__ import division, print_function, absolute_import
import sys
import math
import numpy as np
from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf
from numpy.testing import (assert_,
assert_allclose, assert_array_less, assert_almost_equal)
import pytest
from pytest import raises as assert_raises
from scipy.integrate import quad, dblquad, tplquad, nquad
from scipy._lib.six import xrange
from scipy._lib._ccallback import LowLevelCallable
import ctypes
import ctypes.util
from scipy._lib._ccallback_c import sine_ctypes
import scipy.integrate._test_multivariate as clib_test
def assert_quad(value_and_err, tabled_value, errTol=1.5e-8):
    """Check a (value, error) pair from a quadrature routine.

    The value must lie within its own reported error of ``tabled_value``;
    the reported error itself must be below ``errTol`` (skip that check
    when errTol is None).
    """
    estimate, reported_err = value_and_err
    assert_allclose(estimate, tabled_value, atol=reported_err, rtol=0)
    if errTol is None:
        return
    assert_array_less(reported_err, errTol)
def get_clib_test_routine(name, restype, *argtypes):
    """Fetch a function-pointer address exported by the test extension and
    cast it to a ctypes callable with the given prototype."""
    address = getattr(clib_test, name)
    prototype = ctypes.CFUNCTYPE(restype, *argtypes)
    return ctypes.cast(address, prototype)
class TestCtypesQuad(object):
    """quad() accepting raw ctypes function pointers and LowLevelCallables."""

    def setup_method(self):
        # Locate the platform's C math library.
        if sys.platform == 'win32':
            if sys.version_info < (3, 5):
                files = [ctypes.util.find_msvcrt()]
            else:
                files = ['api-ms-win-crt-math-l1-1-0.dll']
        elif sys.platform == 'darwin':
            files = ['libm.dylib']
        else:
            files = ['libm.so', 'libm.so.6']

        for file in files:
            try:
                self.lib = ctypes.CDLL(file)
                break
            except OSError:
                pass
        else:
            # This test doesn't work on some Linux platforms (Fedora for
            # example) that put an ld script in libm.so - see gh-5370.
            # BUGFIX: this class does not derive from unittest.TestCase, so
            # self.skipTest() would raise AttributeError instead of skipping;
            # pytest.skip() is the correct way to skip from a plain class.
            pytest.skip("Ctypes can't import libm.so")

        restype = ctypes.c_double
        argtypes = (ctypes.c_double,)
        for name in ['sin', 'cos', 'tan']:
            func = getattr(self.lib, name)
            func.restype = restype
            func.argtypes = argtypes

    def test_typical(self):
        # ctypes-wrapped libm functions must agree with the Python callables.
        assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
        assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
        assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])

    def test_ctypes_sine(self):
        # Smoke test: LowLevelCallable built from scipy's own ctypes sine.
        quad(LowLevelCallable(sine_ctypes), 0, 1)

    def test_ctypes_variants(self):
        # Every C signature supported for low-level callbacks.
        sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double,
                                      ctypes.c_double, ctypes.c_void_p)

        sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double,
                                      ctypes.c_int, ctypes.POINTER(ctypes.c_double),
                                      ctypes.c_void_p)

        sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double,
                                      ctypes.c_double)

        sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double,
                                      ctypes.c_int, ctypes.POINTER(ctypes.c_double))

        # Deliberately mismatched prototype for '_sin_3': accepted only by
        # the legacy calling convention.
        sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double,
                                      ctypes.c_int, ctypes.c_double)

        all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
        legacy_sigs = [sin_2, sin_4]
        legacy_only_sigs = [sin_4]

        # LowLevelCallables work for new signatures
        for j, func in enumerate(all_sigs):
            callback = LowLevelCallable(func)
            if func in legacy_only_sigs:
                assert_raises(ValueError, quad, callback, 0, pi)
            else:
                assert_allclose(quad(callback, 0, pi)[0], 2.0)

        # Plain ctypes items work only for legacy signatures
        for j, func in enumerate(legacy_sigs):
            if func in legacy_sigs:
                assert_allclose(quad(func, 0, pi)[0], 2.0)
            else:
                assert_raises(ValueError, quad, func, 0, pi)
class TestMultivariateCtypesQuad(object):
    """quad() with multivariate ctypes callbacks from the test extension."""

    def setup_method(self):
        # All three routines share the legacy multivariate signature
        # double f(int n, double *args); the pointer is declared as
        # c_double here because the legacy quad path passes it that way.
        restype = ctypes.c_double
        argtypes = (ctypes.c_int, ctypes.c_double)
        for name in ['_multivariate_typical', '_multivariate_indefinite',
                     '_multivariate_sin']:
            func = get_clib_test_routine(name, restype, *argtypes)
            setattr(self, name, func)

    def test_typical(self):
        # 1) Typical function with two extra arguments:
        assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
                    0.30614353532540296487)

    def test_indefinite(self):
        # 2) Infinite integration limits --- Euler's constant
        assert_quad(quad(self._multivariate_indefinite, 0, Inf),
                    0.577215664901532860606512)

    def test_threadsafety(self):
        # Ensure multivariate ctypes are threadsafe
        def threadsafety(y):
            return y + quad(self._multivariate_sin, 0, 1)[0]
        assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
class TestQuad(object):
    """quad/dblquad/tplquad checked against closed-form reference values."""

    def test_typical(self):
        # 1) Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n*x-z*sin(x))/pi
        assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)

    def test_indefinite(self):
        # 2) Infinite integration limits --- Euler's constant
        def myfunc(x):  # Euler's constant integrand
            return -exp(-x)*log(x)
        assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512)

    def test_singular(self):
        # 3) Singular points in region of integration.
        def myfunc(x):
            if 0 < x < 2.5:
                return sin(x)
            elif 2.5 <= x <= 5.0:
                return exp(-x)
            else:
                return 0.0

        assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
                    1 - cos(2.5) + exp(-2.5) - exp(-5.0))

    def test_sine_weighted_finite(self):
        # 4) Sine weighted integral (finite limits)
        def myfunc(x, a):
            return exp(a*(x-1))

        ome = 2.0**3.4
        assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
                    (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))

    def test_sine_weighted_infinite(self):
        # 5) Sine weighted integral (infinite limits)
        def myfunc(x, a):
            return exp(-x*a)

        a = 4.0
        ome = 3.0
        assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
                    ome/(a**2 + ome**2))

    def test_cosine_weighted_infinite(self):
        # 6) Cosine weighted integral (negative infinite limits)
        def myfunc(x, a):
            return exp(x*a)

        a = 2.5
        ome = 2.3
        assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome),
                    a/(a**2 + ome**2))

    def test_algebraic_log_weight(self):
        # 7) Algebraic-logarithmic weight.
        def myfunc(x, a):
            return 1/(1+x+2**(-a))

        a = 1.5
        assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
                         wvar=(-0.5, -0.5)),
                    pi/sqrt((1+2**(-a))**2 - 1))

    def test_cauchypv_weight(self):
        # 8) Cauchy principal value weighting w(x) = 1/(x-c)
        def myfunc(x, a):
            return 2.0**(-a)/((x-1)**2+4.0**(-a))

        a = 0.4
        tabledValue = ((2.0**(-0.4)*log(1.5) -
                        2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
                        arctan(2.0**(a+2)) -
                        arctan(2.0**a)) /
                       (4.0**(-a) + 1))
        assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
                    tabledValue, errTol=1.9e-8)

    def test_b_less_than_a(self):
        # Swapping the limits must flip the sign of the result.
        def f(x, p, q):
            return p * np.exp(-q*x)

        val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
        val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))

    def test_b_less_than_a_2(self):
        def f(x, s):
            return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)

        val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
        val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))

    def test_b_less_than_a_3(self):
        # Same sign-flip property for the weighted ('alg') code path.
        def f(x):
            return 1.0

        val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
        val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))

    def test_b_less_than_a_full_output(self):
        def f(x):
            return 1.0

        res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
        res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
        err = max(res_1[1], res_2[1])
        assert_allclose(res_1[0], -res_2[0], atol=err)

    def test_double_integral(self):
        # 9) Double Integral test
        def simpfunc(y, x):  # Note order of arguments.
            return x+y

        a, b = 1.0, 2.0
        assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
                    5/6.0 * (b**3.0-a**3.0))

    def test_double_integral2(self):
        # Extra positional args are forwarded to the integrand.
        def func(x0, x1, t0, t1):
            return x0 + x1 + t0 + t1
        g = lambda x: x
        h = lambda x: 2 * x
        args = 1, 2
        assert_quad(dblquad(func, 1, 2, g, h, args=args), 35./6 + 9*.5)

    def test_double_integral3(self):
        # Scalar (constant) inner limits are accepted as well as callables.
        def func(x0, x1):
            return x0 + x1 + 1 + 2
        assert_quad(dblquad(func, 1, 2, 1, 2), 6.)

    def test_triple_integral(self):
        # 10) Triple Integral test
        def simpfunc(z, y, x, t):  # Note order of arguments.
            return (x+y+z)*t

        a, b = 1.0, 2.0
        assert_quad(tplquad(simpfunc, a, b,
                            lambda x: x, lambda x: 2*x,
                            lambda x, y: x - y, lambda x, y: x + y,
                            (2.,)),
                    2*8/3.0 * (b**4.0 - a**4.0))
class TestNQuad(object):
    """nquad: fixed and variable limits, per-dimension options, and
    consistency with quad/dblquad/tplquad."""

    def test_fixed_limits(self):
        def func1(x0, x1, x2, x3):
            # Integrand with a jump; its location is passed to quadpack
            # through the 'points' option computed in opts_basic.
            val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
                   (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
            return val

        def opts_basic(*args):
            return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}

        res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
                    opts=[opts_basic, {}, {}, {}], full_output=True)
        assert_quad(res[:-1], 1.5267454070738635)
        # full_output dict reports a sane function-evaluation count.
        assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)

    def test_variable_limits(self):
        scale = .1

        def func2(x0, x1, x2, x3, t0, t1):
            val = (x0*x1*x3**2 + np.sin(x2) + 1 +
                   (1 if x0 + t1*x1 - t0 > 0 else 0))
            return val

        # Each limit function receives the outer variables plus args.
        def lim0(x1, x2, x3, t0, t1):
            return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
                    scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]

        def lim1(x2, x3, t0, t1):
            return [scale * (t0*x2 + t1*x3) - 1,
                    scale * (t0*x2 + t1*x3) + 1]

        def lim2(x3, t0, t1):
            return [scale * (x3 + t0**2*t1**3) - 1,
                    scale * (x3 + t0**2*t1**3) + 1]

        def lim3(t0, t1):
            return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]

        def opts0(x1, x2, x3, t0, t1):
            return {'points': [t0 - t1*x1]}

        def opts1(x2, x3, t0, t1):
            return {}

        def opts2(x3, t0, t1):
            return {}

        def opts3(t0, t1):
            return {}

        res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
                    opts=[opts0, opts1, opts2, opts3])
        assert_quad(res, 25.066666666666663)

    def test_square_separate_ranges_and_opts(self):
        def f(y, x):
            return 1.0

        assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)

    def test_square_aliased_ranges_and_opts(self):
        # The same range/opt objects may be reused for several dimensions.
        def f(y, x):
            return 1.0

        r = [-1, 1]
        opt = {}
        assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)

    def test_square_separate_fn_ranges_and_opts(self):
        # Ranges and opts may also be given as callables.
        def f(y, x):
            return 1.0

        def fn_range0(*args):
            return (-1, 1)

        def fn_range1(*args):
            return (-1, 1)

        def fn_opt0(*args):
            return {}

        def fn_opt1(*args):
            return {}

        ranges = [fn_range0, fn_range1]
        opts = [fn_opt0, fn_opt1]
        assert_quad(nquad(f, ranges, opts=opts), 4.0)

    def test_square_aliased_fn_ranges_and_opts(self):
        def f(y, x):
            return 1.0

        def fn_range(*args):
            return (-1, 1)

        def fn_opt(*args):
            return {}

        ranges = [fn_range, fn_range]
        opts = [fn_opt, fn_opt]
        assert_quad(nquad(f, ranges, opts=opts), 4.0)

    def test_matching_quad(self):
        # One-dimensional nquad must agree with plain quad.
        def func(x):
            return x**2 + 1

        res, reserr = quad(func, 0, 4)
        res2, reserr2 = nquad(func, ranges=[[0, 4]])
        assert_almost_equal(res, res2)
        assert_almost_equal(reserr, reserr2)

    def test_matching_dblquad(self):
        def func2d(x0, x1):
            return x0**2 + x1**3 - x0 * x1 + 1

        res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
        res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
        assert_almost_equal(res, res2)
        assert_almost_equal(reserr, reserr2)

    def test_matching_tplquad(self):
        def func3d(x0, x1, x2, c0, c1):
            return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)

        res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
                      lambda x, y: -np.pi, lambda x, y: np.pi,
                      args=(2, 3))
        res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
        assert_almost_equal(res, res2)

    def test_dict_as_opts(self):
        # A single dict (rather than a list of dicts) must be accepted
        # for `opts` without raising.
        # NOTE(review): `out` is unused and only TypeError is trapped;
        # this could be expressed more directly without try/except.
        try:
            out = nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
        except(TypeError):
            assert False
@@ -0,0 +1,233 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import cos, sin, pi
from numpy.testing import assert_equal, \
assert_almost_equal, assert_allclose, assert_
from scipy._lib._numpy_compat import suppress_warnings
from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
cumtrapz, quad, simps, fixed_quad)
from scipy.integrate.quadrature import AccuracyWarning
class TestFixedQuad(object):
    """fixed_quad with n points is exact for polynomials up to degree 2n-1."""

    def test_scalar(self):
        order = 4

        def integrand(x):
            # Highest-degree monomial the rule still integrates exactly.
            return x**(2*order - 1)

        # quadrature exact for this input
        result, _ = fixed_quad(integrand, 0, 1, n=order)
        assert_allclose(result, 1/(2*order), rtol=1e-12)

    def test_vector(self):
        order = 4
        powers = np.arange(1, 2*order)

        def integrand(x):
            # Vector-valued integrand: one row per power of x.
            return x**powers[:, None]

        result, _ = fixed_quad(integrand, 0, 1, n=order)
        assert_allclose(result, 1/(powers + 1), rtol=1e-12)
class TestQuadrature(object):
    """Tests for quadrature, romberg, romb, newton_cotes and simps."""

    def quad(self, x, a, b, args):
        # Placeholder; not exercised by these tests.
        raise NotImplementedError

    def test_quadrature(self):
        # Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n*x-z*sin(x))/pi
        val, err = quadrature(myfunc, 0, pi, (2, 1.8))
        table_val = 0.30614353532540296487
        assert_almost_equal(val, table_val, decimal=7)

    def test_quadrature_rtol(self):
        def myfunc(x, n, z):  # Bessel function integrand
            # Large scale factor exercises the relative-tolerance path.
            return 1e90 * cos(n*x-z*sin(x))/pi
        val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
        table_val = 1e90 * 0.30614353532540296487
        assert_allclose(val, table_val, rtol=1e-10)

    def test_quadrature_miniter(self):
        # Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n*x-z*sin(x))/pi
        table_val = 0.30614353532540296487
        for miniter in [5, 52]:
            val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
            assert_almost_equal(val, table_val, decimal=7)
            assert_(err < 1.0)

    def test_quadrature_single_args(self):
        # A scalar `args` must be handled like a 1-tuple.
        def myfunc(x, n):
            return 1e90 * cos(n*x-1.8*sin(x))/pi
        val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
        table_val = 1e90 * 0.30614353532540296487
        assert_allclose(val, table_val, rtol=1e-10)

    def test_romberg(self):
        # Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n*x-z*sin(x))/pi
        val = romberg(myfunc, 0, pi, args=(2, 1.8))
        table_val = 0.30614353532540296487
        assert_almost_equal(val, table_val, decimal=7)

    def test_romberg_rtol(self):
        # Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return 1e19*cos(n*x-z*sin(x))/pi
        val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
        table_val = 1e19*0.30614353532540296487
        assert_allclose(val, table_val, rtol=1e-10)

    def test_romb(self):
        # integral of y = x over [0, 16] from 17 unit-spaced samples.
        assert_equal(romb(np.arange(17)), 128)

    def test_romb_gh_3731(self):
        # Check that romb makes maximal use of data points
        x = np.arange(2**4+1)
        y = np.cos(0.2*x)
        val = romb(y)
        val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
        assert_allclose(val, val2, rtol=1e-8, atol=0)

        # should be equal to romb with 2**k+1 samples
        with suppress_warnings() as sup:
            sup.filter(AccuracyWarning, "divmax .4. exceeded")
            val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
        assert_allclose(val, val3, rtol=1e-12, atol=0)

    def test_non_dtype(self):
        # Check that we work fine with functions returning float
        import math
        valmath = romberg(math.sin, 0, 1)
        expected_val = 0.45969769413185085
        assert_almost_equal(valmath, expected_val, decimal=7)

    def test_newton_cotes(self):
        """Test the first few degrees, for evenly spaced points."""
        n = 1
        wts, errcoff = newton_cotes(n, 1)
        assert_equal(wts, n*np.array([0.5, 0.5]))
        assert_almost_equal(errcoff, -n**3/12.0)

        n = 2
        wts, errcoff = newton_cotes(n, 1)
        assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
        assert_almost_equal(errcoff, -n**5/2880.0)

        n = 3
        wts, errcoff = newton_cotes(n, 1)
        assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
        assert_almost_equal(errcoff, -n**5/6480.0)

        n = 4
        wts, errcoff = newton_cotes(n, 1)
        assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
        assert_almost_equal(errcoff, -n**7/1935360.0)

    def test_newton_cotes2(self):
        """Test newton_cotes with points that are not evenly spaced."""
        x = np.array([0.0, 1.5, 2.0])
        y = x**2
        wts, errcoff = newton_cotes(x)
        exact_integral = 8.0/3
        numeric_integral = np.dot(wts, y)
        assert_almost_equal(numeric_integral, exact_integral)

        x = np.array([0.0, 1.4, 2.1, 3.0])
        y = x**2
        wts, errcoff = newton_cotes(x)
        exact_integral = 9.0
        numeric_integral = np.dot(wts, y)
        assert_almost_equal(numeric_integral, exact_integral)

    def test_simps(self):
        y = np.arange(17)
        assert_equal(simps(y), 128)
        assert_equal(simps(y, dx=0.5), 64)
        assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32)

        # 4 samples (even count): the three `even` strategies differ.
        y = np.arange(4)
        x = 2**y
        assert_equal(simps(y, x=x, even='avg'), 13.875)
        assert_equal(simps(y, x=x, even='first'), 13.75)
        assert_equal(simps(y, x=x, even='last'), 14)
class TestCumtrapz(object):
def test_1d(self):
x = np.linspace(-2, 2, num=5)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = [0., -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, x, initial=None)
assert_allclose(y_int, y_expected[1:])
    def test_y_nd_x_nd(self):
        """cumtrapz where y and x are 3-D arrays of the same shape."""
        x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
        y = x
        y_int = cumtrapz(y, x, initial=0)
        y_expected = np.array([[[0., 0.5, 2., 4.5],
                                [0., 4.5, 10., 16.5]],
                               [[0., 8.5, 18., 28.5],
                                [0., 12.5, 26., 40.5]],
                               [[0., 16.5, 34., 52.5],
                                [0., 20.5, 42., 64.5]]])
        assert_allclose(y_int, y_expected)

        # Try with all axes
        shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
        for axis, shape in zip([0, 1, 2], shapes):
            # With an initial value the integrated axis keeps its length;
            # without one it shrinks by one sample.
            y_int = cumtrapz(y, x, initial=3.45, axis=axis)
            assert_equal(y_int.shape, (3, 2, 4))
            y_int = cumtrapz(y, x, initial=None, axis=axis)
            assert_equal(y_int.shape, shape)
    def test_y_nd_x_1d(self):
        """cumtrapz with a 3-D y and a 1-D x broadcast along each axis."""
        y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
        x = np.arange(4)**2
        # Try with all axes
        ys_expected = (
            np.array([[[4., 5., 6., 7.],
                       [8., 9., 10., 11.]],
                      [[40., 44., 48., 52.],
                       [56., 60., 64., 68.]]]),
            np.array([[[2., 3., 4., 5.]],
                      [[10., 11., 12., 13.]],
                      [[18., 19., 20., 21.]]]),
            np.array([[[0.5, 5., 17.5],
                       [4.5, 21., 53.5]],
                      [[8.5, 37., 89.5],
                       [12.5, 53., 125.5]],
                      [[16.5, 69., 161.5],
                       [20.5, 85., 197.5]]]))

        for axis, y_expected in zip([0, 1, 2], ys_expected):
            # x is truncated to the length of the integrated axis.
            y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None)
            assert_allclose(y_int, y_expected)
def test_x_none(self):
y = np.linspace(-2, 2, num=5)
y_int = cumtrapz(y)
y_expected = [-1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, initial=1.23)
y_expected = [1.23, -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3)
y_expected = [-4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3, initial=1.23)
y_expected = [1.23, -4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)