pruned venvs

This commit is contained in:
d3m1g0d
2019-03-12 21:57:16 +01:00
parent 33f0511081
commit e441f4f7f7
5988 changed files with 0 additions and 1353666 deletions
@@ -1,668 +0,0 @@
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are
noted.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning NaNs or other appropriate values.
Some of the special function routines can emit warnings or raise
exceptions when an error occurs. By default this is disabled; to
query and control the current error handling state the following
functions are provided.
.. autosummary::
:toctree: generated/
geterr -- Get the current way of handling special-function errors.
seterr -- Set how special-function errors are handled.
errstate -- Context manager for special-function error handling.
SpecialFunctionWarning -- Warning that can be emitted by special functions.
SpecialFunctionError -- Exception that can be raised by special functions.
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions and their derivatives.
ai_zeros -- [+]Compute `nt` zeros and values of the Airy function Ai and its derivative.
bi_zeros -- [+]Compute `nt` zeros and values of the Airy function Bi and its derivative.
itairy -- Integrals of Airy functions
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1
ellipkinc -- Incomplete elliptic integral of the first kind
ellipe -- Complete elliptic integral of the second kind
ellipeinc -- Incomplete elliptic integral of the second kind
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of the first kind of real order and complex argument.
jve -- Exponentially scaled Bessel function of order `v`.
yn -- Bessel function of the second kind of integer order and real argument.
yv -- Bessel function of the second kind of real order and complex argument.
yve -- Exponentially scaled Bessel function of the second kind of real order.
kn -- Modified Bessel function of the second kind of integer order `n`
kv -- Modified Bessel function of the second kind of real order `v`
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function of the first kind of real order.
ive -- Exponentially scaled modified Bessel function of the first kind
hankel1 -- Hankel function of the first kind
hankel1e -- Exponentially scaled Hankel function of the first kind
hankel2 -- Hankel function of the second kind
hankel2e -- Exponentially scaled Hankel function of the second kind
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Jahnke-Emden Lambda function, Lambdav(x).
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Compute zeros of integer-order Bessel functions Jn and Jn'.
jnyn_zeros -- [+]Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
jn_zeros -- [+]Compute zeros of integer-order Bessel function Jn(x).
jnp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Jn'(x).
yn_zeros -- [+]Compute zeros of integer-order Bessel function Yn(x).
ynp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Yn'(x).
y0_zeros -- [+]Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
y1_zeros -- [+]Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
y1p_zeros -- [+]Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of the first kind of order 0.
j1 -- Bessel function of the first kind of order 1.
y0 -- Bessel function of the second kind of order 0.
y1 -- Bessel function of the second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
k0e -- Exponentially scaled modified Bessel function K of order 0
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
k1e -- Exponentially scaled modified Bessel function K of order 1
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Integrals of Bessel functions of order 0
it2j0y0 -- Integrals related to Bessel functions of order 0
iti0k0 -- Integrals of modified Bessel functions of order 0
it2i0k0 -- Integrals related to modified Bessel functions of order 0
besselpoly -- [+]Weighted integral of a Bessel function.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Compute Riccati-Bessel function of the first kind and its derivative.
riccati_yn -- [+]Compute Riccati-Bessel function of the second kind and its derivative.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function.
modstruve -- Modified Struve function.
itstruve0 -- Integral of the Struve function of order 0.
it2struve0 -- Integral related to the Struve function of order 0.
itmodstruve0 -- Integral of the modified Struve function of order 0.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Binomial distribution cumulative distribution function.
bdtrc -- Binomial distribution survival function.
bdtri -- Inverse function to `bdtr` with respect to `p`.
bdtrik -- Inverse function to `bdtr` with respect to `k`.
bdtrin -- Inverse function to `bdtr` with respect to `n`.
btdtr -- Cumulative density function of the beta distribution.
btdtri -- The `p`-th quantile of the beta distribution.
btdtria -- Inverse of `btdtr` with respect to `a`.
btdtrib -- Inverse of `btdtr` with respect to `b`.
fdtr -- F cumulative distribution function.
fdtrc -- F survival function.
fdtri -- The `p`-th quantile of the F-distribution.
fdtridfd -- Inverse to `fdtr` vs dfd
gdtr -- Gamma distribution cumulative density function.
gdtrc -- Gamma distribution survival function.
gdtria -- Inverse of `gdtr` vs a.
gdtrib -- Inverse of `gdtr` vs b.
gdtrix -- Inverse of `gdtr` vs x.
nbdtr -- Negative binomial cumulative distribution function.
nbdtrc -- Negative binomial survival function.
nbdtri -- Inverse of `nbdtr` vs `p`.
nbdtrik -- Inverse of `nbdtr` vs `k`.
nbdtrin -- Inverse of `nbdtr` vs `n`.
ncfdtr -- Cumulative distribution function of the non-central F distribution.
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
nrdtrimn -- Calculate mean of normal distribution given other params.
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
pdtr -- Poisson cumulative distribution function
pdtrc -- Poisson survival function
pdtri -- Inverse to `pdtr` vs m
pdtrik -- Inverse to `pdtr` vs k
stdtr -- Student t distribution cumulative density function
stdtridf -- Inverse of `stdtr` vs df
stdtrit -- Inverse of `stdtr` vs `t`
chdtr -- Chi square cumulative distribution function
chdtrc -- Chi square survival function
chdtri -- Inverse to `chdtrc`
chdtriv -- Inverse to `chdtr` vs `v`
ndtr -- Gaussian cumulative distribution function.
log_ndtr -- Logarithm of Gaussian cumulative distribution function.
ndtri -- Inverse of `ndtr` vs x
chndtr -- Non-central chi square cumulative distribution function
chndtridf -- Inverse to `chndtr` vs `df`
chndtrinc -- Inverse to `chndtr` vs `nc`
chndtrix -- Inverse to `chndtr` vs `x`
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function
smirnovi -- Inverse to `smirnov`
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution
kolmogi -- Inverse function to `kolmogorov`
tklmbda -- Tukey-Lambda cumulative distribution function
logit -- Logit ufunc for ndarrays.
expit -- Expit ufunc for ndarrays.
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
owens_t -- Owen's T Function.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- Elementwise function for computing entropy.
rel_entr -- Elementwise function for computing relative entropy.
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Regularized lower incomplete gamma function.
gammaincinv -- Inverse to `gammainc`
gammaincc -- Regularized upper incomplete gamma function.
gammainccinv -- Inverse to `gammaincc`
beta -- Beta function.
betaln -- Natural logarithm of absolute value of beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse function to beta integral.
psi -- The digamma function.
rgamma -- Gamma function inverted
polygamma -- Polygamma function n.
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
digamma -- psi(x[, out])
poch -- Rising factorial (z)_m
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Returns the error function of complex argument.
erfc -- Complementary error function, ``1 - erf(x)``.
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
erfi -- Imaginary error function, ``-i erf(i z)``.
erfinv -- Inverse function for erf.
erfcinv -- Inverse function for erfc.
wofz -- Faddeeva function
dawsn -- Dawson's integral.
fresnel -- Fresnel sin and cos integrals
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
modfresnelp -- Modified Fresnel positive integrals
modfresnelm -- Modified Fresnel negative integrals
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Compute nt complex zeros of error function erf(z).
fresnelc_zeros -- [+]Compute nt complex zeros of cosine Fresnel integral C(z).
fresnels_zeros -- [+]Compute nt complex zeros of sine Fresnel integral S(z).
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre function of integer order and real degree.
sph_harm -- Compute spherical harmonics.
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre function of the first kind for complex arguments.
lpn -- [+]Legendre function of the first kind.
lqn -- [+]Legendre function of the second kind.
lpmn -- [+]Sequence of associated Legendre functions of the first kind.
lqmn -- [+]Sequence of associated Legendre functions of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l)
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l)
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
eval_legendre -- Evaluate Legendre polynomial at a point.
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
eval_jacobi -- Evaluate Jacobi polynomial at a point.
eval_laguerre -- Evaluate Laguerre polynomial at a point.
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
The following functions compute roots and quadrature weights for
orthogonal polynomials:
.. autosummary::
:toctree: generated/
roots_legendre -- Gauss-Legendre quadrature.
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
roots_jacobi -- Gauss-Jacobi quadrature.
roots_laguerre -- Gauss-Laguerre quadrature.
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
roots_hermite -- Gauss-Hermite (physicst's) quadrature.
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
The functions below, in turn, return the polynomial coefficients in
:class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.
The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial.
chebyt -- [+]Chebyshev polynomial of the first kind.
chebyu -- [+]Chebyshev polynomial of the second kind.
chebyc -- [+]Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
chebys -- [+]Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
jacobi -- [+]Jacobi polynomial.
laguerre -- [+]Laguerre polynomial.
genlaguerre -- [+]Generalized (associated) Laguerre polynomial.
hermite -- [+]Physicist's Hermite polynomial.
hermitenorm -- [+]Normalized (probabilist's) Hermite polynomial.
gegenbauer -- [+]Gegenbauer (ultraspherical) polynomial.
sh_legendre -- [+]Shifted Legendre polynomial.
sh_chebyt -- [+]Shifted Chebyshev polynomial of the first kind.
sh_chebyu -- [+]Shifted Chebyshev polynomial of the second kind.
sh_jacobi -- [+]Shifted Jacobi polynomial.
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x)
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind
hyp0f1 -- Confluent hypergeometric limit function 0F1.
hyp2f0 -- Hypergeometric function 2F0 in y and an error estimate
hyp1f2 -- Hypergeometric function 1F2 and error estimate
hyp3f0 -- Hypergeometric function 3F0 in y and an error estimate
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function D
pbvv -- Parabolic cylinder function V
pbwa -- Parabolic cylinder function W
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Parabolic cylinder functions Dv(x) and derivatives.
pbvv_seq -- [+]Parabolic cylinder functions Vv(x) and derivatives.
pbdn_seq -- [+]Parabolic cylinder functions Dn(z) and derivatives.
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic value of even Mathieu functions
mathieu_b -- Characteristic value of odd Mathieu functions
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions.
mathieu_odd_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions.
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function and its derivative
mathieu_sem -- Odd Mathieu function and its derivative
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative
pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
pro_cv -- Characteristic value of prolate spheroidal function
obl_cv -- Characteristic value of oblate spheroidal function
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- Kelvin functions as complex numbers
kelvin_zeros -- [+]Compute nt zeros of all Kelvin functions.
ber -- Kelvin function ber.
bei -- Kelvin function bei
berp -- Derivative of the Kelvin function `ber`
beip -- Derivative of the Kelvin function `bei`
ker -- Kelvin function ker
kei -- Kelvin function kei
kerp -- Derivative of the Kelvin function ker
keip -- Derivative of the Kelvin function kei
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- [+]Compute nt zeros of the Kelvin function ber(x).
bei_zeros -- [+]Compute nt zeros of the Kelvin function bei(x).
berp_zeros -- [+]Compute nt zeros of the Kelvin function ber'(x).
beip_zeros -- [+]Compute nt zeros of the Kelvin function bei'(x).
ker_zeros -- [+]Compute nt zeros of the Kelvin function ker(x).
kei_zeros -- [+]Compute nt zeros of the Kelvin function kei(x).
kerp_zeros -- [+]Compute nt zeros of the Kelvin function ker'(x).
keip_zeros -- [+]Compute nt zeros of the Kelvin function kei'(x).
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]The number of combinations of N things taken k at a time.
perm -- [+]Permutations of N things taken k at a time, i.e., k-permutations of N.
Lambert W and Related Functions
-------------------------------
.. autosummary::
:toctree: generated/
lambertw -- Lambert W function.
wrightomega -- Wright Omega function.
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic, Geometric Mean.
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
binom -- Binomial coefficient
diric -- Periodic sinc function, also called the Dirichlet function.
euler -- Euler numbers E0..En (inclusive).
expn -- Exponential integral E_n
exp1 -- Exponential integral E_1 of complex argument z
expi -- Exponential integral Ei
factorial -- The factorial of a number or array of numbers.
factorial2 -- Double factorial.
factorialk -- [+]Multifactorial of n of order k, n(!!...!).
shichi -- Hyperbolic sine and cosine integrals.
sici -- Sine and cosine integrals.
softmax -- Softmax function.
spence -- Spence's function, also known as the dilogarithm.
zeta -- Riemann zeta function.
zetac -- Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root of `x`
exp10 -- 10**x
exp2 -- 2**x
radian -- Convert from degrees to radians
cosdg -- Cosine of the angle `x` given in degrees.
sindg -- Sine of angle given in degrees
tandg -- Tangent of angle x given in degrees.
cotdg -- Cotangent of the angle `x` given in degrees.
log1p -- Calculates log(1+x) for use when `x` is near zero
expm1 -- exp(x) - 1 for use when `x` is near zero.
cosm1 -- cos(x) - 1 for use when `x` is near zero.
round -- Round to nearest integer
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
logsumexp -- Compute the log of the sum of exponentials of input elements.
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
sinc -- Return the sinc function.
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import

# Warning/exception types used by the special-function error handling
# machinery (geterr/seterr/errstate).
from .sf_error import SpecialFunctionWarning, SpecialFunctionError

# Core implementations: Cython-generated ufuncs plus pure-Python wrappers.
# Import order matters — later modules build on names defined earlier.
from ._ufuncs import *
from .basic import *
from ._logsumexp import logsumexp, softmax
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
from ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,
                                spherical_kn)

# Wrap the 2F0/1F2/3F0 hypergeometric ufuncs so calling them emits a
# DeprecationWarning (deprecated as of SciPy 1.2, per the messages below).
from numpy import deprecate
hyp2f0 = deprecate(hyp2f0, message="hyp2f0 is deprecated in SciPy 1.2")
hyp1f2 = deprecate(hyp1f2, message="hyp1f2 is deprecated in SciPy 1.2")
hyp3f0 = deprecate(hyp3f0, message="hyp3f0 is deprecated in SciPy 1.2")
del deprecate  # keep helper names out of the public namespace

# Public API: every non-underscore name defined/imported above.
__all__ = [s for s in dir() if not s.startswith('_')]

# Register SciPy's i0 with numpy.dual so numpy can dispatch to it.
from numpy.dual import register_func
register_func('i0',i0)
del register_func

# Expose ``scipy.special.test()`` for running this subpackage's test suite.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
@@ -1,209 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from ._ufuncs import _ellip_harm
from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm
def ellip_harm(h2, k2, n, p, s, signm=1, signn=1):
    r"""
    Ellipsoidal harmonic functions E^p_n(l)

    These are also known as Lame functions of the first kind, and are
    solutions to the Lame equation:

    .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0

    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
    returned) corresponding to the solutions.

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree
    s : float
        Coordinate
    p : int
        Order, can range between [1,2n+1]
    signm : {1, -1}, optional
        Sign of prefactor of functions. Can be +/-1. See Notes.
    signn : {1, -1}, optional
        Sign of prefactor of functions. Can be +/-1. See Notes.

    Returns
    -------
    E : float
        the harmonic :math:`E^p_n(s)`

    See Also
    --------
    ellip_harm_2, ellip_normal

    Notes
    -----
    The geometric interpretation of the ellipsoidal functions is
    explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments
    control the sign of prefactors for functions according to their
    type::

        K : +1
        L : signm
        M : signn
        N : signm*signn

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Digital Library of Mathematical Functions 29.12
       https://dlmf.nist.gov/29.12
    .. [2] Bardhan and Knepley, "Computational science and
       re-discovery: open-source implementations of
       ellipsoidal harmonics for problems in potential theory",
       Comput. Sci. Disc. 5, 014006 (2012)
       :doi:`10.1088/1749-4699/5/1/014006`.
    .. [3] David J.and Dechambre P, "Computation of Ellipsoidal
       Gravity Field Harmonics for small solar system bodies"
       pp. 30-36, 2000
    .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications"
       pp. 418, 2012

    Examples
    --------
    >>> from scipy.special import ellip_harm
    >>> w = ellip_harm(5,8,1,1,2.5)
    >>> w
    2.5

    Check that the functions indeed are solutions to the Lame equation:

    >>> from scipy.interpolate import UnivariateSpline
    >>> def eigenvalue(f, df, ddf):
    ...     r = ((s**2 - h**2)*(s**2 - k**2)*ddf + s*(2*s**2 - h**2 - k**2)*df - n*(n+1)*s**2*f)/f
    ...     return -r.mean(), r.std()
    >>> s = np.linspace(0.1, 10, 200)
    >>> k, h, n, p = 8.0, 2.2, 3, 2
    >>> E = ellip_harm(h**2, k**2, n, p, s)
    >>> E_spl = UnivariateSpline(s, E)
    >>> a, a_err = eigenvalue(E_spl(s), E_spl(s,1), E_spl(s,2))
    >>> a, a_err
    (583.44366156701483, 6.4580890640310646e-11)
    """
    # Thin wrapper: all numerical work happens in the Cython ufunc.
    return _ellip_harm(h2, k2, n, p, s, signm, signn)
# Vectorize the scalar quadrature routine so ellip_harm_2 accepts array
# arguments; otypes='d' pins the output dtype to float64.
_ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d')
def ellip_harm_2(h2, k2, n, p, s):
    r"""
    Ellipsoidal harmonic functions F^p_n(l)

    These are also known as Lame functions of the second kind, and are
    solutions to the Lame equation:

    .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0

    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
    returned) corresponding to the solutions.

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree.
    p : int
        Order, can range between [1,2n+1].
    s : float
        Coordinate

    Returns
    -------
    F : float
        The harmonic :math:`F^p_n(s)`

    Notes
    -----
    Lame functions of the second kind are related to the functions of the
    first kind:

    .. math::

       F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s}\frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}}

    .. versionadded:: 0.15.0

    See Also
    --------
    ellip_harm, ellip_normal

    Examples
    --------
    >>> from scipy.special import ellip_harm_2
    >>> w = ellip_harm_2(5,8,2,1,10)
    >>> w
    0.00108056853382
    """
    # The underlying quadrature can emit harmless floating-point warnings;
    # suppress them only for the duration of the kernel call.
    with np.errstate(all='ignore'):
        value = _ellip_harm_2_vec(h2, k2, n, p, s)
    return value
# Vectorized wrapper around the Cython normalization-constant routine, so
# ellip_normal accepts array arguments; otypes='d' forces a float64 result.
_ellip_normal_vec = np.vectorize(_ellipsoid_norm, otypes='d')
def ellip_normal(h2, k2, n, p):
    r"""
    Ellipsoidal harmonic normalization constants gamma^p_n

    The normalization constant is defined as

    .. math::

       \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy\frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}{\sqrt((k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)}

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree.
    p : int
        Order, can range between [1,2n+1].

    Returns
    -------
    gamma : float
        The normalization constant :math:`\gamma^p_n`

    See Also
    --------
    ellip_harm, ellip_harm_2

    Notes
    -----
    .. versionadded:: 0.15.0

    Examples
    --------
    >>> from scipy.special import ellip_normal
    >>> w = ellip_normal(5,8,3,7)
    >>> w
    1723.38796997
    """
    # Silence benign floating-point warnings raised while the vectorized
    # quadrature evaluates the double integral.
    with np.errstate(all='ignore'):
        gamma = _ellip_normal_vec(h2, k2, n, p)
    return gamma
File diff suppressed because it is too large Load Diff
@@ -1,215 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib._util import _asarray_validated
__all__ = ["logsumexp", "softmax"]
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
    """Compute the log of the sum of exponentials of input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken. By default `axis` is None,
        and all elements are summed.

        .. versionadded:: 0.11.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array.

        .. versionadded:: 0.15.0
    b : array-like, optional
        Scaling factor for exp(`a`) must be of the same shape as `a` or
        broadcastable to `a`. These values may be negative in order to
        implement subtraction.

        .. versionadded:: 0.12.0
    return_sign : bool, optional
        If this is set to True, the result will be a pair containing sign
        information; if False, results that are negative will be returned
        as NaN. Default is False (no sign information).

        .. versionadded:: 0.16.0

    Returns
    -------
    res : ndarray
        The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
        more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
        is returned.
    sgn : ndarray
        If return_sign is True, this will be an array of floating-point
        numbers matching res and +1, 0, or -1 depending on the sign
        of the result. If False, only one result is returned.

    See Also
    --------
    numpy.logaddexp, numpy.logaddexp2

    Notes
    -----
    Numpy has a logaddexp function which is very similar to `logsumexp`, but
    only handles two arguments. `logaddexp.reduce` is similar to this
    function, but may be less stable.

    Examples
    --------
    >>> from scipy.special import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107

    With weights

    >>> a = np.arange(10)
    >>> b = np.arange(10, 0, -1)
    >>> logsumexp(a, b=b)
    9.9170178533034665
    >>> np.log(np.sum(b*np.exp(a)))
    9.9170178533034647

    Returning a sign flag

    >>> logsumexp([1,2],b=[1,-1],return_sign=True)
    (1.5413248546129181, -1.0)

    Notice that `logsumexp` does not directly support masked arrays. To use it
    on a masked array, convert the mask into zero weights:

    >>> a = np.ma.array([np.log(2), 2, np.log(3)],
    ...                 mask=[False, True, False])
    >>> b = (~a.mask).astype(int)
    >>> logsumexp(a.data, b=b), np.log(5)
    1.6094379124341005, 1.6094379124341005

    """
    a = _asarray_validated(a, check_finite=False)
    if b is not None:
        a, b = np.broadcast_arrays(a, b)
        if np.any(b == 0):
            # Zero weights must contribute nothing to the sum; setting the
            # corresponding entries of `a` to -inf makes exp() yield 0 there.
            a = a + 0.  # promote to at least float
            a[b == 0] = -np.inf

    # Shift by the maximum so the largest exponentiated term is exp(0) = 1,
    # preventing overflow in np.exp below.
    a_max = np.amax(a, axis=axis, keepdims=True)

    # A non-finite maximum (all -inf, or inf/nan present) would poison the
    # subtraction `a - a_max`; shift by 0 instead in that case.
    if a_max.ndim > 0:
        a_max[~np.isfinite(a_max)] = 0
    elif not np.isfinite(a_max):
        a_max = 0

    if b is not None:
        b = np.asarray(b)
        tmp = b * np.exp(a - a_max)
    else:
        tmp = np.exp(a - a_max)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        s = np.sum(tmp, axis=axis, keepdims=keepdims)
        if return_sign:
            sgn = np.sign(s)
            s *= sgn  # /= makes more sense but we need zero -> zero
        out = np.log(s)

    if not keepdims:
        a_max = np.squeeze(a_max, axis=axis)
    # Undo the stability shift applied before exponentiation.
    out += a_max

    if return_sign:
        return out, sgn
    else:
        return out
def softmax(x, axis=None):
    r"""
    Softmax function

    Transforms each element of a collection by computing the exponential of
    each element divided by the sum of the exponentials of all the elements
    along `axis`:

    .. math:: \sigma(x)_j = \frac{e^{x_j}}{\sum_k e^{x_k}}

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or tuple of ints, optional
        Axis to compute values along. Default is None and softmax will be
        computed over the entire array `x`.

    Returns
    -------
    s : ndarray
        An array the same shape as `x`. The result will sum to 1 along the
        specified axis.

    Notes
    -----
    The `softmax` function is the gradient of `logsumexp`.

    .. versionadded:: 1.2.0

    Examples
    --------
    >>> from scipy.special import softmax
    >>> m = softmax(np.array([[1, 0.5, 0.2, 3],
    ...                       [1, -1, 7, 3],
    ...                       [2, 12, 13, 3]]), axis=1)
    >>> m.sum(axis=1)
    array([ 1., 1., 1.])
    """
    # Work in log space for numerical stability: exp(x - logsumexp(x))
    # equals exp(x)/sum(exp(x)) but cannot overflow for large x.
    log_normalizer = logsumexp(x, axis=axis, keepdims=True)
    return np.exp(x - log_normalizer)
@@ -1,455 +0,0 @@
from __future__ import division, print_function, absolute_import
import os
import sys
import time
import numpy as np
from numpy.testing import assert_
import pytest
from scipy._lib.six import reraise
from scipy.special._testutils import assert_func_equal
try:
import mpmath
except ImportError:
pass
# ------------------------------------------------------------------------------
# Machinery for systematic tests with mpmath
# ------------------------------------------------------------------------------
class Arg(object):
    """Generate a set of numbers on the real axis, concentrating on
    'interesting' regions and covering all orders of magnitude.
    """

    def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True):
        if a > b:
            raise ValueError("a should be less than or equal to b")
        # Clamp infinite endpoints to half the largest representable double
        # so endpoint arithmetic cannot overflow.
        if a == -np.inf:
            a = -0.5*np.finfo(float).max
        if b == np.inf:
            b = 0.5*np.finfo(float).max
        self.a, self.b = a, b
        self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b

    def _positive_values(self, a, b, n):
        """Return n sorted points in [a, b] with 0 <= a: linear spacing
        below 10, logarithmic spacing above."""
        if a < 0:
            raise ValueError("a should be positive")

        # Half the points (rounded down) go into a logspace; the remainder
        # into a linspace between a and 10.
        n_log = n//2
        n_lin = n - n_log

        if a >= 10:
            # Entirely above the linspace range; a single logspace suffices.
            pts = np.logspace(np.log10(a), np.log10(b), n)
        elif a > 0 and b < 10:
            # Entirely below the logspace range; a single linspace suffices.
            pts = np.linspace(a, b, n)
        elif a > 0:
            # Linspace from a up to 10, then a logspace from 10 up to b.
            pts = np.hstack((np.linspace(a, 10, n_lin, endpoint=False),
                             np.logspace(1, np.log10(b), n_log)))
        elif a == 0 and b <= 10:
            # Linspace from 0 to b, plus a logspace filling the gap between
            # 0 and the smallest positive linspace point.
            lin = np.linspace(0, b, n_lin)
            upper = np.log10(lin[1]) if lin.size > 1 else -30
            pts = np.hstack((np.logspace(-30, upper, n_log, endpoint=False),
                             lin))
        else:
            # a == 0 and b > 10: linspace from 0 to 10, a logspace between 0
            # and the smallest positive linspace point, and a logspace from
            # 10 up to b.
            n_log_small = n_log//2
            n_log_large = n_log - n_log_small
            lin = np.linspace(0, 10, n_lin, endpoint=False)
            upper = np.log10(lin[1]) if lin.size > 1 else -30
            pts = np.hstack((
                np.logspace(-30, upper, n_log_small, endpoint=False),
                lin,
                np.logspace(1, np.log10(b), n_log_large),
            ))

        return np.sort(pts)

    def values(self, n):
        """Return an array containing n numbers."""
        a, b = self.a, self.b
        if a == b:
            return np.zeros(n)

        # Exclusive endpoints are realized by generating one extra point per
        # excluded end and trimming afterwards.
        if not self.inclusive_a:
            n += 1
        if not self.inclusive_b:
            n += 1

        n_pos = n//2
        n_neg = n - n_pos

        if a >= 0:
            pospts = self._positive_values(a, b, n)
            negpts = []
        elif b <= 0:
            pospts = []
            negpts = -self._positive_values(-b, -a, n)
        else:
            pospts = self._positive_values(0, b, n_pos)
            negpts = -self._positive_values(0, -a, n_neg + 1)
            # Don't want to get zero twice
            negpts = negpts[1:]
        pts = np.hstack((negpts[::-1], pospts))

        if not self.inclusive_a:
            pts = pts[1:]
        if not self.inclusive_b:
            pts = pts[:-1]
        return pts
class FixedArg(object):
    """Argument generator that always yields a fixed, user-supplied set."""

    def __init__(self, values):
        self._values = np.asarray(values)

    def values(self, n):
        # The requested count is ignored: the fixed set is returned as-is.
        return self._values
class ComplexArg(object):
    """Generate complex numbers on a grid spanning [a, b] on both axes."""

    def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)):
        self.real = Arg(a.real, b.real)
        self.imag = Arg(a.imag, b.imag)

    def values(self, n):
        # Roughly sqrt(n) points per axis; the outer sum yields ~n values.
        m = int(np.floor(np.sqrt(n)))
        re = self.real.values(m)
        im = self.imag.values(m + 1)
        return (re[:,None] + 1j*im[None,:]).ravel()
class IntArg(object):
    """Generate integers in [a, b), always including the values -5..4
    that fall inside the range."""

    def __init__(self, a=-1000, b=1000):
        self.a = a
        self.b = b

    def values(self, n):
        # Sample the real axis, truncate to integers, then merge in the
        # small integers around zero, which are always interesting.
        sampled = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int)
        small = np.arange(-5, 5)
        merged = np.unique(np.r_[sampled, small])
        return merged[(merged >= self.a) & (merged < self.b)]
def get_args(argspec, n):
    """Build an (npoints, nargs) array of argument tuples from `argspec`.

    `argspec` is either a ready-made ndarray (returned as a copy) or a
    sequence of Arg-like generators whose values are combined on a grid
    of roughly `n` points total.
    """
    if isinstance(argspec, np.ndarray):
        return argspec.copy()

    nargs = len(argspec)
    # Complex axes get a larger share of the point budget (weight 1.5).
    weights = np.asarray([1.5 if isinstance(spec, ComplexArg) else 1.0
                          for spec in argspec])
    counts = (n**(weights/sum(weights))).astype(int) + 1
    columns = [spec.values(m) for spec, m in zip(argspec, counts)]
    # Full outer product of the per-argument samples, flattened to rows.
    grids = np.broadcast_arrays(*np.ix_(*columns))
    return np.array(grids).reshape(nargs, -1).T
class MpmathData(object):
    """Specification of a scipy-vs-mpmath comparison test.

    Evaluates `scipy_func` and `mpmath_func` over a grid of points built
    from `arg_spec` and asserts the results agree to the given tolerances.
    """
    def __init__(self, scipy_func, mpmath_func, arg_spec, name=None,
                 dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300,
                 ignore_inf_sign=False, distinguish_nan_and_inf=True,
                 nan_ok=True, param_filter=None):

        # mpmath tests are really slow (see gh-6989). Use a small number of
        # points by default, increase back to 5000 (old default) if XSLOW is
        # set
        if n is None:
            try:
                is_xslow = int(os.environ.get('SCIPY_XSLOW', '0'))
            except ValueError:
                is_xslow = False

            n = 5000 if is_xslow else 500

        self.scipy_func = scipy_func
        self.mpmath_func = mpmath_func
        self.arg_spec = arg_spec
        self.dps = dps    # mpmath working precision, decimal digits (or None)
        self.prec = prec  # mpmath working precision, bits (or None)
        self.n = n        # number of test points
        self.rtol = rtol
        self.atol = atol
        self.ignore_inf_sign = ignore_inf_sign
        self.nan_ok = nan_ok
        if isinstance(self.arg_spec, np.ndarray):
            self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating)
        else:
            self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec])
        # NOTE(review): redundant re-assignment; ignore_inf_sign was already
        # stored a few lines above.
        self.ignore_inf_sign = ignore_inf_sign
        self.distinguish_nan_and_inf = distinguish_nan_and_inf
        # Derive a readable test name, preferring the scipy function's name
        # and skipping anonymous '<lambda>' names.
        if not name or name == '<lambda>':
            name = getattr(scipy_func, '__name__', None)
        if not name or name == '<lambda>':
            name = getattr(mpmath_func, '__name__', None)
        self.name = name
        self.param_filter = param_filter

    def check(self):
        """Run the scipy-vs-mpmath comparison over generated arguments."""
        np.random.seed(1234)

        # Generate values for the arguments
        argarr = get_args(self.arg_spec, self.n)

        # Check
        old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
        try:
            if self.dps is not None:
                dps_list = [self.dps]
            else:
                dps_list = [20]
            if self.prec is not None:
                mpmath.mp.prec = self.prec

            # Proper casting of mpmath input and output types. Using
            # native mpmath types as inputs gives improved precision
            # in some cases.
            if np.issubdtype(argarr.dtype, np.complexfloating):
                pytype = mpc2complex

                def mptype(x):
                    return mpmath.mpc(complex(x))
            else:
                def mptype(x):
                    return mpmath.mpf(float(x))

                def pytype(x):
                    # A significant imaginary part means the mpmath result
                    # is not representable as a real double.
                    if abs(x.imag) > 1e-16*(1 + abs(x.real)):
                        return np.nan
                    else:
                        return mpf2float(x.real)

            # Try out different dps until one (or none) works
            for j, dps in enumerate(dps_list):
                mpmath.mp.dps = dps

                try:
                    assert_func_equal(self.scipy_func,
                                      lambda *a: pytype(self.mpmath_func(*map(mptype, a))),
                                      argarr,
                                      vectorized=False,
                                      rtol=self.rtol, atol=self.atol,
                                      ignore_inf_sign=self.ignore_inf_sign,
                                      distinguish_nan_and_inf=self.distinguish_nan_and_inf,
                                      nan_ok=self.nan_ok,
                                      param_filter=self.param_filter)
                    break
                except AssertionError:
                    if j >= len(dps_list)-1:
                        # Last precision attempt failed: re-raise preserving
                        # the original traceback.
                        reraise(*sys.exc_info())
        finally:
            # Always restore the global mpmath precision settings.
            mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec

    def __repr__(self):
        if self.is_complex:
            return "<MpmathData: %s (complex)>" % (self.name,)
        else:
            return "<MpmathData: %s>" % (self.name,)
def assert_mpmath_equal(*a, **kw):
    """Assert a scipy function matches an mpmath reference.

    Thin convenience wrapper: builds an MpmathData comparison from the
    given arguments and runs it immediately.
    """
    MpmathData(*a, **kw).check()
def nonfunctional_tooslow(func):
    """Decorator marking a test as skipped: not yet functional (too slow)."""
    reason = " Test not yet functional (too slow), needs more work."
    return pytest.mark.skip(reason=reason)(func)
# ------------------------------------------------------------------------------
# Tools for dealing with mpmath quirks
# ------------------------------------------------------------------------------
def mpf2float(x):
    """
    Convert an mpf to the nearest floating point number. Just using
    float directly doesn't work because of results like this:

        with mp.workdps(50):
            float(mpf("0.99999999999999999")) = 0.9999999999999999

    """
    # Round-trip through a 17-significant-digit decimal string; 17 digits
    # are enough to uniquely identify every IEEE double.
    text = mpmath.nstr(x, 17, min_fixed=0, max_fixed=0)
    return float(text)
def mpc2complex(x):
    """Convert an mpmath mpc to the nearest Python complex."""
    re = mpf2float(x.real)
    im = mpf2float(x.imag)
    return complex(re, im)
def trace_args(func):
    """Decorator that logs each call's arguments and result to stderr."""
    def _as_number(val):
        # mpmath scalars don't repr compactly; convert to builtin types.
        return complex(val) if isinstance(val, mpmath.mpc) else float(val)

    def traced(*args, **kwargs):
        sys.stderr.write("%r: " % (tuple(map(_as_number, args)),))
        sys.stderr.flush()
        try:
            result = func(*args, **kwargs)
            sys.stderr.write("-> %r" % result)
        finally:
            # Terminate the line even when the call raised.
            sys.stderr.write("\n")
            sys.stderr.flush()
        return result
    return traced
try:
import posix
import signal
POSIX = ('setitimer' in dir(signal))
except ImportError:
POSIX = False
class TimeoutError(Exception):
    """Raised internally by `time_limited` when the timeout expires.

    NOTE(review): shadows the builtin TimeoutError on Python 3; the name is
    kept for backward compatibility with existing callers.
    """
    pass
def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True):
    """
    Decorator for setting a timeout for pure-Python functions.

    If the function does not return within `timeout` seconds, the
    value `return_val` is returned instead.

    On POSIX this uses SIGALRM by default. On non-POSIX, settrace is
    used. Do not use this with threads: the SIGALRM implementation
    does probably not work well. The settrace implementation only
    traces the current thread.

    The settrace implementation slows down execution speed. Slowdown
    by a factor around 10 is probably typical.
    """
    if POSIX and use_sigalrm:
        # SIGALRM implementation: an interval timer delivers SIGALRM after
        # `timeout` seconds, and the handler aborts the call by raising
        # TimeoutError.
        def sigalrm_handler(signum, frame):
            raise TimeoutError()

        def deco(func):
            def wrap(*a, **kw):
                old_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
                signal.setitimer(signal.ITIMER_REAL, timeout)
                try:
                    return func(*a, **kw)
                except TimeoutError:
                    return return_val
                finally:
                    # Cancel the timer and restore the previous handler,
                    # whether or not the call timed out.
                    signal.setitimer(signal.ITIMER_REAL, 0)
                    signal.signal(signal.SIGALRM, old_handler)
            return wrap
    else:
        # Portable fallback: a trace function checks elapsed wall-clock
        # time on every traced event and aborts once `timeout` is exceeded.
        def deco(func):
            def wrap(*a, **kw):
                start_time = time.time()

                def trace(frame, event, arg):
                    if time.time() - start_time > timeout:
                        raise TimeoutError()
                    return trace
                sys.settrace(trace)
                try:
                    return func(*a, **kw)
                except TimeoutError:
                    sys.settrace(None)
                    return return_val
                finally:
                    sys.settrace(None)
            return wrap
    return deco
def exception_to_nan(func):
    """Decorate function to return nan if it raises an exception"""
    def guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            return np.nan
    return guarded
def inf_to_nan(func):
    """Decorate function to return nan if it returns inf"""
    def guarded(*args, **kwargs):
        result = func(*args, **kwargs)
        # Map any non-finite result (inf, -inf, nan) to nan.
        return result if np.isfinite(result) else np.nan
    return guarded
def mp_assert_allclose(res, std, atol=0, rtol=1e-17):
    """
    Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
    can be done to higher precision than double.

    Raises an AssertionError listing every index k for which
    ``|res[k] - std[k]| > atol + rtol*|std[k]|``.
    """
    try:
        len(res)
    except TypeError:
        # `res` may be an iterator/generator; materialize it first.
        res = list(res)

    n = len(std)
    if len(res) != n:
        raise AssertionError("Lengths of inputs not equal.")

    failures = []
    for k in range(n):
        try:
            assert_(mpmath.fabs(res[k] - std[k]) <= atol + rtol*mpmath.fabs(std[k]))
        except AssertionError:
            failures.append(k)

    # Number of significant digits to display, derived from rtol.
    ndigits = int(abs(np.log10(rtol)))
    msg = [""]
    msg.append("Bad results ({} out of {}) for the following points:"
               .format(len(failures), n))
    for k in failures:
        resrep = mpmath.nstr(res[k], ndigits, min_fixed=0, max_fixed=0)
        stdrep = mpmath.nstr(std[k], ndigits, min_fixed=0, max_fixed=0)
        if std[k] == 0:
            # Relative difference is undefined for a zero reference value.
            rdiff = "inf"
        else:
            rdiff = mpmath.fabs((res[k] - std[k])/std[k])
            rdiff = mpmath.nstr(rdiff, 3)
        msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff))
    if failures:
        assert_(False, "\n".join(msg))
@@ -1,61 +0,0 @@
"""Precompute the polynomials for the asymptotic expansion of the
generalized exponential integral.
Sources
-------
[1] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/8.20#ii
"""
from __future__ import division, print_function, absolute_import
import os
from scipy._lib._numpy_compat import suppress_warnings
try:
# Can remove when sympy #11255 is resolved; see
# https://github.com/sympy/sympy/issues/11255
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "inspect.getargspec.. is deprecated")
import sympy
from sympy import Poly
x = sympy.symbols('x')
except ImportError:
pass
def generate_A(K):
    """Return the polynomials A_0..A_K of the expn asymptotic expansion.

    Built by the recurrence
    A_{k+1} = (1 - 2kx) A_k + x(x + 1) A_k'.
    """
    polys = [Poly(1, x)]
    for k in range(K):
        prev = polys[k]
        polys.append(Poly(1 - 2*k*x, x)*prev + Poly(x*(x + 1))*prev.diff())
    return polys
WARNING = """\
/* This file was automatically generated by _precompute/expn_asy.py.
* Do not edit it manually!
*/
"""
def main():
    """Regenerate ../cephes/expn.h with the A_k polynomial coefficients."""
    print(__doc__)
    target = os.path.join('..', 'cephes', 'expn.h')

    K = 12
    polys = generate_A(K)

    # Write to a temporary file first, then atomically rename into place.
    tmp_path = target + '.new'
    with open(tmp_path, 'w') as f:
        f.write(WARNING)
        f.write("#define nA {}\n".format(len(polys)))
        for k, Ak in enumerate(polys):
            coeffs = ', '.join([str(c.evalf(18)) for c in Ak.coeffs()])
            f.write("static const double A{}[] = {{{}}};\n".format(k, coeffs))
        names = ", ".join(["A{}".format(k) for k in range(K + 1)])
        f.write("static const double *A[] = {{{}}};\n".format(names))
        degrees = ", ".join([str(Ak.degree()) for Ak in polys])
        f.write("static const int Adegs[] = {{{}}};\n".format(degrees))
    os.rename(tmp_path, target)
if __name__ == "__main__":
main()
@@ -1,119 +0,0 @@
"""
Precompute coefficients of Temme's asymptotic expansion for gammainc.
This takes about 8 hours to run on a 2.3 GHz Macbook Pro with 4GB ram.
Sources:
[1] NIST, "Digital Library of Mathematical Functions",
https://dlmf.nist.gov/
"""
from __future__ import division, print_function, absolute_import
import os
from scipy.special._precompute.utils import lagrange_inversion
try:
import mpmath as mp
except ImportError:
pass
def compute_a(n):
    """a_k from DLMF 5.11.6

    Returns the first `n` coefficients as mpmath values, starting from
    a_0 = sqrt(2)/2.
    """
    a = [mp.sqrt(2)/2]
    for k in range(1, n):
        ak = a[-1]/k
        # NOTE(review): a[-j] indexes from the end of the list built so far,
        # i.e. a[k-j]; confirm against the recurrence in DLMF 5.11.6.
        for j in range(1, len(a)):
            ak -= a[j]*a[-j]/(j + 1)
        ak /= a[0]*(1 + mp.mpf(1)/(k + 1))
        a.append(ak)
    return a
def compute_g(n):
    """g_k from DLMF 5.11.3/5.11.5"""
    # g_k = sqrt(2) * (1/2)_k * a_{2k}, using the a_k from compute_a.
    a = compute_a(2*n)
    return [mp.sqrt(2)*mp.rf(0.5, k)*a[2*k] for k in range(n)]
def eta(lam):
    """Function from DLMF 8.12.1 shifted to be centered at 0."""
    if lam == 0:
        return 0
    # lam - log(1 + lam) >= 0 for all lam > -1, so the sqrt is real;
    # the sign of eta follows the sign of lam.
    magnitude = mp.sqrt(2*(lam - mp.log(lam + 1)))
    return magnitude if lam > 0 else -magnitude
def compute_alpha(n):
    """alpha_n from DLMF 8.12.13"""
    # Invert the Taylor series of eta about 0 via Lagrange inversion.
    eta_coeffs = mp.taylor(eta, 0, n - 1)
    return lagrange_inversion(eta_coeffs)
def compute_d(K, N):
    """d_{k, n} from DLMF 8.12.12

    Returns a list of K rows; row k holds d_{k,0..N-1} as mpmath values.
    """
    # Extra orders are needed because the recurrence for row k consumes
    # entry n+2 of row k-1.
    M = N + 2*K
    d0 = [-mp.mpf(1)/3]
    alpha = compute_alpha(M + 2)
    for n in range(1, M):
        d0.append((n + 2)*alpha[n+2])
    d = [d0]
    g = compute_g(K)
    for k in range(1, K):
        dk = []
        for n in range(M - 2*k):
            dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2])
        d.append(dk)
    # Trim each row to the requested N entries.
    for k in range(K):
        d[k] = d[k][:N]
    return d
header = \
r"""/* This file was automatically generated by _precomp/gammainc.py.
* Do not edit it manually!
*/
#ifndef IGAM_H
#define IGAM_H
#define K {}
#define N {}
static const double d[K][N] =
{{"""
footer = \
r"""
#endif
"""
def main():
    """Compute the d[k][n] table to 50 digits and write ../cephes/igam.h."""
    print(__doc__)
    K = 25
    N = 25
    with mp.workdps(50):
        d = compute_d(K, N)
    fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h')
    # Write to a temporary file first, then atomically rename into place.
    with open(fn + '.new', 'w') as f:
        f.write(header.format(K, N))
        for k, row in enumerate(d):
            formatted = [mp.nstr(v, 17, min_fixed=0, max_fixed=0) for v in row]
            f.write('{')
            f.write(", ".join(formatted))
            # Every row but the last is followed by a comma; the last row
            # also closes the table initializer.
            f.write('},\n' if k < K - 1 else '}};\n')
        f.write(footer)
    os.rename(fn + '.new', fn)
if __name__ == "__main__":
main()
@@ -1,126 +0,0 @@
"""Compute gammainc and gammaincc for large arguments and parameters
and save the values to data files for use in tests. We can't just
compare to mpmath's gammainc in test_mpmath.TestSystematic because it
would take too long.
Note that mpmath's gammainc is computed using hypercomb, but since it
doesn't allow the user to increase the maximum number of terms used in
the series it doesn't converge for many arguments. To get around this
we copy the mpmath implementation but use more terms.
This takes about 17 minutes to run on a 2.3 GHz Macbook Pro with 4GB
ram.
Sources:
[1] Fredrik Johansson and others. mpmath: a Python library for
arbitrary-precision floating-point arithmetic (version 0.19),
December 2013. http://mpmath.org/.
"""
from __future__ import division, print_function, absolute_import
import os
from time import time
import numpy as np
from numpy import pi
from scipy.special._mptestutils import mpf2float
try:
import mpmath as mp
except ImportError:
pass
def gammainc(a, x, dps=50, maxterms=10**8):
    """Compute gammainc exactly like mpmath does but allow for more
    summands in hypercomb. See

        mpmath/functions/expintegrals.py#L134

    in the mpmath github repository.
    """
    with mp.workdps(dps):
        # NOTE(review): `z` is bound to the parameter a, while both `a` and
        # `b` are bound to x — this mirrors mpmath's internal naming for
        # the lower incomplete gamma series; confirm against the mpmath
        # source referenced above.
        z, a, b = mp.mpf(a), mp.mpf(x), mp.mpf(x)
        G = [z]
        negb = mp.fneg(b, exact=True)

        def h(z):
            # Single hypercomb term for the regularized lower gamma.
            T1 = [mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
            return (T1,)

        res = mp.hypercomb(h, [z], maxterms=maxterms)
        return mpf2float(res)
def gammaincc(a, x, dps=50, maxterms=10**8):
    """Compute gammaincc exactly like mpmath does but allow for more
    terms in hypercomb. See

        mpmath/functions/expintegrals.py#L187

    in the mpmath github repository.
    """
    with mp.workdps(dps):
        # NOTE(review): mirrors mpmath's internal naming — `z` is the
        # parameter a and `a` the argument x.
        z, a = a, x

        if mp.isint(z):
            try:
                # mpmath has a fast integer path
                return mpf2float(mp.gammainc(z, a=a, regularized=True))
            except mp.libmp.NoConvergence:
                pass
        nega = mp.fneg(a, exact=True)
        G = [z]
        # Use 2F0 series when possible; fall back to lower gamma representation
        try:
            def h(z):
                r = z-1
                return [([mp.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
            return mpf2float(mp.hypercomb(h, [z], force_series=True))
        except mp.libmp.NoConvergence:
            def h(z):
                # Two-term representation via the lower incomplete gamma.
                T1 = [], [1, z-1], [z], G, [], [], 0
                T2 = [-mp.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
                return T1, T2
            return mpf2float(mp.hypercomb(h, [z], maxterms=maxterms))
def main():
    """Generate gammainc/gammaincc reference data files for the test suite."""
    t0 = time()
    # It would be nice to have data for larger values, but either this
    # requires prohibitively large precision (dps > 800) or mpmath has
    # a bug. For example, gammainc(1e20, 1e20, dps=800) returns a
    # value around 0.03, while the true value should be close to 0.5
    # (DLMF 8.12.15).
    print(__doc__)
    pwd = os.path.dirname(__file__)
    r = np.logspace(4, 14, 30)
    # Polar sampling (a, x) = r*(cos(theta), sin(theta)): the theta ranges
    # keep the points near the transition region a ~ x.
    ltheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(0.6)), 30)
    utheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(1.4)), 30)

    regimes = [(gammainc, ltheta), (gammaincc, utheta)]
    for func, theta in regimes:
        rg, thetag = np.meshgrid(r, theta)
        a, x = rg*np.cos(thetag), rg*np.sin(thetag)
        a, x = a.flatten(), x.flatten()
        dataset = []
        for i, (a0, x0) in enumerate(zip(a, x)):
            if func == gammaincc:
                # Exploit the fast integer path in gammaincc whenever
                # possible so that the computation doesn't take too
                # long
                a0, x0 = np.floor(a0), np.floor(x0)
            dataset.append((a0, x0, func(a0, x0)))
        dataset = np.array(dataset)
        filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
                                '{}.txt'.format(func.__name__))
        np.savetxt(filename, dataset)

    print("{} minutes elapsed".format((time() - t0)/60))
if __name__ == "__main__":
main()
@@ -1,72 +0,0 @@
"""Compute a Pade approximation for the principle branch of the
Lambert W function around 0 and compare it to various other
approximations.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
try:
import mpmath
import matplotlib.pyplot as plt
except ImportError:
pass
def lambertw_pade():
    """Return (p, q): Pade-(3, 2) coefficients for lambertw around 0."""
    # Taylor coefficients come from the first six derivatives at 0.
    derivs = [mpmath.diff(mpmath.lambertw, 0, n=k) for k in range(6)]
    return mpmath.pade(derivs, 3, 2)
def main():
    """Plot the error of the Pade, asymptotic and branch-point
    approximations of lambertw against an mpmath reference."""
    print(__doc__)
    with mpmath.workdps(50):
        p, q = lambertw_pade()
        p, q = p[::-1], q[::-1]
        print("p = {}".format(p))
        print("q = {}".format(q))

    # Reference values on a grid in the complex plane around 0.
    x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
    x, y = np.meshgrid(x, y)
    z = x + 1j*y
    lambertw_std = []
    for z0 in z.flatten():
        lambertw_std.append(complex(mpmath.lambertw(z0)))
    lambertw_std = np.array(lambertw_std).reshape(x.shape)

    fig, axes = plt.subplots(nrows=3, ncols=1)
    # Compare Pade approximation to true result
    p = np.array([float(p0) for p0 in p])
    q = np.array([float(q0) for q0 in q])
    pade_approx = np.polyval(p, z)/np.polyval(q, z)
    pade_err = abs(pade_approx - lambertw_std)
    axes[0].pcolormesh(x, y, pade_err)
    # Compare two terms of asymptotic series to true result
    asy_approx = np.log(z) - np.log(np.log(z))
    asy_err = abs(asy_approx - lambertw_std)
    axes[1].pcolormesh(x, y, asy_err)
    # Compare two terms of the series around the branch point to the
    # true result
    p = np.sqrt(2*(np.exp(1)*z + 1))
    series_approx = -1 + p - p**2/3
    series_err = abs(series_approx - lambertw_std)
    im = axes[2].pcolormesh(x, y, series_err)
    fig.colorbar(im, ax=axes.ravel().tolist())
    plt.show()

    # Second figure: where does the Pade approximation beat the asymptotic
    # series?  The red curve is a hand-fit boundary between the regimes.
    fig, ax = plt.subplots(nrows=1, ncols=1)
    pade_better = pade_err < asy_err
    im = ax.pcolormesh(x, y, pade_better)
    t = np.linspace(-0.3, 0.3)
    ax.plot(-2.5*abs(t) - 0.2, t, 'r')
    fig.colorbar(im, ax=ax)
    plt.show()
@@ -1,46 +0,0 @@
"""Precompute series coefficients for log-Gamma."""
from __future__ import division, print_function, absolute_import
try:
import mpmath
except ImportError:
pass
def stirling_series(N):
    """Coefficients B_{2n} / (2n(2n-1)) of the Stirling series, n = 1..N."""
    with mpmath.workdps(100):
        return [mpmath.bernoulli(2*n)/(2*n*(2*n - 1))
                for n in range(1, N + 1)]
def taylor_series_at_1(N):
    """First N Taylor coefficients of log-Gamma about x = 1."""
    with mpmath.workdps(100):
        # Leading coefficient is -Euler's constant; higher orders are
        # (-1)^n zeta(n) / n.
        coeffs = [-mpmath.euler]
        coeffs.extend((-1)**n*mpmath.zeta(n)/n for n in range(2, N + 1))
    return coeffs
def main():
    """Print the Stirling and Taylor coefficient tables, highest order
    first."""
    print(__doc__)
    print()

    def fmt(series):
        # Format highest-order first, 20 significant digits each.
        return [mpmath.nstr(c, 20, min_fixed=0, max_fixed=0)
                for c in series[::-1]]

    stirling_coeffs = fmt(stirling_series(8))
    taylor_coeffs = fmt(taylor_series_at_1(23))
    print("Stirling series coefficients")
    print("----------------------------")
    print("\n".join(stirling_coeffs))
    print()
    print("Taylor series coefficients")
    print("--------------------------")
    print("\n".join(taylor_coeffs))
    print()
@@ -1,13 +0,0 @@
from __future__ import division, print_function, absolute_import
def configuration(parent_name='special', top_path=None):
    """Build configuration for the scipy.special._precompute subpackage."""
    from numpy.distutils.misc_util import Configuration
    return Configuration('_precompute', parent_name, top_path)
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration().todict())
@@ -1,122 +0,0 @@
"""
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import mpmath
def err_metric(a, b, atol=1e-290):
    """Relative-ish error |a - b| / (atol + |b|), with matching infinities
    counted as exact agreement rather than an infinite error."""
    err = np.abs(a - b) / (atol + np.abs(b))
    err[np.isinf(b) & (a == b)] = 0
    return err
def do_plot(is_h=True):
    """Plot convergence regions for the Struve H (is_h=True) or L series.

    Compares the power-series, large-z asymptotic and Bessel-series
    implementations from struve.c against mpmath reference values, and
    overlays the error estimates the routines report for themselves.
    """
    from scipy.special._ufuncs import (_struve_power_series,
                                       _struve_asymp_large_z,
                                       _struve_bessel_series)

    vs = np.linspace(-1000, 1000, 91)
    zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])

    # Each routine returns (value, error estimate).
    rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
    ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
    rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)

    mpmath.mp.dps = 50

    # Reference values from mpmath (plain defs rather than lambda
    # assignments, per PEP 8 / E731).
    if is_h:
        def sh(v, z):
            return float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
    else:
        def sh(v, z):
            return float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
    ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])

    err_a = err_metric(ra[0], ex) + 1e-300
    err_p = err_metric(rp[0], ex) + 1e-300
    err_b = err_metric(rb[0], ex) + 1e-300

    # Error estimates reported by the routines themselves.
    err_est_a = abs(ra[1]/ra[0])
    err_est_p = abs(rp[1]/rp[0])
    err_est_b = abs(rb[1]/rb[0])

    z_cutoff = 0.7*abs(vs) + 12

    levels = [-1000, -12]

    plt.cla()
    # NOTE: the old `plt.hold(1)` call was removed here -- Axes.hold was
    # deprecated in matplotlib 2.0 and removed in 3.0; axes accumulate
    # artists by default, so it is simply unnecessary.
    plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)

    plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])

    lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
    la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
    lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])

    plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
    plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
    plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})

    plt.plot(vs, z_cutoff, 'k--')

    plt.xlim(vs.min(), vs.max())
    plt.ylim(zs.min(), zs.max())

    plt.xlabel('v')
    plt.ylabel('z')
def main():
    """Render convergence-region plots for Struve H and L side by side."""
    plt.clf()
    plt.subplot(121)
    do_plot(True)
    plt.title('Struve H')

    plt.subplot(122)
    do_plot(False)
    plt.title('Struve L')

    plt.savefig('struve_convergence.png')
    plt.show()
@@ -1,46 +0,0 @@
from __future__ import division, print_function, absolute_import
from scipy._lib._numpy_compat import suppress_warnings
try:
import mpmath as mp
except ImportError:
pass
try:
# Can remove when sympy #11255 is resolved; see
# https://github.com/sympy/sympy/issues/11255
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "inspect.getargspec.. is deprecated")
from sympy.abc import x
except ImportError:
pass
def lagrange_inversion(a):
    """Given a series

    f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),

    use the Lagrange inversion formula to compute a series

    g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)

    so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so
    necessarily b[0] = 0 too.

    The algorithm is naive and could be improved, but speed isn't an
    issue here and it's easy to read.

    Returns
    -------
    list of mpmath.mpf
        The coefficients b[0..n-1].
    """
    n = len(a)
    f = sum(a[i]*x**i for i in range(n))
    # b[k] = [x**(k-1)] (x/f)**k / k  (Lagrange inversion theorem);
    # precompute the powers of h = x/f truncated mod x**n.
    h = (x/f).series(x, 0, n).removeO()
    hpower = [h**0]
    for k in range(n):
        hpower.append((hpower[-1]*h).expand())
    b = [mp.mpf(0)]
    for k in range(1, n):
        b.append(hpower[k].coeff(x, k - 1)/k)
    # Return a list, not a lazy `map` object: callers index into the result
    # (e.g. compute_alpha uses alpha[n+2]), which raises TypeError on a map
    # under Python 3.
    return [mp.mpf(coeff) for coeff in b]
@@ -1,29 +0,0 @@
"""Compute the Taylor series for zeta(x) - 1 around x = 0."""
from __future__ import division, print_function, absolute_import
try:
import mpmath
except ImportError:
pass
def zetac_series(N):
    """First N Taylor coefficients of zeta(x) - 1 about x = 0."""
    with mpmath.workdps(100):
        # zetac(0) = zeta(0) - 1 = -3/2; higher orders from derivatives
        # of zeta at 0.
        coeffs = [-1.5]
        for n in range(1, N):
            coeffs.append(mpmath.diff(mpmath.zeta, 0, n)/mpmath.factorial(n))
    return coeffs
def main():
    """Print the zetac Taylor coefficients, highest order first."""
    print(__doc__)
    formatted = [mpmath.nstr(c, 20, min_fixed=0, max_fixed=0)
                 for c in zetac_series(10)]
    print("\n".join(formatted[::-1]))
if __name__ == '__main__':
main()
@@ -1,205 +0,0 @@
from __future__ import division, print_function, absolute_import
from ._ufuncs import (_spherical_jn, _spherical_yn, _spherical_in,
_spherical_kn, _spherical_jn_d, _spherical_yn_d,
_spherical_in_d, _spherical_kn_d)
def spherical_jn(n, z, derivative=False):
    r"""Spherical Bessel function of the first kind or its derivative.

    Defined as [1]_,

    .. math:: j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n + 1/2}(z),

    where :math:`J_n` is the Bessel function of the first kind.

    Parameters
    ----------
    n : int, array_like
        Order of the Bessel function (n >= 0).
    z : complex or float, array_like
        Argument of the Bessel function.
    derivative : bool, optional
        If True, the value of the derivative (rather than the function
        itself) is returned.

    Returns
    -------
    jn : ndarray

    Notes
    -----
    For real arguments greater than the order, the function is computed
    using the ascending recurrence [2]_. For small real or complex
    arguments, the definitional relation to the cylindrical Bessel
    function of the first kind is used.

    The derivative is computed via [3]_,
    :math:`j_n'(z) = j_{n-1}(z) - \frac{n + 1}{z} j_n(z)` with
    :math:`j_0'(z) = -j_1(z)`.

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] https://dlmf.nist.gov/10.47.E3
    .. [2] https://dlmf.nist.gov/10.51.E1
    .. [3] https://dlmf.nist.gov/10.51.E2
    """
    # Dispatch to the dedicated ufunc for either the function value or
    # its derivative.
    return _spherical_jn_d(n, z) if derivative else _spherical_jn(n, z)
def spherical_yn(n, z, derivative=False):
    r"""Spherical Bessel function of the second kind or its derivative.

    Defined as [1]_,

    .. math:: y_n(z) = \sqrt{\frac{\pi}{2z}} Y_{n + 1/2}(z),

    where :math:`Y_n` is the Bessel function of the second kind.

    Parameters
    ----------
    n : int, array_like
        Order of the Bessel function (n >= 0).
    z : complex or float, array_like
        Argument of the Bessel function.
    derivative : bool, optional
        If True, the value of the derivative (rather than the function
        itself) is returned.

    Returns
    -------
    yn : ndarray

    Notes
    -----
    For real arguments, the function is computed using the ascending
    recurrence [2]_.  For complex arguments, the definitional relation to
    the cylindrical Bessel function of the second kind is used.

    The derivative is computed using the relations [3]_,

    .. math::
        y_n' = y_{n-1} - \frac{n + 1}{z} y_n.

        y_0' = -y_1

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] https://dlmf.nist.gov/10.47.E4
    .. [2] https://dlmf.nist.gov/10.51.E1
    .. [3] https://dlmf.nist.gov/10.51.E2
    """
    # Dispatch to the ufunc for the function or its derivative.
    impl = _spherical_yn_d if derivative else _spherical_yn
    return impl(n, z)
def spherical_in(n, z, derivative=False):
    r"""Modified spherical Bessel function of the first kind or its derivative.

    Defined as [1]_,

    .. math:: i_n(z) = \sqrt{\frac{\pi}{2z}} I_{n + 1/2}(z),

    where :math:`I_n` is the modified Bessel function of the first kind.

    Parameters
    ----------
    n : int, array_like
        Order of the Bessel function (n >= 0).
    z : complex or float, array_like
        Argument of the Bessel function.
    derivative : bool, optional
        If True, the value of the derivative (rather than the function
        itself) is returned.

    Returns
    -------
    in : ndarray

    Notes
    -----
    The function is computed using its definitional relation to the
    modified cylindrical Bessel function of the first kind.

    The derivative is computed using the relations [2]_,

    .. math::
        i_n' = i_{n-1} - \frac{n + 1}{z} i_n.

        i_0'(z) = i_1(z)

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] https://dlmf.nist.gov/10.47.E7
    .. [2] https://dlmf.nist.gov/10.51.E5
    """
    # The general recurrence above needs i_{n-1}, so n = 0 is the special
    # case; DLMF 10.51.E5 gives i_0'(z) = i_1(z).  (The previous docstring
    # stated "i_1' = i_0", which is not a valid identity on its own.)
    if derivative:
        return _spherical_in_d(n, z)
    return _spherical_in(n, z)
def spherical_kn(n, z, derivative=False):
    r"""Modified spherical Bessel function of the second kind or its derivative.

    Defined as [1]_,

    .. math:: k_n(z) = \sqrt{\frac{\pi}{2z}} K_{n + 1/2}(z),

    where :math:`K_n` is the modified Bessel function of the second kind.

    Parameters
    ----------
    n : int, array_like
        Order of the Bessel function (n >= 0).
    z : complex or float, array_like
        Argument of the Bessel function.
    derivative : bool, optional
        If True, the value of the derivative (rather than the function
        itself) is returned.

    Returns
    -------
    kn : ndarray

    Notes
    -----
    The function is computed using its definitional relation to the
    modified cylindrical Bessel function of the second kind.

    The derivative is computed using the relations [2]_,

    .. math::
        k_n' = -k_{n-1} - \frac{n + 1}{z} k_n.

        k_0' = -k_1

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] https://dlmf.nist.gov/10.47.E9
    .. [2] https://dlmf.nist.gov/10.51.E5
    """
    # Dispatch to the ufunc for the function or its derivative.
    impl = _spherical_kn_d if derivative else _spherical_kn
    return impl(n, z)
@@ -1,319 +0,0 @@
from __future__ import division, print_function, absolute_import
import os
from distutils.version import LooseVersion
import functools
import numpy as np
from numpy.testing import assert_
import pytest
import scipy.special as sc
__all__ = ['with_special_errors', 'assert_func_equal', 'FuncData']
#------------------------------------------------------------------------------
# Check if a module is present to be used in tests
#------------------------------------------------------------------------------
class MissingModule(object):
    """Placeholder recording the name of a module that failed to import."""

    def __init__(self, name):
        # Only the name is kept; `check_version` uses it in skip messages.
        self.name = name
def check_version(module, min_ver):
    """Return a pytest marker that skips a test for missing/old modules.

    Parameters
    ----------
    module : module or MissingModule
        The imported module, or the `MissingModule` placeholder created
        when the import failed.
    min_ver : str
        Minimum acceptable version string.

    Returns
    -------
    A ``pytest.mark.skip`` marker when the module is missing, otherwise a
    ``pytest.mark.skipif`` marker that fires when ``module.__version__``
    is older than `min_ver`.
    """
    # isinstance (rather than an exact type() comparison) is the idiomatic
    # check and also accepts subclasses of MissingModule.
    if isinstance(module, MissingModule):
        return pytest.mark.skip(reason="{} is not installed".format(module.name))
    return pytest.mark.skipif(
        LooseVersion(module.__version__) < LooseVersion(min_ver),
        reason="{} version >= {} required".format(module.__name__, min_ver))
#------------------------------------------------------------------------------
# Enable convergence and loss of precision warnings -- turn off one by one
#------------------------------------------------------------------------------
def with_special_errors(func):
    """
    Enable special function errors (such as underflow, overflow,
    loss of precision, etc.) for the duration of the wrapped call.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        # Raise on every special-function error instead of returning nan.
        with sc.errstate(all='raise'):
            return func(*args, **kwargs)
    return inner
#------------------------------------------------------------------------------
# Comparing function values at many data points at once, with helpful
# error reports
#------------------------------------------------------------------------------
def assert_func_equal(func, results, points, rtol=None, atol=None,
                      param_filter=None, knownfailure=None,
                      vectorized=True, dtype=None, nan_ok=False,
                      ignore_inf_sign=False, distinguish_nan_and_inf=True):
    """Check `func` at `points` against reference `results`.

    `results` may be either a precomputed array of expected values or a
    callable producing them; the comparison itself is delegated to
    `FuncData.check`.  (`dtype` is accepted for interface compatibility
    but not used here.)
    """
    if hasattr(points, 'next'):
        # it's a generator
        points = list(points)
    points = np.asarray(points)
    if points.ndim == 1:
        points = points[:, None]
    nparams = points.shape[1]
    if hasattr(results, '__name__'):
        # `results` is a reference implementation to call
        data, result_columns, result_func = points, None, results
    else:
        # `results` is a dataset; append it as extra columns
        data = np.c_[points, results]
        result_columns = list(range(nparams, data.shape[1]))
        result_func = None
    FuncData(func, data, list(range(nparams)),
             result_columns=result_columns, result_func=result_func,
             rtol=rtol, atol=atol, param_filter=param_filter,
             knownfailure=knownfailure, nan_ok=nan_ok, vectorized=vectorized,
             ignore_inf_sign=ignore_inf_sign,
             distinguish_nan_and_inf=distinguish_nan_and_inf).check()
class FuncData(object):
    """
    Data set for checking a special function.

    Parameters
    ----------
    func : function
        Function to test
    data : numpy array
        columnar data to use for testing
    param_columns : int or tuple of ints
        Columns indices in which the parameters to `func` lie.
        Can be imaginary integers to indicate that the parameter
        should be cast to complex.
    result_columns : int or tuple of ints, optional
        Column indices for expected results from `func`.
    result_func : callable, optional
        Function to call to obtain results.
    rtol : float, optional
        Required relative tolerance. Default is 5*eps.
    atol : float, optional
        Required absolute tolerance. Default is 5*tiny.
    param_filter : function, or tuple of functions/Nones, optional
        Filter functions to exclude some parameter ranges.
        If omitted, no filtering is done.
    knownfailure : str, optional
        Known failure error message to raise when the test is run.
        If omitted, no exception is raised.
    dataname : str, optional
        Name of the data set, used only in the repr.
    nan_ok : bool, optional
        If nan is always an accepted result.
    vectorized : bool, optional
        Whether all functions passed in are vectorized.
    ignore_inf_sign : bool, optional
        Whether to ignore signs of infinities.
        (Doesn't matter for complex-valued functions.)
    distinguish_nan_and_inf : bool, optional
        If False, treat numbers which contain nans or infs as
        equal. Setting this to False also forces ignore_inf_sign to True.
    """
    def __init__(self, func, data, param_columns, result_columns=None,
                 result_func=None, rtol=None, atol=None, param_filter=None,
                 knownfailure=None, dataname=None, nan_ok=False, vectorized=True,
                 ignore_inf_sign=False, distinguish_nan_and_inf=True):
        self.func = func
        self.data = data
        self.dataname = dataname
        # Normalize scalar column indices/filters to 1-tuples.
        if not hasattr(param_columns, '__len__'):
            param_columns = (param_columns,)
        self.param_columns = tuple(param_columns)
        # Exactly one of result_columns / result_func must be given.
        if result_columns is not None:
            if not hasattr(result_columns, '__len__'):
                result_columns = (result_columns,)
            self.result_columns = tuple(result_columns)
            if result_func is not None:
                raise ValueError("Only result_func or result_columns should be provided")
        elif result_func is not None:
            self.result_columns = None
        else:
            raise ValueError("Either result_func or result_columns should be provided")
        self.result_func = result_func
        self.rtol = rtol
        self.atol = atol
        if not hasattr(param_filter, '__len__'):
            param_filter = (param_filter,)
        self.param_filter = param_filter
        self.knownfailure = knownfailure
        self.nan_ok = nan_ok
        self.vectorized = vectorized
        self.ignore_inf_sign = ignore_inf_sign
        self.distinguish_nan_and_inf = distinguish_nan_and_inf
        # Not distinguishing nan from inf implies the sign of an infinity
        # cannot matter either (see the corresponding logic in check()).
        if not self.distinguish_nan_and_inf:
            self.ignore_inf_sign = True
    def get_tolerances(self, dtype):
        """Return (rtol, atol), defaulting to 5*eps / 5*tiny of `dtype`.

        Non-inexact dtypes (e.g. integers) fall back to float64 tolerances.
        """
        if not np.issubdtype(dtype, np.inexact):
            dtype = np.dtype(float)
        info = np.finfo(dtype)
        rtol, atol = self.rtol, self.atol
        if rtol is None:
            rtol = 5*info.eps
        if atol is None:
            atol = 5*info.tiny
        return rtol, atol
    def check(self, data=None, dtype=None, dtypes=None):
        """Check the special function against the data.

        Raises an assertion error (via numpy.testing.assert_) listing
        every point where the function disagrees with the reference
        beyond the configured tolerances.
        """
        if self.knownfailure:
            pytest.xfail(reason=self.knownfailure)
        if data is None:
            data = self.data
        if dtype is None:
            dtype = data.dtype
        else:
            data = data.astype(dtype)
        rtol, atol = self.get_tolerances(dtype)
        # Apply given filter functions: keep only rows for which every
        # per-parameter filter returns True.
        if self.param_filter:
            param_mask = np.ones((data.shape[0],), np.bool_)
            for j, filter in zip(self.param_columns, self.param_filter):
                if filter:
                    param_mask &= list(filter(data[:,j]))
            data = data[param_mask]
        # Pick parameters from the correct columns.  An imaginary column
        # index (e.g. 2j) marks a parameter that must be cast to complex.
        params = []
        for idx, j in enumerate(self.param_columns):
            if np.iscomplexobj(j):
                j = int(j.imag)
                params.append(data[:,j].astype(complex))
            elif dtypes and idx < len(dtypes):
                params.append(data[:, j].astype(dtypes[idx]))
            else:
                params.append(data[:,j])
        # Helper for evaluating results: either one vectorized call or a
        # point-by-point loop (optionally skipping points via skip_mask).
        def eval_func_at_params(func, skip_mask=None):
            if self.vectorized:
                got = func(*params)
            else:
                got = []
                for j in range(len(params[0])):
                    if skip_mask is not None and skip_mask[j]:
                        got.append(np.nan)
                        continue
                    got.append(func(*tuple([params[i][j] for i in range(len(params))])))
                got = np.asarray(got)
            # Normalize single-output functions to a 1-tuple of arrays.
            if not isinstance(got, tuple):
                got = (got,)
            return got
        # Evaluate function to be tested
        got = eval_func_at_params(self.func)
        # Grab the correct results
        if self.result_columns is not None:
            # Correct results passed in with the data
            wanted = tuple([data[:,icol] for icol in self.result_columns])
        else:
            # Function producing correct results passed in
            skip_mask = None
            if self.nan_ok and len(got) == 1:
                # Don't spend time evaluating what doesn't need to be evaluated
                skip_mask = np.isnan(got[0])
            wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask)
        # Check the validity of each output returned
        assert_(len(got) == len(wanted))
        for output_num, (x, y) in enumerate(zip(got, wanted)):
            # For complex values (or when requested) the sign of an
            # infinity is ignored: both pinf/minf masks use plain isinf.
            if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign:
                pinf_x = np.isinf(x)
                pinf_y = np.isinf(y)
                minf_x = np.isinf(x)
                minf_y = np.isinf(y)
            else:
                pinf_x = np.isposinf(x)
                pinf_y = np.isposinf(y)
                minf_x = np.isneginf(x)
                minf_y = np.isneginf(y)
            nan_x = np.isnan(x)
            nan_y = np.isnan(y)
            # Compute diffs with fp warnings silenced; non-finite diffs are
            # zeroed because inf/nan agreement is checked via the masks.
            olderr = np.seterr(all='ignore')
            try:
                abs_y = np.absolute(y)
                abs_y[~np.isfinite(abs_y)] = 0
                diff = np.absolute(x - y)
                diff[~np.isfinite(diff)] = 0
                rdiff = diff / np.absolute(y)
                rdiff[~np.isfinite(rdiff)] = 0
            finally:
                np.seterr(**olderr)
            tol_mask = (diff <= atol + rtol*abs_y)
            pinf_mask = (pinf_x == pinf_y)
            minf_mask = (minf_x == minf_y)
            nan_mask = (nan_x == nan_y)
            # A point is bad unless it is within tolerance AND its
            # inf/nan classification matches the reference.
            bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask)
            point_count = bad_j.size
            if self.nan_ok:
                bad_j &= ~nan_x
                bad_j &= ~nan_y
                point_count -= (nan_x | nan_y).sum()
            if not self.distinguish_nan_and_inf and not self.nan_ok:
                # If nan's are okay we've already covered all these cases
                inf_x = np.isinf(x)
                inf_y = np.isinf(y)
                both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y)
                bad_j &= ~both_nonfinite
                point_count -= both_nonfinite.sum()
            if np.any(bad_j):
                # Some bad results: inform what, where, and how bad
                msg = [""]
                msg.append("Max |adiff|: %g" % diff.max())
                msg.append("Max |rdiff|: %g" % rdiff.max())
                msg.append("Bad results (%d out of %d) for the following points (in output %d):"
                           % (np.sum(bad_j), point_count, output_num,))
                for j in np.nonzero(bad_j)[0]:
                    j = int(j)
                    fmt = lambda x: "%30s" % np.array2string(x[j], precision=18)
                    a = " ".join(map(fmt, params))
                    b = " ".join(map(fmt, got))
                    c = " ".join(map(fmt, wanted))
                    d = fmt(rdiff)
                    msg.append("%s => %s != %s (rdiff %s)" % (a, b, c, d))
                assert_(False, "\n".join(msg))
    def __repr__(self):
        """Pretty-printing, esp. for Nose output"""
        if np.any(list(map(np.iscomplexobj, self.param_columns))):
            is_complex = " (complex)"
        else:
            is_complex = ""
        if self.dataname:
            return "<Data for %s%s: %s>" % (self.func.__name__, is_complex,
                                            os.path.basename(self.dataname))
        else:
            return "<Data for %s%s>" % (self.func.__name__, is_complex)
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -1,236 +0,0 @@
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
# NOTE(review): the fused types below act like templates -- each declaration
# using one of them covers every listed C scalar type.
# double complex or double.
ctypedef fused Dd_number_t:
    double complex
    double
# double, float, or long double.
ctypedef fused dfg_number_t:
    double
    float
    long double
# double or long.
ctypedef fused dl_number_t:
    double
    long
cpdef double agm(double x0, double x1) nogil
cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil
cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil
cpdef double bdtr(dl_number_t x0, dl_number_t x1, double x2) nogil
cpdef double bdtrc(dl_number_t x0, dl_number_t x1, double x2) nogil
cpdef double bdtri(dl_number_t x0, dl_number_t x1, double x2) nogil
cpdef double bdtrik(double x0, double x1, double x2) nogil
cpdef double bdtrin(double x0, double x1, double x2) nogil
cpdef double bei(double x0) nogil
cpdef double beip(double x0) nogil
cpdef double ber(double x0) nogil
cpdef double berp(double x0) nogil
cpdef double besselpoly(double x0, double x1, double x2) nogil
cpdef double beta(double x0, double x1) nogil
cpdef double betainc(double x0, double x1, double x2) nogil
cpdef double betaincinv(double x0, double x1, double x2) nogil
cpdef double betaln(double x0, double x1) nogil
cpdef double binom(double x0, double x1) nogil
cpdef double boxcox(double x0, double x1) nogil
cpdef double boxcox1p(double x0, double x1) nogil
cpdef double btdtr(double x0, double x1, double x2) nogil
cpdef double btdtri(double x0, double x1, double x2) nogil
cpdef double btdtria(double x0, double x1, double x2) nogil
cpdef double btdtrib(double x0, double x1, double x2) nogil
cpdef double cbrt(double x0) nogil
cpdef double chdtr(double x0, double x1) nogil
cpdef double chdtrc(double x0, double x1) nogil
cpdef double chdtri(double x0, double x1) nogil
cpdef double chdtriv(double x0, double x1) nogil
cpdef double chndtr(double x0, double x1, double x2) nogil
cpdef double chndtridf(double x0, double x1, double x2) nogil
cpdef double chndtrinc(double x0, double x1, double x2) nogil
cpdef double chndtrix(double x0, double x1, double x2) nogil
cpdef double cosdg(double x0) nogil
cpdef double cosm1(double x0) nogil
cpdef double cotdg(double x0) nogil
cpdef Dd_number_t dawsn(Dd_number_t x0) nogil
cpdef double ellipe(double x0) nogil
cpdef double ellipeinc(double x0, double x1) nogil
cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) nogil
cpdef double ellipkinc(double x0, double x1) nogil
cpdef double ellipkm1(double x0) nogil
cpdef double entr(double x0) nogil
cpdef Dd_number_t erf(Dd_number_t x0) nogil
cpdef Dd_number_t erfc(Dd_number_t x0) nogil
cpdef Dd_number_t erfcx(Dd_number_t x0) nogil
cpdef Dd_number_t erfi(Dd_number_t x0) nogil
cpdef Dd_number_t eval_chebyc(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t eval_chebys(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t eval_chebyt(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t eval_chebyu(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t eval_gegenbauer(dl_number_t x0, double x1, Dd_number_t x2) nogil
cpdef Dd_number_t eval_genlaguerre(dl_number_t x0, double x1, Dd_number_t x2) nogil
cpdef double eval_hermite(long x0, double x1) nogil
cpdef double eval_hermitenorm(long x0, double x1) nogil
cpdef Dd_number_t eval_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil
cpdef Dd_number_t eval_laguerre(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t eval_legendre(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t eval_sh_chebyt(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t eval_sh_chebyu(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t eval_sh_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil
cpdef Dd_number_t eval_sh_legendre(dl_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t exp1(Dd_number_t x0) nogil
cpdef double exp10(double x0) nogil
cpdef double exp2(double x0) nogil
cpdef Dd_number_t expi(Dd_number_t x0) nogil
cpdef dfg_number_t expit(dfg_number_t x0) nogil
cpdef Dd_number_t expm1(Dd_number_t x0) nogil
cpdef double expn(dl_number_t x0, double x1) nogil
cpdef double exprel(double x0) nogil
cpdef double fdtr(double x0, double x1, double x2) nogil
cpdef double fdtrc(double x0, double x1, double x2) nogil
cpdef double fdtri(double x0, double x1, double x2) nogil
cpdef double fdtridfd(double x0, double x1, double x2) nogil
cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil
cpdef Dd_number_t gamma(Dd_number_t x0) nogil
cpdef double gammainc(double x0, double x1) nogil
cpdef double gammaincc(double x0, double x1) nogil
cpdef double gammainccinv(double x0, double x1) nogil
cpdef double gammaincinv(double x0, double x1) nogil
cpdef double gammaln(double x0) nogil
cpdef double gammasgn(double x0) nogil
cpdef double gdtr(double x0, double x1, double x2) nogil
cpdef double gdtrc(double x0, double x1, double x2) nogil
cpdef double gdtria(double x0, double x1, double x2) nogil
cpdef double gdtrib(double x0, double x1, double x2) nogil
cpdef double gdtrix(double x0, double x1, double x2) nogil
cpdef double complex hankel1(double x0, double complex x1) nogil
cpdef double complex hankel1e(double x0, double complex x1) nogil
cpdef double complex hankel2(double x0, double complex x1) nogil
cpdef double complex hankel2e(double x0, double complex x1) nogil
cpdef double huber(double x0, double x1) nogil
cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) nogil
cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) nogil
cdef void hyp1f2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
cdef void hyp2f0(double x0, double x1, double x2, dl_number_t x3, double *y0, double *y1) nogil
cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) nogil
cdef void hyp3f0(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
cpdef double hyperu(double x0, double x1, double x2) nogil
cpdef double i0(double x0) nogil
cpdef double i0e(double x0) nogil
cpdef double i1(double x0) nogil
cpdef double i1e(double x0) nogil
cpdef double inv_boxcox(double x0, double x1) nogil
cpdef double inv_boxcox1p(double x0, double x1) nogil
cdef void it2i0k0(double x0, double *y0, double *y1) nogil
cdef void it2j0y0(double x0, double *y0, double *y1) nogil
cpdef double it2struve0(double x0) nogil
cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) nogil
cdef void iti0k0(double x0, double *y0, double *y1) nogil
cdef void itj0y0(double x0, double *y0, double *y1) nogil
cpdef double itmodstruve0(double x0) nogil
cpdef double itstruve0(double x0) nogil
cpdef Dd_number_t iv(double x0, Dd_number_t x1) nogil
cpdef Dd_number_t ive(double x0, Dd_number_t x1) nogil
cpdef double j0(double x0) nogil
cpdef double j1(double x0) nogil
cpdef Dd_number_t jv(double x0, Dd_number_t x1) nogil
cpdef Dd_number_t jve(double x0, Dd_number_t x1) nogil
cpdef double k0(double x0) nogil
cpdef double k0e(double x0) nogil
cpdef double k1(double x0) nogil
cpdef double k1e(double x0) nogil
cpdef double kei(double x0) nogil
cpdef double keip(double x0) nogil
cdef void kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) nogil
cpdef double ker(double x0) nogil
cpdef double kerp(double x0) nogil
cpdef double kl_div(double x0, double x1) nogil
cpdef double kn(dl_number_t x0, double x1) nogil
cpdef double kolmogi(double x0) nogil
cpdef double kolmogorov(double x0) nogil
cpdef Dd_number_t kv(double x0, Dd_number_t x1) nogil
cpdef Dd_number_t kve(double x0, Dd_number_t x1) nogil
cpdef Dd_number_t log1p(Dd_number_t x0) nogil
cpdef Dd_number_t log_ndtr(Dd_number_t x0) nogil
cpdef Dd_number_t loggamma(Dd_number_t x0) nogil
cpdef dfg_number_t logit(dfg_number_t x0) nogil
cpdef double lpmv(double x0, double x1, double x2) nogil
cpdef double mathieu_a(double x0, double x1) nogil
cpdef double mathieu_b(double x0, double x1) nogil
cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) nogil
cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) nogil
cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) nogil
cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) nogil
cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) nogil
cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) nogil
cdef void modfresnelm(double x0, double complex *y0, double complex *y1) nogil
cdef void modfresnelp(double x0, double complex *y0, double complex *y1) nogil
cpdef double modstruve(double x0, double x1) nogil
cpdef double nbdtr(dl_number_t x0, dl_number_t x1, double x2) nogil
cpdef double nbdtrc(dl_number_t x0, dl_number_t x1, double x2) nogil
cpdef double nbdtri(dl_number_t x0, dl_number_t x1, double x2) nogil
cpdef double nbdtrik(double x0, double x1, double x2) nogil
cpdef double nbdtrin(double x0, double x1, double x2) nogil
cpdef double ncfdtr(double x0, double x1, double x2, double x3) nogil
cpdef double ncfdtri(double x0, double x1, double x2, double x3) nogil
cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) nogil
cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) nogil
cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) nogil
cpdef double nctdtr(double x0, double x1, double x2) nogil
cpdef double nctdtridf(double x0, double x1, double x2) nogil
cpdef double nctdtrinc(double x0, double x1, double x2) nogil
cpdef double nctdtrit(double x0, double x1, double x2) nogil
cpdef Dd_number_t ndtr(Dd_number_t x0) nogil
cpdef double ndtri(double x0) nogil
cpdef double nrdtrimn(double x0, double x1, double x2) nogil
cpdef double nrdtrisd(double x0, double x1, double x2) nogil
cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
cpdef double obl_cv(double x0, double x1, double x2) nogil
cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
cdef void obl_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
cpdef double owens_t(double x0, double x1) nogil
cdef void pbdv(double x0, double x1, double *y0, double *y1) nogil
cdef void pbvv(double x0, double x1, double *y0, double *y1) nogil
cdef void pbwa(double x0, double x1, double *y0, double *y1) nogil
cpdef double pdtr(dl_number_t x0, double x1) nogil
cpdef double pdtrc(dl_number_t x0, double x1) nogil
cpdef double pdtri(dl_number_t x0, double x1) nogil
cpdef double pdtrik(double x0, double x1) nogil
cpdef double poch(double x0, double x1) nogil
cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
cpdef double pro_cv(double x0, double x1, double x2) nogil
cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
cpdef double pseudo_huber(double x0, double x1) nogil
cpdef Dd_number_t psi(Dd_number_t x0) nogil
cpdef double radian(double x0, double x1, double x2) nogil
cpdef double rel_entr(double x0, double x1) nogil
cpdef Dd_number_t rgamma(Dd_number_t x0) nogil
cpdef double round(double x0) nogil
cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil
cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil
cpdef double sindg(double x0) nogil
cpdef double smirnov(dl_number_t x0, double x1) nogil
cpdef double smirnovi(dl_number_t x0, double x1) nogil
cpdef Dd_number_t spence(Dd_number_t x0) nogil
cpdef double complex sph_harm(dl_number_t x0, dl_number_t x1, double x2, double x3) nogil
cpdef double stdtr(double x0, double x1) nogil
cpdef double stdtridf(double x0, double x1) nogil
cpdef double stdtrit(double x0, double x1) nogil
cpdef double struve(double x0, double x1) nogil
cpdef double tandg(double x0) nogil
cpdef double tklmbda(double x0, double x1) nogil
cpdef double complex wofz(double complex x0) nogil
cpdef double complex wrightomega(double complex x0) nogil
cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) nogil
cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) nogil
cpdef double y0(double x0) nogil
cpdef double y1(double x0) nogil
cpdef double yn(dl_number_t x0, double x1) nogil
cpdef Dd_number_t yv(double x0, Dd_number_t x1) nogil
cpdef Dd_number_t yve(double x0, Dd_number_t x1) nogil
cpdef double zetac(double x0) nogil
@@ -1,107 +0,0 @@
from __future__ import division, print_function, absolute_import
from ._ufuncs import _lambertw
def lambertw(z, k=0, tol=1e-8):
    r"""
    lambertw(z, k=0, tol=1e-8)

    Lambert W function.

    The Lambert W function `W(z)` is defined as the inverse function
    of ``w * exp(w)``. In other words, the value of ``W(z)`` is
    such that ``z = W(z) * exp(W(z))`` for any complex number
    ``z``.

    The Lambert W function is a multivalued function with infinitely
    many branches. Each branch gives a separate solution of the
    equation ``z = w exp(w)``. Here, the branches are indexed by the
    integer `k`.

    Parameters
    ----------
    z : array_like
        Input argument.
    k : int, optional
        Branch index.
    tol : float, optional
        Evaluation tolerance.

    Returns
    -------
    w : array
        `w` will have the same shape as `z`.

    Notes
    -----
    All branches are supported by `lambertw`:

    * ``lambertw(z)`` gives the principal solution (branch 0)
    * ``lambertw(z, k)`` gives the solution on branch `k`

    The Lambert W function has two partially real branches: the
    principal branch (`k = 0`) is real for real ``z > -1/e``, and the
    ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except
    ``k = 0`` have a logarithmic singularity at ``z = 0``.

    **Possible issues**

    The evaluation can become inaccurate very close to the branch point
    at ``-1/e``. In some corner cases, `lambertw` might currently
    fail to converge, or can end up on the wrong branch.

    **Algorithm**

    Halley's iteration is used to invert ``w * exp(w)``, using a first-order
    asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate.

    The definition, implementation and choice of branches is based on [2]_.

    See Also
    --------
    wrightomega : the Wright Omega function

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Lambert_W_function
    .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
       (1996) 329-359.
       http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf

    Examples
    --------
    The Lambert W function is the inverse of ``w exp(w)``:

    >>> import numpy as np
    >>> from scipy.special import lambertw
    >>> w = lambertw(1)
    >>> w
    (0.56714329040978384+0j)
    >>> w * np.exp(w)
    (1.0+0j)

    Any branch gives a valid inverse:

    >>> w = lambertw(1, k=3)
    >>> w
    (-2.8535817554090377+17.113535539412148j)
    >>> w*np.exp(w)
    (1.0000000000000002+1.609823385706477e-15j)

    **Applications to equation-solving**

    The Lambert W function may be used to solve various kinds of
    equations, such as finding the value of the infinite power
    tower :math:`z^{z^{z^{\ldots}}}`:

    >>> def tower(z, n):
    ...     if n == 0:
    ...         return z
    ...     return z ** tower(z, n-1)
    ...
    >>> tower(0.5, 100)
    0.641185744504986
    >>> -lambertw(-np.log(0.5)) / np.log(0.5)
    (0.64118574450498589+0j)
    """
    # Thin wrapper: all numerics happen in the C-implemented ufunc.
    return _lambertw(z, k, tol)
File diff suppressed because it is too large Load Diff
@@ -1,160 +0,0 @@
from __future__ import division, print_function, absolute_import
import os
import sys
from os.path import join, dirname
from distutils.sysconfig import get_python_inc
import subprocess
import numpy
from numpy.distutils.misc_util import get_numpy_include_dirs
try:
from numpy.distutils.misc_util import get_info
except ImportError:
raise ValueError("numpy >= 1.4 is required (detected %s from %s)" %
(numpy.__version__, numpy.__file__))
def configuration(parent_package='',top_path=None):
    """Build configuration for the scipy.special subpackage.

    Registers the C/Fortran helper libraries, the generated extension
    modules (_ufuncs, _ufuncs_cxx, cython_special, ...), the test data
    files, and regenerates the .npz test datasets.
    """
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils.system_info import get_info as get_system_info
    config = Configuration('special', parent_package, top_path)
    define_macros = []
    if sys.platform == 'win32':
        # define_macros.append(('NOINFINITIES',None))
        # define_macros.append(('NONANS',None))
        define_macros.append(('_USE_MATH_DEFINES',None))
    curdir = os.path.abspath(os.path.dirname(__file__))
    inc_dirs = [get_python_inc(), os.path.join(curdir, "c_misc")]
    if inc_dirs[0] != get_python_inc(plat_specific=1):
        inc_dirs.append(get_python_inc(plat_specific=1))
    # NOTE(review): get_numpy_include_dirs() returns a *list*, so this puts a
    # list object at inc_dirs[0] rather than splicing the entries in --
    # presumably numpy.distutils flattens nested include lists; confirm.
    inc_dirs.insert(0, get_numpy_include_dirs())
    inc_dirs.append(join(dirname(dirname(__file__)), '_lib'))
    # C libraries
    c_misc_src = [join('c_misc','*.c')]
    c_misc_hdr = [join('c_misc','*.h')]
    cephes_src = [join('cephes','*.c')]
    cephes_hdr = [join('cephes', '*.h')]
    config.add_library('sc_c_misc',sources=c_misc_src,
                       include_dirs=[curdir] + inc_dirs,
                       depends=(cephes_hdr + cephes_src
                                + c_misc_hdr + cephes_hdr
                                + ['*.h']),
                       macros=define_macros)
    config.add_library('sc_cephes',sources=cephes_src,
                       include_dirs=[curdir] + inc_dirs,
                       depends=(cephes_hdr + ['*.h']),
                       macros=define_macros)
    # Fortran/C++ libraries
    mach_src = [join('mach','*.f')]
    amos_src = [join('amos','*.f')]
    cdf_src = [join('cdflib','*.f')]
    specfun_src = [join('specfun','*.f')]
    # mach is built without optimization (config_fc 'noopt').
    config.add_library('sc_mach',sources=mach_src,
                       config_fc={'noopt':(__file__,1)})
    config.add_library('sc_amos',sources=amos_src)
    config.add_library('sc_cdf',sources=cdf_src)
    config.add_library('sc_specfun',sources=specfun_src)
    # Extension specfun
    config.add_extension('specfun',
                         sources=['specfun.pyf'],
                         f2py_options=['--no-wrap-functions'],
                         depends=specfun_src,
                         define_macros=[],
                         libraries=['sc_specfun'])
    # Extension _ufuncs
    headers = ['*.h', join('c_misc', '*.h'), join('cephes', '*.h')]
    ufuncs_src = ['_ufuncs.c', 'sf_error.c', '_logit.c.src',
                  "amos_wrappers.c", "cdf_wrappers.c", "specfun_wrappers.c"]
    ufuncs_dep = (headers + ufuncs_src + amos_src + c_misc_src + cephes_src
                  + mach_src + cdf_src + specfun_src)
    cfg = dict(get_system_info('lapack_opt'))
    cfg.setdefault('include_dirs', []).extend([curdir] + inc_dirs + [numpy.get_include()])
    cfg.setdefault('libraries', []).extend(['sc_amos','sc_c_misc','sc_cephes','sc_mach',
                                            'sc_cdf', 'sc_specfun'])
    cfg.setdefault('define_macros', []).extend(define_macros)
    config.add_extension('_ufuncs',
                         depends=ufuncs_dep,
                         sources=ufuncs_src,
                         extra_info=get_info("npymath"),
                         **cfg)
    # Extension _ufuncs_cxx
    ufuncs_cxx_src = ['_ufuncs_cxx.cxx', 'sf_error.c',
                      '_faddeeva.cxx', 'Faddeeva.cc',
                      '_wright.cxx', 'wright.cc']
    ufuncs_cxx_dep = (headers + ufuncs_cxx_src + cephes_src
                      + ['*.hh'])
    config.add_extension('_ufuncs_cxx',
                         sources=ufuncs_cxx_src,
                         depends=ufuncs_cxx_dep,
                         include_dirs=[curdir] + inc_dirs,
                         define_macros=define_macros,
                         extra_info=get_info("npymath"))
    # _ellip_harm_2 needs LAPACK; `cfg` is rebuilt fresh for each consumer.
    cfg = dict(get_system_info('lapack_opt'))
    config.add_extension('_ellip_harm_2',
                         sources=['_ellip_harm_2.c', 'sf_error.c',],
                         **cfg
                         )
    # Cython API
    config.add_data_files('cython_special.pxd')
    cython_special_src = ['cython_special.c', 'sf_error.c', '_logit.c.src',
                          "amos_wrappers.c", "cdf_wrappers.c", "specfun_wrappers.c"]
    cython_special_dep = (headers + ufuncs_src + ufuncs_cxx_src + amos_src
                          + c_misc_src + cephes_src + mach_src + cdf_src
                          + specfun_src)
    cfg = dict(get_system_info('lapack_opt'))
    cfg.setdefault('include_dirs', []).extend([curdir] + inc_dirs + [numpy.get_include()])
    cfg.setdefault('libraries', []).extend(['sc_amos','sc_c_misc','sc_cephes','sc_mach',
                                            'sc_cdf', 'sc_specfun'])
    cfg.setdefault('define_macros', []).extend(define_macros)
    config.add_extension('cython_special',
                         depends=cython_special_dep,
                         sources=cython_special_src,
                         extra_info=get_info("npymath"),
                         **cfg)
    # combinatorics
    config.add_extension('_comb',
                         sources=['_comb.c'])
    # testing for _round.h
    config.add_extension('_test_round',
                         sources=['_test_round.c'],
                         depends=['_round.h', 'cephes/dd_idefs.h'],
                         include_dirs=[numpy.get_include()] + inc_dirs,
                         extra_info=get_info('npymath'))
    config.add_data_files('tests/*.py')
    config.add_data_files('tests/data/README')
    # regenerate npz data files (runs utils/makenpz.py for each dataset)
    makenpz = os.path.join(os.path.dirname(__file__),
                           'utils', 'makenpz.py')
    data_dir = os.path.join(os.path.dirname(__file__),
                            'tests', 'data')
    for name in ['boost', 'gsl', 'local']:
        subprocess.check_call([sys.executable, makenpz,
                               '--use-timestamp',
                               os.path.join(data_dir, name)])
    config.add_data_files('tests/data/*.npz')
    config.add_subpackage('_precompute')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone, e.g. `python setup.py build`.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
@@ -1,15 +0,0 @@
"""Warnings and Exceptions that can be raised by special functions."""
import warnings
class SpecialFunctionWarning(Warning):
    """Warning that can be emitted by special functions."""
# Show every occurrence of this warning, not just the first per call site,
# since each emission reports a distinct special-function error.
warnings.simplefilter("always", category=SpecialFunctionWarning)
class SpecialFunctionError(Exception):
    """Exception that can be raised by special functions."""
@@ -1,95 +0,0 @@
# Last Change: Sat Mar 21 02:00 PM 2009 J
# Copyright (c) 2001, 2002 Enthought, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Enthought nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""Some more special functions which may be useful for multivariate statistical
analysis."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.special import gammaln as loggam
__all__ = ['multigammaln']
def multigammaln(a, d):
    r"""Returns the log of multivariate gamma, also sometimes called the
    generalized gamma.

    Parameters
    ----------
    a : ndarray
        The multivariate gamma is computed for each item of `a`.
    d : int
        The dimension of the space of integration.

    Returns
    -------
    res : ndarray
        The values of the log multivariate gamma at the given points `a`.

    Raises
    ------
    ValueError
        If `d` is not a positive integer, or if any element of `a` fails
        the condition ``a > 0.5 * (d - 1)``.

    Notes
    -----
    The formal definition of the multivariate gamma of dimension d for a real
    `a` is

    .. math::

        \Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA

    with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of
    all the positive definite matrices of dimension `d`.  Note that `a` is a
    scalar: the integrand only is multivariate, the argument is not (the
    function is defined over a subset of the real set).

    This can be proven to be equal to the much friendlier equation

    .. math::

        \Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2).

    References
    ----------
    R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in
    probability and mathematical statistics).

    """
    a = np.asarray(a)
    # The message promises a *positive* integer; also reject d <= 0, which the
    # old check let through (d == 0 would silently sum over an empty range).
    if not np.isscalar(d) or (np.floor(d) != d) or d <= 0:
        raise ValueError("d should be a positive integer (dimension)")
    if np.any(a <= 0.5 * (d - 1)):
        # Report the smallest offending value: interpolating a whole array
        # with %f would raise TypeError instead of the intended ValueError.
        raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met"
                         % (np.min(a), 0.5 * (d - 1)))
    # log Gamma_d(a) = d(d-1)/4 * log(pi) + sum_{j=1}^{d} log Gamma(a-(j-1)/2)
    res = (d * (d - 1) * 0.25) * np.log(np.pi)
    res += np.sum(loggam([(a - (j - 1.) / 2) for j in range(1, d + 1)]),
                  axis=0)
    return res
@@ -1,578 +0,0 @@
This directory contains numerical data for testing special functions.
The data is in version control as text files.
The data is automatically packed into npz files by setup.py.
The npz files should not be checked into version control.
The data in gsl is computed using the GNU scientific library, the data
in local is computed using mpmath, and the data in boost is a copy of
data distributed with the boost library and comes with the following
license:
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
=========
Copyright holders of each file are listed here:
Jamfile.v2:# Copyright Daryle Walker, Hubert Holin, John Maddock 2006 - 2007
acosh_data.ipp:// Copyright John Maddock 2008.
acosh_test.hpp:// (C) Copyright Hubert Holin 2003.
almost_equal.ipp:// Copyright (c) 2006 Johan Rade
asinh_data.ipp:// Copyright John Maddock 2008.
asinh_test.hpp:// (C) Copyright Hubert Holin 2003.
assoc_legendre_p.ipp:// (C) Copyright John Maddock 2006-7.
atanh_data.ipp:// Copyright John Maddock 2008.
atanh_test.hpp:// (C) Copyright Hubert Holin 2003.
bessel_i_data.ipp:// Copyright (c) 2007 John Maddock
bessel_i_int_data.ipp:// Copyright (c) 2007 John Maddock
bessel_j_data.ipp:// Copyright (c) 2007 John Maddock
bessel_j_int_data.ipp:// Copyright (c) 2007 John Maddock
bessel_j_large_data.ipp:// Copyright (c) 2007 John Maddock
bessel_k_data.ipp:// Copyright (c) 2007 John Maddock
bessel_k_int_data.ipp:// Copyright (c) 2007 John Maddock
bessel_y01_data.ipp:// Copyright (c) 2007 John Maddock
bessel_yn_data.ipp:// Copyright (c) 2007 John Maddock
bessel_yv_data.ipp:// Copyright (c) 2007 John Maddock
beta_exp_data.ipp:// (C) Copyright John Maddock 2006.
beta_med_data.ipp:// (C) Copyright John Maddock 2006.
beta_small_data.ipp:// (C) Copyright John Maddock 2006.
binomial_data.ipp:// (C) Copyright John Maddock 2006-7.
binomial_large_data.ipp:// (C) Copyright John Maddock 2006-7.
binomial_quantile.ipp:// (C) Copyright John Maddock 2006-7.
cbrt_data.ipp:// (C) Copyright John Maddock 2006-7.
common_factor_test.cpp:// (C) Copyright Daryle Walker 2001, 2006.
compile_test/tools_rational_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_real_cast_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_remez_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_chi_squared_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_complement_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_sign_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_digamma_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_trunc_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/constants_incl_test.cpp:// Copyright John Maddock 2012.
compile_test/sf_sinc_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_binomial_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_binomial_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_test_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_normal_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_sinhc_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_ellint_rc_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_sin_pi_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_sph_harm_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_poisson_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/test_traits.cpp:// Copyright John Maddock 2007.
compile_test/dist_gamma_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_cos_pi_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_logistic_incl_test.cpp:// Copyright John Maddock 2008.
compile_test/sf_fpclassify_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/compl_atanh_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_precision_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_hankel_incl_test.cpp:// Copyright John Maddock 2012.
compile_test/sf_cbrt_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_nc_beta_incl_test.cpp:// Copyright John Maddock 2008.
compile_test/sf_legendre_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_stats_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_polynomial_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_config_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_exponential_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_students_t_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_inv_gamma_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/compl_acosh_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_beta_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_fisher_f_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_triangular_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/instantiate.hpp:// Copyright John Maddock 2006.
compile_test/instantiate.hpp:// Copyright Paul A. Bristow 2007, 2010.
compile_test/tools_solve_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_next_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/generate.sh:// Copyright John Maddock 2006.
compile_test/generate.sh:// Copyright John Maddock 2006.
compile_test/generate.sh:// Copyright John Maddock 2006.
compile_test/distribution_concept_check.cpp:// Copyright John Maddock 2006.
compile_test/sf_laguerre_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/tr1_incl_test.cpp:// Copyright John Maddock 2008.
compile_test/sf_ellint_rj_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_nc_chi_squ_incl_test.cpp:// Copyright John Maddock 2008.
compile_test/dist_skew_norm_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_modf_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_find_location_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/compl_acos_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_ellint_rd_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_roots_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_test_data_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/compl_abs_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_nc_t_incl_test.cpp:// Copyright John Maddock 2008.
compile_test/sf_factorials_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_gamma_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/compl_atan_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_powm1_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_hypot_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_pareto_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_round_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_weibull_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/std_real_concept_check.cpp:// Copyright John Maddock 2006.
compile_test/dist_hypergeo_incl_test.cpp:// Copyright John Maddock 2008.
compile_test/dist_inv_chi_sq_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_sqrt1pm1_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_log1p_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_jacobi_incl_test.cpp:// Copyright John Maddock 2012.
compile_test/dist_neg_binom_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_nc_f_incl_test.cpp:// Copyright John Maddock 2008.
compile_test/dist_find_scale_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_bessel_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_minima_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/compl_asin_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_extreme_val_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_lanczos_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_uniform_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/test_compile_result.hpp:// Copyright John Maddock 2007.
compile_test/tools_series_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_ellint_3_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_ellint_rf_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_ellint_2_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_hermite_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/poison.hpp:// Copyright John Maddock 2013.
compile_test/sf_zeta_incl_test.cpp:// Copyright John Maddock 2007.
compile_test/dist_laplace_incl_test.cpp:// Copyright John Maddock 2008.
compile_test/sf_expint_incl_test.cpp:// Copyright John Maddock 2007.
compile_test/sf_expm1_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_bernoulli_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/compl_asinh_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_beta_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/tools_fraction_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_owens_t_incl_test.cpp:// Copyright John Maddock 2012.
compile_test/tools_toms748_inc_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_ellint_1_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_erf_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/main.cpp:// Copyright John Maddock 2009.
compile_test/sf_math_fwd_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/sf_airy_incl_test.cpp:// Copyright John Maddock 2012.
compile_test/dist_lognormal_incl_test.cpp:// Copyright John Maddock 2006.
compile_test/dist_cauchy_incl_test.cpp:// Copyright John Maddock 2006.
complex_test.cpp:// (C) Copyright John Maddock 2005.
digamma_data.ipp:// (C) Copyright John Maddock 2006-7.
digamma_neg_data.ipp:// (C) Copyright John Maddock 2006-7.
digamma_root_data.ipp:// (C) Copyright John Maddock 2006-7.
digamma_small_data.ipp:// (C) Copyright John Maddock 2006-7.
e_float_concept_check.cpp:// Copyright John Maddock 2011.
ellint_e2_data.ipp:// Copyright (c) 2006 John Maddock
ellint_e_data.ipp:// Copyright (c) 2006 John Maddock
ellint_f_data.ipp:// Copyright (c) 2006 John Maddock
ellint_k_data.ipp:// (C) Copyright John Maddock 2006-7.
ellint_pi2_data.ipp:// Copyright (c) 2006 John Maddock
ellint_pi3_data.ipp:// Copyright (c) 2006 John Maddock
ellint_pi3_large_data.ipp:// Copyright (c) 2006 John Maddock
ellint_rc_data.ipp:// Copyright (c) 2006 John Maddock
ellint_rd_data.ipp:// Copyright (c) 2006 John Maddock
ellint_rf_data.ipp:// Copyright (c) 2006 John Maddock
ellint_rj_data.ipp:// Copyright (c) 2006 John Maddock
erf_data.ipp:// (C) Copyright John Maddock 2006-7.
erf_inv_data.ipp:// (C) Copyright John Maddock 2006-7.
erf_large_data.ipp:// (C) Copyright John Maddock 2006-7.
erf_small_data.ipp:// (C) Copyright John Maddock 2006.
erfc_inv_big_data.ipp:// (C) Copyright John Maddock 2006-7.
erfc_inv_data.ipp:// (C) Copyright John Maddock 2006-7.
expint_1_data.ipp:// Copyright John Maddock 2008.
expint_data.ipp:// Copyright John Maddock 2008.
expint_small_data.ipp:// Copyright John Maddock 2008.
expinti_data.ipp:// Copyright John Maddock 2008.
expinti_data_double.ipp:// Copyright John Maddock 2008.
expinti_data_long.ipp:// Copyright John Maddock 2008.
functor.hpp:// (C) Copyright John Maddock 2007.
gamma_inv_big_data.ipp:// (C) Copyright John Maddock 2006-7.
gamma_inv_data.ipp:// (C) Copyright John Maddock 2006-7.
gamma_inv_small_data.ipp:// (C) Copyright John Maddock 2006-7.
handle_test_result.hpp:// (C) Copyright John Maddock 2006-7.
hermite.ipp:// (C) Copyright John Maddock 2006-7.
hypergeometric_dist_data2.ipp:// Copyright John Maddock 2008
hypergeometric_test_data.ipp:// Copyright Gautam Sewani 2008
hypot_test.cpp:// (C) Copyright John Maddock 2005.
ibeta_data.ipp:// (C) Copyright John Maddock 2006.
ibeta_int_data.ipp:// (C) Copyright John Maddock 2006-7.
ibeta_inv_data.ipp:// (C) Copyright John Maddock 2006-7.
ibeta_inva_data.ipp:// (C) Copyright John Maddock 2006-7.
ibeta_large_data.ipp:// (C) Copyright John Maddock 2006.
ibeta_small_data.ipp:// (C) Copyright John Maddock 2006.
igamma_big_data.ipp:// (C) Copyright John Maddock 2006.
igamma_int_data.ipp:// (C) Copyright John Maddock 2006-7.
igamma_inva_data.ipp:// (C) Copyright John Maddock 2006-7.
igamma_med_data.ipp:// (C) Copyright John Maddock 2006.
igamma_small_data.ipp:// (C) Copyright John Maddock 2006.
jacobi_elliptic.ipp:// Copyright John Maddock 2012.
jacobi_elliptic_small.ipp:// Copyright John Maddock 2012.
jacobi_large_phi.ipp:// Copyright John Maddock 2012.
jacobi_near_1.ipp:// Copyright John Maddock 2012.
laguerre2.ipp:// (C) Copyright John Maddock 2006-7.
laguerre3.ipp:// (C) Copyright John Maddock 2006-7.
legendre_p.ipp:// (C) Copyright John Maddock 2006-7.
legendre_p_large.ipp:// (C) Copyright John Maddock 2006-7.
log1p_expm1_data.ipp:// (C) Copyright John Maddock 2006-7.
log1p_expm1_test.cpp:// Copyright John Maddock 2005.
log1p_expm1_test.cpp:// Copyright Paul A. Bristow 2010
log1p_expm1_test.hpp:// Copyright John Maddock 2005.
log1p_expm1_test.hpp:// Copyright Paul A. Bristow 2010
mpfr_concept_check.cpp:// Copyright John Maddock 2007-8.
mpreal_concept_check.cpp:// Copyright John Maddock 2007-8.
multiprc_concept_check_1.cpp:// Copyright John Maddock 2013.
multiprc_concept_check_2.cpp:// Copyright John Maddock 2013.
multiprc_concept_check_3.cpp:// Copyright John Maddock 2013.
multiprc_concept_check_4.cpp:// Copyright John Maddock 2013.
ncbeta.ipp:// Copyright John Maddock 2008.
ncbeta_big.ipp:// Copyright John Maddock 2008.
nccs.ipp:// Copyright John Maddock 2008.
nccs_big.ipp:// Copyright John Maddock 2008.
nct.ipp:// Copyright John Maddock 2008.
nct_asym.ipp:// Copyright John Maddock 2012.
nct_small_delta.ipp:// Copyright John Maddock 2012.
negative_binomial_quantile.ipp:// (C) Copyright John Maddock 2006-7.
ntl_concept_check.cpp:// Copyright John Maddock 2007-8.
ntl_concept_check.cpp:// Copyright Paul A. Bristow 2009, 2011
owens_t.ipp:// Copyright John Maddock 2012.
owens_t_T7.hpp:// Copyright (C) Benjamin Sobotta 2012
owens_t_large_data.ipp:// Copyright John Maddock 2012.
pch.hpp:// Copyright John Maddock 2008.
pch_light.hpp:// Copyright John Maddock 2008.
poisson_quantile.ipp:// (C) Copyright John Maddock 2006-7.
pow_test.cpp:// (C) Copyright Bruno Lalande 2008.
powm1_sqrtp1m1_test.cpp:// (C) Copyright John Maddock 2006.
powm1_sqrtp1m1_test.hpp:// Copyright John Maddock 2006.
s_.ipp:// Copyright (c) 2006 Johan Rade
s_.ipp:// Copyright (c) 2012 Paul A. Bristow
sinc_test.hpp:// (C) Copyright Hubert Holin 2003.
sinhc_test.hpp:// (C) Copyright Hubert Holin 2003.
special_functions_test.cpp:// (C) Copyright Hubert Holin 2003.
special_functions_test.cpp: BOOST_TEST_MESSAGE("(C) Copyright Hubert Holin 2003-2005.");
sph_bessel_data.ipp:// Copyright (c) 2007 John Maddock
sph_neumann_data.ipp:// Copyright (c) 2007 John Maddock
spherical_harmonic.ipp:// (C) Copyright John Maddock 2006-7.
std_real_concept_check.cpp:// Copyright John Maddock 2006.
table_type.hpp:// Copyright John Maddock 2012.
test_airy.cpp:// Copyright John Maddock 2012
test_archive.cpp:// Copyright (c) 2006 Johan Rade
test_archive.cpp:// Copyright (c) 2011 Paul A. Bristow - filename changes for boost-trunk.
test_basic_nonfinite.cpp:// Copyright (c) 2006 Johan Rade
test_basic_nonfinite.cpp:// Copyright (c) 2011 Paul A. Bristow comments
test_basic_nonfinite.cpp:// Copyright (c) 2011 John Maddock
test_bernoulli.cpp:// Copyright John Maddock 2006.
test_bernoulli.cpp:// Copyright Paul A. Bristow 2007, 2012.
test_bessel_airy_zeros.cpp:// Copyright John Maddock 2013
test_bessel_airy_zeros.cpp:// Copyright Christopher Kormanyos 2013.
test_bessel_airy_zeros.cpp:// Copyright Paul A. Bristow 2013.
test_bessel_hooks.hpp:// (C) Copyright John Maddock 2007.
test_bessel_i.cpp:// (C) Copyright John Maddock 2007.
test_bessel_i.hpp:// (C) Copyright John Maddock 2007.
test_bessel_j.cpp:// (C) Copyright John Maddock 2007.
test_bessel_j.hpp:// (C) Copyright John Maddock 2007.
test_bessel_k.cpp:// Copyright John Maddock 2006, 2007
test_bessel_k.cpp:// Copyright Paul A. Bristow 2007
test_bessel_k.hpp:// (C) Copyright John Maddock 2007.
test_bessel_y.cpp:// (C) Copyright John Maddock 2007.
test_bessel_y.hpp:// (C) Copyright John Maddock 2007.
test_beta.cpp:// Copyright John Maddock 2006.
test_beta.cpp:// Copyright Paul A. Bristow 2007, 2009
test_beta.hpp:// Copyright John Maddock 2006.
test_beta.hpp:// Copyright Paul A. Bristow 2007, 2009
test_beta_dist.cpp:// Copyright John Maddock 2006.
test_beta_dist.cpp:// Copyright Paul A. Bristow 2007, 2009, 2010, 2012.
test_beta_hooks.hpp:// (C) Copyright John Maddock 2006.
test_binomial.cpp:// Copyright John Maddock 2006.
test_binomial.cpp:// Copyright Paul A. Bristow 2007.
test_binomial_coeff.cpp:// (C) Copyright John Maddock 2006.
test_binomial_coeff.hpp:// Copyright John Maddock 2006.
test_binomial_coeff.hpp:// Copyright Paul A. Bristow 2007, 2009
test_carlson.cpp:// Copyright 2006 John Maddock
test_carlson.cpp:// Copyright Paul A. Bristow 2007.
test_carlson.hpp:// Copyright John Maddock 2006.
test_carlson.hpp:// Copyright Paul A. Bristow 2007, 2009
test_cauchy.cpp:// Copyright John Maddock 2006, 2007.
test_cauchy.cpp:// Copyright Paul A. Bristow 2007
test_cbrt.cpp:// Copyright John Maddock 2006.
test_cbrt.cpp:// Copyright Paul A. Bristow 2010
test_cbrt.hpp:// Copyright John Maddock 2006.
test_cbrt.hpp:// Copyright Paul A. Bristow 2007, 2009
test_chi_squared.cpp:// Copyright Paul A. Bristow 2006.
test_chi_squared.cpp:// Copyright John Maddock 2007.
test_classify.cpp:// Copyright John Maddock 2006.
test_classify.cpp:// Copyright Paul A. Bristow 2007
test_common_factor_gmpxx.cpp:// (C) Copyright John Maddock 2010.
test_constant_generate.cpp:// Copyright John Maddock 2010.
test_constants.cpp:// Copyright Paul Bristow 2007, 2011.
test_constants.cpp:// Copyright John Maddock 2006, 2011.
test_digamma.cpp:// (C) Copyright John Maddock 2006.
test_digamma.hpp:// Copyright John Maddock 2006.
test_digamma.hpp:// Copyright Paul A. Bristow 2007, 2009
test_dist_overloads.cpp:// Copyright John Maddock 2006.
test_dist_overloads.cpp:// Copyright Paul A. Bristow 2007.
test_ellint_1.cpp:// Copyright Xiaogang Zhang 2006
test_ellint_1.cpp:// Copyright John Maddock 2006, 2007
test_ellint_1.cpp:// Copyright Paul A. Bristow 2007
test_ellint_1.hpp:// Copyright John Maddock 2006.
test_ellint_1.hpp:// Copyright Paul A. Bristow 2007, 2009
test_ellint_2.cpp:// Copyright Xiaogang Zhang 2006
test_ellint_2.cpp:// Copyright John Maddock 2006, 2007
test_ellint_2.cpp:// Copyright Paul A. Bristow 2007
test_ellint_2.hpp:// Copyright John Maddock 2006.
test_ellint_2.hpp:// Copyright Paul A. Bristow 2007, 2009
test_ellint_3.cpp:// Copyright Xiaogang Zhang 2006
test_ellint_3.cpp:// Copyright John Maddock 2006, 2007
test_ellint_3.cpp:// Copyright Paul A. Bristow 2007
test_ellint_3.hpp:// Copyright John Maddock 2006.
test_ellint_3.hpp:// Copyright Paul A. Bristow 2007, 2009
test_erf.cpp:// Copyright John Maddock 2006.
test_erf.cpp:// Copyright Paul A. Bristow 2007
test_erf.hpp:// Copyright John Maddock 2006.
test_erf.hpp:// Copyright Paul A. Bristow 2007, 2009
test_erf_hooks.hpp:// (C) Copyright John Maddock 2006.
test_error_handling.cpp:// Copyright Paul A. Bristow 2006-7.
test_error_handling.cpp:// Copyright John Maddock 2006-7.
test_expint.cpp:// (C) Copyright John Maddock 2007.
test_expint.hpp:// Copyright John Maddock 2006.
test_expint.hpp:// Copyright Paul A. Bristow 2007, 2009
test_expint_hooks.hpp:// (C) Copyright John Maddock 2006.
test_exponential_dist.cpp:// Copyright John Maddock 2006.
test_exponential_dist.cpp:// Copyright Paul A. Bristow 2007.
test_extreme_value.cpp:// Copyright John Maddock 2006.
test_factorials.cpp:// Copyright John Maddock 2006.
test_find_location.cpp:// Copyright John Maddock 2007.
test_find_location.cpp:// Copyright Paul A. Bristow 2007.
test_find_scale.cpp:// Copyright John Maddock 2007.
test_find_scale.cpp:// Copyright Paul A. Bristow 2007.
test_fisher_f.cpp:// Copyright Paul A. Bristow 2006.
test_fisher_f.cpp:// Copyright John Maddock 2007.
test_fisher_f.cpp: // Distcalc version 1.2 Copyright 2002 H Lohninger, TU Wein
test_gamma.cpp:// (C) Copyright John Maddock 2006.
test_gamma.hpp:// Copyright John Maddock 2006.
test_gamma.hpp:// Copyright Paul A. Bristow 2007, 2009
test_gamma_data.ipp:// (C) Copyright John Maddock 2006.
test_gamma_dist.cpp:// Copyright John Maddock 2006.
test_gamma_dist.cpp:// Copyright Paul A. Bristow 2007, 2010.
test_gamma_hooks.hpp:// (C) Copyright John Maddock 2006.
test_geometric.cpp:// Copyright Paul A. Bristow 2010.
test_geometric.cpp:// Copyright John Maddock 2010.
test_hankel.cpp:// Copyright John Maddock 2012
test_hermite.cpp:// Copyright John Maddock 2006, 2007
test_hermite.cpp:// Copyright Paul A. Bristow 2007
test_hermite.hpp:// Copyright John Maddock 2006.
test_hermite.hpp:// Copyright Paul A. Bristow 2007, 2009
test_hypergeometric_dist.cpp:// Copyright John Maddock 2008
test_hypergeometric_dist.cpp:// Copyright Paul A. Bristow
test_hypergeometric_dist.cpp:// Copyright Gautam Sewani
test_ibeta.cpp:// (C) Copyright John Maddock 2006.
test_ibeta.hpp:// Copyright John Maddock 2006.
test_ibeta.hpp:// Copyright Paul A. Bristow 2007, 2009
test_ibeta_inv.cpp:// (C) Copyright John Maddock 2006.
test_ibeta_inv.hpp:// Copyright John Maddock 2006.
test_ibeta_inv.hpp:// Copyright Paul A. Bristow 2007, 2009
test_ibeta_inv_ab.cpp:// (C) Copyright John Maddock 2006.
test_ibeta_inv_ab.hpp:// Copyright John Maddock 2006.
test_ibeta_inv_ab.hpp:// Copyright Paul A. Bristow 2007, 2009
test_igamma.cpp:// (C) Copyright John Maddock 2006.
test_igamma.hpp:// Copyright John Maddock 2006.
test_igamma.hpp:// Copyright Paul A. Bristow 2007, 2009
test_igamma_inv.cpp:// (C) Copyright John Maddock 2006.
test_igamma_inv.hpp:// Copyright John Maddock 2006.
test_igamma_inv.hpp:// Copyright Paul A. Bristow 2007, 2009
test_igamma_inva.cpp:// (C) Copyright John Maddock 2006.
test_igamma_inva.hpp:// Copyright John Maddock 2006.
test_igamma_inva.hpp:// Copyright Paul A. Bristow 2007, 2009
test_instances/double_test_instances_4.cpp:// Copyright John Maddock 2011.
test_instances/ldouble_test_instances_4.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_8.cpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_9.cpp:// Copyright John Maddock 2011.
test_instances/Jamfile.v2:# Copyright ohn Maddock 2012
test_instances/real_concept_test_instances_5.cpp:// Copyright John Maddock 2011.
test_instances/ldouble_test_instances_6.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_4.cpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_7.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_2.cpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_5.cpp:// Copyright John Maddock 2011.
test_instances/ldouble_test_instances_9.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_1.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_6.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_6.cpp:// Copyright John Maddock 2011.
test_instances/ldouble_test_instances_7.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_7.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_3.cpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_6.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_9.cpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_2.cpp:// Copyright John Maddock 2011.
test_instances/pch.hpp:// Copyright John Maddock 2012.
test_instances/ldouble_test_instances_2.cpp:// Copyright John Maddock 2011.
test_instances/long_double_test_instances_1.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_7.cpp:// Copyright John Maddock 2011.
test_instances/test_instances.hpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_10.cpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_3.cpp:// Copyright John Maddock 2011.
test_instances/ldouble_test_instances_3.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_10.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_5.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_8.cpp:// Copyright John Maddock 2011.
test_instances/ldouble_test_instances_8.cpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_1.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_10.cpp:// Copyright John Maddock 2011.
test_instances/ldouble_test_instances_10.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_9.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_4.cpp:// Copyright John Maddock 2011.
test_instances/real_concept_test_instances_3.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_2.cpp:// Copyright John Maddock 2011.
test_instances/float_test_instances_1.cpp:// Copyright John Maddock 2011.
test_instances/double_test_instances_8.cpp:// Copyright John Maddock 2011.
test_instances/ldouble_test_instances_5.cpp:// Copyright John Maddock 2011.
test_instantiate1.cpp:// Copyright John Maddock 2006.
test_instantiate2.cpp:// Copyright John Maddock 2006.
test_inv_hyp.cpp:// (C) Copyright John Maddock 2006.
test_inverse_chi_squared.cpp:// Copyright Paul A. Bristow 2010.
test_inverse_chi_squared.cpp:// Copyright John Maddock 2010.
test_inverse_chi_squared_distribution.cpp:// Copyright Paul A. Bristow 2010.
test_inverse_chi_squared_distribution.cpp:// Copyright John Maddock 2010.
test_inverse_gamma_distribution.cpp:// Copyright Paul A. Bristow 2010.
test_inverse_gamma_distribution.cpp:// Copyright John Maddock 2010.
test_inverse_gaussian.cpp:// Copyright Paul A. Bristow 2010.
test_inverse_gaussian.cpp:// Copyright John Maddock 2010.
test_jacobi.cpp:// Copyright John Maddock 2012
test_jacobi.hpp:// Copyright John Maddock 2006.
test_jacobi.hpp:// Copyright Paul A. Bristow 2007, 2009
test_laguerre.cpp:// (C) Copyright John Maddock 2006.
test_laguerre.hpp:// Copyright John Maddock 2006.
test_laguerre.hpp:// Copyright Paul A. Bristow 2007, 2009
test_laplace.cpp:// Copyright Thijs van den Berg, 2008.
test_laplace.cpp:// Copyright John Maddock 2008.
test_laplace.cpp:// Copyright Paul A. Bristow 2008, 2009.
test_ldouble_simple.cpp:// Copyright John Maddock 2013.
test_legacy_nonfinite.cpp:// Copyright (c) 2006 Johan Rade
test_legacy_nonfinite.cpp:// Copyright (c) 2011 Paul A. Bristow comments
test_legendre.cpp:// (C) Copyright John Maddock 2006.
test_legendre.hpp:// Copyright John Maddock 2006.
test_legendre.hpp:// Copyright Paul A. Bristow 2007, 2009
test_legendre_hooks.hpp:// (C) Copyright John Maddock 2006.
test_lexical_cast.cpp:// Copyright (c) 2006 Johan Rade
test_lexical_cast.cpp:// Copyright (c) 2011 Paul A. Bristow incorporated Boost.Math
test_logistic_dist.cpp:// Copyright 2008 Gautam Sewani
test_lognormal.cpp:// Copyright John Maddock 2006.
test_lognormal.cpp:// Copyright Paul A. Bristow 2007
test_long_double_support.cpp:// Copyright John Maddock 2009
test_math_fwd.cpp:// Copyright John Maddock 2010.
test_math_fwd.cpp:// Copyright Paul A. Bristow 2010.
test_minima.cpp:// Copyright John Maddock 2006.
test_minima.cpp:// Copyright Paul A. Bristow 2007.
test_nc_beta.cpp:// Copyright John Maddock 2008.
test_nc_chi_squared.cpp:// Copyright John Maddock 2008.
test_nc_f.cpp:// Copyright John Maddock 2008.
test_nc_t.cpp:// Copyright John Maddock 2008, 2012.
test_nc_t.cpp:// Copyright Paul A. Bristow 2012.
test_ncbeta_hooks.hpp:// (C) Copyright John Maddock 2008.
test_nccs_hooks.hpp:// (C) Copyright John Maddock 2008.
test_negative_binomial.cpp:// Copyright Paul A. Bristow 2007.
test_negative_binomial.cpp:// Copyright John Maddock 2006.
test_next.cpp:// (C) Copyright John Maddock 2008.
test_nonfinite_io.cpp:// Copyright 2011 Paul A. Bristow
test_nonfinite_trap.cpp:// Copyright (c) 2006 Johan Rade
test_nonfinite_trap.cpp:// Copyright (c) 2011 Paul A. Bristow To incorporate into Boost.Math
test_normal.cpp:// Copyright Paul A. Bristow 2010.
test_normal.cpp:// Copyright John Maddock 2007.
test_out_of_range.hpp:// Copyright John Maddock 2012.
test_owens_t.cpp:// Copyright Paul A. Bristow 2012.
test_owens_t.cpp:// Copyright Benjamin Sobotta 2012.
test_pareto.cpp:// Copyright Paul A. Bristow 2007, 2009.
test_pareto.cpp:// Copyright John Maddock 2006.
test_poisson.cpp:// Copyright Paul A. Bristow 2007.
test_poisson.cpp:// Copyright John Maddock 2006.
test_policy.cpp:// Copyright John Maddock 2007.
test_policy_2.cpp:// Copyright John Maddock 2007.
test_policy_3.cpp:// Copyright John Maddock 2007.
test_policy_4.cpp:// Copyright John Maddock 2007.
test_policy_5.cpp:// Copyright John Maddock 2007.
test_policy_6.cpp:// Copyright John Maddock 2007.
test_policy_7.cpp:// Copyright John Maddock 2007.
test_policy_8.cpp:// Copyright John Maddock 2007.
test_policy_sf.cpp:// (C) Copyright John Maddock 2007.
test_print_info_on_type.cpp:// Copyright John Maddock 2010.
test_rational_instances/test_rational_ldouble2.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_float2.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_double2.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_double3.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_ldouble1.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_float4.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_double5.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_double4.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_real_concept1.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_real_concept3.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational.hpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_ldouble3.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_float3.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_real_concept5.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_ldouble5.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_ldouble4.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_double1.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_real_concept4.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_real_concept2.cpp:// (C) Copyright John Maddock 2006-7.
test_rational_instances/test_rational_float1.cpp:// (C) Copyright John Maddock 2006-7.
test_rationals.cpp:// (C) Copyright John Maddock 2006.
test_rayleigh.cpp:// Copyright John Maddock 2006.
test_real_concept.cpp:// Copyright John Maddock 2010
test_real_concept_neg_bin.cpp:// Copyright Paul A. Bristow 2010.
test_real_concept_neg_bin.cpp:// Copyright John Maddock 2010.
test_remez.cpp:// Copyright John Maddock 2006
test_remez.cpp:// Copyright Paul A. Bristow 2007
test_roots.cpp:// (C) Copyright John Maddock 2006.
test_round.cpp:// (C) Copyright John Maddock 2007.
test_sign.cpp:#define BOOST_TEST_MAIN// Copyright John Maddock 2008
test_sign.cpp:// (C) Copyright Paul A. Bristow 2011 (added tests for changesign)
test_signed_zero.cpp:// Copyright 2006 Johan Rade
test_signed_zero.cpp:// Copyright 2011 Paul A. Bristow To incorporate into Boost.Math
test_signed_zero.cpp:// Copyright 2012 Paul A. Bristow with new tests.
test_skew_normal.cpp:// Copyright Paul A. Bristow 2012.
test_skew_normal.cpp:// Copyright John Maddock 2012.
test_skew_normal.cpp:// Copyright Benjamin Sobotta 2012
test_spherical_harmonic.cpp:// (C) Copyright John Maddock 2006.
test_students_t.cpp:// Copyright Paul A. Bristow 2006.
test_students_t.cpp:// Copyright John Maddock 2006.
test_tgamma_ratio.cpp:// (C) Copyright John Maddock 2006.
test_tgamma_ratio.hpp:// Copyright John Maddock 2006.
test_tgamma_ratio.hpp:// Copyright Paul A. Bristow 2007, 2009
test_toms748_solve.cpp:// (C) Copyright John Maddock 2006.
test_tr1.c:/* (C) Copyright John Maddock 2008.
test_tr1.cpp:// (C) Copyright John Maddock 2008.
test_triangular.cpp:// Copyright Paul Bristow 2006, 2007.
test_triangular.cpp:// Copyright John Maddock 2006, 2007.
test_uniform.cpp:// Copyright Paul Bristow 2007.
test_uniform.cpp:// Copyright John Maddock 2006.
test_weibull.cpp:// Copyright John Maddock 2006, 2012.
test_weibull.cpp:// Copyright Paul A. Bristow 2007, 2012.
test_zeta.cpp:// (C) Copyright John Maddock 2006.
test_zeta.hpp:// Copyright John Maddock 2006.
test_zeta.hpp:// Copyright Paul A. Bristow 2007, 2009
test_zeta_hooks.hpp:// (C) Copyright John Maddock 2006.
tgamma_delta_ratio_data.ipp:// (C) Copyright John Maddock 2006-7.
tgamma_delta_ratio_int.ipp:// (C) Copyright John Maddock 2006-7.
tgamma_delta_ratio_int2.ipp:// (C) Copyright John Maddock 2006-7.
tgamma_ratio_data.ipp:// (C) Copyright John Maddock 2006-7.
zeta_1_below_data.ipp:// Copyright John Maddock 2008.
zeta_1_up_data.ipp:// Copyright John Maddock 2008.
zeta_data.ipp:// Copyright John Maddock 2008.
zeta_neg_data.ipp:// Copyright John Maddock 2008.
ztest_max_digits10.cpp: // Copyright 2010 Paul A. Bristow
zztest_max_digits10.cpp:// Copyright 2010 Paul A. Bristow
File diff suppressed because it is too large Load Diff
@@ -1,108 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, assert_allclose
from scipy.special import boxcox, boxcox1p, inv_boxcox, inv_boxcox1p
# There are more tests of boxcox and boxcox1p in test_mpmath.py.
def test_boxcox_basic():
    """Check boxcox against its closed forms for special lambda values."""
    x = np.array([0.5, 1, 2, 4])

    # lambda = 0 => y = log(x)
    assert_almost_equal(boxcox(x, 0), np.log(x))

    # lambda = 1 => y = x - 1
    assert_almost_equal(boxcox(x, 1), x - 1)

    # lambda = 2 => y = 0.5*(x**2 - 1)
    assert_almost_equal(boxcox(x, 2), 0.5*(x**2 - 1))

    # x = 0 and lambda > 0 => y = -1 / lambda
    lam = np.array([0.5, 1, 2])
    assert_almost_equal(boxcox(0, lam), -1.0 / lam)
def test_boxcox_underflow():
    """boxcox must stay accurate when lmbda*log(x) underflows to ~0."""
    x = 1 + 1e-15
    lmbda = 1e-306
    assert_allclose(boxcox(x, lmbda), np.log(x), rtol=1e-14)
def test_boxcox_nonfinite():
    """boxcox returns nan for x < 0 and -inf for x = 0 with lambda <= 0."""
    # x < 0 => y = nan
    y = boxcox(np.array([-1, -1, -0.5]), [0.5, 2.0, -1.5])
    assert_equal(y, np.array([np.nan, np.nan, np.nan]))

    # x = 0 and lambda <= 0 => y = -inf
    y = boxcox(0, [-2.5, 0])
    assert_equal(y, np.array([-np.inf, -np.inf]))
def test_boxcox1p_basic():
    """Check boxcox1p against its closed forms for special lambda values."""
    x = np.array([-0.25, -1e-20, 0, 1e-20, 0.25, 1, 3])

    # lambda = 0 => y = log(1+x)
    assert_almost_equal(boxcox1p(x, 0), np.log1p(x))

    # lambda = 1 => y = x
    assert_almost_equal(boxcox1p(x, 1), x)

    # lambda = 2 => y = 0.5*((1+x)**2 - 1) = 0.5*x*(2 + x)
    assert_almost_equal(boxcox1p(x, 2), 0.5*x*(2 + x))

    # x = -1 and lambda > 0 => y = -1 / lambda
    lam = np.array([0.5, 1, 2])
    assert_almost_equal(boxcox1p(-1, lam), -1.0 / lam)
def test_boxcox1p_underflow():
    """boxcox1p must stay accurate for tiny x and tiny lmbda."""
    x = np.array([1e-15, 1e-306])
    lmbda = np.array([1e-306, 1e-18])
    assert_allclose(boxcox1p(x, lmbda), np.log1p(x), rtol=1e-14)
def test_boxcox1p_nonfinite():
    """boxcox1p returns nan for x < -1 and -inf for x = -1 with lambda <= 0."""
    # x < -1 => y = nan
    y = boxcox1p(np.array([-2, -2, -1.5]), [0.5, 2.0, -1.5])
    assert_equal(y, np.array([np.nan, np.nan, np.nan]))

    # x = -1 and lambda <= 0 => y = -inf
    y = boxcox1p(-1, [-2.5, 0])
    assert_equal(y, np.array([-np.inf, -np.inf]))
def test_inv_boxcox():
    """inv_boxcox/inv_boxcox1p must invert boxcox/boxcox1p elementwise."""
    x = np.array([0., 1., 2.])
    lam = np.array([0., 1., 2.])

    assert_almost_equal(inv_boxcox(boxcox(x, lam), lam), x)
    assert_almost_equal(inv_boxcox1p(boxcox1p(x, lam), lam), x)
def test_inv_boxcox1p_underflow():
    """For tiny lam, inv_boxcox1p(x, lam) ~ x must hold without underflow."""
    x = 1e-15
    lam = 1e-306
    assert_allclose(inv_boxcox1p(x, lam), x, rtol=1e-14)
@@ -1,409 +0,0 @@
"""
Test cdflib functions versus mpmath, if available.
The following functions still need tests:
- ncfdtr
- ncfdtri
- ncfdtridfn
- ncfdtridfd
- ncfdtrinc
- nbdtrik
- nbdtrin
- nrdtrimn
- nrdtrisd
- pdtrik
- nctdtr
- nctdtrit
- nctdtridf
- nctdtrinc
"""
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import assert_equal
import pytest
import scipy.special as sp
from scipy._lib.six import with_metaclass
from scipy.special._testutils import (
MissingModule, check_version, FuncData)
from scipy.special._mptestutils import (
Arg, IntArg, get_args, mpf2float, assert_mpmath_equal)
try:
import mpmath
except ImportError:
mpmath = MissingModule('mpmath')
class ProbArg(object):
    """Generate a set of probabilities on [0, 1].

    Exposes the same ``a``/``b``/``values`` interface as ``Arg`` so it can
    be used interchangeably in argument specs.
    """

    def __init__(self):
        # Include the endpoints for compatibility with Arg et. al.
        self.a = 0
        self.b = 1

    def values(self, n):
        """Return a sorted array of approximately n probabilities.

        Points are log-spaced toward 0, linear in the middle, and
        log-spaced toward 1 so both tails are well covered.
        """
        third = max(1, n//3)
        low = np.logspace(-30, np.log10(0.3), third)
        middle = np.linspace(0.3, 0.7, third + 1, endpoint=False)[1:]
        high = 1 - np.logspace(np.log10(0.3), -15, third)
        return np.unique(np.r_[low, middle, high])
class EndpointFilter(object):
    """Callable that masks out points close to either endpoint of [a, b].

    Returns a boolean array that is False wherever x lies within
    ``rtol*|endpoint| + atol`` of a or b, True elsewhere.
    """

    def __init__(self, a, b, rtol, atol):
        self.a = a
        self.b = b
        self.rtol = rtol
        self.atol = atol

    def __call__(self, x):
        near_a = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol
        near_b = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol
        return np.where(near_a | near_b, False, True)
class _CDFData(object):
    """Round-trip consistency check between a scipy cdflib function and an
    mpmath reference.

    The idea: apply ``spfunc`` to the generated arguments, substitute its
    result back into argument slot ``index``, apply ``mpfunc`` to the
    modified arguments, and check the result agrees with the original value
    in that slot (or the reverse order when ``spfunc_first`` is False).
    This verifies that the scipy function inverts the reference CDF.
    """

    def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True,
                 dps=20, n=5000, rtol=None, atol=None,
                 endpt_rtol=None, endpt_atol=None):
        # spfunc: scipy function under test; mpfunc: mpmath reference.
        # index: which argument slot holds the value being round-tripped.
        # argspec: list of Arg-like generators (or a single generator).
        # dps: mpmath working precision; n: approximate number of points.
        self.spfunc = spfunc
        self.mpfunc = mpfunc
        self.index = index
        self.argspec = argspec
        self.spfunc_first = spfunc_first
        self.dps = dps
        self.n = n
        self.rtol = rtol
        self.atol = atol

        # Endpoint tolerances only make sense when there is one spec per
        # argument; scalars are broadcast to a per-argument list.
        if not isinstance(argspec, list):
            self.endpt_rtol = None
            self.endpt_atol = None
        elif endpt_rtol is not None or endpt_atol is not None:
            if isinstance(endpt_rtol, list):
                self.endpt_rtol = endpt_rtol
            else:
                self.endpt_rtol = [endpt_rtol]*len(self.argspec)
            if isinstance(endpt_atol, list):
                self.endpt_atol = endpt_atol
            else:
                self.endpt_atol = [endpt_atol]*len(self.argspec)
        else:
            self.endpt_rtol = None
            self.endpt_atol = None

    def idmap(self, *args):
        """Composite map mpfunc(spfunc(args)) (or the reverse); ideally the
        identity on the argument in slot ``index``."""
        if self.spfunc_first:
            res = self.spfunc(*args)
            # nan from scipy short-circuits: nothing to round-trip.
            if np.isnan(res):
                return np.nan
            args = list(args)
            args[self.index] = res
            with mpmath.workdps(self.dps):
                res = self.mpfunc(*tuple(args))
                # Imaginary parts are spurious
                res = mpf2float(res.real)
        else:
            with mpmath.workdps(self.dps):
                res = self.mpfunc(*args)
                res = mpf2float(res.real)
            args = list(args)
            args[self.index] = res
            res = self.spfunc(*tuple(args))
        return res

    def get_param_filter(self):
        """Build per-argument EndpointFilter callables (None = no filter)."""
        if self.endpt_rtol is None and self.endpt_atol is None:
            return None

        filters = []
        for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec):
            if rtol is None and atol is None:
                filters.append(None)
                continue
            elif rtol is None:
                rtol = 0.0
            elif atol is None:
                atol = 0.0

            filters.append(EndpointFilter(spec.a, spec.b, rtol, atol))
        return filters

    def check(self):
        """Run the round-trip check via FuncData over generated arguments."""
        # Generate values for the arguments
        args = get_args(self.argspec, self.n)
        param_filter = self.get_param_filter()
        param_columns = tuple(range(args.shape[1]))
        result_columns = args.shape[1]
        # Expected result column: the original value in slot ``index``.
        args = np.hstack((args, args[:,self.index].reshape(args.shape[0], 1)))
        FuncData(self.idmap, args,
                 param_columns=param_columns, result_columns=result_columns,
                 rtol=self.rtol, atol=self.atol, vectorized=False,
                 param_filter=param_filter).check()
def _assert_inverts(*a, **kw):
    """Build a _CDFData from the given arguments and run its check."""
    _CDFData(*a, **kw).check()
def _binomial_cdf(k, n, p):
    """Binomial CDF via the regularized incomplete beta function (mpmath)."""
    k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p)
    # Degenerate tails.
    if k <= 0:
        return mpmath.mpf(0)
    if k >= n:
        return mpmath.mpf(1)

    # Exact 1 - p avoids cancellation for p near 1.
    onemp = mpmath.fsub(1, p, exact=True)
    return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True)
def _f_cdf(dfn, dfd, x):
    """F-distribution CDF via the regularized incomplete beta (mpmath)."""
    if x < 0:
        return mpmath.mpf(0)
    dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x)
    upper = dfn*x/(dfn*x + dfd)
    return mpmath.betainc(dfn/2, dfd/2, x2=upper, regularized=True)
def _student_t_cdf(df, t, dps=None):
    """Student t CDF from its hypergeometric 2F1 representation (mpmath)."""
    dps = mpmath.mp.dps if dps is None else dps
    with mpmath.workdps(dps):
        df, t = mpmath.mpf(df), mpmath.mpf(t)
        term = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df)
        term *= t*mpmath.gamma(0.5*(df + 1))
        term /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df)
        return 0.5 + term
def _noncentral_chi_pdf(t, df, nc):
    """Noncentral chi-square density at t (mpmath), Bessel-I form."""
    bessel = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t))
    return bessel * mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2
def _noncentral_chi_cdf(x, df, nc, dps=None):
    """Noncentral chi-square CDF by numerically integrating the pdf."""
    dps = mpmath.mp.dps if dps is None else dps
    x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc)
    with mpmath.workdps(dps):
        return mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x])
def _tukey_lmbda_quantile(p, lmbda):
# For lmbda != 0
return (p**lmbda - (1 - p)**lmbda)/lmbda
@pytest.mark.slow
@check_version(mpmath, '0.19')
class TestCDFlib(object):
    """Check that cdflib inverse routines actually invert their CDFs.

    Each test round-trips a scipy inverse (e.g. ``bdtrik``) against an
    mpmath reference CDF via ``_assert_inverts``.  Tests marked
    ``xfail(run=False)`` are known to be broken or too slow to run.
    """

    @pytest.mark.xfail(run=False)
    def test_bdtrik(self):
        # Invert binomial CDF with respect to k.
        _assert_inverts(
            sp.bdtrik,
            _binomial_cdf,
            0, [ProbArg(), IntArg(1, 1000), ProbArg()],
            rtol=1e-4)

    def test_bdtrin(self):
        # Invert binomial CDF with respect to n.
        _assert_inverts(
            sp.bdtrin,
            _binomial_cdf,
            1, [IntArg(1, 1000), ProbArg(), ProbArg()],
            rtol=1e-4, endpt_atol=[None, None, 1e-6])

    def test_btdtria(self):
        # Invert beta CDF with respect to a.
        _assert_inverts(
            sp.btdtria,
            lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
            0, [ProbArg(), Arg(0, 1e2, inclusive_a=False),
                Arg(0, 1, inclusive_a=False, inclusive_b=False)],
            rtol=1e-6)

    def test_btdtrib(self):
        # Use small values of a or mpmath doesn't converge
        _assert_inverts(
            sp.btdtrib,
            lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
            1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
                Arg(0, 1, inclusive_a=False, inclusive_b=False)],
            rtol=1e-7, endpt_atol=[None, 1e-18, 1e-15])

    @pytest.mark.xfail(run=False)
    def test_fdtridfd(self):
        # Invert F CDF with respect to dfd.
        _assert_inverts(
            sp.fdtridfd,
            _f_cdf,
            1, [IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)],
            rtol=1e-7)

    def test_gdtria(self):
        # Invert gamma CDF with respect to the rate parameter a.
        _assert_inverts(
            sp.gdtria,
            lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
            0, [ProbArg(), Arg(0, 1e3, inclusive_a=False),
                Arg(0, 1e4, inclusive_a=False)], rtol=1e-7,
            endpt_atol=[None, 1e-7, 1e-10])

    def test_gdtrib(self):
        # Use small values of a and x or mpmath doesn't converge
        _assert_inverts(
            sp.gdtrib,
            lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
            1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
                Arg(0, 1e3, inclusive_a=False)], rtol=1e-5)

    def test_gdtrix(self):
        # Invert gamma CDF with respect to the quantile x.
        _assert_inverts(
            sp.gdtrix,
            lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
            2, [Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False),
                ProbArg()], rtol=1e-7,
            endpt_atol=[None, 1e-7, 1e-10])

    def test_stdtr(self):
        # Ideally the left endpoint for Arg() should be 0.
        assert_mpmath_equal(
            sp.stdtr,
            _student_t_cdf,
            [IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7)

    @pytest.mark.xfail(run=False)
    def test_stdtridf(self):
        # Invert Student t CDF with respect to df.
        _assert_inverts(
            sp.stdtridf,
            _student_t_cdf,
            0, [ProbArg(), Arg()], rtol=1e-7)

    def test_stdtrit(self):
        # Invert Student t CDF with respect to t.
        _assert_inverts(
            sp.stdtrit,
            _student_t_cdf,
            1, [IntArg(1, 100), ProbArg()], rtol=1e-7,
            endpt_atol=[None, 1e-10])

    def test_chdtriv(self):
        # Invert chi-square CDF with respect to the dof v.
        _assert_inverts(
            sp.chdtriv,
            lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True),
            0, [ProbArg(), IntArg(1, 100)], rtol=1e-4)

    @pytest.mark.xfail(run=False)
    def test_chndtridf(self):
        # Use a larger atol since mpmath is doing numerical integration
        _assert_inverts(
            sp.chndtridf,
            _noncentral_chi_cdf,
            1, [Arg(0, 100, inclusive_a=False), ProbArg(),
                Arg(0, 100, inclusive_a=False)],
            n=1000, rtol=1e-4, atol=1e-15)

    @pytest.mark.xfail(run=False)
    def test_chndtrinc(self):
        # Use a larger atol since mpmath is doing numerical integration
        _assert_inverts(
            sp.chndtrinc,
            _noncentral_chi_cdf,
            2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()],
            n=1000, rtol=1e-4, atol=1e-15)

    def test_chndtrix(self):
        # Use a larger atol since mpmath is doing numerical integration
        _assert_inverts(
            sp.chndtrix,
            _noncentral_chi_cdf,
            0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)],
            n=1000, rtol=1e-4, atol=1e-15,
            endpt_atol=[1e-6, None, None])

    def test_tklmbda_zero_shape(self):
        # When lmbda = 0 the CDF has a simple closed form
        one = mpmath.mpf(1)
        assert_mpmath_equal(
            lambda x: sp.tklmbda(x, 0),
            lambda x: one/(mpmath.exp(-x) + one),
            [Arg()], rtol=1e-7)

    def test_tklmbda_neg_shape(self):
        # Invert the Tukey lambda quantile for lmbda < 0.
        _assert_inverts(
            sp.tklmbda,
            _tukey_lmbda_quantile,
            0, [ProbArg(), Arg(-25, 0, inclusive_b=False)],
            spfunc_first=False, rtol=1e-5,
            endpt_atol=[1e-9, 1e-5])

    @pytest.mark.xfail(run=False)
    def test_tklmbda_pos_shape(self):
        # Invert the Tukey lambda quantile for lmbda > 0.
        _assert_inverts(
            sp.tklmbda,
            _tukey_lmbda_quantile,
            0, [ProbArg(), Arg(0, 100, inclusive_a=False)],
            spfunc_first=False, rtol=1e-5)
def test_nonfinite():
    """Smoke-test cdflib routines on nonfinite arguments.

    Any nan argument must propagate to a nan result; for inf arguments the
    call merely has to return without raising or hanging.
    """
    funcs = [
        ("btdtria", 3),
        ("btdtrib", 3),
        ("bdtrik", 3),
        ("bdtrin", 3),
        ("chdtriv", 2),
        ("chndtr", 3),
        ("chndtrix", 3),
        ("chndtridf", 3),
        ("chndtrinc", 3),
        ("fdtridfd", 3),
        ("ncfdtr", 4),
        ("ncfdtri", 4),
        ("ncfdtridfn", 4),
        ("ncfdtridfd", 4),
        ("ncfdtrinc", 4),
        ("gdtrix", 3),
        ("gdtrib", 3),
        ("gdtria", 3),
        ("nbdtrik", 3),
        ("nbdtrin", 3),
        ("nrdtrimn", 3),
        ("nrdtrisd", 3),
        ("pdtrik", 2),
        ("stdtr", 2),
        ("stdtrit", 2),
        ("stdtridf", 2),
        ("nctdtr", 3),
        ("nctdtrit", 3),
        ("nctdtridf", 3),
        ("nctdtrinc", 3),
        ("tklmbda", 2),
    ]

    np.random.seed(1)
    for name, numargs in funcs:
        func = getattr(sp, name)
        # For each argument slot: one finite random value plus nan/+-inf.
        args_choices = [(float(x), np.nan, np.inf, -np.inf)
                        for x in np.random.rand(numargs)]
        for args in itertools.product(*args_choices):
            res = func(*args)
            if any(np.isnan(x) for x in args):
                # Nan inputs should result in nan output
                assert_equal(res, np.nan)
            # Non-nan nonfinite inputs only need to return without
            # raising exceptions or causing hangs.
@@ -1,332 +0,0 @@
from __future__ import division, print_function, absolute_import
from itertools import product
from numpy.testing import assert_allclose
import pytest
from scipy import special
from scipy.special import cython_special
# Sample evaluation points for each scalar kind; complex points are the
# Cartesian product of the real points as (re, im) pairs.
int_points = [-10, -1, 1, 10]
real_points = [-10.0, -1.0, 1.0, 10.0]
complex_points = [complex(*tup) for tup in product(real_points, repeat=2)]


# Map ufunc typecodes to the Cython fused-type names used to index
# specialized versions of cython_special functions.
CYTHON_SIGNATURE_MAP = {
    'f': 'float',
    'd': 'double',
    'g': 'long double',
    'F': 'float complex',
    'D': 'double complex',
    'G': 'long double complex',
    'i':'int',
    'l': 'long'
}


# Which sample points to use for each typecode.
TEST_POINTS = {
    'f': real_points,
    'd': real_points,
    'g': real_points,
    'F': complex_points,
    'D': complex_points,
    'G': complex_points,
    'i': int_points,
    'l': int_points,
}
PARAMS = [
(special.agm, cython_special.agm, ('dd',), None),
(special.airy, cython_special._airy_pywrap, ('d', 'D'), None),
(special.airye, cython_special._airye_pywrap, ('d', 'D'), None),
(special.bdtr, cython_special.bdtr, ('lld', 'ddd'), None),
(special.bdtrc, cython_special.bdtrc, ('lld', 'ddd'), None),
(special.bdtri, cython_special.bdtri, ('lld', 'ddd'), None),
(special.bdtrik, cython_special.bdtrik, ('ddd',), None),
(special.bdtrin, cython_special.bdtrin, ('ddd',), None),
(special.bei, cython_special.bei, ('d',), None),
(special.beip, cython_special.beip, ('d',), None),
(special.ber, cython_special.ber, ('d',), None),
(special.berp, cython_special.berp, ('d',), None),
(special.besselpoly, cython_special.besselpoly, ('ddd',), None),
(special.beta, cython_special.beta, ('dd',), None),
(special.betainc, cython_special.betainc, ('ddd',), None),
(special.betaincinv, cython_special.betaincinv, ('ddd',), None),
(special.betaln, cython_special.betaln, ('dd',), None),
(special.binom, cython_special.binom, ('dd',), None),
(special.boxcox, cython_special.boxcox, ('dd',), None),
(special.boxcox1p, cython_special.boxcox1p, ('dd',), None),
(special.btdtr, cython_special.btdtr, ('ddd',), None),
(special.btdtri, cython_special.btdtri, ('ddd',), None),
(special.btdtria, cython_special.btdtria, ('ddd',), None),
(special.btdtrib, cython_special.btdtrib, ('ddd',), None),
(special.cbrt, cython_special.cbrt, ('d',), None),
(special.chdtr, cython_special.chdtr, ('dd',), None),
(special.chdtrc, cython_special.chdtrc, ('dd',), None),
(special.chdtri, cython_special.chdtri, ('dd',), None),
(special.chdtriv, cython_special.chdtriv, ('dd',), None),
(special.chndtr, cython_special.chndtr, ('ddd',), None),
(special.chndtridf, cython_special.chndtridf, ('ddd',), None),
(special.chndtrinc, cython_special.chndtrinc, ('ddd',), None),
(special.chndtrix, cython_special.chndtrix, ('ddd',), None),
(special.cosdg, cython_special.cosdg, ('d',), None),
(special.cosm1, cython_special.cosm1, ('d',), None),
(special.cotdg, cython_special.cotdg, ('d',), None),
(special.dawsn, cython_special.dawsn, ('d', 'D'), None),
(special.ellipe, cython_special.ellipe, ('d',), None),
(special.ellipeinc, cython_special.ellipeinc, ('dd',), None),
(special.ellipj, cython_special._ellipj_pywrap, ('dd',), None),
(special.ellipkinc, cython_special.ellipkinc, ('dd',), None),
(special.ellipkm1, cython_special.ellipkm1, ('d',), None),
(special.entr, cython_special.entr, ('d',), None),
(special.erf, cython_special.erf, ('d', 'D'), None),
(special.erfc, cython_special.erfc, ('d', 'D'), None),
(special.erfcx, cython_special.erfcx, ('d', 'D'), None),
(special.erfi, cython_special.erfi, ('d', 'D'), None),
(special.eval_chebyc, cython_special.eval_chebyc, ('dd', 'dD', 'ld'), None),
(special.eval_chebys, cython_special.eval_chebys, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_chebyt, cython_special.eval_chebyt, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_chebyu, cython_special.eval_chebyu, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_gegenbauer, cython_special.eval_gegenbauer, ('ddd', 'ddD', 'ldd'),
'd and l differ for negative int'),
(special.eval_genlaguerre, cython_special.eval_genlaguerre, ('ddd', 'ddD', 'ldd'),
'd and l differ for negative int'),
(special.eval_hermite, cython_special.eval_hermite, ('ld',), None),
(special.eval_hermitenorm, cython_special.eval_hermitenorm, ('ld',), None),
(special.eval_jacobi, cython_special.eval_jacobi, ('dddd', 'dddD', 'lddd'),
'd and l differ for negative int'),
(special.eval_laguerre, cython_special.eval_laguerre, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_legendre, cython_special.eval_legendre, ('dd', 'dD', 'ld'), None),
(special.eval_sh_chebyt, cython_special.eval_sh_chebyt, ('dd', 'dD', 'ld'), None),
(special.eval_sh_chebyu, cython_special.eval_sh_chebyu, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_sh_jacobi, cython_special.eval_sh_jacobi, ('dddd', 'dddD', 'lddd'),
'd and l differ for negative int'),
(special.eval_sh_legendre, cython_special.eval_sh_legendre, ('dd', 'dD', 'ld'), None),
(special.exp1, cython_special.exp1, ('d', 'D'), None),
(special.exp10, cython_special.exp10, ('d',), None),
(special.exp2, cython_special.exp2, ('d',), None),
(special.expi, cython_special.expi, ('d', 'D'), None),
(special.expit, cython_special.expit, ('f', 'd', 'g'), None),
(special.expm1, cython_special.expm1, ('d', 'D'), None),
(special.expn, cython_special.expn, ('ld', 'dd'), None),
(special.exprel, cython_special.exprel, ('d',), None),
(special.fdtr, cython_special.fdtr, ('ddd',), None),
(special.fdtrc, cython_special.fdtrc, ('ddd',), None),
(special.fdtri, cython_special.fdtri, ('ddd',), None),
(special.fdtridfd, cython_special.fdtridfd, ('ddd',), None),
(special.fresnel, cython_special._fresnel_pywrap, ('d', 'D'), None),
(special.gamma, cython_special.gamma, ('d', 'D'), None),
(special.gammainc, cython_special.gammainc, ('dd',), None),
(special.gammaincc, cython_special.gammaincc, ('dd',), None),
(special.gammainccinv, cython_special.gammainccinv, ('dd',), None),
(special.gammaincinv, cython_special.gammaincinv, ('dd',), None),
(special.gammaln, cython_special.gammaln, ('d',), None),
(special.gammasgn, cython_special.gammasgn, ('d',), None),
(special.gdtr, cython_special.gdtr, ('ddd',), None),
(special.gdtrc, cython_special.gdtrc, ('ddd',), None),
(special.gdtria, cython_special.gdtria, ('ddd',), None),
(special.gdtrib, cython_special.gdtrib, ('ddd',), None),
(special.gdtrix, cython_special.gdtrix, ('ddd',), None),
(special.hankel1, cython_special.hankel1, ('dD',), None),
(special.hankel1e, cython_special.hankel1e, ('dD',), None),
(special.hankel2, cython_special.hankel2, ('dD',), None),
(special.hankel2e, cython_special.hankel2e, ('dD',), None),
(special.huber, cython_special.huber, ('dd',), None),
(special.hyp0f1, cython_special.hyp0f1, ('dd', 'dD'), None),
(special.hyp1f1, cython_special.hyp1f1, ('ddd', 'ddD'), None),
(special.hyp2f1, cython_special.hyp2f1, ('dddd', 'dddD'), None),
(special.hyperu, cython_special.hyperu, ('ddd',), None),
(special.i0, cython_special.i0, ('d',), None),
(special.i0e, cython_special.i0e, ('d',), None),
(special.i1, cython_special.i1, ('d',), None),
(special.i1e, cython_special.i1e, ('d',), None),
(special.inv_boxcox, cython_special.inv_boxcox, ('dd',), None),
(special.inv_boxcox1p, cython_special.inv_boxcox1p, ('dd',), None),
(special.it2i0k0, cython_special._it2i0k0_pywrap, ('d',), None),
(special.it2j0y0, cython_special._it2j0y0_pywrap, ('d',), None),
(special.it2struve0, cython_special.it2struve0, ('d',), None),
(special.itairy, cython_special._itairy_pywrap, ('d',), None),
(special.iti0k0, cython_special._iti0k0_pywrap, ('d',), None),
(special.itj0y0, cython_special._itj0y0_pywrap, ('d',), None),
(special.itmodstruve0, cython_special.itmodstruve0, ('d',), None),
(special.itstruve0, cython_special.itstruve0, ('d',), None),
(special.iv, cython_special.iv, ('dd', 'dD'), None),
(special.ive, cython_special.ive, ('dd', 'dD'), None),
(special.j0, cython_special.j0, ('d',), None),
(special.j1, cython_special.j1, ('d',), None),
(special.jv, cython_special.jv, ('dd', 'dD'), None),
(special.jve, cython_special.jve, ('dd', 'dD'), None),
(special.k0, cython_special.k0, ('d',), None),
(special.k0e, cython_special.k0e, ('d',), None),
(special.k1, cython_special.k1, ('d',), None),
(special.k1e, cython_special.k1e, ('d',), None),
(special.kei, cython_special.kei, ('d',), None),
(special.keip, cython_special.keip, ('d',), None),
(special.kelvin, cython_special._kelvin_pywrap, ('d',), None),
(special.ker, cython_special.ker, ('d',), None),
(special.kerp, cython_special.kerp, ('d',), None),
(special.kl_div, cython_special.kl_div, ('dd',), None),
(special.kn, cython_special.kn, ('ld', 'dd'), None),
(special.kolmogi, cython_special.kolmogi, ('d',), None),
(special.kolmogorov, cython_special.kolmogorov, ('d',), None),
(special.kv, cython_special.kv, ('dd', 'dD'), None),
(special.kve, cython_special.kve, ('dd', 'dD'), None),
(special.log1p, cython_special.log1p, ('d', 'D'), None),
(special.log_ndtr, cython_special.log_ndtr, ('d', 'D'), None),
(special.loggamma, cython_special.loggamma, ('D',), None),
(special.logit, cython_special.logit, ('f', 'd', 'g'), None),
(special.lpmv, cython_special.lpmv, ('ddd',), None),
(special.mathieu_a, cython_special.mathieu_a, ('dd',), None),
(special.mathieu_b, cython_special.mathieu_b, ('dd',), None),
(special.mathieu_cem, cython_special._mathieu_cem_pywrap, ('ddd',), None),
(special.mathieu_modcem1, cython_special._mathieu_modcem1_pywrap, ('ddd',), None),
(special.mathieu_modcem2, cython_special._mathieu_modcem2_pywrap, ('ddd',), None),
(special.mathieu_modsem1, cython_special._mathieu_modsem1_pywrap, ('ddd',), None),
(special.mathieu_modsem2, cython_special._mathieu_modsem2_pywrap, ('ddd',), None),
(special.mathieu_sem, cython_special._mathieu_sem_pywrap, ('ddd',), None),
(special.modfresnelm, cython_special._modfresnelm_pywrap, ('d',), None),
(special.modfresnelp, cython_special._modfresnelp_pywrap, ('d',), None),
(special.modstruve, cython_special.modstruve, ('dd',), None),
(special.nbdtr, cython_special.nbdtr, ('lld', 'ddd'), None),
(special.nbdtrc, cython_special.nbdtrc, ('lld', 'ddd'), None),
(special.nbdtri, cython_special.nbdtri, ('lld', 'ddd'), None),
(special.nbdtrik, cython_special.nbdtrik, ('ddd',), None),
(special.nbdtrin, cython_special.nbdtrin, ('ddd',), None),
(special.ncfdtr, cython_special.ncfdtr, ('dddd',), None),
(special.ncfdtri, cython_special.ncfdtri, ('dddd',), None),
(special.ncfdtridfd, cython_special.ncfdtridfd, ('dddd',), None),
(special.ncfdtridfn, cython_special.ncfdtridfn, ('dddd',), None),
(special.ncfdtrinc, cython_special.ncfdtrinc, ('dddd',), None),
(special.nctdtr, cython_special.nctdtr, ('ddd',), None),
(special.nctdtridf, cython_special.nctdtridf, ('ddd',), None),
(special.nctdtrinc, cython_special.nctdtrinc, ('ddd',), None),
(special.nctdtrit, cython_special.nctdtrit, ('ddd',), None),
(special.ndtr, cython_special.ndtr, ('d', 'D'), None),
(special.ndtri, cython_special.ndtri, ('d',), None),
(special.nrdtrimn, cython_special.nrdtrimn, ('ddd',), None),
(special.nrdtrisd, cython_special.nrdtrisd, ('ddd',), None),
(special.obl_ang1, cython_special._obl_ang1_pywrap, ('dddd',), None),
(special.obl_ang1_cv, cython_special._obl_ang1_cv_pywrap, ('ddddd',), None),
(special.obl_cv, cython_special.obl_cv, ('ddd',), None),
(special.obl_rad1, cython_special._obl_rad1_pywrap, ('dddd',), "see gh-6211"),
(special.obl_rad1_cv, cython_special._obl_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.obl_rad2, cython_special._obl_rad2_pywrap, ('dddd',), "see gh-6211"),
(special.obl_rad2_cv, cython_special._obl_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pbdv, cython_special._pbdv_pywrap, ('dd',), None),
(special.pbvv, cython_special._pbvv_pywrap, ('dd',), None),
(special.pbwa, cython_special._pbwa_pywrap, ('dd',), None),
(special.pdtr, cython_special.pdtr, ('ld', 'dd'), None),
(special.pdtrc, cython_special.pdtrc, ('ld', 'dd'), None),
(special.pdtri, cython_special.pdtri, ('ld', 'dd'), None),
(special.pdtrik, cython_special.pdtrik, ('dd',), None),
(special.poch, cython_special.poch, ('dd',), None),
(special.pro_ang1, cython_special._pro_ang1_pywrap, ('dddd',), None),
(special.pro_ang1_cv, cython_special._pro_ang1_cv_pywrap, ('ddddd',), None),
(special.pro_cv, cython_special.pro_cv, ('ddd',), None),
(special.pro_rad1, cython_special._pro_rad1_pywrap, ('dddd',), "see gh-6211"),
(special.pro_rad1_cv, cython_special._pro_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pro_rad2, cython_special._pro_rad2_pywrap, ('dddd',), "see gh-6211"),
(special.pro_rad2_cv, cython_special._pro_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pseudo_huber, cython_special.pseudo_huber, ('dd',), None),
(special.psi, cython_special.psi, ('d', 'D'), None),
(special.radian, cython_special.radian, ('ddd',), None),
(special.rel_entr, cython_special.rel_entr, ('dd',), None),
(special.rgamma, cython_special.rgamma, ('d', 'D'), None),
(special.round, cython_special.round, ('d',), None),
(special.shichi, cython_special._shichi_pywrap, ('d', 'D'), None),
(special.sici, cython_special._sici_pywrap, ('d', 'D'), None),
(special.sindg, cython_special.sindg, ('d',), None),
(special.smirnov, cython_special.smirnov, ('ld', 'dd'), None),
(special.smirnovi, cython_special.smirnovi, ('ld', 'dd'), None),
(special.spence, cython_special.spence, ('d', 'D'), None),
(special.sph_harm, cython_special.sph_harm, ('lldd', 'dddd'), None),
(special.stdtr, cython_special.stdtr, ('dd',), None),
(special.stdtridf, cython_special.stdtridf, ('dd',), None),
(special.stdtrit, cython_special.stdtrit, ('dd',), None),
(special.struve, cython_special.struve, ('dd',), None),
(special.tandg, cython_special.tandg, ('d',), None),
(special.tklmbda, cython_special.tklmbda, ('dd',), None),
(special.wofz, cython_special.wofz, ('D',), None),
(special.wrightomega, cython_special.wrightomega, ('D',), None),
(special.xlog1py, cython_special.xlog1py, ('dd', 'DD'), None),
(special.xlogy, cython_special.xlogy, ('dd', 'DD'), None),
(special.y0, cython_special.y0, ('d',), None),
(special.y1, cython_special.y1, ('d',), None),
(special.yn, cython_special.yn, ('ld', 'dd'), None),
(special.yv, cython_special.yv, ('dd', 'dD'), None),
(special.yve, cython_special.yve, ('dd', 'dD'), None),
(special.zetac, cython_special.zetac, ('d',), None),
(special.owens_t, cython_special.owens_t, ('dd',), None)
]
IDS = [x[0].__name__ for x in PARAMS]
def _generate_test_points(typecodes):
    """Cartesian product of the sample values registered for each type code."""
    value_sets = [TEST_POINTS[code] for code in typecodes]
    return list(product(*value_sets))
def test_cython_api_completeness():
    """Every public callable exported by cython_special must appear in PARAMS."""
    skip = {'hyp2f0', 'hyp1f2', 'hyp3f0'}
    for name in dir(cython_special):
        if name.startswith('_') or name in skip:
            continue
        func = getattr(cython_special, name)
        if not callable(func):
            continue
        # Identity check: the very same object must be listed in PARAMS.
        if all(cyfun is not func for _, cyfun, _, _ in PARAMS):
            raise RuntimeError("{} missing from tests!".format(name))
@pytest.mark.parametrize("param", PARAMS, ids=IDS)
def test_cython_api(param):
    """Compare each cython_special specialization against its ufunc twin.

    `specializations` lists the accepted typecode strings (e.g. 'dd', 'DD');
    a parameter position whose code varies across specializations is a
    Cython fused type and must be selected explicitly via indexing.
    """
    pyfunc, cyfunc, specializations, knownfailure = param
    if knownfailure:
        pytest.xfail(reason=knownfailure)
    # Check which parameters are expected to be fused types
    values = [set() for code in specializations[0]]
    for typecodes in specializations:
        for j, v in enumerate(typecodes):
            values[j].add(v)
    seen = set()
    is_fused_code = [False] * len(values)
    for j, v in enumerate(values):
        vv = tuple(sorted(v))
        if vv in seen:
            continue
        # A position is fused iff more than one typecode occurs there;
        # duplicate code-sets are only marked once (first occurrence wins).
        is_fused_code[j] = (len(v) > 1)
        seen.add(vv)
    # Check results
    for typecodes in specializations:
        # Pick the correct specialized function
        signature = []
        for j, code in enumerate(typecodes):
            if is_fused_code[j]:
                signature.append(CYTHON_SIGNATURE_MAP[code])
        if signature:
            # Fused functions are indexed by a tuple of Cython type names.
            cy_spec_func = cyfunc[tuple(signature)]
        else:
            signature = None
            cy_spec_func = cyfunc
        # Test it
        pts = _generate_test_points(typecodes)
        for pt in pts:
            pyval = pyfunc(*pt)
            cyval = cy_spec_func(*pt)
            assert_allclose(cyval, pyval, err_msg="{} {} {}".format(pt, typecodes, signature))
@@ -1,500 +0,0 @@
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy import arccosh, arcsinh, arctanh
from scipy._lib._numpy_compat import suppress_warnings
import pytest
from scipy.special import (
lpn, lpmn, lpmv, lqn, lqmn, sph_harm, eval_legendre, eval_hermite,
eval_laguerre, eval_genlaguerre, binom, cbrt, expm1, log1p, zeta,
jn, jv, yn, yv, iv, kv, kn,
gamma, gammaln, gammainc, gammaincc, gammaincinv, gammainccinv, digamma,
beta, betainc, betaincinv, poch,
ellipe, ellipeinc, ellipk, ellipkm1, ellipkinc, ellipj,
erf, erfc, erfinv, erfcinv, exp1, expi, expn,
bdtrik, btdtr, btdtri, btdtria, btdtrib, chndtr, gdtr, gdtrc, gdtrix, gdtrib,
nbdtrik, pdtrik, owens_t,
mathieu_a, mathieu_b, mathieu_cem, mathieu_sem, mathieu_modcem1,
mathieu_modsem1, mathieu_modcem2, mathieu_modsem2,
ellip_harm, ellip_harm_2, spherical_jn, spherical_yn,
)
from scipy.integrate import IntegrationWarning
from scipy.special._testutils import FuncData
# Reference datasets shipped alongside the test suite: values generated by
# Boost.Math, by GSL, and locally with high-precision arithmetic.
DATASETS_BOOST = np.load(os.path.join(os.path.dirname(__file__),
                                      "data", "boost.npz"))
DATASETS_GSL = np.load(os.path.join(os.path.dirname(__file__),
                                    "data", "gsl.npz"))
DATASETS_LOCAL = np.load(os.path.join(os.path.dirname(__file__),
                                      "data", "local.npz"))
def data(func, dataname, *a, **kw):
    """Build a FuncData case backed by the Boost dataset *dataname*."""
    if 'dataname' not in kw:
        kw['dataname'] = dataname
    return FuncData(func, DATASETS_BOOST[dataname], *a, **kw)
def data_gsl(func, dataname, *a, **kw):
    """Build a FuncData case backed by the GSL dataset *dataname*."""
    if 'dataname' not in kw:
        kw['dataname'] = dataname
    return FuncData(func, DATASETS_GSL[dataname], *a, **kw)
def data_local(func, dataname, *a, **kw):
    """Build a FuncData case backed by the locally-generated dataset *dataname*."""
    if 'dataname' not in kw:
        kw['dataname'] = dataname
    return FuncData(func, DATASETS_LOCAL[dataname], *a, **kw)
def ellipk_(k):
    """Complete elliptic integral K expressed in the modulus k (m = k**2)."""
    m = k*k
    return ellipk(m)
def ellipkinc_(f, k):
    """Incomplete elliptic integral F expressed in the modulus k (m = k**2)."""
    m = k*k
    return ellipkinc(f, m)
def ellipe_(k):
    """Complete elliptic integral E expressed in the modulus k (m = k**2)."""
    m = k*k
    return ellipe(m)
def ellipeinc_(f, k):
    """Incomplete elliptic integral E expressed in the modulus k (m = k**2)."""
    m = k*k
    return ellipeinc(f, m)
def ellipj_(k):
    # NOTE(review): scipy's ellipj takes two arguments (u, m); this wrapper
    # forwards only m = k*k, so calling it would raise TypeError.  It looks
    # vestigial/unused — confirm before relying on it.
    return ellipj(k*k)
def zeta_(x):
    """Riemann zeta via the Hurwitz zeta with offset q = 1."""
    return zeta(x, 1.)
def assoc_legendre_p_boost_(nu, mu, x):
    # the boost test data is for integer orders only
    return lpmv(mu, nu.astype(int), x)

def legendre_p_via_assoc_(nu, x):
    # Legendre P_nu(x) as the associated function with order mu = 0.
    return lpmv(0, nu, x)

def lpn_(n, x):
    # lpn returns (values, derivatives) for degrees 0..n; keep P_n itself.
    return lpn(n.astype('l'), x)[0][-1]

def lqn_(n, x):
    # Same layout as lpn_, but for the second-kind function Q_n.
    return lqn(n.astype('l'), x)[0][-1]

def legendre_p_via_lpmn(n, x):
    # Row m = 0, last column = degree n of the lpmn table.
    return lpmn(0, n, x)[0][0,-1]

def legendre_q_via_lqmn(n, x):
    return lqmn(0, n, x)[0][0,-1]
def mathieu_ce_rad(m, q, x):
    # scipy's Mathieu functions take the angle in degrees; data is in radians.
    return mathieu_cem(m, q, x*180/np.pi)[0]

def mathieu_se_rad(m, q, x):
    return mathieu_sem(m, q, x*180/np.pi)[0]

def mathieu_mc1_scaled(m, q, x):
    # GSL follows a different normalization.
    # We follow Abramowitz & Stegun; GSL apparently uses something else,
    # hence the sqrt(pi/2) rescaling before comparison.
    return mathieu_modcem1(m, q, x)[0] * np.sqrt(np.pi/2)

def mathieu_ms1_scaled(m, q, x):
    return mathieu_modsem1(m, q, x)[0] * np.sqrt(np.pi/2)

def mathieu_mc2_scaled(m, q, x):
    return mathieu_modcem2(m, q, x)[0] * np.sqrt(np.pi/2)

def mathieu_ms2_scaled(m, q, x):
    return mathieu_modsem2(m, q, x)[0] * np.sqrt(np.pi/2)
# Wrappers pinning the dtype of the degree argument so that both the
# integer ('l') and floating-point ('d') code paths of eval_* are exercised.
def eval_legendre_ld(n, x):
    return eval_legendre(n.astype('l'), x)

def eval_legendre_dd(n, x):
    return eval_legendre(n.astype('d'), x)

def eval_hermite_ld(n, x):
    return eval_hermite(n.astype('l'), x)

def eval_laguerre_ld(n, x):
    return eval_laguerre(n.astype('l'), x)

def eval_laguerre_dd(n, x):
    return eval_laguerre(n.astype('d'), x)

def eval_genlaguerre_ldd(n, a, x):
    return eval_genlaguerre(n.astype('l'), a, x)

def eval_genlaguerre_ddd(n, a, x):
    return eval_genlaguerre(n.astype('d'), a, x)
# Complement (1 - p) and fixed-first-parameter wrappers used to check the
# cdf inverses against the Boost quantile datasets.
def bdtrik_comp(y, n, p):
    return bdtrik(1-y, n, p)

def btdtri_comp(a, b, p):
    return btdtri(a, b, 1-p)

def btdtria_comp(p, b, x):
    return btdtria(1-p, b, x)

def btdtrib_comp(a, p, x):
    return btdtrib(a, 1-p, x)

def gdtr_(p, x):
    # Gamma distribution with unit scale parameter.
    return gdtr(1.0, p, x)

def gdtrc_(p, x):
    return gdtrc(1.0, p, x)

def gdtrix_(b, p):
    return gdtrix(1.0, b, p)

def gdtrix_comp(b, p):
    return gdtrix(1.0, b, 1-p)

def gdtrib_(p, x):
    return gdtrib(1.0, p, x)

def gdtrib_comp(p, x):
    return gdtrib(1.0, 1-p, x)

def nbdtrik_comp(y, n, p):
    return nbdtrik(1-y, n, p)

def pdtrik_comp(p, m):
    return pdtrik(1-p, m)
def poch_(z, m):
    """Reciprocal Pochhammer symbol, i.e. the tgamma_delta_ratio of Boost."""
    return 1.0 / poch(z, m)
def poch_minus(z, m):
    """Reciprocal Pochhammer symbol with negated increment."""
    return 1.0 / poch(z, -m)
def spherical_jn_(n, x):
    # Pin the order to an integer dtype as spherical_jn expects.
    return spherical_jn(n.astype('l'), x)

def spherical_yn_(n, x):
    return spherical_yn(n.astype('l'), x)

def sph_harm_(m, n, theta, phi):
    # Split the complex harmonic into (real, imag) columns for FuncData.
    y = sph_harm(m, n, theta, phi)
    return (y.real, y.imag)
def cexpm1(x, y):
    """expm1 of the complex number x + iy, returned as (real, imag)."""
    result = expm1(x + 1j*y)
    return result.real, result.imag
def clog1p(x, y):
    """log1p of the complex number x + iy, returned as (real, imag)."""
    result = log1p(x + 1j*y)
    return result.real, result.imag
# Accuracy cases against the Boost.Math datasets.  Positional arguments
# after the dataset name are the input column index/indices and the index
# of the expected-output column; complex inputs are requested with 0j.
BOOST_TESTS = [
    data(arccosh, 'acosh_data_ipp-acosh_data', 0, 1, rtol=5e-13),
    data(arccosh, 'acosh_data_ipp-acosh_data', 0j, 1, rtol=5e-13),
    data(arcsinh, 'asinh_data_ipp-asinh_data', 0, 1, rtol=1e-11),
    data(arcsinh, 'asinh_data_ipp-asinh_data', 0j, 1, rtol=1e-11),
    data(arctanh, 'atanh_data_ipp-atanh_data', 0, 1, rtol=1e-11),
    data(arctanh, 'atanh_data_ipp-atanh_data', 0j, 1, rtol=1e-11),
    data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p', (0,1,2), 3, rtol=1e-11),
    data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=1e-11),
    data(legendre_p_via_assoc_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=7e-14),
    data(legendre_p_via_lpmn, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False),
    data(legendre_p_via_lpmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=7e-14, vectorized=False),
    data(lpn_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False),
    data(lpn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=3e-13, vectorized=False),
    data(eval_legendre_ld, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=6e-14),
    data(eval_legendre_ld, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13),
    data(eval_legendre_dd, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=2e-14),
    data(eval_legendre_dd, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13),
    data(lqn_, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False),
    data(lqn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False),
    data(legendre_q_via_lqmn, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False),
    data(legendre_q_via_lqmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False),
    data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13),
    data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13),
    data(beta, 'beta_small_data_ipp-beta_small_data', (0,1), 2),
    data(beta, 'beta_med_data_ipp-beta_med_data', (0,1), 2, rtol=5e-13),
    data(betainc, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15),
    data(betainc, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=5e-13),
    data(betainc, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14),
    data(betainc, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10),
    data(betaincinv, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5),
    data(btdtr, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15),
    data(btdtr, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=4e-13),
    data(btdtr, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14),
    data(btdtr, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10),
    data(btdtri, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5),
    data(btdtri_comp, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 4, rtol=8e-7),
    data(btdtria, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 3, rtol=5e-9),
    data(btdtria_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 4, rtol=5e-9),
    data(btdtrib, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 5, rtol=5e-9),
    data(btdtrib_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 6, rtol=5e-9),
    data(binom, 'binomial_data_ipp-binomial_data', (0,1), 2, rtol=1e-13),
    data(binom, 'binomial_large_data_ipp-binomial_large_data', (0,1), 2, rtol=5e-13),
    data(bdtrik, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 3, rtol=5e-9),
    data(bdtrik_comp, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 4, rtol=5e-9),
    data(nbdtrik, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 3, rtol=4e-9),
    data(nbdtrik_comp, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 4, rtol=4e-9),
    data(pdtrik, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 2, rtol=3e-9),
    data(pdtrik_comp, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 3, rtol=4e-9),
    data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0),
    data(digamma, 'digamma_data_ipp-digamma_data', 0, 1),
    data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1),
    data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=2e-13),
    data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13),
    data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-15),
    data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-15),
    data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1, rtol=1e-15),
    data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1, rtol=1e-14),
    data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1),
    data(ellipkinc_, 'ellint_f_data_ipp-ellint_f_data', (0,1), 2, rtol=1e-14),
    data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1),
    data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14),
    data(erf, 'erf_data_ipp-erf_data', 0, 1),
    data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-13),
    data(erfc, 'erf_data_ipp-erf_data', 0, 2, rtol=6e-15),
    data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1),
    data(erf, 'erf_large_data_ipp-erf_large_data', 0j, 1),
    data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2, rtol=4e-14),
    data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1),
    data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1, rtol=1e-13),
    data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2),
    data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1),
    data(erfcinv, 'erfc_inv_data_ipp-erfc_inv_data', 0, 1),
    data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data2', 0, 1),
    data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2, rtol=1e-13),
    data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9),
    data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13),
    data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1, rtol=1e-13),
    data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2),
    data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14),
    data(gamma, 'test_gamma_data_ipp-near_0', 0, 1),
    data(gamma, 'test_gamma_data_ipp-near_1', 0, 1),
    data(gamma, 'test_gamma_data_ipp-near_2', 0, 1),
    data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1),
    data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1, rtol=7e-12),
    data(gamma, 'test_gamma_data_ipp-factorials', 0, 1, rtol=4e-14),
    data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9),
    data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9),
    data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9),
    data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9),
    data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9),
    data(gamma, 'test_gamma_data_ipp-factorials', 0j, 1, rtol=2e-13),
    data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11),
    data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11),
    data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10),
    data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11),
    data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11),
    data(gammaln, 'test_gamma_data_ipp-factorials', 0, 2),
    data(gammainc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=5e-15),
    data(gammainc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
    data(gammainc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
    data(gammainc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=1e-12),
    data(gdtr_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=1e-13),
    data(gdtr_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
    data(gdtr_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
    data(gdtr_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=2e-9),
    data(gammaincc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
    data(gammaincc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
    data(gammaincc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
    data(gammaincc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),
    data(gdtrc_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
    data(gdtrc_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
    data(gdtrc_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
    data(gdtrc_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),
    data(gdtrib_, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 2, rtol=5e-9),
    data(gdtrib_comp, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 3, rtol=5e-9),
    data(poch_, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 2, rtol=2e-13),
    data(poch_, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 2,),
    data(poch_, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 2,),
    data(poch_minus, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 3, rtol=2e-13),
    data(poch_minus, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 3),
    data(poch_minus, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 3),
    data(eval_hermite_ld, 'hermite_ipp-hermite', (0,1), 2, rtol=2e-14),
    data(eval_laguerre_ld, 'laguerre2_ipp-laguerre2', (0,1), 2, rtol=7e-12),
    data(eval_laguerre_dd, 'laguerre2_ipp-laguerre2', (0,1), 2, knownfailure='hyp2f1 insufficiently accurate.'),
    data(eval_genlaguerre_ldd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, rtol=2e-13),
    data(eval_genlaguerre_ddd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, knownfailure='hyp2f1 insufficiently accurate.'),
    data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1),
    data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2),
    data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1), 2, rtol=1e-12),
    data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1j), 2, rtol=2e-10, atol=1e-306),
    data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1), 2, rtol=1e-9),
    data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1j), 2, rtol=2e-10),
    data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
    data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
    data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11),
    data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11),
    data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
    data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
    data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12),
    data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12),
    data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
    data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
    data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12),
    data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12),
    data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12),
    data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12),
    data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
    data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
    data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12),
    data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-10),
    data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10),
    data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1, param_filter=(lambda s: s > 1)),
    data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1, param_filter=(lambda s: s > 1)),
    data(zeta_, 'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1, param_filter=(lambda s: s > 1)),
    data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1, param_filter=(lambda s: s > 1)),
    data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=1e-11),
    data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=1e-14),
    data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2, rtol=1e-11),
    data(gammainccinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 3, rtol=1e-12),
    data(gammainccinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=1e-14),
    data(gammainccinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3, rtol=1e-14),
    data(gdtrix_, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=3e-13, knownfailure='gdtrix unflow some points'),
    data(gdtrix_, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=3e-15),
    data(gdtrix_, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2),
    data(gdtrix_comp, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, knownfailure='gdtrix bad some points'),
    data(gdtrix_comp, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=6e-15),
    data(gdtrix_comp, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3),
    data(chndtr, 'nccs_ipp-nccs', (2,0,1), 3, rtol=3e-5),
    data(chndtr, 'nccs_big_ipp-nccs_big', (2,0,1), 3, rtol=5e-4, knownfailure='chndtr inaccurate some points'),
    data(sph_harm_, 'spherical_harmonic_ipp-spherical_harmonic', (1,0,3,2), (4,5), rtol=5e-11,
         param_filter=(lambda p: np.ones(p.shape, '?'),
                       lambda p: np.ones(p.shape, '?'),
                       lambda p: np.logical_and(p < 2*np.pi, p >= 0),
                       lambda p: np.logical_and(p < np.pi, p >= 0))),
    data(spherical_jn_, 'sph_bessel_data_ipp-sph_bessel_data', (0,1), 2, rtol=1e-13),
    data(spherical_yn_, 'sph_neumann_data_ipp-sph_neumann_data', (0,1), 2, rtol=8e-15),
    data(owens_t, 'owenst_data_ipp-owens_t', (0, 1), 2, rtol=5e-14),
    data(owens_t, 'owenst_data_ipp-owens_t_alarge', (0, 1), 2, rtol=5e-15),
    # -- not used yet (function does not exist in scipy):
    # 'ellint_pi2_data_ipp-ellint_pi2_data',
    # 'ellint_pi3_data_ipp-ellint_pi3_data',
    # 'ellint_pi3_large_data_ipp-ellint_pi3_large_data',
    # 'ellint_rc_data_ipp-ellint_rc_data',
    # 'ellint_rd_data_ipp-ellint_rd_data',
    # 'ellint_rf_data_ipp-ellint_rf_data',
    # 'ellint_rj_data_ipp-ellint_rj_data',
    # 'ncbeta_big_ipp-ncbeta_big',
    # 'ncbeta_ipp-ncbeta',
    # 'powm1_sqrtp1m1_test_cpp-powm1_data',
    # 'powm1_sqrtp1m1_test_cpp-sqrtp1m1_data',
    # 'test_gamma_data_ipp-gammap1m1_data',
    # 'tgamma_ratio_data_ipp-tgamma_ratio_data',
]
@pytest.mark.parametrize('test', BOOST_TESTS, ids=repr)
def test_boost(test):
    # One parametrized case per FuncData entry; see _test_factory for the
    # warning/error suppression applied around each check.
    _test_factory(test)
# Accuracy cases against values generated with GSL.
GSL_TESTS = [
    data_gsl(mathieu_a, 'mathieu_ab', (0, 1), 2, rtol=1e-13, atol=1e-13),
    data_gsl(mathieu_b, 'mathieu_ab', (0, 1), 3, rtol=1e-13, atol=1e-13),
    # Also the GSL output has limited accuracy...
    data_gsl(mathieu_ce_rad, 'mathieu_ce_se', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
    data_gsl(mathieu_se_rad, 'mathieu_ce_se', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),
    data_gsl(mathieu_mc1_scaled, 'mathieu_mc_ms', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
    data_gsl(mathieu_ms1_scaled, 'mathieu_mc_ms', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),
    data_gsl(mathieu_mc2_scaled, 'mathieu_mc_ms', (0, 1, 2), 5, rtol=1e-7, atol=1e-13),
    data_gsl(mathieu_ms2_scaled, 'mathieu_mc_ms', (0, 1, 2), 6, rtol=1e-7, atol=1e-13),
]
@pytest.mark.parametrize('test', GSL_TESTS, ids=repr)
def test_gsl(test):
    # One parametrized case per GSL-backed FuncData entry.
    _test_factory(test)
# Accuracy cases against locally-generated high-precision reference data.
LOCAL_TESTS = [
    data_local(ellipkinc, 'ellipkinc_neg_m', (0, 1), 2),
    data_local(ellipkm1, 'ellipkm1', 0, 1),
    data_local(ellipeinc, 'ellipeinc_neg_m', (0, 1), 2),
    data_local(clog1p, 'log1p_expm1_complex', (0,1), (2,3), rtol=1e-14),
    data_local(cexpm1, 'log1p_expm1_complex', (0,1), (4,5), rtol=1e-14),
    data_local(gammainc, 'gammainc', (0, 1), 2, rtol=1e-12),
    data_local(gammaincc, 'gammaincc', (0, 1), 2, rtol=1e-11),
    data_local(ellip_harm_2, 'ellip',(0, 1, 2, 3, 4), 6, rtol=1e-10, atol=1e-13),
    data_local(ellip_harm, 'ellip',(0, 1, 2, 3, 4), 5, rtol=1e-10, atol=1e-13),
]
@pytest.mark.parametrize('test', LOCAL_TESTS, ids=repr)
def test_local(test):
    # One parametrized case per locally-generated FuncData entry.
    _test_factory(test)
def _test_factory(test, dtype=np.double):
    """Run one FuncData check with numpy FP errors and quad warnings silenced."""
    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected")
        # errstate is the context-manager equivalent of the seterr/try/finally
        # dance: FP warnings from extreme dataset points are expected noise.
        with np.errstate(all='ignore'):
            test.check(dtype=dtype)
@@ -1,44 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import pi, log, sqrt
from numpy.testing import assert_, assert_equal
from scipy.special._testutils import FuncData
import scipy.special as sc
# Euler-Mascheroni constant (gamma), to double precision.
euler = 0.57721566490153286
def test_consistency():
    """Real- and complex-argument digamma must agree where both are defined."""
    # Everything beyond about -1e16 is effectively at a pole, so stop there.
    x = np.r_[-np.logspace(15, -30, 200), np.logspace(-30, 300, 200)]
    dataset = np.column_stack((x + 0j, sc.digamma(x)))
    FuncData(sc.digamma, dataset, 0, 1, rtol=5e-14, nan_ok=True).check()
def test_special_values():
    """Closed forms from Gauss's digamma theorem.

    See https://en.wikipedia.org/wiki/Digamma_function
    """
    special = {
        1: -euler,
        0.5: -2*log(2) - euler,
        1/3: -pi/(2*sqrt(3)) - 3*log(3)/2 - euler,
        1/4: -pi/2 - 3*log(2) - euler,
        1/6: -pi*sqrt(3)/2 - 2*log(2) - 3*log(3)/2 - euler,
        1/8: (-pi/2 - 4*log(2)
              - (pi + log(2 + sqrt(2)) - log(2 - sqrt(2)))/sqrt(2) - euler),
    }
    dataset = np.asarray(list(special.items()))
    FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
def test_nonfinite():
    """Edge behavior at zeros and infinities."""
    # Signed zeros map to the correspondingly-signed pole; +inf stays +inf.
    assert_equal(sc.digamma([0.0, -0.0, np.inf]), [-np.inf, np.inf, np.inf])
    # -inf and negative integers are poles/undefined -> nan.
    assert_(all(np.isnan(sc.digamma([-np.inf, -1]))))
@@ -1,273 +0,0 @@
#
# Tests for the Ellipsoidal Harmonic Function,
# Distributed under the same license as SciPy itself.
#
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_)
from scipy._lib._numpy_compat import suppress_warnings
from scipy.special._testutils import assert_func_equal
from scipy.special import ellip_harm, ellip_harm_2, ellip_normal
from scipy.integrate import IntegrationWarning
from numpy import sqrt, pi
def test_ellip_potential():
    """Check the ellipsoidal-harmonic expansion of a 1/r potential against
    the exact Cartesian distance between two exterior points."""
    def change_coefficient(lambda1, mu, nu, h2, k2):
        # Map ellipsoidal coordinates (lambda, mu, nu) back to Cartesian.
        x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2))
        y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2)))
        z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2)))
        return x, y, z
    def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2):
        # Product of first-kind harmonics E_n^p over the three coordinates.
        return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu)
                * ellip_harm(h2, k2, n, p, nu))
    def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2):
        # Same, with the second-kind harmonic F_n^p in the first coordinate.
        return (ellip_harm_2(h2, k2, n, p, lambda1)
                * ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu))
    def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
        # Partial sums of the harmonic expansion, truncated when the new
        # term falls well below the requested relative tolerance.
        tol = 1e-8
        sum1 = 0
        for n in range(20):
            xsum = 0
            for p in range(1, 2*n+2):
                xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2)
                               * solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) /
                         (ellip_normal(h2, k2, n, p)*(2*n + 1)))
            if abs(xsum) < 0.1*tol*abs(sum1):
                break
            sum1 += xsum
        return sum1, xsum
    def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
        # Exact 1/r between the two points, computed in Cartesian space.
        x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2)
        x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2)
        res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
        return 1/res
    pts = [
        (120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25),
        (120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20),
    ]
    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
        sup.filter(IntegrationWarning, "The maximum number of subdivisions")
        for p in pts:
            err_msg = repr(p)
            exact = potential(*p)
            result, last_term = summation(*p)
            assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg)
            # The truncation error should be bounded by the last added term.
            assert_(abs(result - exact) < 10*abs(last_term), err_msg)
def test_ellip_norm():
    """Compare ellip_normal against closed-form normalization constants
    gamma_n^p for all degrees n <= 3."""
    def G01(h2, k2):
        return 4*pi
    def G11(h2, k2):
        return 4*pi*h2*k2/3
    def G12(h2, k2):
        return 4*pi*h2*(k2 - h2)/3
    def G13(h2, k2):
        return 4*pi*k2*(k2 - h2)/3
    def G22(h2, k2):
        res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 +
               sqrt(h2**2 + k2**2 - h2*k2)*(-2*(h2**3 + k2**3) + 3*h2*k2*(h2 + k2)))
        return 16*pi/405*res
    def G21(h2, k2):
        res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2
               + sqrt(h2**2 + k2**2 - h2*k2)*(2*(h2**3 + k2**3) - 3*h2*k2*(h2 + k2)))
        return 16*pi/405*res
    def G23(h2, k2):
        return 4*pi*h2**2*k2*(k2 - h2)/15
    def G24(h2, k2):
        return 4*pi*h2*k2**2*(k2 - h2)/15
    def G25(h2, k2):
        return 4*pi*h2*k2*(k2 - h2)**2/15
    def G32(h2, k2):
        res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
               + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(-8*(h2**3 + k2**3) +
                                                    11*h2*k2*(h2 + k2)))
        return 16*pi/13125*k2*h2*res
    def G31(h2, k2):
        res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
               + sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(8*(h2**3 + k2**3) -
                                                    11*h2*k2*(h2 + k2)))
        return 16*pi/13125*h2*k2*res
    def G34(h2, k2):
        res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
               + sqrt(h2**2 + 4*k2**2 - h2*k2)*(-6*h2**3 - 8*k2**3 + 9*h2**2*k2 +
                                                13*h2*k2**2))
        return 16*pi/13125*h2*(k2 - h2)*res
    def G33(h2, k2):
        res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
               + sqrt(h2**2 + 4*k2**2 - h2*k2)*(6*h2**3 + 8*k2**3 - 9*h2**2*k2 -
                                                13*h2*k2**2))
        return 16*pi/13125*h2*(k2 - h2)*res
    def G36(h2, k2):
        res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
               + sqrt(4*h2**2 + k2**2 - h2*k2)*(-8*h2**3 - 6*k2**3 + 13*h2**2*k2 +
                                                9*h2*k2**2))
        return 16*pi/13125*k2*(k2 - h2)*res
    def G35(h2, k2):
        res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
               + sqrt(4*h2**2 + k2**2 - h2*k2)*(8*h2**3 + 6*k2**3 - 13*h2**2*k2 -
                                                9*h2*k2**2))
        return 16*pi/13125*k2*(k2 - h2)*res
    def G37(h2, k2):
        return 4*pi*h2**2*k2**2*(k2 - h2)**2/105
    known_funcs = {(0, 1): G01, (1, 1): G11, (1, 2): G12, (1, 3): G13,
                   (2, 1): G21, (2, 2): G22, (2, 3): G23, (2, 4): G24,
                   (2, 5): G25, (3, 1): G31, (3, 2): G32, (3, 3): G33,
                   (3, 4): G34, (3, 5): G35, (3, 6): G36, (3, 7): G37}
    def _ellip_norm(n, p, h2, k2):
        func = known_funcs[n, p]
        return func(h2, k2)
    _ellip_norm = np.vectorize(_ellip_norm)
    def ellip_normal_known(h2, k2, n, p):
        return _ellip_norm(n, p, h2, k2)
    # generate both large and small h2 < k2 pairs
    np.random.seed(1234)
    h2 = np.random.pareto(0.5, size=1)
    k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size))
    points = []
    for n in range(4):
        for p in range(1, 2*n+2):
            points.append((h2, k2, n*np.ones(h2.size), p*np.ones(h2.size)))
    points = np.array(points)
    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
        assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12)
def test_ellip_harm_2():
    """Spot-check ellip_harm_2 against an analytic identity and values from
    an independent implementation."""
    def I1(h2, k2, s):
        # Sum of F/E ratios for degree 1; analytically 1/(s*sqrt((s^2-h2)(s^2-k2))).
        res = (ellip_harm_2(h2, k2, 1, 1, s)/(3 * ellip_harm(h2, k2, 1, 1, s))
               + ellip_harm_2(h2, k2, 1, 2, s)/(3 * ellip_harm(h2, k2, 1, 2, s)) +
               ellip_harm_2(h2, k2, 1, 3, s)/(3 * ellip_harm(h2, k2, 1, 3, s)))
        return res
    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
        assert_almost_equal(I1(5, 8, 10), 1/(10*sqrt((100-5)*(100-8))))
        # Values produced by code from arXiv:1204.0267
        assert_almost_equal(ellip_harm_2(5, 8, 2, 1, 10), 0.00108056853382)
        assert_almost_equal(ellip_harm_2(5, 8, 2, 2, 10), 0.00105820513809)
        assert_almost_equal(ellip_harm_2(5, 8, 2, 3, 10), 0.00106058384743)
        assert_almost_equal(ellip_harm_2(5, 8, 2, 4, 10), 0.00106774492306)
        assert_almost_equal(ellip_harm_2(5, 8, 2, 5, 10), 0.00107976356454)
def test_ellip_harm():
    """Compare ellip_harm against closed-form Lame functions E_n^p for all
    degrees n <= 3 on random (h2, k2, s) points."""
    def E01(h2, k2, s):
        return 1
    def E11(h2, k2, s):
        return s
    def E12(h2, k2, s):
        return sqrt(abs(s*s - h2))
    def E13(h2, k2, s):
        return sqrt(abs(s*s - k2))
    def E21(h2, k2, s):
        return s*s - 1/3*((h2 + k2) + sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
    def E22(h2, k2, s):
        return s*s - 1/3*((h2 + k2) - sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
    def E23(h2, k2, s):
        return s * sqrt(abs(s*s - h2))
    def E24(h2, k2, s):
        return s * sqrt(abs(s*s - k2))
    def E25(h2, k2, s):
        return sqrt(abs((s*s - h2)*(s*s - k2)))
    def E31(h2, k2, s):
        return s*s*s - (s/5)*(2*(h2 + k2) + sqrt(4*(h2 + k2)*(h2 + k2) -
                                                 15*h2*k2))
    def E32(h2, k2, s):
        return s*s*s - (s/5)*(2*(h2 + k2) - sqrt(4*(h2 + k2)*(h2 + k2) -
                                                 15*h2*k2))
    def E33(h2, k2, s):
        return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) + sqrt(abs((h2 +
                                                                       2*k2)*(h2 + 2*k2) - 5*h2*k2))))
    def E34(h2, k2, s):
        return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) - sqrt(abs((h2 +
                                                                       2*k2)*(h2 + 2*k2) - 5*h2*k2))))
    def E35(h2, k2, s):
        return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) + sqrt(abs((2*h2
                                                                       + k2)*(2*h2 + k2) - 5*h2*k2))))
    def E36(h2, k2, s):
        return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) - sqrt(abs((2*h2
                                                                       + k2)*(2*h2 + k2) - 5*h2*k2))))
    def E37(h2, k2, s):
        return s * sqrt(abs((s*s - h2)*(s*s - k2)))
    # The trailing (signm, signn) arguments default to (1, 1).
    assert_equal(ellip_harm(5, 8, 1, 2, 2.5, 1, 1),
                 ellip_harm(5, 8, 1, 2, 2.5))
    known_funcs = {(0, 1): E01, (1, 1): E11, (1, 2): E12, (1, 3): E13,
                   (2, 1): E21, (2, 2): E22, (2, 3): E23, (2, 4): E24,
                   (2, 5): E25, (3, 1): E31, (3, 2): E32, (3, 3): E33,
                   (3, 4): E34, (3, 5): E35, (3, 6): E36, (3, 7): E37}
    point_ref = []
    def ellip_harm_known(h2, k2, n, p, s):
        # NOTE: accumulates into the shared point_ref list; assumes it is
        # called exactly once over the full point set by assert_func_equal.
        for i in range(h2.size):
            func = known_funcs[(int(n[i]), int(p[i]))]
            point_ref.append(func(h2[i], k2[i], s[i]))
        return point_ref
    np.random.seed(1234)
    h2 = np.random.pareto(0.5, size=30)
    k2 = h2*(1 + np.random.pareto(0.5, size=h2.size))
    s = np.random.pareto(0.5, size=h2.size)
    points = []
    for i in range(h2.size):
        for n in range(4):
            for p in range(1, 2*n+2):
                points.append((h2[i], k2[i], n, p, s[i]))
    points = np.array(points)
    assert_func_equal(ellip_harm, ellip_harm_known, points, rtol=1e-12)
@@ -1,46 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose
import scipy.special as sc
from scipy.special._testutils import FuncData
def test_line():
    """Check gammainc on the diagonal a = x against an asymptotic expansion.

    On that line a simpler expansion (analog of DLMF 8.12.15) is available
    and provides an independent high-accuracy reference.
    """
    def gammainc_line(x):
        coeffs = [-1/3, -1/540, 25/6048, 101/155520,
                  -3184811/3695155200, -2745493/8151736420]
        total = 0
        power = 1
        for c in coeffs:
            total -= c*power
            power /= x
        return total/np.sqrt(2*np.pi*x) + 0.5
    x = np.logspace(np.log10(25), 300, 500)
    dataset = np.column_stack((x, x, gammainc_line(x)))
    FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check()
def test_gammainc_roundtrip():
    """gammaincinv must invert gammainc over a wide logarithmic grid."""
    grid = np.logspace(-5, 10, 100)
    recovered = sc.gammaincinv(grid, sc.gammainc(grid, grid))
    assert_allclose(grid, recovered, rtol=1e-10)
def test_gammaincc_roundtrip():
    """gammainccinv must invert gammaincc over a wide logarithmic grid."""
    grid = np.logspace(-5, 10, 100)
    recovered = sc.gammainccinv(grid, sc.gammaincc(grid, grid))
    assert_allclose(grid, recovered, rtol=1e-14)
@@ -1,414 +0,0 @@
from __future__ import division, print_function, absolute_import
import itertools
import sys
import pytest
import numpy as np
from numpy.testing import assert_
from scipy.special._testutils import FuncData
from scipy.special import kolmogorov, kolmogi, smirnov, smirnovi
from scipy.special._ufuncs import (_kolmogc, _kolmogci, _kolmogp,
_smirnovc, _smirnovci, _smirnovp)
# Shared relative tolerance for all FuncData checks in this module.
_rtol = 1e-10


class TestSmirnov(object):
    """Tests of ``smirnov(n, x)``, the Smirnov one-sided statistic sf.

    Each dataset row is ``(n, x, expected)``.  After checking ``smirnov``,
    the expected column is flipped to ``1 - expected`` so the complementary
    (cdf) ufunc ``_smirnovc`` can be checked at the same points.
    """

    def test_nan(self):
        # nan input must propagate to a nan output.
        assert_(np.isnan(smirnov(1, np.nan)))

    def test_basic(self):
        # Small-n points with closed-form values; for x >= 1 - 1/n the
        # sf reduces to (1-x)**n.
        dataset = [(1, 0.1, 0.9),
                   (1, 0.875, 0.125),
                   (2, 0.875, 0.125 * 0.125),
                   (3, 0.875, 0.125 * 0.125 * 0.125)]
        dataset = np.asarray(dataset)
        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, -1] = 1 - dataset[:, -1]
        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_x_equals_0(self):
        # The sf is exactly 1 at x=0, for small and large n alike.
        dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
        dataset = np.asarray(dataset)
        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, -1] = 1 - dataset[:, -1]
        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_x_equals_1(self):
        # The sf is exactly 0 at x=1.
        dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
        dataset = np.asarray(dataset)
        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, -1] = 1 - dataset[:, -1]
        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_x_equals_0point5(self):
        # Precomputed reference values on the interior point x = 0.5.
        dataset = [(1, 0.5, 0.5),
                   (2, 0.5, 0.25),
                   (3, 0.5, 0.166666666667),
                   (4, 0.5, 0.09375),
                   (5, 0.5, 0.056),
                   (6, 0.5, 0.0327932098765),
                   (7, 0.5, 0.0191958707681),
                   (8, 0.5, 0.0112953186035),
                   (9, 0.5, 0.00661933257355),
                   (10, 0.5, 0.003888705)]
        dataset = np.asarray(dataset)
        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, -1] = 1 - dataset[:, -1]
        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_n_equals_1(self):
        # For n=1 the sf is exactly 1-x on the whole interval.
        x = np.linspace(0, 1, 101, endpoint=True)
        dataset = np.column_stack([[1]*len(x), x, 1-x])
        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, -1] = 1 - dataset[:, -1]
        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_n_equals_2(self):
        # For n=2 and x >= 0.5 the sf is (1-x)**2.
        x = np.linspace(0.5, 1, 101, endpoint=True)
        p = np.power(1-x, 2)
        n = np.array([2] * len(x))
        dataset = np.column_stack([n, x, p])
        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, -1] = 1 - dataset[:, -1]
        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_n_equals_3(self):
        # For n=3 and x >= 2/3 the sf is (1-x)**3.
        x = np.linspace(0.7, 1, 31, endpoint=True)
        p = np.power(1-x, 3)
        n = np.array([3] * len(x))
        dataset = np.column_stack([n, x, p])
        FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, -1] = 1 - dataset[:, -1]
        FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_n_large(self):
        # test for large values of n
        # Probabilities should go down as n goes up
        x = 0.4
        pvals = np.array([smirnov(n, x) for n in range(400, 1100, 20)])
        dfs = np.diff(pvals)
        assert_(np.all(dfs <= 0), msg='Not all diffs negative %s' % dfs)
class TestSmirnovi(object):
    """Tests of ``smirnovi``, the inverse of the Smirnov sf.

    Dataset rows are ``(n, p, expected_x)``.  After checking ``smirnovi``,
    the probability column is flipped to ``1 - p`` so the complementary
    inverse ``_smirnovci`` can be checked at the same points.
    """

    def test_nan(self):
        # nan input must propagate to a nan output.
        assert_(np.isnan(smirnovi(1, np.nan)))

    def test_basic(self):
        # Exact inverses of the closed-form sf values (cf. TestSmirnov).
        dataset = [(1, 0.4, 0.6),
                   (1, 0.6, 0.4),
                   (1, 0.99, 0.01),
                   (1, 0.01, 0.99),
                   (2, 0.125 * 0.125, 0.875),
                   (3, 0.125 * 0.125 * 0.125, 0.875),
                   (10, 1.0 / 16 ** 10, 1 - 1.0 / 16)]
        dataset = np.asarray(dataset)
        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, 1] = 1 - dataset[:, 1]
        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_x_equals_0(self):
        # smirnovi(n, 0) must be exactly 1 (sf hits 0 only at x=1).
        dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
        dataset = np.asarray(dataset)
        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, 1] = 1 - dataset[:, 1]
        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_x_equals_1(self):
        # smirnovi(n, 1) must be exactly 0.
        dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
        dataset = np.asarray(dataset)
        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, 1] = 1 - dataset[:, 1]
        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_n_equals_1(self):
        pp = np.linspace(0, 1, 101, endpoint=True)
        # For n=1 the sf is 1-x, so the inverse is also 1-p.
        # dataset = np.array([(1, p, 1-p) for p in pp])
        dataset = np.column_stack([[1]*len(pp), pp, 1-pp])
        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, 1] = 1 - dataset[:, 1]
        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_n_equals_2(self):
        # Invert the n=2 closed form p = (1-x)**2 for x >= 0.5.
        x = np.linspace(0.5, 1, 101, endpoint=True)
        p = np.power(1-x, 2)
        n = np.array([2] * len(x))
        dataset = np.column_stack([n, p, x])
        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, 1] = 1 - dataset[:, 1]
        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_n_equals_3(self):
        # Invert the n=3 closed form p = (1-x)**3 for x >= 2/3.
        x = np.linspace(0.7, 1, 31, endpoint=True)
        p = np.power(1-x, 3)
        n = np.array([3] * len(x))
        dataset = np.column_stack([n, p, x])
        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, 1] = 1 - dataset[:, 1]
        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_round_trip(self):
        # smirnov(smirnovi(p)) and the complementary pair must round-trip.
        def _sm_smi(n, p):
            return smirnov(n, smirnovi(n, p))

        def _smc_smci(n, p):
            return _smirnovc(n, _smirnovci(n, p))

        dataset = [(1, 0.4, 0.4),
                   (1, 0.6, 0.6),
                   (2, 0.875, 0.875),
                   (3, 0.875, 0.875),
                   (3, 0.125, 0.125),
                   (10, 0.999, 0.999),
                   (10, 0.0001, 0.0001)]
        dataset = np.asarray(dataset)
        FuncData(_sm_smi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        FuncData(_smc_smci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_x_equals_0point5(self):
        # Precomputed reference inverses at p = 0.5 (and one at p = 0.25).
        dataset = [(1, 0.5, 0.5),
                   (2, 0.5, 0.366025403784),
                   (2, 0.25, 0.5),
                   (3, 0.5, 0.297156508177),
                   (4, 0.5, 0.255520481121),
                   (5, 0.5, 0.234559536069),
                   (6, 0.5, 0.21715965898),
                   (7, 0.5, 0.202722580034),
                   (8, 0.5, 0.190621765256),
                   (9, 0.5, 0.180363501362),
                   (10, 0.5, 0.17157867006)]
        dataset = np.asarray(dataset)
        FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        dataset[:, 1] = 1 - dataset[:, 1]
        FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
class TestSmirnovp(object):
    """Tests of ``_smirnovp``, the derivative of the Smirnov sf w.r.t. x.

    Dataset rows are ``(n, x, expected_derivative)``.
    """

    def test_nan(self):
        # nan input must propagate to a nan output.
        assert_(np.isnan(_smirnovp(1, np.nan)))

    def test_basic(self):
        # Check derivative at endpoints
        n1_10 = np.arange(1, 10)
        dataset0 = np.column_stack([n1_10, np.full_like(n1_10, 0), np.full_like(n1_10, -1)])
        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        n2_10 = np.arange(2, 10)
        dataset1 = np.column_stack([n2_10, np.full_like(n2_10, 1.0), np.full_like(n2_10, 0)])
        FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_oneminusoneovern(self):
        # Check derivative at x=1-1/n
        n = np.arange(1, 20)
        x = 1.0/n
        xm1 = 1-1.0/n
        pp1 = -n * x**(n-1)
        pp1 -= (1-np.sign(n-2)**2) * 0.5  # n=2, x=0.5, 1-1/n = 0.5, need to adjust
        dataset1 = np.column_stack([n, xm1, pp1])
        FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_oneovertwon(self):
        # Check derivative at x=1/2n (Discontinuous at x=1/n, so check at x=1/2n)
        n = np.arange(1, 20)
        x = 1.0/2/n
        pp = -(n*x+1) * (1+x)**(n-2)
        dataset0 = np.column_stack([n, x, pp])
        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    def test_oneovern(self):
        # Check derivative at x=1/n (Discontinuous at x=1/n, hard to tell
        # if x==1/n, only use n=power of 2 so 1/n is exactly representable).
        n = 2**np.arange(1, 10)
        x = 1.0/n
        pp = -(n*x+1) * (1+x)**(n-2) + 0.5
        dataset0 = np.column_stack([n, x, pp])
        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

    @pytest.mark.xfail(sys.maxsize <= 2**32,
                       reason="requires 64-bit platform")
    def test_oneovernclose(self):
        # Check derivative at x=1/n (Discontinuous at x=1/n, test on either
        # side: x = 1/n +/- 2*eps).
        n = np.arange(3, 20)
        x = 1.0/n - 2*np.finfo(float).eps
        pp = -(n*x+1) * (1+x)**(n-2)
        dataset0 = np.column_stack([n, x, pp])
        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
        x = 1.0/n + 2*np.finfo(float).eps
        pp = -(n*x+1) * (1+x)**(n-2) + 1
        dataset1 = np.column_stack([n, x, pp])
        FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
class TestKolmogorov(object):
    """Tests of ``kolmogorov(x)``, the Kolmogorov sf, and the
    complementary (cdf) ufunc ``_kolmogc``."""

    def test_nan(self):
        # nan input must propagate to a nan output.
        assert_(np.isnan(kolmogorov(np.nan)))

    def test_basic(self):
        # (x, kolmogorov(x)) reference pairs, including the median point.
        dataset = [(0, 1.0),
                   (0.5, 0.96394524366487511),
                   (0.8275735551899077, 0.5000000000000000),
                   (1, 0.26999967167735456),
                   (2, 0.00067092525577969533)]
        dataset = np.asarray(dataset)
        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()

    def test_linspace(self):
        # sf and cdf reference values on an even grid over [0, 2].
        x = np.linspace(0, 2.0, 21)
        dataset = [1.0000000000000000, 1.0000000000000000, 0.9999999999994950,
                   0.9999906941986655, 0.9971923267772983, 0.9639452436648751,
                   0.8642827790506042, 0.7112351950296890, 0.5441424115741981,
                   0.3927307079406543, 0.2699996716773546, 0.1777181926064012,
                   0.1122496666707249, 0.0680922218447664, 0.0396818795381144,
                   0.0222179626165251, 0.0119520432391966, 0.0061774306344441,
                   0.0030676213475797, 0.0014636048371873, 0.0006709252557797]
        dataset_c = [0.0000000000000000, 6.609305242245699e-53, 5.050407338670114e-13,
                     9.305801334566668e-06, 0.0028076732227017, 0.0360547563351249,
                     0.1357172209493958, 0.2887648049703110, 0.4558575884258019,
                     0.6072692920593457, 0.7300003283226455, 0.8222818073935988,
                     0.8877503333292751, 0.9319077781552336, 0.9603181204618857,
                     0.9777820373834749, 0.9880479567608034, 0.9938225693655559,
                     0.9969323786524203, 0.9985363951628127, 0.9993290747442203]
        dataset = np.column_stack([x, dataset])
        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()
        dataset_c = np.column_stack([x, dataset_c])
        FuncData(_kolmogc, dataset_c, (0,), 1, rtol=_rtol).check()

    def test_linspacei(self):
        # Inverse sf and inverse cdf reference values on an even
        # probability grid; the infinite endpoints are excluded below.
        p = np.linspace(0, 1.0, 21, endpoint=True)
        dataset = [np.inf, 1.3580986393225507, 1.2238478702170823,
                   1.1379465424937751, 1.0727491749396481, 1.0191847202536859,
                   0.9730633753323726, 0.9320695842357622, 0.8947644549851197,
                   0.8601710725555463, 0.8275735551899077, 0.7964065373291559,
                   0.7661855555617682, 0.7364542888171910, 0.7067326523068980,
                   0.6764476915028201, 0.6448126061663567, 0.6105590999244391,
                   0.5711732651063401, 0.5196103791686224, 0.0000000000000000]
        dataset_c = [0.0000000000000000, 0.5196103791686225, 0.5711732651063401,
                     0.6105590999244391, 0.6448126061663567, 0.6764476915028201,
                     0.7067326523068980, 0.7364542888171910, 0.7661855555617682,
                     0.7964065373291559, 0.8275735551899077, 0.8601710725555463,
                     0.8947644549851196, 0.9320695842357622, 0.9730633753323727,
                     1.0191847202536859, 1.0727491749396481, 1.1379465424937754,
                     1.2238478702170825, 1.3580986393225509, np.inf]
        dataset = np.column_stack([p[1:], dataset[1:]])
        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
        dataset_c = np.column_stack([p[:-1], dataset_c[:-1]])
        FuncData(_kolmogci, dataset_c, (0,), 1, rtol=_rtol).check()

    def test_smallx(self):
        # Points where the sf is 1 - 10**-k; exercises the small-x series.
        epsilon = 0.1 ** np.arange(1, 14)
        x = np.array([0.571173265106, 0.441027698518, 0.374219690278, 0.331392659217,
                      0.300820537459, 0.277539353999, 0.259023494805, 0.243829561254,
                      0.231063086389, 0.220135543236, 0.210641372041, 0.202290283658,
                      0.19487060742])
        dataset = np.column_stack([x, 1-epsilon])
        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()

    def test_round_trip(self):
        # kolmogi(kolmogorov(x)) and the complementary pair must round-trip.
        def _ki_k(_x):
            return kolmogi(kolmogorov(_x))

        def _kci_kc(_x):
            return _kolmogci(_kolmogc(_x))

        x = np.linspace(0.0, 2.0, 21, endpoint=True)
        # Exclude 0.1 and 0.2: 0.2 almost succeeds, but 0.1 has no chance.
        x02 = x[(x == 0) | (x > 0.21)]
        dataset02 = np.column_stack([x02, x02])
        FuncData(_ki_k, dataset02, (0,), 1, rtol=_rtol).check()
        dataset = np.column_stack([x, x])
        FuncData(_kci_kc, dataset, (0,), 1, rtol=_rtol).check()
class TestKolmogi(object):
    """Tests of ``kolmogi``, the inverse of the Kolmogorov sf, and the
    complementary inverse ``_kolmogci``."""

    def test_nan(self):
        # nan input must propagate to a nan output.
        assert_(np.isnan(kolmogi(np.nan)))

    def test_basic(self):
        # (p, kolmogi(p)) reference pairs, including the median point.
        dataset = [(1.0, 0),
                   (0.96394524366487511, 0.5),
                   (0.9, 0.571173265106),
                   (0.5000000000000000, 0.8275735551899077),
                   (0.26999967167735456, 1),
                   (0.00067092525577969533, 2)]
        dataset = np.asarray(dataset)
        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()

    def test_smallpcdf(self):
        epsilon = 0.5 ** np.arange(1, 55, 3)
        # kolmogi(1-p) == _kolmogci(p) if 1-(1-p) == p, but not necessarily otherwise
        # Use epsilon s.t. 1-(1-epsilon) == epsilon, so can use same x-array for both results
        x = np.array([0.8275735551899077, 0.5345255069097583, 0.4320114038786941,
                      0.3736868442620478, 0.3345161714909591, 0.3057833329315859,
                      0.2835052890528936, 0.2655578150208676, 0.2506869966107999,
                      0.2380971058736669, 0.2272549289962079, 0.2177876361600040,
                      0.2094254686862041, 0.2019676748836232, 0.1952612948137504,
                      0.1891874239646641, 0.1836520225050326, 0.1785795904846466])
        dataset = np.column_stack([1-epsilon, x])
        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
        dataset = np.column_stack([epsilon, x])
        FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check()

    def test_smallpsf(self):
        epsilon = 0.5 ** np.arange(1, 55, 3)
        # kolmogi(p) == _kolmogci(1-p) if 1-(1-p) == p, but not necessarily otherwise
        # Use epsilon s.t. 1-(1-epsilon) == epsilon, so can use same x-array for both results
        x = np.array([0.8275735551899077, 1.3163786275161036, 1.6651092133663343,
                      1.9525136345289607, 2.2027324540033235, 2.4272929437460848,
                      2.6327688477341593, 2.8233300509220260, 3.0018183401530627,
                      3.1702735084088891, 3.3302184446307912, 3.4828258153113318,
                      3.6290214150152051, 3.7695513262825959, 3.9050272690877326,
                      4.0359582187082550, 4.1627730557884890, 4.2858371743264527])
        dataset = np.column_stack([epsilon, x])
        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
        dataset = np.column_stack([1-epsilon, x])
        FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check()

    def test_round_trip(self):
        # kolmogorov(kolmogi(p)) must recover p.
        def _k_ki(_p):
            return kolmogorov(kolmogi(_p))

        p = np.linspace(0.1, 1.0, 10, endpoint=True)
        dataset = np.column_stack([p, p])
        FuncData(_k_ki, dataset, (0,), 1, rtol=_rtol).check()
class TestKolmogp(object):
    """Tests of ``_kolmogp``, the derivative of the Kolmogorov sf."""

    def test_nan(self):
        # nan input must propagate to a nan output.
        assert_(np.isnan(_kolmogp(np.nan)))

    def test_basic(self):
        # (x, derivative) reference pairs on an even grid over [0, 2];
        # the derivative is 0 at the origin and always non-positive.
        dataset = [(0.000000, -0.0),
                   (0.200000, -1.532420541338916e-10),
                   (0.400000, -0.1012254419260496),
                   (0.600000, -1.324123244249925),
                   (0.800000, -1.627024345636592),
                   (1.000000, -1.071948558356941),
                   (1.200000, -0.538512430720529),
                   (1.400000, -0.2222133182429472),
                   (1.600000, -0.07649302775520538),
                   (1.800000, -0.02208687346347873),
                   (2.000000, -0.005367402045629683)]
        dataset = np.asarray(dataset)
        FuncData(_kolmogp, dataset, (0,), 1, rtol=_rtol).check()
@@ -1,103 +0,0 @@
#
# Tests for the lambertw function,
# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il
# Distributed under the same license as SciPy itself.
#
# [1] mpmath source code, Subversion revision 992
# http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_almost_equal
from scipy.special import lambertw
from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_
from scipy.special._testutils import FuncData
def test_values():
    """Check lambertw against reference values adapted from mpmath.

    Each row of ``data`` is ``(z, branch k, expected W_k(z))``.  Uses
    ``np.errstate`` instead of the deprecated ``seterr``/try/finally
    pattern, and ``np.complex128`` instead of ``np.complex_`` (the latter
    alias was removed in NumPy 2.0).
    """
    assert_(np.isnan(lambertw(np.nan)))
    # Branch k=1 behavior at the infinities.
    assert_equal(lambertw(np.inf, 1).real, np.inf)
    assert_equal(lambertw(np.inf, 1).imag, 2*np.pi)
    assert_equal(lambertw(-np.inf, 1).real, np.inf)
    assert_equal(lambertw(-np.inf, 1).imag, 3*np.pi)
    # The default branch is k=0.
    assert_equal(lambertw(1.), lambertw(1., 0))
    data = [
        (0, 0, 0),
        (0+0j, 0, 0),
        (np.inf, 0, np.inf),
        (0, -1, -np.inf),
        (0, 1, -np.inf),
        (0, 3, -np.inf),
        (np.e, 0, 1),
        (1, 0, 0.567143290409783873),
        (-np.pi/2, 0, 1j*np.pi/2),
        (-np.log(2)/2, 0, -np.log(2)),
        (0.25, 0, 0.203888354702240164),
        (-0.25, 0, -0.357402956181388903),
        (-1./10000, 0, -0.000100010001500266719),
        (-0.25, -1, -2.15329236411034965),
        (0.25, -1, -3.00899800997004620-4.07652978899159763j),
        (-0.25, -1, -2.15329236411034965),
        (0.25, 1, -3.00899800997004620+4.07652978899159763j),
        (-0.25, 1, -3.48973228422959210+7.41405453009603664j),
        (-4, 0, 0.67881197132094523+1.91195078174339937j),
        (-4, 1, -0.66743107129800988+7.76827456802783084j),
        (-4, -1, 0.67881197132094523-1.91195078174339937j),
        (1000, 0, 5.24960285240159623),
        (1000, 1, 4.91492239981054535+5.44652615979447070j),
        (1000, -1, 4.91492239981054535-5.44652615979447070j),
        (1000, 5, 3.5010625305312892+29.9614548941181328j),
        (3+4j, 0, 1.281561806123775878+0.533095222020971071j),
        (-0.4+0.4j, 0, -0.10396515323290657+0.61899273315171632j),
        (3+4j, 1, -0.11691092896595324+5.61888039871282334j),
        (3+4j, -1, 0.25856740686699742-3.85211668616143559j),
        (-0.5, -1, -0.794023632344689368-0.770111750510379110j),
        (-1./10000, 1, -11.82350837248724344+6.80546081842002101j),
        (-1./10000, -1, -11.6671145325663544),
        (-1./10000, -2, -11.82350837248724344-6.80546081842002101j),
        (-1./100000, 4, -14.9186890769540539+26.1856750178782046j),
        (-1./100000, 5, -15.0931437726379218666+32.5525721210262290086j),
        ((2+1j)/10, 0, 0.173704503762911669+0.071781336752835511j),
        ((2+1j)/10, 1, -3.21746028349820063+4.56175438896292539j),
        ((2+1j)/10, -1, -3.03781405002993088-3.53946629633505737j),
        ((2+1j)/10, 4, -4.6878509692773249+23.8313630697683291j),
        (-(2+1j)/10, 0, -0.226933772515757933-0.164986470020154580j),
        (-(2+1j)/10, 1, -2.43569517046110001+0.76974067544756289j),
        (-(2+1j)/10, -1, -3.54858738151989450-6.91627921869943589j),
        (-(2+1j)/10, 4, -4.5500846928118151+20.6672982215434637j),
        (np.pi, 0, 1.073658194796149172092178407024821347547745350410314531),
        # Former bug in generated branch.
        (-0.5+0.002j, 0, -0.78917138132659918344 + 0.76743539379990327749j),
        (-0.5-0.002j, 0, -0.78917138132659918344 - 0.76743539379990327749j),
        (-0.448+0.4j, 0, -0.11855133765652382241 + 0.66570534313583423116j),
        (-0.448-0.4j, 0, -0.11855133765652382241 - 0.66570534313583423116j),
    ]
    data = np.array(data, dtype=np.complex128)

    def w(x, y):
        # The branch index rides in the real part of the second column.
        return lambertw(x, y.real.astype(int))

    # Silence spurious floating-point warnings during the sweep.
    with np.errstate(all='ignore'):
        FuncData(w, data, (0, 1), 2, rtol=1e-10, atol=1e-13).check()
def test_ufunc():
    """lambertw must broadcast elementwise over array input."""
    inputs = np.array([0., np.e, 1.])
    expected = np.array([0., 1., 0.567143290409783873])
    assert_array_almost_equal(lambertw(inputs), expected)
def test_lambertw_ufunc_loop_selection():
    # see https://github.com/scipy/scipy/issues/4895
    # Every scalar/array argument combination must select the complex loop.
    expected_dtype = np.dtype(np.complex128)
    combos = [(0, 0, 0),
              ([0], 0, 0),
              (0, [0], 0),
              (0, 0, [0]),
              ([0], [0], [0])]
    for z, k, tol in combos:
        assert_equal(lambertw(z, k, tol).dtype, expected_dtype)
@@ -1,72 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_
from scipy.special._testutils import FuncData
from scipy.special import gamma, gammaln, loggamma
def test_identities1():
    """Check the identity exp(loggamma(z)) == gamma(z) on a complex grid."""
    vals = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
    re, im = np.meshgrid(vals, vals.copy())
    z = (re + 1J*im).flatten()
    dataset = np.vstack((z, gamma(z))).T

    def exp_loggamma(z):
        return np.exp(loggamma(z))

    FuncData(exp_loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_identities2():
    """Check the identity loggamma(z + 1) == log(z) + loggamma(z)."""
    vals = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
    re, im = np.meshgrid(vals, vals.copy())
    z = (re + 1J*im).flatten()
    dataset = np.vstack((z, np.log(z) + loggamma(z))).T

    def shifted(z):
        return loggamma(z + 1)

    FuncData(shifted, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_complex_dispatch_realpart():
    # Test that the real parts of loggamma and gammaln agree on the
    # real axis.
    x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5
    dataset = np.vstack((x, gammaln(x))).T

    def realpart(z):
        return loggamma(np.array(z, dtype='complex128')).real

    FuncData(realpart, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_real_dispatch():
    """Real-input dispatch: loggamma on the positive axis matches gammaln."""
    grid = np.logspace(-10, 10) + 0.5
    dataset = np.vstack((grid, gammaln(grid))).T
    FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
    # Edge cases handled by the real branch:
    assert_(loggamma(0) == np.inf)   # pole at the origin
    assert_(np.isnan(loggamma(-1)))  # nonpositive integers give nan
def test_gh_6536():
    # gh-6536: loggamma at conjugate points straddling the negative real
    # axis must itself be conjugate (signed zero handled correctly).
    above = loggamma(complex(-3.4, +0.0))
    below = loggamma(complex(-3.4, -0.0))
    assert_allclose(above, below.conjugate(), rtol=1e-15, atol=0)
def test_branch_cut():
    """loggamma must respect the sign of zero along the negative real axis.

    The original assertion compared ``z`` with ``zbar.conjugate()``
    directly — vacuously true, since the arrays are conjugates by
    construction — and never called ``loggamma``.  Apply ``loggamma`` so
    the reflection symmetry ``loggamma(conj(z)) == conj(loggamma(z))``
    across the branch cut is actually exercised.
    """
    x = -np.logspace(300, -30, 100)
    # Points just above (+0.0) and just below (-0.0) the branch cut.
    z = np.asarray([complex(x0, 0.0) for x0 in x])
    zbar = np.asarray([complex(x0, -0.0) for x0 in x])
    assert_allclose(loggamma(z), loggamma(zbar).conjugate(), rtol=1e-15, atol=0)
@@ -1,81 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_allclose)
from scipy.special import logit, expit
class TestLogit(object):
    """Check logit against precomputed values for each float dtype.

    Uses the ``np.errstate`` context manager in place of the old
    ``seterr``/try/finally pattern.
    """

    def check_logit_out(self, dtype, expected):
        a = np.linspace(0, 1, 10)
        a = np.array(a, dtype=dtype)
        # logit(0) and logit(1) are -inf/inf; silence the divide warning
        # only around the call.
        with np.errstate(divide='ignore'):
            actual = logit(a)
        assert_almost_equal(actual, expected)
        # The output dtype must match the input dtype.
        assert_equal(actual.dtype, np.dtype(dtype))

    def test_float32(self):
        expected = np.array([-np.inf, -2.07944155,
                             -1.25276291, -0.69314718,
                             -0.22314353, 0.22314365,
                             0.6931473, 1.25276303,
                             2.07944155, np.inf], dtype=np.float32)
        self.check_logit_out('f4', expected)

    def test_float64(self):
        expected = np.array([-np.inf, -2.07944154,
                             -1.25276297, -0.69314718,
                             -0.22314355, 0.22314355,
                             0.69314718, 1.25276297,
                             2.07944154, np.inf])
        self.check_logit_out('f8', expected)

    def test_nan(self):
        # Inputs outside [0, 1] must give nan (invalid warning suppressed).
        expected = np.array([np.nan]*4)
        with np.errstate(invalid='ignore'):
            actual = logit(np.array([-3., -2., 2., 3.]))
        assert_equal(expected, actual)
class TestExpit(object):
    """Check expit against precomputed values for each float dtype."""

    def check_expit_out(self, dtype, expected):
        x = np.array(np.linspace(-4, 4, 10), dtype=dtype)
        result = expit(x)
        assert_almost_equal(result, expected)
        # The output dtype must match the input dtype.
        assert_equal(result.dtype, np.dtype(dtype))

    def test_float32(self):
        expected = np.array([0.01798621, 0.04265125,
                             0.09777259, 0.20860852,
                             0.39068246, 0.60931754,
                             0.79139149, 0.9022274,
                             0.95734876, 0.98201376], dtype=np.float32)
        self.check_expit_out('f4', expected)

    def test_float64(self):
        expected = np.array([0.01798621, 0.04265125,
                             0.0977726, 0.20860853,
                             0.39068246, 0.60931754,
                             0.79139147, 0.9022274,
                             0.95734875, 0.98201379])
        self.check_expit_out('f8', expected)

    def test_large(self):
        # expit saturates to 1 (resp. 0) for large positive (negative)
        # arguments in every supported float type, without overflow.
        for dtype in (np.float32, np.float64, np.longdouble):
            for magnitude in (88, 89, 709, 710, 11356, 11357):
                big = np.array(magnitude, dtype=dtype)
                assert_allclose(expit(big), 1.0, atol=1e-20)
                assert_allclose(expit(-big), 0.0, atol=1e-20)
                assert_equal(expit(big).dtype, dtype)
                assert_equal(expit(-big).dtype, dtype)
@@ -1,196 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_array_almost_equal, assert_)
from scipy.special import logsumexp, softmax
def test_logsumexp():
    """Test whether logsumexp() correctly handles large and special inputs."""
    # Moderate values: must agree with the naive log(sum(exp(a))).
    a = np.arange(200)
    desired = np.log(np.sum(np.exp(a)))
    assert_almost_equal(logsumexp(a), desired)

    # Now test with large numbers where naive exp would overflow.
    b = [1000, 1000]
    desired = 1000.0 + np.log(2.0)
    assert_almost_equal(logsumexp(b), desired)

    n = 1000
    b = np.full(n, 10000, dtype='float64')
    desired = 10000.0 + np.log(n)
    assert_almost_equal(logsumexp(b), desired)

    # Tiny values: exponentiating the result must recover the plain sum,
    # including along each axis of a stacked array.
    x = np.array([1e-40] * 1000000)
    logx = np.log(x)
    X = np.vstack([x, x])
    logX = np.vstack([logx, logx])
    assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum())
    assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
    assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))

    # Handling special values properly.
    assert_equal(logsumexp(np.inf), np.inf)
    assert_equal(logsumexp(-np.inf), -np.inf)
    assert_equal(logsumexp(np.nan), np.nan)
    assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf)

    # Handling an array with different magnitudes on the axes.
    assert_array_almost_equal(logsumexp([[1e10, 1e-10],
                                         [-1e10, -np.inf]], axis=-1),
                              [1e10, -1e10])

    # Test keeping dimensions.
    assert_array_almost_equal(logsumexp([[1e10, 1e-10],
                                         [-1e10, -np.inf]],
                                        axis=-1,
                                        keepdims=True),
                              [[1e10], [-1e10]])

    # Test multiple axes.
    assert_array_almost_equal(logsumexp([[1e10, 1e-10],
                                         [-1e10, -np.inf]],
                                        axis=(-1, -2)),
                              1e10)
def test_logsumexp_b():
    """logsumexp with weights b computes log(sum(b * exp(a)))."""
    a = np.arange(200)
    weights = np.arange(200, 0, -1)
    assert_almost_equal(logsumexp(a, b=weights),
                        np.log(np.sum(weights*np.exp(a))))

    # Large values: the shift trick must keep the weighted sum finite.
    assert_almost_equal(logsumexp([1000, 1000], b=[1.2, 1.2]),
                        1000 + np.log(2 * 1.2))

    # Tiny values with per-element weights, including axis reductions.
    x = np.array([1e-40] * 100000)
    weights = np.linspace(1, 1000, 100000)
    logx = np.log(x)
    X = np.vstack((x, x))
    logX = np.vstack((logx, logx))
    B = np.vstack((weights, weights))
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
                              (B * X).sum(axis=0))
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
                              (B * X).sum(axis=1))
def test_logsumexp_sign():
    # Mixed-sign weights: the result is log|sum| plus a separate sign.
    magnitude, sign = logsumexp([1, 1, 1], b=[1, -1, -1], return_sign=True)
    assert_almost_equal(magnitude, 1)
    assert_equal(sign, -1)
def test_logsumexp_sign_zero():
    # Exact cancellation: the magnitude is -inf (not nan) and the sign 0.
    magnitude, sign = logsumexp([1, 1], b=[1, -1], return_sign=True)
    assert_(not np.isfinite(magnitude))
    assert_(not np.isnan(magnitude))
    assert_(magnitude < 0)
    assert_equal(sign, 0)
def test_logsumexp_sign_shape():
    # The sign output must share the reduced shape of the magnitude.
    a = np.ones((1, 2, 3, 4))
    b = np.ones_like(a)
    for axis, reduced_shape in [(2, (1, 2, 4)), ((1, 3), (1, 3))]:
        magnitude, sign = logsumexp(a, axis=axis, b=b, return_sign=True)
        assert_equal(magnitude.shape, sign.shape)
        assert_equal(magnitude.shape, reduced_shape)
def test_logsumexp_shape():
    # Reducing over one axis or a tuple of axes gives the reduced shape.
    a = np.ones((1, 2, 3, 4))
    b = np.ones_like(a)
    assert_equal(logsumexp(a, axis=2, b=b).shape, (1, 2, 4))
    assert_equal(logsumexp(a, axis=(1, 3), b=b).shape, (1, 3))
def test_logsumexp_b_zero():
    # A zero weight must drop its (arbitrarily large) term entirely.
    assert_almost_equal(logsumexp([1, 10000], b=[1, 0]), 1)
def test_logsumexp_b_shape():
    # b only has to broadcast against a; the call must not raise.
    a = np.zeros((4, 1, 2, 1))
    weights = np.ones((3, 1, 5))
    logsumexp(a, b=weights)
def test_softmax_fixtures():
    """Spot-check softmax against hand-computed and mpmath reference values."""
    cases = [
        ([1000, 0, 0, 0], np.array([1, 0, 0, 0])),
        ([1, 1], np.array([.5, .5])),
        ([0, 1], np.array([1, np.e])/(1 + np.e)),
    ]
    for arg, want in cases:
        assert_allclose(softmax(arg), want, rtol=1e-13)
    # Expected value computed using mpmath (with mpmath.mp.dps = 200) and
    # then converted to float.
    x = np.arange(4)
    expected = np.array([0.03205860328008499,
                         0.08714431874203256,
                         0.23688281808991013,
                         0.6439142598879722])
    assert_allclose(softmax(x), expected, rtol=1e-13)
    # Translation property: shifting every entry by the same amount does
    # not change the result.
    assert_allclose(softmax(x + 100), expected, rtol=1e-13)
    # axis=None: softmax operates on the entire array but preserves shape.
    assert_allclose(softmax(x.reshape(2, 2)), expected.reshape(2, 2),
                    rtol=1e-13)
def test_softmax_multi_axes():
    """softmax along explicit axes, including a tuple axis on a 3-d array."""
    assert_allclose(softmax([[1000, 0], [1000, 0]], axis=0),
                    np.array([[.5, .5], [.5, .5]]), rtol=1e-13)
    assert_allclose(softmax([[1000, 0], [1000, 0]], axis=1),
                    np.array([[1, 0], [1, 0]]), rtol=1e-13)
    # Expected value computed using mpmath (with mpmath.mp.dps = 200) and
    # then converted to float.
    x = np.array([[-25, 0, 25, 50],
                  [1, 325, 749, 750]])
    expected = np.array([[2.678636961770877e-33,
                          1.9287498479371314e-22,
                          1.3887943864771144e-11,
                          0.999999999986112],
                         [0.0,
                          1.9444526359919372e-185,
                          0.2689414213699951,
                          0.7310585786300048]])
    assert_allclose(softmax(x, axis=1), expected, rtol=1e-13)
    # Transposing the input and the axis must transpose the result.
    assert_allclose(softmax(x.T, axis=0), expected.T, rtol=1e-13)
    # 3-d input, with a tuple for the axis.
    x3d = x.reshape(2, 2, 2)
    assert_allclose(softmax(x3d, axis=(1, 2)), expected.reshape(2, 2, 2),
                    rtol=1e-13)
File diff suppressed because it is too large Load Diff
@@ -1,64 +0,0 @@
"""Test how the ufuncs in special handle nan inputs.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_equal, assert_
import pytest
import scipy.special as sc
from scipy._lib._numpy_compat import suppress_warnings
# ufunc -> reason string for ufuncs whose nan-propagation is known broken.
KNOWNFAILURES = {}
# ufunc -> callable applied to the raw result before the nan check.
POSTPROCESSING = {}


def _get_ufuncs():
    """Collect every ufunc exported by scipy.special for parametrization.

    Returns ``(ufuncs, ufunc_names)`` in sorted-name order; entries listed
    in ``KNOWNFAILURES`` are wrapped in an xfail pytest param instead of
    the bare ufunc.
    """
    ufuncs = []
    ufunc_names = []
    for name in sorted(sc.__dict__):
        obj = sc.__dict__[name]
        # Only np.ufunc objects; skip plain functions, classes, constants.
        if not isinstance(obj, np.ufunc):
            continue
        msg = KNOWNFAILURES.get(obj)
        if msg is None:
            ufuncs.append(obj)
            ufunc_names.append(name)
        else:
            # Known failure: keep it in the list but mark it xfail
            # (run=False) so the parametrized test reports it cleanly.
            fail = pytest.mark.xfail(run=False, reason=msg)
            ufuncs.append(pytest.param(obj, marks=fail))
            ufunc_names.append(name)
    return ufuncs, ufunc_names
# Materialized once at import time; drives the parametrization below.
UFUNCS, UFUNC_NAMES = _get_ufuncs()


@pytest.mark.parametrize("func", UFUNCS, ids=UFUNC_NAMES)
def test_nan_inputs(func):
    """Every special-function ufunc should map all-nan inputs to nan."""
    args = (np.nan,)*func.nin
    with suppress_warnings() as sup:
        # Ignore warnings about unsafe casts from legacy wrappers
        sup.filter(RuntimeWarning,
                   "floating point number truncated to an integer")
        try:
            res = func(*args)
        except TypeError:
            # One of the arguments doesn't take real inputs
            return
    if func in POSTPROCESSING:
        res = POSTPROCESSING[func](*res)
    msg = "got {} instead of nan".format(res)
    assert_array_equal(np.isnan(res), True, err_msg=msg)
def test_legacy_cast():
    # Legacy integer-argument wrappers must propagate nan instead of
    # failing on the nan -> int truncation.
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning,
                   "floating point number truncated to an integer")
        result = sc.bdtrc(np.nan, 1, 0.5)
    assert_(np.isnan(result))
@@ -1,756 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, sqrt
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_almost_equal, assert_allclose)
from pytest import raises as assert_raises
from scipy._lib.six import xrange
from scipy import integrate
import scipy.special as sc
from scipy.special import gamma
import scipy.special.orthogonal as orth
class TestCheby(object):
    """Compare low-order Chebyshev-family polynomial coefficients with
    their closed forms.

    ``test_chebyc`` uses the ``np.errstate`` context manager instead of
    the deprecated ``seterr``/try/finally pattern; the other checks are
    table-driven instead of one hand-unrolled assertion per order.
    """

    def test_chebyc(self):
        C0 = orth.chebyc(0)
        C1 = orth.chebyc(1)
        # chebyc emits spurious floating-point warnings for n >= 2;
        # silence them only around the constructor calls.
        with np.errstate(all='ignore'):
            C2 = orth.chebyc(2)
            C3 = orth.chebyc(3)
            C4 = orth.chebyc(4)
            C5 = orth.chebyc(5)
        assert_array_almost_equal(C0.c, [2], 13)
        assert_array_almost_equal(C1.c, [1, 0], 13)
        assert_array_almost_equal(C2.c, [1, 0, -2], 13)
        assert_array_almost_equal(C3.c, [1, 0, -3, 0], 13)
        assert_array_almost_equal(C4.c, [1, 0, -4, 0, 2], 13)
        assert_array_almost_equal(C5.c, [1, 0, -5, 0, 5, 0], 13)

    def test_chebys(self):
        # Closed-form coefficients of S_0..S_5.
        expected = [[1],
                    [1, 0],
                    [1, 0, -1],
                    [1, 0, -2, 0],
                    [1, 0, -3, 0, 1],
                    [1, 0, -4, 0, 3, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebys(n).c, coeffs, 13)

    def test_chebyt(self):
        # Closed-form coefficients of T_0..T_5.
        expected = [[1],
                    [1, 0],
                    [2, 0, -1],
                    [4, 0, -3, 0],
                    [8, 0, -8, 0, 1],
                    [16, 0, -20, 0, 5, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebyt(n).c, coeffs, 13)

    def test_chebyu(self):
        # Closed-form coefficients of U_0..U_5.
        expected = [[1],
                    [2, 0],
                    [4, 0, -1],
                    [8, 0, -4, 0],
                    [16, 0, -12, 0, 1],
                    [32, 0, -32, 0, 6, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebyu(n).c, coeffs, 13)
class TestGegenbauer(object):

    def test_gegenbauer(self):
        """Compare low-order Gegenbauer coefficients with closed forms
        at a random parameter a in (-0.5, 4.5)."""
        a = 5*np.random.random() - 0.5
        if np.any(a == 0):
            a = -0.2
        Ca0, Ca1, Ca2, Ca3, Ca4, Ca5 = [orth.gegenbauer(n, a)
                                        for n in range(6)]
        assert_array_almost_equal(Ca0.c, np.array([1]), 13)
        assert_array_almost_equal(Ca1.c, np.array([2*a, 0]), 13)
        assert_array_almost_equal(Ca2.c, np.array([2*a*(a+1), 0, -a]), 13)
        assert_array_almost_equal(
            Ca3.c,
            np.array([4*orth.poch(a, 3), 0, -6*a*(a+1), 0])/3.0, 11)
        assert_array_almost_equal(
            Ca4.c,
            np.array([4*orth.poch(a, 4), 0, -12*orth.poch(a, 3),
                      0, 3*a*(a+1)])/6.0, 11)
        assert_array_almost_equal(
            Ca5.c,
            np.array([4*orth.poch(a, 5), 0, -20*orth.poch(a, 4),
                      0, 15*orth.poch(a, 3), 0])/15.0, 11)
class TestHermite(object):

    def test_hermite(self):
        """Physicists' Hermite polynomials H_0..H_5 have known coefficients."""
        expected = [[1],
                    [2, 0],
                    [4, 0, -2],
                    [8, 0, -12, 0],
                    [16, 0, -48, 0, 12],
                    [32, 0, -160, 0, 120, 0]]
        decimals = [13, 13, 13, 13, 12, 12]
        for n, (coeffs, decimal) in enumerate(zip(expected, decimals)):
            assert_array_almost_equal(orth.hermite(n).c, coeffs, decimal)

    def test_hermitenorm(self):
        # He_n(x) = 2**(-n/2) H_n(x/sqrt(2))
        psub = np.poly1d([1.0/np.sqrt(2), 0])
        scales = [1.0, np.sqrt(2), 2.0, 2*np.sqrt(2), 4.0, 4.0*np.sqrt(2)]
        for n, scale in enumerate(scales):
            reference = orth.hermite(n)(psub) / scale
            assert_array_almost_equal(orth.hermitenorm(n).c, reference.c, 13)
class _test_sh_legendre(object):

    def test_sh_legendre(self):
        """Shifted Legendre: P*_n(x) = P_n(2x - 1)."""
        shift = np.poly1d([2, -1])
        for degree, decimal in zip(range(6), [13, 13, 13, 13, 12, 12]):
            reference = orth.legendre(degree)(shift)
            assert_array_almost_equal(orth.sh_legendre(degree).c,
                                      reference.c, decimal)
class _test_sh_chebyt(object):

    def test_sh_chebyt(self):
        """Shifted Chebyshev T: T*_n(x) = T_n(2x - 1)."""
        shift = np.poly1d([2, -1])
        for degree, decimal in zip(range(6), [13, 13, 13, 13, 12, 12]):
            reference = orth.chebyt(degree)(shift)
            assert_array_almost_equal(orth.sh_chebyt(degree).c,
                                      reference.c, decimal)
class _test_sh_chebyu(object):

    def test_sh_chebyu(self):
        """Shifted Chebyshev U: U*_n(x) = U_n(2x - 1)."""
        shift = np.poly1d([2, -1])
        for degree, decimal in zip(range(6), [13, 13, 13, 13, 12, 11]):
            reference = orth.chebyu(degree)(shift)
            assert_array_almost_equal(orth.sh_chebyu(degree).c,
                                      reference.c, decimal)
class _test_sh_jacobi(object):

    def test_sh_jacobi(self):
        """G^(p,q)_n(x) = n! gamma(n+p)/gamma(2n+p) * P^(p-q,q-1)_n(2x-1)."""
        def conv(n, p):
            # Normalization factor between Jacobi and shifted Jacobi.
            return gamma(n+1)*gamma(n+p)/gamma(2*n+p)

        shift = np.poly1d([2, -1])
        # Random parameters obeying the constraint p > q - 1, q > 0.
        q = 4 * np.random.random()
        p = q - 1 + 2*np.random.random()
        for degree in range(6):
            reference = orth.jacobi(degree, p-q, q-1)(shift) * conv(degree, p)
            assert_array_almost_equal(orth.sh_jacobi(degree, p, q).c,
                                      reference.c, 13)
class TestCall(object):
    def test_call(self):
        # Smoke test: construct every orthogonal-polynomial family for
        # degrees 0..4 and check that evaluating the returned object at a
        # point agrees with evaluating np.poly1d on its .coef array.
        #
        # The constructor calls are generated as source strings and
        # eval()'d so that the exact failing expression can be reported
        # verbatim via err_msg.  eval is safe here: the strings come from
        # a fixed local template, not from external input.
        poly = []
        for n in xrange(5):
            poly.extend([x.strip() for x in
                    ("""
                    orth.jacobi(%(n)d,0.3,0.9)
                    orth.sh_jacobi(%(n)d,0.3,0.9)
                    orth.genlaguerre(%(n)d,0.3)
                    orth.laguerre(%(n)d)
                    orth.hermite(%(n)d)
                    orth.hermitenorm(%(n)d)
                    orth.gegenbauer(%(n)d,0.3)
                    orth.chebyt(%(n)d)
                    orth.chebyu(%(n)d)
                    orth.chebyc(%(n)d)
                    orth.chebys(%(n)d)
                    orth.sh_chebyt(%(n)d)
                    orth.sh_chebyu(%(n)d)
                    orth.legendre(%(n)d)
                    orth.sh_legendre(%(n)d)
                    """ % dict(n=n)).split()
                    ])
        # Ignore floating-point warnings raised while constructing some of
        # the polynomial families.
        olderr = np.seterr(all='ignore')
        try:
            for pstr in poly:
                p = eval(pstr)
                assert_almost_equal(p(0.315), np.poly1d(p.coef)(0.315),
                                    err_msg=pstr)
        finally:
            np.seterr(**olderr)
class TestGenlaguerre(object):

    def test_regression(self):
        """Degree-1 generalized Laguerre with alpha=1: L = 2 - x; monic = x - 2."""
        nonmonic = orth.genlaguerre(1, 1, monic=False)
        monic = orth.genlaguerre(1, 1, monic=True)
        assert_equal(nonmonic(0), 2.)
        assert_equal(monic(0), -2.)
        assert_equal(nonmonic, np.poly1d([-1, 2]))
        assert_equal(monic, np.poly1d([1, -2]))
def verify_gauss_quad(root_func, eval_func, weight_func, a, b, N,
                      rtol=1e-15, atol=1e-14):
    """Sanity-check an N-point Gaussian quadrature rule.

    Verifies (1) that the polynomials of degree < N are orthonormal under
    the discrete inner product defined by the nodes and weights, (2) that
    the weights sum to ``mu`` (the integral of the weight function), and
    (3) that the rule integrates a cubic in agreement with adaptive
    quadrature.  (Originally copied from numpy's TestGauss in
    test_hermite.py.)
    """
    nodes, weights, mu = root_func(N, True)

    # Vandermonde-like matrix of the first N polynomials at the nodes;
    # its weighted Gram matrix must be diagonal.
    degrees = np.arange(N)
    vander = eval_func(degrees[:, np.newaxis], nodes)
    gram = np.dot(vander*weights, vander.T)
    scale = 1 / np.sqrt(gram.diagonal())
    gram = scale[:, np.newaxis] * gram * scale
    assert_allclose(gram, np.eye(N), rtol, atol)

    # The weights alone must integrate the weight function exactly.
    assert_allclose(weights.sum(), mu, rtol, atol)

    # Cross-check against adaptive quadrature on a cubic test function.
    f = lambda x: x**3 - 3*x**2 + x - 2
    quad_val, quad_err = integrate.quad(lambda x: f(x)*weight_func(x), a, b)
    gauss_val = np.vdot(f(nodes), weights)
    # Tolerance for this comparison is driven by quad's own error
    # estimate, capped at 1e-6 (deliberately shadows the rtol parameter).
    rtol = 1e-6 if 1e-6 < quad_err else quad_err * 10
    assert_allclose(quad_val, gauss_val, rtol=rtol)
def test_roots_jacobi():
    """Gauss-Jacobi quadrature: quality, degenerate cases, mu, and errors."""
    make_roots = lambda a, b: lambda n, mu: sc.roots_jacobi(n, a, b, mu)
    make_eval = lambda a, b: lambda n, x: orth.eval_jacobi(n, a, b, x)
    make_weight = lambda a, b: lambda x: (1 - x)**a * (1 + x)**b

    # (alpha, beta, order, tolerance overrides) -- tolerances loosen with
    # order and with extreme parameter values.
    cases = [
        (-0.5, -0.75, 5, {}),
        (-0.5, -0.75, 25, dict(atol=1e-12)),
        (-0.5, -0.75, 100, dict(atol=1e-11)),
        (0.5, -0.5, 5, {}),
        (0.5, -0.5, 25, dict(atol=1.5e-13)),
        (0.5, -0.5, 100, dict(atol=1e-12)),
        (1, 0.5, 5, dict(atol=2e-13)),
        (1, 0.5, 25, dict(atol=2e-13)),
        (1, 0.5, 100, dict(atol=1e-12)),
        (0.9, 2, 5, {}),
        (0.9, 2, 25, dict(atol=1e-13)),
        (0.9, 2, 100, dict(atol=3e-13)),
        (18.24, 27.3, 5, {}),
        (18.24, 27.3, 25, {}),
        (18.24, 27.3, 100, dict(atol=1e-13)),
        (47.1, -0.2, 5, dict(atol=1e-13)),
        (47.1, -0.2, 25, dict(atol=2e-13)),
        (47.1, -0.2, 100, dict(atol=1e-11)),
        (2.25, 68.9, 5, {}),
        (2.25, 68.9, 25, dict(atol=1e-13)),
        (2.25, 68.9, 100, dict(atol=1e-13)),
    ]
    for alpha, beta, order, kw in cases:
        verify_gauss_quad(make_roots(alpha, beta), make_eval(alpha, beta),
                          make_weight(alpha, beta), -1., 1., order, **kw)

    # alpha == beta == 0 reduces to Gauss-Legendre.
    xj, wj = sc.roots_jacobi(6, 0.0, 0.0)
    xl, wl = sc.roots_legendre(6)
    assert_allclose(xj, xl, 1e-14, 1e-14)
    assert_allclose(wj, wl, 1e-14, 1e-14)

    # alpha == beta != 0 reduces to Gauss-Gegenbauer with
    # lambda = alpha + 0.5.
    xj, wj = sc.roots_jacobi(6, 4.0, 4.0)
    xc, wc = sc.roots_gegenbauer(6, 4.5)
    assert_allclose(xj, xc, 1e-14, 1e-14)
    assert_allclose(wj, wc, 1e-14, 1e-14)

    # mu=True returns the same nodes/weights plus the weight integral.
    x, w = sc.roots_jacobi(5, 2, 3, False)
    y, v, m = sc.roots_jacobi(5, 2, 3, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(make_weight(2, 3), -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid order or parameters must raise.
    assert_raises(ValueError, sc.roots_jacobi, 0, 1, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3.3, 1, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3, -2, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3, 1, -2)
    assert_raises(ValueError, sc.roots_jacobi, 3, -2, -2)
def test_roots_sh_jacobi():
    """Gauss quadrature for shifted Jacobi polynomials on [0, 1]."""
    make_roots = lambda a, b: lambda n, mu: sc.roots_sh_jacobi(n, a, b, mu)
    make_eval = lambda a, b: lambda n, x: orth.eval_sh_jacobi(n, a, b, x)
    make_weight = lambda a, b: lambda x: (1. - x)**(a - b) * (x)**(b - 1.)

    # (p, q, order, tolerance overrides)
    cases = [
        (-0.5, 0.25, 5, {}),
        (-0.5, 0.25, 25, dict(atol=1e-12)),
        (-0.5, 0.25, 100, dict(atol=1e-11)),
        (0.5, 0.5, 5, {}),
        (0.5, 0.5, 25, dict(atol=1e-13)),
        (0.5, 0.5, 100, dict(atol=1e-12)),
        (1, 0.5, 5, {}),
        (1, 0.5, 25, dict(atol=1.5e-13)),
        (1, 0.5, 100, dict(atol=1e-12)),
        (2, 0.9, 5, {}),
        (2, 0.9, 25, dict(atol=1e-13)),
        (2, 0.9, 100, dict(atol=1e-12)),
        (27.3, 18.24, 5, {}),
        (27.3, 18.24, 25, {}),
        (27.3, 18.24, 100, dict(atol=1e-13)),
        (47.1, 0.2, 5, dict(atol=1e-12)),
        (47.1, 0.2, 25, dict(atol=1e-11)),
        (47.1, 0.2, 100, dict(atol=1e-10)),
        (68.9, 2.25, 5, dict(atol=3.5e-14)),
        (68.9, 2.25, 25, dict(atol=2e-13)),
        (68.9, 2.25, 100, dict(atol=1e-12)),
    ]
    for p, q, order, kw in cases:
        verify_gauss_quad(make_roots(p, q), make_eval(p, q),
                          make_weight(p, q), 0., 1., order, **kw)

    # mu=True returns the same nodes/weights plus the weight integral.
    x, w = sc.roots_sh_jacobi(5, 3, 2, False)
    y, v, m = sc.roots_sh_jacobi(5, 3, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(make_weight(3, 2), 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid order or parameters must raise.
    assert_raises(ValueError, sc.roots_sh_jacobi, 0, 1, 1)
    assert_raises(ValueError, sc.roots_sh_jacobi, 3.3, 1, 1)
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, 1, 2)  # p - q <= -1
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, 2, -1)  # q <= 0
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, -2, -1)  # both
def test_roots_hermite():
    """Gauss-Hermite (physicists') quadrature, exercising both branches."""
    rootf = sc.roots_hermite
    evalf = orth.eval_hermite
    weightf = orth.hermite(5).weight_func

    for order, kw in [(5, {}), (25, dict(atol=1e-13)), (100, dict(atol=1e-12))]:
        verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, order, **kw)

    # Golub-Welsch branch: mu=True agrees with mu=False and with quad.
    x, w = sc.roots_hermite(5, False)
    y, v, m = sc.roots_hermite(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    # Asymptotic branch (switch over at n >= 150).
    x, w = sc.roots_hermite(200, False)
    y, v, m = sc.roots_hermite(200, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    assert_allclose(sum(v), m, 1e-14, 1e-14)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_hermite, 0)
    assert_raises(ValueError, sc.roots_hermite, 3.3)
def test_roots_hermite_asy():
    # Recursion for Hermite functions
    def hermite_recursion(n, nodes):
        # Evaluate the first n orthonormal Hermite functions (Hermite
        # polynomials weighted by the Gaussian) at `nodes` via the
        # standard three-term recurrence.
        H = np.zeros((n, nodes.size))
        H[0,:] = np.pi**(-0.25) * np.exp(-0.5*nodes**2)
        if n > 1:
            H[1,:] = sqrt(2.0) * nodes * H[0,:]
            for k in xrange(2, n):
                H[k,:] = sqrt(2.0/k) * nodes * H[k-1,:] - sqrt((k-1.0)/k) * H[k-2,:]
        return H

    # This tests only the nodes
    def test(N, rtol=1e-15, atol=1e-14):
        # The (N+1)-th Hermite function must vanish at the N computed
        # nodes, and the weights must sum to sqrt(pi), the integral of
        # the Hermite weight function.
        x, w = orth._roots_hermite_asy(N)
        H = hermite_recursion(N+1, x)
        assert_allclose(H[-1,:], np.zeros(N), rtol, atol)
        assert_allclose(sum(w), sqrt(np.pi), rtol, atol)

    # Exercise both parities of N over a wide range of orders.
    test(150, atol=1e-12)
    test(151, atol=1e-12)
    test(300, atol=1e-12)
    test(301, atol=1e-12)
    test(500, atol=1e-12)
    test(501, atol=1e-12)
    test(999, atol=1e-12)
    test(1000, atol=1e-12)
    test(2000, atol=1e-12)
    test(5000, atol=1e-12)
def test_roots_hermitenorm():
    """Gauss-Hermite (probabilists') quadrature."""
    rootf = sc.roots_hermitenorm
    evalf = orth.eval_hermitenorm
    weightf = orth.hermitenorm(5).weight_func

    for order, kw in [(5, {}), (25, dict(atol=1e-13)), (100, dict(atol=1e-12))]:
        verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_hermitenorm(5, False)
    y, v, m = sc.roots_hermitenorm(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_hermitenorm, 0)
    assert_raises(ValueError, sc.roots_hermitenorm, 3.3)
def test_roots_gegenbauer():
    """Gauss-Gegenbauer quadrature over a range of lambda parameters."""
    make_roots = lambda a: lambda n, mu: sc.roots_gegenbauer(n, a, mu)
    make_eval = lambda a: lambda n, x: orth.eval_gegenbauer(n, a, x)
    make_weight = lambda a: lambda x: (1 - x**2)**(a - 0.5)

    # (lambda, order, tolerance overrides)
    cases = [
        (-0.25, 5, {}),
        (-0.25, 25, dict(atol=1e-12)),
        (-0.25, 100, dict(atol=1e-11)),
        (0.1, 5, {}),
        (0.1, 25, dict(atol=1e-13)),
        (0.1, 100, dict(atol=1e-12)),
        (1, 5, {}),
        (1, 25, dict(atol=1e-13)),
        (1, 100, dict(atol=1e-12)),
        (10, 5, {}),
        (10, 25, dict(atol=1e-13)),
        (10, 100, dict(atol=1e-12)),
        (50, 5, dict(atol=1e-13)),
        (50, 25, dict(atol=1e-12)),
        (50, 100, dict(atol=1e-11)),
    ]
    for lmbda, order, kw in cases:
        verify_gauss_quad(make_roots(lmbda), make_eval(lmbda),
                          make_weight(lmbda), -1., 1., order, **kw)

    # Special case kept from the old code: for alpha = 0 the Gegenbauer
    # polynomial is uniformly 0, but the quadrature rule goes over to a
    # scaled-down copy of the Chebyshev T_n rule.
    for order, kw in [(5, {}), (25, {}), (100, dict(atol=1e-12))]:
        verify_gauss_quad(make_roots(0), orth.eval_chebyt, make_weight(0),
                          -1., 1., order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_gegenbauer(5, 2, False)
    y, v, m = sc.roots_gegenbauer(5, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(make_weight(2), -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid order or parameter must raise.
    assert_raises(ValueError, sc.roots_gegenbauer, 0, 2)
    assert_raises(ValueError, sc.roots_gegenbauer, 3.3, 2)
    assert_raises(ValueError, sc.roots_gegenbauer, 3, -.75)
def test_roots_chebyt():
    """Gauss-Chebyshev quadrature of the first kind."""
    weightf = orth.chebyt(5).weight_func
    for order, kw in [(5, {}), (25, {}), (100, dict(atol=1e-12))]:
        verify_gauss_quad(sc.roots_chebyt, orth.eval_chebyt, weightf,
                          -1., 1., order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_chebyt(5, False)
    y, v, m = sc.roots_chebyt(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_chebyt, 0)
    assert_raises(ValueError, sc.roots_chebyt, 3.3)
def test_chebyt_symmetry():
    """Chebyshev nodes are symmetric; the middle node of an odd rule is 0."""
    nodes, _ = sc.roots_chebyt(21)
    left, right = nodes[:10], nodes[11:]
    assert_equal(right, -left[::-1])
    assert_equal(nodes[10], 0)
def test_roots_chebyu():
    """Gauss-Chebyshev quadrature of the second kind."""
    weightf = orth.chebyu(5).weight_func
    for order in [5, 25, 100]:
        verify_gauss_quad(sc.roots_chebyu, orth.eval_chebyu, weightf,
                          -1., 1., order)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_chebyu(5, False)
    y, v, m = sc.roots_chebyu(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_chebyu, 0)
    assert_raises(ValueError, sc.roots_chebyu, 3.3)
def test_roots_chebyc():
    """Gauss quadrature for Chebyshev C polynomials on [-2, 2]."""
    weightf = orth.chebyc(5).weight_func
    for order, kw in [(5, {}), (25, {}), (100, dict(atol=1e-12))]:
        verify_gauss_quad(sc.roots_chebyc, orth.eval_chebyc, weightf,
                          -2., 2., order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_chebyc(5, False)
    y, v, m = sc.roots_chebyc(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, -2, 2)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_chebyc, 0)
    assert_raises(ValueError, sc.roots_chebyc, 3.3)
def test_roots_chebys():
    """Gauss quadrature for Chebyshev S polynomials on [-2, 2]."""
    weightf = orth.chebys(5).weight_func
    for order in [5, 25, 100]:
        verify_gauss_quad(sc.roots_chebys, orth.eval_chebys, weightf,
                          -2., 2., order)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_chebys(5, False)
    y, v, m = sc.roots_chebys(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, -2, 2)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_chebys, 0)
    assert_raises(ValueError, sc.roots_chebys, 3.3)
def test_roots_sh_chebyt():
    """Gauss quadrature for shifted Chebyshev T polynomials on [0, 1]."""
    weightf = orth.sh_chebyt(5).weight_func
    for order, kw in [(5, {}), (25, {}), (100, dict(atol=1e-13))]:
        verify_gauss_quad(sc.roots_sh_chebyt, orth.eval_sh_chebyt, weightf,
                          0., 1., order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_sh_chebyt(5, False)
    y, v, m = sc.roots_sh_chebyt(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_sh_chebyt, 0)
    assert_raises(ValueError, sc.roots_sh_chebyt, 3.3)
def test_roots_sh_chebyu():
    """Gauss quadrature for shifted Chebyshev U polynomials on [0, 1]."""
    weightf = orth.sh_chebyu(5).weight_func
    for order, kw in [(5, {}), (25, {}), (100, dict(atol=1e-13))]:
        verify_gauss_quad(sc.roots_sh_chebyu, orth.eval_sh_chebyu, weightf,
                          0., 1., order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_sh_chebyu(5, False)
    y, v, m = sc.roots_sh_chebyu(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_sh_chebyu, 0)
    assert_raises(ValueError, sc.roots_sh_chebyu, 3.3)
def test_roots_legendre():
    """Gauss-Legendre quadrature."""
    weightf = orth.legendre(5).weight_func
    for order, kw in [(5, {}), (25, dict(atol=1e-13)), (100, dict(atol=1e-12))]:
        verify_gauss_quad(sc.roots_legendre, orth.eval_legendre, weightf,
                          -1., 1., order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_legendre(5, False)
    y, v, m = sc.roots_legendre(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_legendre, 0)
    assert_raises(ValueError, sc.roots_legendre, 3.3)
def test_roots_sh_legendre():
    """Gauss quadrature for shifted Legendre polynomials on [0, 1]."""
    weightf = orth.sh_legendre(5).weight_func
    for order, kw in [(5, {}), (25, dict(atol=1e-13)), (100, dict(atol=1e-12))]:
        verify_gauss_quad(sc.roots_sh_legendre, orth.eval_sh_legendre, weightf,
                          0., 1., order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_sh_legendre(5, False)
    y, v, m = sc.roots_sh_legendre(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_sh_legendre, 0)
    assert_raises(ValueError, sc.roots_sh_legendre, 3.3)
def test_roots_laguerre():
    """Gauss-Laguerre quadrature on [0, inf)."""
    weightf = orth.laguerre(5).weight_func
    for order, kw in [(5, {}), (25, dict(atol=1e-13)), (100, dict(atol=1e-12))]:
        verify_gauss_quad(sc.roots_laguerre, orth.eval_laguerre, weightf,
                          0., np.inf, order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_laguerre(5, False)
    y, v, m = sc.roots_laguerre(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(weightf, 0, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid orders must raise.
    assert_raises(ValueError, sc.roots_laguerre, 0)
    assert_raises(ValueError, sc.roots_laguerre, 3.3)
def test_roots_genlaguerre():
    """Gauss-generalized-Laguerre quadrature over a range of alpha."""
    make_roots = lambda a: lambda n, mu: sc.roots_genlaguerre(n, a, mu)
    make_eval = lambda a: lambda n, x: orth.eval_genlaguerre(n, a, x)
    make_weight = lambda a: lambda x: x**a * np.exp(-x)

    # (alpha, order, tolerance overrides)
    cases = [
        (-0.5, 5, {}),
        (-0.5, 25, dict(atol=1e-13)),
        (-0.5, 100, dict(atol=1e-12)),
        (0.1, 5, {}),
        (0.1, 25, dict(atol=1e-13)),
        (0.1, 100, dict(atol=1e-13)),
        (1, 5, {}),
        (1, 25, dict(atol=1e-13)),
        (1, 100, dict(atol=1e-13)),
        (10, 5, {}),
        (10, 25, dict(atol=1e-13)),
        (10, 100, dict(atol=1e-12)),
        (50, 5, {}),
        (50, 25, dict(atol=1e-13)),
        (50, 100, dict(rtol=1e-14, atol=2e-13)),
    ]
    for alpha, order, kw in cases:
        verify_gauss_quad(make_roots(alpha), make_eval(alpha),
                          make_weight(alpha), 0., np.inf, order, **kw)

    # mu=True agrees with mu=False and with direct integration.
    x, w = sc.roots_genlaguerre(5, 2, False)
    y, v, m = sc.roots_genlaguerre(5, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    muI, muI_err = integrate.quad(make_weight(2.), 0., np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid order or parameter must raise.
    assert_raises(ValueError, sc.roots_genlaguerre, 0, 2)
    assert_raises(ValueError, sc.roots_genlaguerre, 3.3, 2)
    assert_raises(ValueError, sc.roots_genlaguerre, 3, -1.1)
def test_gh_6721():
    """Regression test for gh-6721: chebyt(65)(0.2) must not raise."""
    sc.chebyt(65)(0.2)
@@ -1,248 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_allclose
import scipy.special.orthogonal as orth
from scipy.special._testutils import FuncData
def test_eval_chebyt():
    """eval_chebyt matches T_n(x) = cos(n*arccos(x)) for many degrees."""
    degrees = np.arange(0, 10000, 7)
    point = 2*np.random.rand() - 1
    reference = np.cos(degrees*np.arccos(point))
    computed = orth.eval_chebyt(degrees, point)
    assert_(np.allclose(reference, computed, rtol=1e-15))
def test_eval_genlaguerre_restriction():
    """eval_genlaguerre returns nan for alpha <= -1."""
    for degree in (0, 0.1):
        assert_(np.isnan(orth.eval_genlaguerre(degree, -1, 0)))
def test_warnings():
    """Ticket 1334: these evaluations must not trip fp warnings."""
    saved = np.seterr(all='raise')
    try:
        # With all fp errors promoted to exceptions, each call below
        # must complete silently.
        orth.eval_legendre(1, 0)
        orth.eval_laguerre(1, 1)
        orth.eval_gegenbauer(1, 1, 0)
    finally:
        np.seterr(**saved)
class TestPolys(object):
    """
    Check that the eval_* functions agree with the constructed polynomials
    """

    def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10,
                   nparam=10, nx=10, rtol=1e-8):
        # NOTE(review): the mutable list defaults are shared across calls,
        # but both are only read here, never mutated, so this is harmless.
        #
        # Build a dataset of rows [params..., x, expected] by sampling
        # `nparam` random parameter tuples per degree n < nn and `nx`
        # random points per tuple, then compare `func` against it.
        np.random.seed(1234)

        dataset = []
        for n in np.arange(nn):
            params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
            params = np.asarray(params).T
            if not param_ranges:
                # No extra parameters: a single dummy entry so the inner
                # loop runs once per degree.
                params = [0]
            for p in params:
                if param_ranges:
                    p = (n,) + tuple(p)
                else:
                    p = (n,)
                x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
                x[0] = x_range[0]  # always include domain start point
                x[1] = x_range[1]  # always include domain end point
                # Reference values come from evaluating the explicit
                # coefficients of the constructed polynomial via np.poly1d.
                poly = np.poly1d(cls(*p).coef)
                z = np.c_[np.tile(p, (nx,1)), x, poly(x)]
                dataset.append(z)

        dataset = np.concatenate(dataset, axis=0)

        def polyfunc(*p):
            # The degree argument must be integral for the eval_* ufuncs.
            p = (p[0].astype(int),) + p[1:]
            return func(*p)

        # Promote fp warnings to errors so silent accuracy loss in the
        # eval_* routines is caught.
        olderr = np.seterr(all='raise')
        try:
            ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
                          rtol=rtol)
            ds.check()
        finally:
            np.seterr(**olderr)

    def test_jacobi(self):
        self.check_poly(orth.eval_jacobi, orth.jacobi,
                        param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1],
                        rtol=1e-5)

    def test_sh_jacobi(self):
        self.check_poly(orth.eval_sh_jacobi, orth.sh_jacobi,
                        param_ranges=[(1, 10), (0, 1)], x_range=[0, 1],
                        rtol=1e-5)

    def test_gegenbauer(self):
        self.check_poly(orth.eval_gegenbauer, orth.gegenbauer,
                        param_ranges=[(-0.499, 10)], x_range=[-1, 1],
                        rtol=1e-7)

    def test_chebyt(self):
        self.check_poly(orth.eval_chebyt, orth.chebyt,
                        param_ranges=[], x_range=[-1, 1])

    def test_chebyu(self):
        self.check_poly(orth.eval_chebyu, orth.chebyu,
                        param_ranges=[], x_range=[-1, 1])

    def test_chebys(self):
        self.check_poly(orth.eval_chebys, orth.chebys,
                        param_ranges=[], x_range=[-2, 2])

    def test_chebyc(self):
        self.check_poly(orth.eval_chebyc, orth.chebyc,
                        param_ranges=[], x_range=[-2, 2])

    def test_sh_chebyt(self):
        # Construction emits harmless fp warnings; ignore them.
        olderr = np.seterr(all='ignore')
        try:
            self.check_poly(orth.eval_sh_chebyt, orth.sh_chebyt,
                            param_ranges=[], x_range=[0, 1])
        finally:
            np.seterr(**olderr)

    def test_sh_chebyu(self):
        self.check_poly(orth.eval_sh_chebyu, orth.sh_chebyu,
                        param_ranges=[], x_range=[0, 1])

    def test_legendre(self):
        self.check_poly(orth.eval_legendre, orth.legendre,
                        param_ranges=[], x_range=[-1, 1])

    def test_sh_legendre(self):
        # Construction emits harmless fp warnings; ignore them.
        olderr = np.seterr(all='ignore')
        try:
            self.check_poly(orth.eval_sh_legendre, orth.sh_legendre,
                            param_ranges=[], x_range=[0, 1])
        finally:
            np.seterr(**olderr)

    def test_genlaguerre(self):
        self.check_poly(orth.eval_genlaguerre, orth.genlaguerre,
                        param_ranges=[(-0.99, 10)], x_range=[0, 100])

    def test_laguerre(self):
        self.check_poly(orth.eval_laguerre, orth.laguerre,
                        param_ranges=[], x_range=[0, 100])

    def test_hermite(self):
        self.check_poly(orth.eval_hermite, orth.hermite,
                        param_ranges=[], x_range=[-100, 100])

    def test_hermitenorm(self):
        self.check_poly(orth.eval_hermitenorm, orth.hermitenorm,
                        param_ranges=[], x_range=[-100, 100])
class TestRecurrence(object):
    """
    Check that the eval_* functions sig='ld->d' and 'dd->d' agree.
    """

    def check_poly(self, func, param_ranges=[], x_range=[], nn=10,
                   nparam=10, nx=10, rtol=1e-8):
        # Build a dataset of rows [params..., x, reference] where the
        # reference is computed with the all-double 'dd->d' ufunc loop,
        # then check the integer-degree 'ld->d' loop against it.
        np.random.seed(1234)

        dataset = []
        for n in np.arange(nn):
            params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
            params = np.asarray(params).T
            if not param_ranges:
                # No extra parameters: a single dummy entry so the inner
                # loop runs once per degree.
                params = [0]
            for p in params:
                if param_ranges:
                    p = (n,) + tuple(p)
                else:
                    p = (n,)
                x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
                x[0] = x_range[0]  # always include domain start point
                x[1] = x_range[1]  # always include domain end point
                # Select the all-double ufunc loop for the reference.
                kw = dict(sig=(len(p)+1)*'d'+'->d')
                z = np.c_[np.tile(p, (nx,1)), x, func(*(p + (x,)), **kw)]
                dataset.append(z)

        dataset = np.concatenate(dataset, axis=0)

        def polyfunc(*p):
            # Integer degree selects the 'ld->d' recurrence loop.
            p = (p[0].astype(int),) + p[1:]
            kw = dict(sig='l'+(len(p)-1)*'d'+'->d')
            return func(*p, **kw)

        # Promote fp warnings to errors so discrepancies between the two
        # loops are not silently masked.
        olderr = np.seterr(all='raise')
        try:
            ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
                          rtol=rtol)
            ds.check()
        finally:
            np.seterr(**olderr)

    def test_jacobi(self):
        self.check_poly(orth.eval_jacobi,
                        param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1])

    def test_sh_jacobi(self):
        self.check_poly(orth.eval_sh_jacobi,
                        param_ranges=[(1, 10), (0, 1)], x_range=[0, 1])

    def test_gegenbauer(self):
        self.check_poly(orth.eval_gegenbauer,
                        param_ranges=[(-0.499, 10)], x_range=[-1, 1])

    def test_chebyt(self):
        self.check_poly(orth.eval_chebyt,
                        param_ranges=[], x_range=[-1, 1])

    def test_chebyu(self):
        self.check_poly(orth.eval_chebyu,
                        param_ranges=[], x_range=[-1, 1])

    def test_chebys(self):
        self.check_poly(orth.eval_chebys,
                        param_ranges=[], x_range=[-2, 2])

    def test_chebyc(self):
        self.check_poly(orth.eval_chebyc,
                        param_ranges=[], x_range=[-2, 2])

    def test_sh_chebyt(self):
        self.check_poly(orth.eval_sh_chebyt,
                        param_ranges=[], x_range=[0, 1])

    def test_sh_chebyu(self):
        self.check_poly(orth.eval_sh_chebyu,
                        param_ranges=[], x_range=[0, 1])

    def test_legendre(self):
        self.check_poly(orth.eval_legendre,
                        param_ranges=[], x_range=[-1, 1])

    def test_sh_legendre(self):
        self.check_poly(orth.eval_sh_legendre,
                        param_ranges=[], x_range=[0, 1])

    def test_genlaguerre(self):
        self.check_poly(orth.eval_genlaguerre,
                        param_ranges=[(-0.99, 10)], x_range=[0, 100])

    def test_laguerre(self):
        self.check_poly(orth.eval_laguerre,
                        param_ranges=[], x_range=[0, 100])

    def test_hermite(self):
        # Spot-check a high degree against a precomputed reference value.
        v = orth.eval_hermite(70, 1.0)
        a = -1.457076485701412e60
        assert_allclose(v,a)
@@ -1,44 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import scipy.special as sc
def test_symmetries():
    """Owen's T function is even in h and odd in a."""
    np.random.seed(1234)
    a, h = np.random.rand(100), np.random.rand(100)
    t = sc.owens_t(h, a)
    assert_equal(t, sc.owens_t(-h, a))
    assert_equal(t, -sc.owens_t(h, -a))
def test_special_cases():
    """Closed-form special values of Owen's T function."""
    # T(h, 0) = 0 for any h.
    assert_equal(sc.owens_t(5, 0), 0)
    # T(0, a) = arctan(a) / (2*pi).
    assert_allclose(sc.owens_t(0, 5), 0.5*np.arctan(5)/np.pi,
                    rtol=5e-14)
    # Target value is 0.5*Phi(5)*(1 - Phi(5)) for Phi the CDF of the
    # standard normal distribution.
    assert_allclose(sc.owens_t(5, 1), 1.4332574485503512543e-07,
                    rtol=5e-14)
def test_nans():
    """A nan in either argument propagates to the result."""
    for h, a in [(20, np.nan), (np.nan, 20), (np.nan, np.nan)]:
        assert_equal(sc.owens_t(h, a), np.nan)
def test_infs():
h = 1
res = 0.5*sc.erfc(h/np.sqrt(2))
assert_allclose(sc.owens_t(h, np.inf), res, rtol=5e-14)
assert_allclose(sc.owens_t(h, -np.inf), -res, rtol=5e-14)
assert_equal(sc.owens_t(np.inf, 1), 0)
assert_equal(sc.owens_t(-np.inf, 1), 0)
assert_equal(sc.owens_t(np.inf, np.inf), 0)
assert_equal(sc.owens_t(-np.inf, np.inf), 0)
assert_equal(sc.owens_t(np.inf, -np.inf), -0.0)
assert_equal(sc.owens_t(-np.inf, -np.inf), -0.0)
@@ -1,24 +0,0 @@
"""Tests for parabolic cylinder functions.
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import scipy.special as sc
def test_pbwa_segfault():
    """Regression test for https://github.com/scipy/scipy/issues/6208.

    pbwa(0, 0) used to crash; check it now returns the correct value.
    Reference data generated by mpmath.
    """
    w = 1.02276567211316867161
    wp = -0.48887053372346189882
    assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0)
def test_pbwa_nan():
    """Outside its accurate parameter range pbwa returns (nan, nan)."""
    for point in [(-6, -6), (-6, 6), (6, -6), (6, 6)]:
        assert_equal(sc.pbwa(*point), (np.nan, np.nan))
@@ -1,26 +0,0 @@
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_equal
from scipy.special._testutils import check_version, MissingModule
from scipy.special._precompute.expn_asy import generate_A
try:
import sympy
from sympy import Poly
except ImportError:
sympy = MissingModule("sympy")
@check_version(sympy, "1.0")
def test_generate_A():
    """generate_A must reproduce the polynomials from DLMF 8.20.5."""
    x = sympy.symbols('x')
    expected = [Poly(1, x),
                Poly(1, x),
                Poly(1 - 2*x),
                Poly(1 - 8*x + 6*x**2)]
    computed = generate_A(len(expected))
    for reference, result in zip(expected, computed):
        assert_equal(reference, result)
@@ -1,116 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from scipy.special._testutils import MissingModule, check_version
from scipy.special._mptestutils import (
Arg, IntArg, mp_assert_allclose, assert_mpmath_equal)
from scipy.special._precompute.gammainc_asy import (
compute_g, compute_alpha, compute_d)
from scipy.special._precompute.gammainc_data import gammainc, gammaincc
try:
import sympy
except ImportError:
sympy = MissingModule('sympy')
try:
import mpmath as mp
except ImportError:
mp = MissingModule('mpmath')
_is_32bit_platform = np.intp(0).itemsize < 8
@check_version(mp, '0.19')
def test_g():
    """Coefficients g_k of the asymptotic series; see DLMF 5.11.4."""
    with mp.workdps(30):
        expected = [mp.mpf(1), mp.mpf(1)/12, mp.mpf(1)/288,
                    -mp.mpf(139)/51840, -mp.mpf(571)/2488320,
                    mp.mpf(163879)/209018880, mp.mpf(5246819)/75246796800]
        mp_assert_allclose(compute_g(7), expected)
@pytest.mark.slow
@check_version(mp, '0.19')
@check_version(sympy, '0.7')
@pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 2e-11, see gh-6938")
def test_alpha():
    # Reference data for the coefficients alpha_k; see DLMF 8.12.14.
    with mp.workdps(30):
        fractions = [(0, 1), (1, 1), (1, 3), (1, 36), (-1, 270),
                     (1, 4320), (1, 17010), (-139, 5443200), (1, 204120)]
        alpha = [mp.mpf(p)/q for p, q in fractions]
        mp_assert_allclose(compute_alpha(9), alpha)
@pytest.mark.xslow
@check_version(mp, '0.19')
@check_version(sympy, '0.7')
def test_d():
    # Compare the d_{k, n} to the results in appendix F of [1].
    #
    # Sources
    # -------
    # [1] DiDonato and Morris, Computation of the Incomplete Gamma
    #     Function Ratios and their Inverse, ACM Transactions on
    #     Mathematical Software, 1986.
    with mp.workdps(50):
        dataset = [(0, 0, -mp.mpf('0.333333333333333333333333333333')),
                   (0, 12, mp.mpf('0.102618097842403080425739573227e-7')),
                   (1, 0, -mp.mpf('0.185185185185185185185185185185e-2')),
                   (1, 12, mp.mpf('0.119516285997781473243076536700e-7')),
                   (2, 0, mp.mpf('0.413359788359788359788359788360e-2')),
                   (2, 12, -mp.mpf('0.140925299108675210532930244154e-7')),
                   (3, 0, mp.mpf('0.649434156378600823045267489712e-3')),
                   (3, 12, -mp.mpf('0.191111684859736540606728140873e-7')),
                   (4, 0, -mp.mpf('0.861888290916711698604702719929e-3')),
                   (4, 12, mp.mpf('0.288658297427087836297341274604e-7')),
                   (5, 0, -mp.mpf('0.336798553366358150308767592718e-3')),
                   (5, 12, mp.mpf('0.482409670378941807563762631739e-7')),
                   (6, 0, mp.mpf('0.531307936463992223165748542978e-3')),
                   (6, 12, -mp.mpf('0.882860074633048352505085243179e-7')),
                   (7, 0, mp.mpf('0.344367606892377671254279625109e-3')),
                   (7, 12, -mp.mpf('0.175629733590604619378669693914e-6')),
                   (8, 0, -mp.mpf('0.652623918595309418922034919727e-3')),
                   (8, 12, mp.mpf('0.377358774161109793380344937299e-6')),
                   (9, 0, -mp.mpf('0.596761290192746250124390067179e-3')),
                   (9, 12, mp.mpf('0.870823417786464116761231237189e-6'))]
        d = compute_d(10, 13)
        # The old code looped over the dataset unpacking `std` without
        # using it, then rebound `std` to a map object that shadowed
        # the loop variable; comprehensions state the intent directly.
        res = [d[k][n] for k, n, _ in dataset]
        std = [value for _, _, value in dataset]
        mp_assert_allclose(res, std)
@check_version(mp, '0.19')
def test_gammainc():
    # Sanity check that the gammainc in
    # special._precompute.gammainc_data agrees with mpmath's
    # regularized lower incomplete gamma.
    def mpmath_gammainc(a, x):
        return mp.gammainc(a, b=x, regularized=True)
    assert_mpmath_equal(gammainc, mpmath_gammainc,
                        [Arg(0, 100, inclusive_a=False), Arg(0, 100)],
                        nan_ok=False, rtol=1e-17, n=50, dps=50)
@pytest.mark.xslow
@check_version(mp, '0.19')
def test_gammaincc():
    # The gammaincc in special._precompute.gammainc_data must agree
    # with mpmath's regularized upper incomplete gamma.
    def mpmath_gammaincc(a, x):
        return mp.gammainc(a, a=x, regularized=True)
    assert_mpmath_equal(lambda a, x: gammaincc(a, x, dps=1000),
                        mpmath_gammaincc,
                        [Arg(20, 100), Arg(20, 100)],
                        nan_ok=False, rtol=1e-17, n=50, dps=1000)
    # Also exercise the fast integer path.
    assert_mpmath_equal(gammaincc,
                        mpmath_gammaincc,
                        [IntArg(1, 100), Arg(0, 100)],
                        nan_ok=False, rtol=1e-17, n=50, dps=50)
@@ -1,42 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from scipy.special._testutils import MissingModule, check_version
from scipy.special._mptestutils import mp_assert_allclose
from scipy.special._precompute.utils import lagrange_inversion
try:
import sympy
except ImportError:
sympy = MissingModule('sympy')
try:
import mpmath as mp
except ImportError:
mp = MissingModule('mpmath')
# True on builds where pointers (np.intp) are narrower than 64 bits;
# used below to xfail tests whose tolerances are not met on 32-bit
# platforms (see gh-6938).
_is_32bit_platform = np.intp(0).itemsize < 8
@pytest.mark.slow
@check_version(sympy, '0.7')
@check_version(mp, '0.19')
class TestInversion(object):
    """Check lagrange_inversion against known inverse power series."""
    @pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 2e-9, see gh-6938")
    def test_log(self):
        # Inverting the series of log(1 + x) must recover exp(x) - 1.
        with mp.workdps(30):
            series = mp.taylor(lambda x: mp.log(1 + x), 0, 10)
            expected = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10)
            mp_assert_allclose(lagrange_inversion(series), expected)

    @pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 1e-15, see gh-6938")
    def test_sin(self):
        # Inverting the sine series must recover the arcsine series.
        with mp.workdps(30):
            series = mp.taylor(mp.sin, 0, 10)
            expected = mp.taylor(mp.asin, 0, 10)
            mp_assert_allclose(lagrange_inversion(series), expected,
                               atol=1e-30)
@@ -1,18 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from scipy.special import _test_round
@pytest.mark.skipif(not _test_round.have_fenv(), reason="no fenv()")
def test_add_round_up():
    # Exercise the C-level rounding test with upward rounding; the
    # fixed seed keeps the sampled operands reproducible.
    np.random.seed(1234)
    _test_round.test_add_round(100000, 'up')
@pytest.mark.skipif(not _test_round.have_fenv(), reason="no fenv()")
def test_add_round_down():
    # Exercise the C-level rounding test with downward rounding; the
    # fixed seed keeps the sampled operands reproducible.
    np.random.seed(1234)
    _test_round.test_add_round(100000, 'down')
@@ -1,115 +0,0 @@
from __future__ import division, print_function, absolute_import
import warnings
from numpy.testing import assert_, assert_equal
from scipy._lib._numpy_compat import suppress_warnings
import pytest
from pytest import raises as assert_raises
import scipy.special as sc
from scipy.special._ufuncs import _sf_error_test_function
# Integer codes accepted by _sf_error_test_function, one per
# special-function error category handled by geterr/seterr/errstate.
# 'ok' (code 0) is deliberately omitted: it never signals an error.
_sf_error_code_map = {
    # skip 'ok'
    'singular': 1,
    'underflow': 2,
    'overflow': 3,
    'slow': 4,
    'loss': 5,
    'no_result': 6,
    'domain': 7,
    'arg': 8,
    'other': 9
}
# The error-handling actions accepted by sc.seterr/sc.errstate and
# exercised for every category in the tests below.
_sf_error_actions = [
    'ignore',
    'warn',
    'raise'
]
def _check_action(fun, args, action):
if action == 'warn':
with pytest.warns(sc.SpecialFunctionWarning):
fun(*args)
elif action == 'raise':
with assert_raises(sc.SpecialFunctionError):
fun(*args)
else:
# action == 'ignore', make sure there are no warnings/exceptions
with warnings.catch_warnings():
warnings.simplefilter("error")
fun(*args)
def test_geterr():
    # Every category/action pair reported by geterr must be one of the
    # known values.
    for category, action in sc.geterr().items():
        assert_(category in _sf_error_code_map)
        assert_(action in _sf_error_actions)
def test_seterr():
    # Save the entry state so that other tests are unaffected even if
    # an assertion below fails.
    entry_err = sc.geterr()
    try:
        for category, code in _sf_error_code_map.items():
            for action in _sf_error_actions:
                before = sc.geterr()
                # seterr returns the state as it was before the call.
                returned = sc.seterr(**{category: action})
                assert_(before == returned)
                after = sc.geterr()
                assert_(after[category] == action)
                # Nothing other than `category` may have changed.
                before.pop(category)
                after.pop(category)
                assert_(before == after)
                _check_action(_sf_error_test_function, (code,), action)
    finally:
        sc.seterr(**entry_err)
def test_errstate_pyx_basic():
    # errstate must make a singularity raise (loggamma(0) is singular)
    # and restore the previous state on exit.
    saved = sc.geterr()
    with sc.errstate(singular='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.loggamma(0)
    assert_equal(saved, sc.geterr())
def test_errstate_c_basic():
    # errstate must make a domain error raise (spence(-1) is outside
    # the real domain) and restore the previous state on exit.
    saved = sc.geterr()
    with sc.errstate(domain='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.spence(-1)
    assert_equal(saved, sc.geterr())
def test_errstate_cpp_basic():
    # errstate must make an underflow raise (wrightomega(-1000)
    # underflows) and restore the previous state on exit.
    saved = sc.geterr()
    with sc.errstate(underflow='raise'):
        with assert_raises(sc.SpecialFunctionError):
            sc.wrightomega(-1000)
    assert_equal(saved, sc.geterr())
def test_errstate():
    # Every category/action combination must behave as configured
    # inside the context manager and be restored afterwards.
    for category, code in _sf_error_code_map.items():
        for action in _sf_error_actions:
            saved = sc.geterr()
            with sc.errstate(**{category: action}):
                _check_action(_sf_error_test_function, (code,), action)
            assert_equal(saved, sc.geterr())
def test_errstate_all_but_one():
    # `all='raise'` combined with a per-category override: the
    # overridden category stays silent while any other still raises.
    saved = sc.geterr()
    with sc.errstate(all='raise', singular='ignore'):
        sc.gammaln(0)  # singular, but ignored
        with assert_raises(sc.SpecialFunctionError):
            sc.spence(-1.0)  # domain error, raises
    assert_equal(saved, sc.geterr())
@@ -1,38 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.special as sc
from scipy.special._testutils import FuncData
def test_sici_consistency():
    # The real-argument implementation of sici must agree with the
    # complex-argument one.  On the negative real axis Cephes drops
    # the imaginary part in ci, so only real parts are compared.
    def sici_complex(x):
        si, ci = sc.sici(x + 0j)
        return si.real, ci.real
    x = np.r_[-np.logspace(8, -30, 200), 0, np.logspace(-30, 8, 200)]
    si, ci = sc.sici(x)
    dataset = np.column_stack((x, si, ci))
    FuncData(sici_complex, dataset, 0, (1, 2), rtol=1e-12).check()
def test_shichi_consistency():
    # The real-argument implementation of shichi must agree with the
    # complex-argument one.  On the negative real axis Cephes drops
    # the imaginary part in chi, so only real parts are compared.
    def shichi_complex(x):
        shi, chi = sc.shichi(x + 0j)
        return shi.real, chi.real
    # shichi overflows quickly, so keep |x| <= 700.
    x = np.r_[-np.logspace(np.log10(700), -30, 200), 0,
              np.logspace(-30, np.log10(700), 200)]
    shi, chi = sc.shichi(x)
    dataset = np.column_stack((x, shi, chi))
    FuncData(shichi_complex, dataset, 0, (1, 2), rtol=1e-14).check()
@@ -1,34 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import sqrt, log, pi
from scipy.special._testutils import FuncData
from scipy.special import spence
def test_consistency():
    # spence on the positive real axis must agree with spence at the
    # same points promoted to complex.
    x = np.logspace(-30, 300, 200)
    dataset = np.column_stack((x + 0j, spence(x)))
    FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
def test_special_points():
    # Check spence against closed-form values at classical special
    # points (golden-ratio identities included).
    golden = (1 + sqrt(5))/2
    known_values = [
        (1, 0),
        (2, -pi**2/12),
        (0.5, pi**2/12 - log(2)**2/2),
        (0, pi**2/6),
        (-1, pi**2/4 - 1j*pi*log(2)),
        ((-1 + sqrt(5))/2, pi**2/15 - log(golden)**2),
        ((3 - sqrt(5))/2, pi**2/10 - log(golden)**2),
        (golden, -pi**2/15 + log(golden)**2/2),
        # Corrected from Zagier, "The Dilogarithm Function"
        ((3 + sqrt(5))/2, -pi**2/10 - log(golden)**2),
    ]
    dataset = np.asarray(known_values)
    FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
@@ -1,63 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_array_equal,
assert_array_almost_equal_nulp, assert_almost_equal)
from pytest import raises as assert_raises
from scipy.special import gammaln, multigammaln
class TestMultiGammaLn(object):
    """Identity and argument-validation checks for multigammaln."""
    def test1(self):
        # Identity: Gamma_1(a) = Gamma(a).
        np.random.seed(1234)
        a = np.abs(np.random.randn())
        assert_array_equal(multigammaln(a, 1), gammaln(a))

    def test2(self):
        # Identity: Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5).
        a = np.array([2.5, 10.0])
        expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5)
        assert_almost_equal(multigammaln(a, 2), expected)

    def test_bararg(self):
        # A non-integral dimension d is rejected.
        assert_raises(ValueError, multigammaln, 0.5, 1.2)
def _check_multigammaln_array_result(a, d):
# Test that the shape of the array returned by multigammaln
# matches the input shape, and that all the values match
# the value computed when multigammaln is called with a scalar.
result = multigammaln(a, d)
assert_array_equal(a.shape, result.shape)
a1 = a.ravel()
result1 = result.ravel()
for i in range(a.size):
assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d))
def test_multigammaln_array_arg():
    # The array returned by multigammaln must have the correct shape
    # and values for a variety of input shapes.  Includes a regression
    # test for ticket #1849 (a single-element array) and a 0-d scalar.
    np.random.seed(1234)
    cases = [
        # (a, d)
        (np.abs(np.random.randn(3, 2)) + 5, 5),
        (np.abs(np.random.randn(1, 2)) + 5, 5),
        (np.arange(10.0, 18.0).reshape(2, 2, 2), 3),
        (np.array([2.0]), 3),
        (np.float64(2.0), 3),
    ]
    for arg, dim in cases:
        _check_multigammaln_array_result(arg, dim)
@@ -1,39 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose
import scipy.special as sc
def test_first_harmonics():
    # Test sph_harm against explicit representations of the first four
    # spherical harmonics, which use `theta` as the azimuthal angle,
    # `phi` as the polar angle, and include the Condon-Shortley phase.
    # Notation is Ymn = Y_n^m.
    def Y00(theta, phi):
        return 0.5*np.sqrt(1/np.pi)

    def Yn11(theta, phi):
        return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi)

    def Y01(theta, phi):
        return 0.5*np.sqrt(3/np.pi)*np.cos(phi)

    def Y11(theta, phi):
        return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi)

    harms = [Y00, Yn11, Y01, Y11]
    # The old code reused `m` and `n` as loop variables, shadowing the
    # lists being iterated; distinct names avoid that trap.
    orders = [0, -1, 0, 1]    # m
    degrees = [0, 1, 1, 1]    # n
    theta = np.linspace(0, 2*np.pi)
    phi = np.linspace(0, np.pi)
    theta, phi = np.meshgrid(theta, phi)
    for harm, m, n in zip(harms, orders, degrees):
        assert_allclose(sc.sph_harm(m, n, theta, phi),
                        harm(theta, phi),
                        rtol=1e-15, atol=1e-15,
                        err_msg="Y^{}_{} incorrect".format(m, n))
@@ -1,383 +0,0 @@
#
# Tests of spherical Bessel functions.
#
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose,
assert_array_almost_equal)
import pytest
from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi
from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn
from scipy.integrate import quad
from scipy._lib._numpy_compat import suppress_warnings
class TestSphericalJn:
    """Tests for spherical_jn, the spherical Bessel function j_n."""
    def test_spherical_jn_exact(self):
        # Closed form for j_2, https://dlmf.nist.gov/10.49.E3
        # Note: exact expression is numerically stable only for small
        # n or z >> n.
        x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
        assert_allclose(spherical_jn(2, x),
                        (-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x))
    def test_spherical_jn_recurrence_complex(self):
        # Three-term recurrence, https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x),
                        (2*n + 1)/x*spherical_jn(n, x))
    def test_spherical_jn_recurrence_real(self):
        # Three-term recurrence, https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1,x),
                        (2*n + 1)/x*spherical_jn(n, x))
    def test_spherical_jn_inf_real(self):
        # j_n -> 0 as x -> +/-inf on the real axis,
        # https://dlmf.nist.gov/10.52.E3
        n = 6
        x = np.array([-inf, inf])
        assert_allclose(spherical_jn(n, x), np.array([0, 0]))
    def test_spherical_jn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E3
        # The off-axis complex infinity maps to a complex infinity.
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)]))
    def test_spherical_jn_large_arg_1(self):
        # Regression test for https://github.com/scipy/scipy/issues/2165
        # Reference value computed using mpmath, via
        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
        assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747)
    def test_spherical_jn_large_arg_2(self):
        # Regression test for https://github.com/scipy/scipy/issues/1641
        # Reference value computed using mpmath, via
        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
        assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05)
    def test_spherical_jn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E1
        # But note that n = 0 is a special case: j0 = sin(x)/x -> 1
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0]))
class TestSphericalYn:
    """Tests for spherical_yn, the spherical Bessel function y_n."""
    def test_spherical_yn_exact(self):
        # Closed form for y_2, https://dlmf.nist.gov/10.49.E5
        # Note: exact expression is numerically stable only for small
        # n or z >> n.
        x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
        assert_allclose(spherical_yn(2, x),
                        (1/x - 3/x**3)*cos(x) - 3/x**2*sin(x))
    def test_spherical_yn_recurrence_real(self):
        # Three-term recurrence, https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x),
                        (2*n + 1)/x*spherical_yn(n, x))
    def test_spherical_yn_recurrence_complex(self):
        # Three-term recurrence, https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x),
                        (2*n + 1)/x*spherical_yn(n, x))
    def test_spherical_yn_inf_real(self):
        # y_n -> 0 as x -> +/-inf on the real axis,
        # https://dlmf.nist.gov/10.52.E3
        n = 6
        x = np.array([-inf, inf])
        assert_allclose(spherical_yn(n, x), np.array([0, 0]))
    def test_spherical_yn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E3
        # The off-axis complex infinity maps to a complex infinity.
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)]))
    def test_spherical_yn_at_zero(self):
        # y_n diverges to -inf at the origin for every n,
        # https://dlmf.nist.gov/10.52.E2
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_yn(n, x), -inf*np.ones(shape=n.shape))
    def test_spherical_yn_at_zero_complex(self):
        # Consistently with numpy:
        # >>> -np.cos(0)/0
        # -inf
        # >>> -np.cos(0+0j)/(0+0j)
        # (-inf + nan*j)
        # so for a complex zero argument the result is nan.
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0 + 0j
        assert_allclose(spherical_yn(n, x), nan*np.ones(shape=n.shape))
class TestSphericalJnYnCrossProduct:
    """Cross-product (Wronskian) identities, https://dlmf.nist.gov/10.50.E3"""
    def test_spherical_jn_yn_cross_product_1(self):
        # j_{n+1}(x) y_n(x) - j_n(x) y_{n+1}(x) = 1/x**2
        orders = np.array([1, 5, 8])
        points = np.array([0.1, 1, 10])
        lhs = (spherical_jn(orders + 1, points) * spherical_yn(orders, points) -
               spherical_jn(orders, points) * spherical_yn(orders + 1, points))
        assert_allclose(lhs, 1/points**2)

    def test_spherical_jn_yn_cross_product_2(self):
        # j_{n+2}(x) y_n(x) - j_n(x) y_{n+2}(x) = (2n + 3)/x**3
        orders = np.array([1, 5, 8])
        points = np.array([0.1, 1, 10])
        lhs = (spherical_jn(orders + 2, points) * spherical_yn(orders, points) -
               spherical_jn(orders, points) * spherical_yn(orders + 2, points))
        assert_allclose(lhs, (2*orders + 3)/points**3)
class TestSphericalIn:
    """Tests for spherical_in, the modified spherical Bessel function i_n."""
    def test_spherical_in_exact(self):
        # Closed form for i_2, https://dlmf.nist.gov/10.49.E9
        x = np.array([0.12, 1.23, 12.34, 123.45])
        assert_allclose(spherical_in(2, x),
                        (1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x))
    def test_spherical_in_recurrence_real(self):
        # Three-term recurrence, https://dlmf.nist.gov/10.51.E4
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
                        (2*n + 1)/x*spherical_in(n, x))
    def test_spherical_in_recurrence_complex(self):
        # Three-term recurrence, https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
                        (2*n + 1)/x*spherical_in(n, x))
    def test_spherical_in_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 5
        x = np.array([-inf, inf])
        assert_allclose(spherical_in(n, x), np.array([-inf, inf]))
    def test_spherical_in_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E5
        # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but
        # this appears impossible to achieve because C99 regards any complex
        # value with at least one infinite part as a complex infinity, so
        # 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is
        # the correct return value.
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan]))
    def test_spherical_in_at_zero(self):
        # https://dlmf.nist.gov/10.52.E1
        # But note that n = 0 is a special case: i0 = sinh(x)/x -> 1
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0]))
class TestSphericalKn:
    """Tests for spherical_kn, the modified spherical Bessel function k_n."""
    def test_spherical_kn_exact(self):
        # Closed form for k_2, https://dlmf.nist.gov/10.49.E13
        x = np.array([0.12, 1.23, 12.34, 123.45])
        assert_allclose(spherical_kn(2, x),
                        pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3))
    def test_spherical_kn_recurrence_real(self):
        # Three-term recurrence, https://dlmf.nist.gov/10.51.E4
        # (sign factors account for the alternating sign convention).
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x),
                        (-1)**n*(2*n + 1)/x*spherical_kn(n, x))
    def test_spherical_kn_recurrence_complex(self):
        # Three-term recurrence, https://dlmf.nist.gov/10.51.E4
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x),
                        (-1)**n*(2*n + 1)/x*spherical_kn(n, x))
    def test_spherical_kn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E6
        n = 5
        x = np.array([-inf, inf])
        assert_allclose(spherical_kn(n, x), np.array([-inf, 0]))
    def test_spherical_kn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E6
        # The behavior at complex infinity depends on the sign of the real
        # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's
        # z*inf. This distinction cannot be captured, so we return nan.
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan]))
    def test_spherical_kn_at_zero(self):
        # k_n diverges at the origin, https://dlmf.nist.gov/10.52.E2
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_kn(n, x), inf*np.ones(shape=n.shape))
    def test_spherical_kn_at_zero_complex(self):
        # For a complex zero argument the result is nan,
        # https://dlmf.nist.gov/10.52.E2
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0 + 0j
        assert_allclose(spherical_kn(n, x), nan*np.ones(shape=n.shape))
class SphericalDerivativesTestCase:
    """Mixin checking `derivative=True` via the fundamental theorem.

    Subclasses supply ``f`` (the function) and ``df`` (its claimed
    derivative); the integral of ``df`` over [a, b] must equal
    ``f(b) - f(a)`` to within the quadrature tolerance.
    """
    def fundamental_theorem(self, n, a, b):
        integral, tolerance = quad(lambda z: self.df(n, z), a, b)
        expected = self.f(n, b) - self.f(n, a)
        assert_allclose(integral, expected, atol=tolerance)

    @pytest.mark.slow
    def test_fundamental_theorem_0(self):
        self.fundamental_theorem(0, 3.0, 15.0)

    @pytest.mark.slow
    def test_fundamental_theorem_7(self):
        self.fundamental_theorem(7, 0.5, 1.2)
class TestSphericalJnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_jn."""
    def test_spherical_jn_d_zero(self):
        # At x = 0: j0' = 0, j1' = 1/3 and jn' = 0 for n >= 2.
        n = np.array([0, 1, 2, 3, 7, 15])
        assert_allclose(spherical_jn(n, 0, derivative=True),
                        np.array([0, 1/3, 0, 0, 0, 0]))

    def f(self, n, z):
        return spherical_jn(n, z)

    def df(self, n, z):
        return spherical_jn(n, z, derivative=True)
class TestSphericalYnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_yn."""
    def f(self, n, z):
        return spherical_yn(n, z)

    def df(self, n, z):
        return spherical_yn(n, z, derivative=True)
class TestSphericalInDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_in."""
    def test_spherical_in_d_zero(self):
        # in'(0) = 0 for every n >= 1.
        n = np.array([1, 2, 3, 7, 15])
        assert_allclose(spherical_in(n, 0, derivative=True),
                        np.zeros(5))

    def f(self, n, z):
        return spherical_in(n, z)

    def df(self, n, z):
        return spherical_in(n, z, derivative=True)
class TestSphericalKnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_kn."""
    def f(self, n, z):
        return spherical_kn(n, z)

    def df(self, n, z):
        return spherical_kn(n, z, derivative=True)
class TestSphericalOld:
    # These are tests from the TestSpherical class of test_basic.py,
    # rewritten to use spherical_* instead of sph_* but otherwise unchanged.
    def test_sph_in(self):
        # This test reproduces test_basic.TestSpherical.test_sph_in.
        # Row 0 holds values, row 1 holds derivatives.
        i1n = np.empty((2,2))
        x = 0.2
        i1n[0][0] = spherical_in(0, x)
        i1n[0][1] = spherical_in(1, x)
        i1n[1][0] = spherical_in(0, x, derivative=True)
        i1n[1][1] = spherical_in(1, x, derivative=True)
        # Expected derivatives from the recurrence relations.
        inp0 = (i1n[0][1])
        inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
        assert_array_almost_equal(i1n[0],np.array([1.0066800127054699381,
                                                   0.066933714568029540839]),12)
        assert_array_almost_equal(i1n[1],[inp0,inp1],12)
    def test_sph_in_kn_order0(self):
        # Order-0 closed forms: i0 = sinh(x)/x, k0 = pi/2 * exp(-x)/x.
        x = 1.
        sph_i0 = np.empty((2,))
        sph_i0[0] = spherical_in(0, x)
        sph_i0[1] = spherical_in(0, x, derivative=True)
        sph_i0_expected = np.array([np.sinh(x)/x,
                                    np.cosh(x)/x-np.sinh(x)/x**2])
        assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
        sph_k0 = np.empty((2,))
        sph_k0[0] = spherical_kn(0, x)
        sph_k0[1] = spherical_kn(0, x, derivative=True)
        sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
                                    -0.5*pi*exp(-x)*(1/x+1/x**2)])
        assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
    def test_sph_jn(self):
        # Row 0 holds j_0..j_2 values, row 1 their derivatives.
        s1 = np.empty((2,3))
        x = 0.2
        s1[0][0] = spherical_jn(0, x)
        s1[0][1] = spherical_jn(1, x)
        s1[0][2] = spherical_jn(2, x)
        s1[1][0] = spherical_jn(0, x, derivative=True)
        s1[1][1] = spherical_jn(1, x, derivative=True)
        s1[1][2] = spherical_jn(2, x, derivative=True)
        # Expected derivatives from the recurrence relations.
        s10 = -s1[0][1]
        s11 = s1[0][0]-2.0/0.2*s1[0][1]
        s12 = s1[0][1]-3.0/0.2*s1[0][2]
        assert_array_almost_equal(s1[0],[0.99334665397530607731,
                                         0.066400380670322230863,
                                         0.0026590560795273856680],12)
        assert_array_almost_equal(s1[1],[s10,s11,s12],12)
    def test_sph_kn(self):
        # Row 0 holds k_0..k_2 values, row 1 their derivatives.
        kn = np.empty((2,3))
        x = 0.2
        kn[0][0] = spherical_kn(0, x)
        kn[0][1] = spherical_kn(1, x)
        kn[0][2] = spherical_kn(2, x)
        kn[1][0] = spherical_kn(0, x, derivative=True)
        kn[1][1] = spherical_kn(1, x, derivative=True)
        kn[1][2] = spherical_kn(2, x, derivative=True)
        # Expected derivatives from the recurrence relations.
        kn0 = -kn[0][1]
        kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
        kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
        assert_array_almost_equal(kn[0],[6.4302962978445670140,
                                         38.581777787067402086,
                                         585.15696310385559829],12)
        assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
    def test_sph_yn(self):
        sy1 = spherical_yn(2, 0.2)
        sy2 = spherical_yn(0, 0.2)
        assert_almost_equal(sy1,-377.52483,5)  # previous values in the system
        assert_almost_equal(sy2,-4.9003329,5)
        # Derivative via the recurrence y1' = (y0 - 2*y2)/3.
        sphpy = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3
        sy3 = spherical_yn(1, 0.2, derivative=True)
        assert_almost_equal(sy3,sphpy,4)  # compare correct derivative val. (correct =-system val).
@@ -1,77 +0,0 @@
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pytest
from scipy.special._ufuncs import _sinpi as sinpi
from scipy.special._ufuncs import _cospi as cospi
from scipy._lib._numpy_compat import suppress_warnings
def test_integer_real_part():
    # When Re(z) is an integer, sin(pi*z) is purely imaginary and
    # cos(pi*z) is purely real; the implementation must get this
    # *exactly* right.
    re = np.arange(-100, 101)
    im = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
    re, im = np.meshgrid(re, im)
    z = re + 1j*im
    assert_equal(sinpi(z).real, 0.0)
    assert_equal(cospi(z).imag, 0.0)
def test_half_integer_real_part():
    # When Re(z) is a half-integer, sin(pi*z) is purely real and
    # cos(pi*z) is purely imaginary; the implementation must get this
    # *exactly* right.
    re = np.arange(-100, 101) + 0.5
    im = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
    re, im = np.meshgrid(re, im)
    z = re + 1j*im
    assert_equal(sinpi(z).imag, 0.0)
    assert_equal(cospi(z).real, 0.0)
def test_intermediate_overlow():
    # Make sure overflow is avoided in situations where cosh/sinh of
    # the imaginary part would overflow on their own but the product
    # with sin/cos of the real part would not.
    # Reference data generated with mpmath.
    points = [complex(1 + 1e-14, 227),
              complex(1e-35, 250),
              complex(1e-301, 445)]
    expected = [complex(-8.113438309924894e+295, -np.inf),
                complex(1.9507801934611995e+306, np.inf),
                complex(2.205958493464539e+306, np.inf)]
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
        for point, value in zip(points, expected):
            assert_allclose(sinpi(point), value)
    # Cosine check; less interesting because cos(0) = 1.
    point = complex(0.5 + 1e-14, 227)
    value = complex(-8.113438309924894e+295, -np.inf)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
        assert_allclose(cospi(point), value)
@pytest.mark.xfail('win32' in sys.platform
                   and np.intp(0).itemsize < 8
                   and sys.version_info < (3, 5),
                   reason="fails on 32-bit Windows with old MSVC")
def test_zero_sign():
    # IEEE signed zeros must be preserved: sinpi(-0.0) is -0.0 while
    # sinpi(0.0) and cospi(0.5) are +0.0.
    result = sinpi(-0.0)
    assert result == 0.0
    assert np.signbit(result)
    result = sinpi(0.0)
    assert result == 0.0
    assert not np.signbit(result)
    result = cospi(0.5)
    assert result == 0.0
    assert not np.signbit(result)
@@ -1,55 +0,0 @@
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_equal
import scipy.special as sc
def test_wrightomega_nan():
    # A NaN in either component of the input propagates to both
    # components of the output.
    nan_points = [complex(np.nan, 0),
                  complex(0, np.nan),
                  complex(np.nan, np.nan),
                  complex(np.nan, 1),
                  complex(1, np.nan)]
    for point in nan_points:
        result = sc.wrightomega(point)
        assert_(np.isnan(result.real))
        assert_(np.isnan(result.imag))
def test_wrightomega_inf_branch():
    # Approaching Re(z) = -inf, the result is a signed zero whose
    # real/imaginary signs depend on which side of the branch cuts
    # the imaginary part lies.
    cases = [(complex(-np.inf, np.pi/4), complex(0.0, 0.0)),
             (complex(-np.inf, -np.pi/4), complex(0.0, -0.0)),
             (complex(-np.inf, 3*np.pi/4), complex(-0.0, 0.0)),
             (complex(-np.inf, -3*np.pi/4), complex(-0.0, -0.0))]
    for point, expected in cases:
        result = sc.wrightomega(point)
        # Compare parts separately: in older numpy versions
        # assert_equal does not check the sign of complex zeros,
        # but it does for real scalars.
        assert_equal(result.real, expected.real)
        assert_equal(result.imag, expected.imag)
def test_wrightomega_inf():
    # For these infinite inputs wrightomega(z) returns z unchanged.
    for point in [complex(np.inf, 10),
                  complex(-np.inf, 10),
                  complex(10, np.inf),
                  complex(10, -np.inf)]:
        assert_equal(sc.wrightomega(point), point)
def test_wrightomega_singular():
    # At the singular points z = -1 +/- i*pi the value is exactly -1,
    # with a positively-signed zero imaginary part.
    pts = [complex(-1.0, np.pi),
           complex(-1.0, -np.pi)]
    for p in pts:
        res = sc.wrightomega(p)
        assert_equal(res, -1.0)
        # Idiomatic boolean check (was `np.signbit(...) == False`).
        assert_(not np.signbit(res.imag))
@@ -1,39 +0,0 @@
from __future__ import division, print_function, absolute_import
import scipy.special as sc
import numpy as np
from numpy.testing import assert_, assert_equal, assert_allclose
def test_zeta():
    # Hurwitz zeta: zeta(2, 2) = pi**2/6 - 1.
    expected = np.pi**2/6 - 1
    assert_allclose(sc.zeta(2, 2), expected, rtol=1e-12)
def test_zeta_1arg():
    # The one-argument form is the Riemann zeta function; check two
    # classical closed-form values.
    for arg, expected in [(2, np.pi**2/6), (4, np.pi**4/90)]:
        assert_allclose(sc.zeta(arg), expected, rtol=1e-12)
def test_zetac():
    # Exact values at special points.
    assert_equal(sc.zetac(0), -1.5)
    assert_equal(sc.zetac(1.0), np.inf)
    # Reference values computed with Wolfram Alpha `Zeta[x] - 1`.
    cases = [(-2.1, -0.9972705002153750),
             (0.8, -5.437538415895550),
             (0.9999, -10000.42279161673),
             (9, 0.002008392826082214),
             (50, 8.881784210930816e-16),
             (75, 2.646977960169853e-23)]
    for x, expected in cases:
        assert_allclose(sc.zetac(x), expected, rtol=1e-12)
def test_zetac_negative_even():
    # zeta vanishes at the negative even integers (trivial zeros), so
    # zetac = zeta - 1 is exactly -1 there.
    for point in [-2, -50, -100]:
        assert_equal(sc.zetac(point), -1)
def test_zetac_inf():
    # zeta -> 1 as x -> +inf, so zetac(+inf) = 0; at -inf there is no
    # meaningful limit, so nan is returned.
    assert_equal(sc.zetac(np.inf), 0.0)
    assert_(np.isnan(sc.zetac(-np.inf)))