import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
                     norm, warn_extraneous, validate_first_step)
from . import dop853_coefficients


# Multiply steps computed from asymptotic behaviour of errors by this.
SAFETY = 0.9

MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.


def rk_step(fun, t, y, f, h, A, B, C, K):
    """Perform a single Runge-Kutta step.

    This function computes a prediction of an explicit Runge-Kutta method and
    also estimates the error of a less accurate method.

    Notation for Butcher tableau is as in [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Current value of the derivative, i.e., ``fun(t, y)``.
    h : float
        Step to use.
    A : ndarray, shape (n_stages, n_stages)
        Coefficients for combining previous RK stages to compute the next
        stage. For explicit methods the coefficients at and above the main
        diagonal are zeros.
    B : ndarray, shape (n_stages,)
        Coefficients for combining RK stages for computing the final
        prediction.
    C : ndarray, shape (n_stages,)
        Coefficients for incrementing time for consecutive RK stages.
        The value for the first stage is always zero.
    K : ndarray, shape (n_stages + 1, n)
        Storage array for putting RK stages here. Stages are stored in rows.
        The last row is filled with the derivative evaluated at the new
        solution, ``fun(t + h, y_new)``.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Solution at t + h computed with a higher accuracy.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
    """
    K[0] = f
    for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1):
        dy = np.dot(K[:s].T, a[:s]) * h
        K[s] = fun(t + c * h, y + dy)

    y_new = y + h * np.dot(K[:-1].T, B)
    f_new = fun(t + h, y_new)

    K[-1] = f_new

    return y_new, f_new


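# A minimal illustrative sketch, not part of the SciPy API: it drives
# ``rk_step`` directly with the two-stage Heun tableau to advance y' = -y by
# a single step and checks the result against the exact solution. The helper
# name ``_demo_rk_step`` and the tableau are assumptions introduced here
# purely for illustration; the function is never called on import.
def _demo_rk_step():
    def fun(t, y):
        return -y

    # Heun's method (explicit trapezoidal rule): two stages, second order.
    A = np.array([[0.0, 0.0],
                  [1.0, 0.0]])
    B = np.array([0.5, 0.5])
    C = np.array([0.0, 1.0])
    t, y, h = 0.0, np.array([1.0]), 0.1
    f = fun(t, y)
    K = np.empty((3, 1))  # n_stages + 1 rows, n columns.
    y_new, f_new = rk_step(fun, t, y, f, h, A, B, C, K)
    # Heun's method is second-order accurate, so the one-step error is O(h**3).
    assert abs(y_new[0] - np.exp(-h)) < 1e-3
    return y_new, f_new

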
class RungeKutta(OdeSolver):
    """Base class for explicit Runge-Kutta methods."""
    C: np.ndarray = NotImplemented
    A: np.ndarray = NotImplemented
    B: np.ndarray = NotImplemented
    E: np.ndarray = NotImplemented
    P: np.ndarray = NotImplemented
    order: int = NotImplemented
    error_estimator_order: int = NotImplemented
    n_stages: int = NotImplemented

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized,
                         support_complex=True)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                self.error_estimator_order, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
        self.error_exponent = -1 / (self.error_estimator_order + 1)
        self.h_previous = None

    def _estimate_error(self, K, h):
        return np.dot(K.T, self.E) * h

    def _estimate_error_norm(self, K, h, scale):
        return norm(self._estimate_error(K, h) / scale)

    def _step_impl(self):
        t = self.t
        y = self.y

        max_step = self.max_step
        rtol = self.rtol
        atol = self.atol

        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)

        if self.h_abs > max_step:
            h_abs = max_step
        elif self.h_abs < min_step:
            h_abs = min_step
        else:
            h_abs = self.h_abs

        step_accepted = False
        step_rejected = False

        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
                                   self.B, self.C, self.K)
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = self._estimate_error_norm(self.K, h, scale)

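            # Standard step-size controller: the step is accepted when the
            # scaled error norm is below 1. The next step size is obtained by
            # multiplying h_abs by SAFETY * error_norm ** error_exponent
            # (error_exponent = -1 / (error_estimator_order + 1)), clipped to
            # [MIN_FACTOR, MAX_FACTOR]; growth is suppressed right after a
            # rejected step.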
            if error_norm < 1:
                if error_norm == 0:
                    factor = MAX_FACTOR
                else:
                    factor = min(MAX_FACTOR,
                                 SAFETY * error_norm ** self.error_exponent)

                if step_rejected:
                    factor = min(1, factor)

                h_abs *= factor

                step_accepted = True
            else:
                h_abs *= max(MIN_FACTOR,
                             SAFETY * error_norm ** self.error_exponent)
                step_rejected = True

        self.h_previous = h
        self.y_old = y

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.f = f_new

        return True, None

    def _dense_output_impl(self):
        Q = self.K.T.dot(self.P)
        return RkDenseOutput(self.t_old, self.t, self.y_old, Q)


class RK23(RungeKutta):
    """Explicit Runge-Kutta method of order 3(2).

    This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
    assuming accuracy of the second-order method, but steps are taken using the
    third-order accurate formula (local extrapolation is done). A cubic Hermite
    polynomial is used for the dense output.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` may be called in a vectorized fashion. False (default)
        is recommended for this solver.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for this solver.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
        Is always 0 for this solver as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    """
    order = 3
    error_estimator_order = 2
    n_stages = 3
    C = np.array([0, 1/2, 3/4])
    A = np.array([
        [0, 0, 0],
        [1/2, 0, 0],
        [0, 3/4, 0]
    ])
    B = np.array([2/9, 1/3, 4/9])
    E = np.array([5/72, -1/12, -1/9, 1/8])
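    # E holds the weights of the embedded second-order solution minus B (the
    # last entry weights the FSAL stage), so ``np.dot(K.T, E) * h`` is the
    # local error estimate returned by ``_estimate_error``.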
    P = np.array([[1, -4/3, 5/9],
                  [0, 1, -2/3],
                  [0, 4/3, -8/9],
                  [0, -1, 1]])


class RK45(RungeKutta):
    """Explicit Runge-Kutta method of order 5(4).

    This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
    assuming accuracy of the fourth-order method, but steps are taken
    using the fifth-order accurate formula (local extrapolation is done).
    A quartic interpolation polynomial is used for the dense output [2]_.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e., each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
        Is always 0 for this solver as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics, Vol. 6,
           No. 1, pp. 19-26, 1980.
    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics
           of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
    """
    order = 5
    error_estimator_order = 4
    n_stages = 6
    C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
    A = np.array([
        [0, 0, 0, 0, 0],
        [1/5, 0, 0, 0, 0],
        [3/40, 9/40, 0, 0, 0],
        [44/45, -56/15, 32/9, 0, 0],
        [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
        [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
    ])
    B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
    E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
                  1/40])
    # Corresponds to the optimum value of c_6 from [2]_.
    P = np.array([
        [1, -8048581381/2820520608, 8663915743/2820520608,
         -12715105075/11282082432],
        [0, 0, 0, 0],
        [0, 131558114200/32700410799, -68118460800/10900136933,
         87487479700/32700410799],
        [0, -1754552775/470086768, 14199869525/1410260304,
         -10690763975/1880347072],
        [0, 127303824393/49829197408, -318862633887/49829197408,
         701980252875/199316789632],
        [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
        [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])


class DOP853(RungeKutta):
    """Explicit Runge-Kutta method of order 8.

    This is a Python implementation of "DOP853" algorithm originally written
    in Fortran [1]_, [2]_. Note that this is not a literal translation, but
    the algorithmic core and coefficients are the same.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here, ``t`` is a scalar, and there are two options for the ndarray ``y``:
        It can either have shape (n,); then ``fun`` must return array_like with
        shape (n,). Alternatively it can have shape (n, k); then ``fun``
        must return an array_like with shape (n, k), i.e., each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian. Is always 0 for this solver
        as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.
    .. [2] `Page with original Fortran code of DOP853
            <http://www.unige.ch/~hairer/software.html>`_.
    """
    n_stages = dop853_coefficients.N_STAGES
    order = 8
    error_estimator_order = 7
    A = dop853_coefficients.A[:n_stages, :n_stages]
    B = dop853_coefficients.B
    C = dop853_coefficients.C[:n_stages]
    E3 = dop853_coefficients.E3
    E5 = dop853_coefficients.E5
    D = dop853_coefficients.D

    A_EXTRA = dop853_coefficients.A[n_stages + 1:]
    C_EXTRA = dop853_coefficients.C[n_stages + 1:]

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        super().__init__(fun, t0, y0, t_bound, max_step, rtol, atol,
                         vectorized, first_step, **extraneous)
        self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
                                    self.n), dtype=self.y.dtype)
        self.K = self.K_extended[:self.n_stages + 1]

    def _estimate_error(self, K, h):  # Left for testing purposes.
        err5 = np.dot(K.T, self.E5)
        err3 = np.dot(K.T, self.E3)
        denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
        correction_factor = np.ones_like(err5)
        mask = denom > 0
        correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
        return h * err5 * correction_factor

    def _estimate_error_norm(self, K, h, scale):
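        # Same combined error measure as Hairer's Fortran DOP853: the order-5
        # and order-3 embedded estimates are mixed as
        # err = |h| * ||err5||**2 / sqrt(n * (||err5||**2 + 0.01 * ||err3||**2)),
        # with the norms taken over the n scaled components.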
        err5 = np.dot(K.T, self.E5) / scale
        err3 = np.dot(K.T, self.E3) / scale
        err5_norm_2 = np.linalg.norm(err5)**2
        err3_norm_2 = np.linalg.norm(err3)**2
        if err5_norm_2 == 0 and err3_norm_2 == 0:
            return 0.0
        denom = err5_norm_2 + 0.01 * err3_norm_2
        return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))

    def _dense_output_impl(self):
        K = self.K_extended
        h = self.h_previous
        for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
                                   start=self.n_stages + 1):
            dy = np.dot(K[:s].T, a[:s]) * h
            K[s] = self.fun(self.t_old + c * h, self.y_old + dy)

        F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
                     dtype=self.y_old.dtype)

        f_old = K[0]
        delta_y = self.y - self.y_old

        F[0] = delta_y
        F[1] = h * f_old - delta_y
        F[2] = 2 * delta_y - h * (self.f + f_old)
        F[3:] = h * np.dot(self.D, K)

        return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)


class RkDenseOutput(DenseOutput):
    def __init__(self, t_old, t, y_old, Q):
        super().__init__(t_old, t)
        self.h = t - t_old
        self.Q = Q
        self.order = Q.shape[1] - 1
        self.y_old = y_old

    def _call_impl(self, t):
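        # Evaluate the interpolating polynomial: with x = (t - t_old) / h,
        # p = [x, x**2, ..., x**(order + 1)] and y(t) = y_old + h * Q @ p.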
        x = (t - self.t_old) / self.h
        if t.ndim == 0:
            p = np.tile(x, self.order + 1)
            p = np.cumprod(p)
        else:
            p = np.tile(x, (self.order + 1, 1))
            p = np.cumprod(p, axis=0)
        y = self.h * np.dot(self.Q, p)
        if y.ndim == 2:
            y += self.y_old[:, None]
        else:
            y += self.y_old

        return y


class Dop853DenseOutput(DenseOutput):
    def __init__(self, t_old, t, y_old, F):
        super().__init__(t_old, t)
        self.h = t - t_old
        self.F = F
        self.y_old = y_old

    def _call_impl(self, t):
        x = (t - self.t_old) / self.h

        if t.ndim == 0:
            y = np.zeros_like(self.y_old)
        else:
            x = x[:, None]
            y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)

        for i, f in enumerate(reversed(self.F)):
            y += f
            if i % 2 == 0:
                y *= x
            else:
                y *= 1 - x
        y += self.y_old

        return y.T
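

# A hedged usage sketch, not part of the SciPy API: it shows how a solver
# defined above can be stepped manually and how dense output is evaluated
# between steps. The helper name ``_demo_manual_stepping`` and the test
# problem are assumptions introduced here for illustration only; the function
# is never called on import.
def _demo_manual_stepping():
    def exponential_decay(t, y):
        # Simple test problem y' = -y with known solution y0 * exp(-t).
        return -y

    solver = RK45(exponential_decay, t0=0.0, y0=np.array([1.0]), t_bound=2.0)
    while solver.status == 'running':
        message = solver.step()
        if solver.status == 'failed':
            raise RuntimeError(message)
        # Interpolate the solution halfway through the step just taken.
        sol = solver.dense_output()
        t_mid = 0.5 * (solver.t_old + solver.t)
        y_mid = sol(t_mid)
    return solver.t, solver.y, y_mid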