Enable Newton solver with simulation #1075

Merged
merged 10 commits on May 9, 2020
1 change: 1 addition & 0 deletions include/amici/backwardproblem.h
@@ -116,6 +116,7 @@ class BackwardProblem {

Model *model;
Solver *solver;
const ExpData *edata;

/** current time */
realtype t;
16 changes: 16 additions & 0 deletions include/amici/solver.h
@@ -177,6 +177,11 @@ class Solver {
*/
void setSensitivityMethod(SensitivityMethod sensi_meth);

/**
* @brief Disable forward sensitivity integration (used in steady state sim)
*/
void switchForwardSensisOff() const;

/**
* @brief Get maximum number of allowed Newton steps for steady state
* computation
@@ -638,6 +643,12 @@ class Solver {
virtual void sensReInit(const AmiVectorArray &yyS0,
const AmiVectorArray &ypS0) const = 0;

/**
* @brief Switches off computation of state sensitivities without
* deallocating the memory for sensitivities
*/
virtual void sensToggleOff() const = 0;

/**
* @brief Reinitializes the adjoint states after an event occurrence
*
@@ -1452,6 +1463,11 @@ class Solver {
*/
void setSensInitDone() const;

/**
* @brief sets that memory for forward sensitivities has not been allocated
*/
void setSensInitOff() const;

/**
* @brief sets that memory for forward interpolation has been allocated
*/
2 changes: 2 additions & 0 deletions include/amici/solver_cvodes.h
@@ -40,6 +40,8 @@ class CVodeSolver : public Solver {
void sensReInit(const AmiVectorArray &yyS0,
const AmiVectorArray &ypS0) const override;

void sensToggleOff() const override;

void reInitB(int which, realtype tB0,
const AmiVector &yyB0, const AmiVector &ypB0) const override;

2 changes: 2 additions & 0 deletions include/amici/solver_idas.h
@@ -42,6 +42,8 @@ class IDASolver : public Solver {
void sensReInit(const AmiVectorArray &yyS0,
const AmiVectorArray &ypS0) const override;

void sensToggleOff() const override;

void reInitB(int which, realtype tB0,
const AmiVector &yyB0, const AmiVector &ypB0) const override;

66 changes: 57 additions & 9 deletions python/tests/test_preequilibration.py
@@ -14,8 +14,8 @@ def preeq_fixture(pysb_example_presimulation_module):
model.setReinitializeFixedParameterInitialStates(True)

solver = model.getSolver()
solver.setSensitivityOrder(amici.SensitivityOrder_first)
solver.setSensitivityMethod(amici.SensitivityMethod_forward)
solver.setSensitivityOrder(amici.SensitivityOrder.first)
solver.setSensitivityMethod(amici.SensitivityMethod.forward)

edata = get_data(model)
edata.t_presim = 2
@@ -49,12 +49,12 @@ def preeq_fixture(pysb_example_presimulation_module):
edata_sim.fixedParametersPreequilibration = ()

pscales = [
amici.ParameterScaling_log10, amici.ParameterScaling_ln,
amici.ParameterScaling_none,
amici.ParameterScaling.log10, amici.ParameterScaling.ln,
amici.ParameterScaling.none,
amici.parameterScalingFromIntVector([
amici.ParameterScaling_log10, amici.ParameterScaling_ln,
amici.ParameterScaling_none, amici.ParameterScaling_log10,
amici.ParameterScaling_ln, amici.ParameterScaling_none
amici.ParameterScaling.log10, amici.ParameterScaling.ln,
amici.ParameterScaling.none, amici.ParameterScaling.log10,
amici.ParameterScaling.ln, amici.ParameterScaling.none
])
]

@@ -148,8 +148,9 @@ def test_data_replicates(preeq_fixture):
model, solver, edata, edata_preeq, \
edata_presim, edata_sim, pscales, plists = preeq_fixture

for sensi_meth in [amici.SensitivityMethod_forward,
amici.SensitivityMethod_adjoint]:
for sensi_meth in [amici.SensitivityMethod.forward, ]:
# will be changed back to [..., amici.SensitivityMethod.adjoint] as
# soon as postequilibration with adjoints is implemented
solver.setSensitivityMethod(sensi_meth)

# add infty timepoint
@@ -234,3 +235,50 @@ def test_parameter_in_expdata(preeq_fixture):
rdata_edata[variable][0, :],
1e-6, 1e-6
).all(), variable


def test_raise_postequilibration_with_adjoints(preeq_fixture):
"""Test data replicates"""

model, solver, edata, edata_preeq, \
edata_presim, edata_sim, pscales, plists = preeq_fixture

# this needs to fail unless we remove presimulation
solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)

rdata = amici.runAmiciSimulation(model, solver, edata)
assert rdata['status'] == amici.AMICI_ERROR

edata.t_presim = 0.0
edata.fixedParametersPresimulation = ()

rdatas = {}
for sensi_meth in [amici.SensitivityMethod.forward,
amici.SensitivityMethod.adjoint]:
Contributor Author:
That's just a question, not necessary, but:
Do we also want to not only check FSA and ASA, but also FD?
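
A finite-difference check is not part of this PR, but a hand-written sketch could look roughly like the following. It only relies on calls that already appear in this test file plus model.getParameters/model.setParameters, assumes simulation parameters are taken from the model (not from edata), and ignores parameter scaling; the step size eps is an arbitrary choice.

import numpy as np
import amici


def finite_difference_sllh(model, solver, edata, eps=1e-5):
    """Approximate the gradient of llh by central finite differences
    (rough sketch: ignores parameter scaling and assumes all model
    parameters are in the parameter list)."""
    p0 = np.asarray(model.getParameters())
    grad = np.zeros_like(p0)
    for ip in range(len(p0)):
        llh = {}
        for sign in (+1, -1):
            p = p0.copy()
            p[ip] += sign * eps
            model.setParameters(p.tolist())
            llh[sign] = amici.runAmiciSimulation(model, solver, edata)['llh']
        grad[ip] = (llh[+1] - llh[-1]) / (2 * eps)
    model.setParameters(p0.tolist())  # restore the original parameters
    return grad

Comparing grad against rdatas[sensi_meth]['sllh'] with np.allclose would then mirror the FSA/ASA comparison below.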

# set sensi method
solver.setSensitivityMethod(sensi_meth)
solver.setNewtonMaxSteps(0)
model.setSteadyStateSensitivityMode(
amici.SteadyStateSensitivityMode.simulationFSA)
Contributor Author:
Add another loop for Newton solver as well?
Or are we overtesting then?
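
If such a loop were added, a sketch (not part of this PR) could wrap the existing sensitivity-method loop; the Newton step counts below are illustrative values, and the fixture objects model, solver, edata are assumed as above.

import amici


def run_with_and_without_newton(model, solver, edata):
    """Sketch: exercise both the pure-simulation path (Newton solver
    disabled) and a Newton-first path, for both sensitivity methods."""
    rdatas = {}
    for newton_max_steps in (0, 10 ** 4):  # illustrative values
        solver.setNewtonMaxSteps(newton_max_steps)
        for sensi_meth in (amici.SensitivityMethod.forward,
                           amici.SensitivityMethod.adjoint):
            solver.setSensitivityMethod(sensi_meth)
            rdata = amici.runAmiciSimulation(model, solver, edata)
            assert rdata['status'] == amici.AMICI_SUCCESS
            rdatas[(newton_max_steps, sensi_meth)] = rdata
    return rdatas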

# add rdatas
rdatas[sensi_meth] = amici.runAmiciSimulation(model, solver, edata)
assert rdatas[sensi_meth]['status'] == amici.AMICI_SUCCESS

for variable in ['llh', 'sllh']:
Contributor Author:
I didn't add it in my commits, but rather checked it by hand:
in an ideal world, sx0 and sx_ss would also be checked here...

Contributor Author:
Suggested change
for variable in ['llh', 'sllh']:
for variable in ['llh', 'sllh', 'sx_ss', 'sx0']:

assert np.allclose(
rdatas[amici.SensitivityMethod.forward][variable],
rdatas[amici.SensitivityMethod.adjoint][variable],
1e-6, 1e-6
), variable

# add infty timepoint
solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)
y = edata.getObservedData()
stdy = edata.getObservedDataStdDev()
ts = np.hstack([*edata.getTimepoints(), np.inf])
edata.setTimepoints(sorted(ts))
edata.setObservedData(np.hstack([y, y[0]]))
edata.setObservedDataStdDev(np.hstack([stdy, stdy[0]]))

with pytest.raises(RuntimeError):
amici.runAmiciSimulation(model, solver, edata)
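
Beyond the tests, the feature this PR enables can be used as follows: with the Newton solver disabled, steady states and their forward sensitivities are obtained by simulation. This is a minimal sketch; the model module name is hypothetical, and model.setSteadyStateSensitivityMode is assumed here to be the setter for the mode exercised in the test above.

import numpy as np
import amici

# hypothetical AMICI-generated model module; any imported model works alike
import model_steadystate as model_module

model = model_module.getModel()
# an np.inf timepoint requests postequilibration (steady state)
model.setTimepoints([1.0, 10.0, np.inf])

solver = model.getSolver()
solver.setSensitivityOrder(amici.SensitivityOrder.first)
solver.setSensitivityMethod(amici.SensitivityMethod.forward)

# disable the Newton solver so the steady state is found by forward
# simulation, and compute its sensitivities by simulation as well
solver.setNewtonMaxSteps(0)
model.setSteadyStateSensitivityMode(
    amici.SteadyStateSensitivityMode.simulationFSA)

rdata = amici.runAmiciSimulation(model, solver)
assert rdata['status'] == amici.AMICI_SUCCESS
print(rdata['sx'][-1])  # state sensitivities at the steady-state timepoint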
6 changes: 6 additions & 0 deletions src/backwardproblem.cpp
@@ -16,6 +16,7 @@ BackwardProblem::BackwardProblem(const ForwardProblem &fwd,
const SteadystateProblem *posteq):
model(fwd.model),
solver(fwd.solver),
edata(fwd.edata),
t(fwd.getTime()),
xB(fwd.model->nx_solver),
dxB(fwd.model->nx_solver),
@@ -96,6 +97,11 @@ void BackwardProblem::workBackwardProblem() {
solver->runB(model->t0());
solver->writeSolutionB(&t, xB, dxB, xQB, this->which);
}
if (edata && edata->t_presim > 0) {
ConditionContext cc(model, edata, FixedParameterContext::presimulation);
solver->runB(model->t0() - edata->t_presim);
solver->writeSolutionB(&t, xB, dxB, xQB, this->which);
}
}


22 changes: 16 additions & 6 deletions src/forwardproblem.cpp
@@ -65,20 +65,30 @@ void ForwardProblem::workForwardProblem() {

/* perform presimulation if necessary */
if (presimulate) {
if (solver->computingASA())
throw AmiException("Presimulation with adjoint sensitivities"
" is currently not implemented.");
handlePresimulation();
t = model->t0();
}
/* when computing adjoint sensitivity analysis with presimulation,
we need to store sx after the reinitialization after preequilibration
but before reinitialization after presimulation. As presimulation with ASA
will not update sx, we can simply extract the values here.*/
if (solver->computingASA() && presimulate)
sx = solver->getStateSensitivity(model->t0());

if (presimulate || preequilibrated)
solver->updateAndReinitStatesAndSensitivities(model);

// update x0 after computing consistent IC/reinitialization
x = solver->getState(model->t0());
/* this is a bit tricky here. we want to update sx0 after calling
solver->updateAndReinitStatesAndSensitivities, but not end up overwriting
results from non-FSA preequilibration. checking for solver->computingFSA()
is not the most intuitive way to make sure we do the right thing, but does
the right job */
if (solver->computingFSA())
/* when computing forward sensitivities, we generally want to update sx
after presimulation/preequilibration, and if we did neither, this also
won't harm. when computing ASA, we only want to update here if we didn't
update before presimulation (if applicable).
*/
if (solver->computingFSA() || (solver->computingASA() && !presimulate))
sx = solver->getStateSensitivity(model->t0());


17 changes: 11 additions & 6 deletions src/solver.cpp
@@ -104,12 +104,12 @@ void Solver::setup(const realtype t0, Model *model, const AmiVector &x0,

if (sensi >= SensitivityOrder::first && model->nx_solver > 0) {
auto plist = model->getParameterList();
sensInit1(sx0, sdx0);
if (sensi_meth == SensitivityMethod::forward && !plist.empty()) {
/* Set sensitivity analysis optional inputs */
auto par = model->getUnscaledParameters();

/* Activate sensitivity calculations */
sensInit1(sx0, sdx0);
initializeNonLinearSolverSens(model);
setSensParams(par.data(), nullptr, plist.data());

@@ -162,10 +162,10 @@ void Solver::updateAndReinitStatesAndSensitivities(Model *model) {
model->fx0_fixedParameters(x);
reInit(t, x, dx);

if (getSensitivityOrder() >= SensitivityOrder::first &&
getSensitivityMethod() == SensitivityMethod::forward) {
if (getSensitivityOrder() >= SensitivityOrder::first) {
model->fsx0_fixedParameters(sx, x);
sensReInit(sx, sdx);
if (getSensitivityMethod() == SensitivityMethod::forward)
sensReInit(sx, sdx);
}
}

@@ -918,6 +918,8 @@ void Solver::setInitDone() const { initialized = true; };

void Solver::setSensInitDone() const { sensInitialized = true; }

void Solver::setSensInitOff() const { sensInitialized = false; }

void Solver::setAdjInitDone() const { adjInitialized = true; }

void Solver::setInitDoneB(const int which) const {
@@ -932,6 +934,11 @@ void Solver::setQuadInitDoneB(const int which) const {
initializedQB.at(which) = true;
}

void Solver::switchForwardSensisOff() const {
sensToggleOff();
setSensInitOff();
}

realtype Solver::getCpuTime() const {
return cpu_time;
}
@@ -1010,8 +1017,6 @@ const AmiVectorArray &Solver::getStateSensitivity(const realtype t) const {
getSensDky(t, 0);
}
}
} else {
sx.reset();
}
return sx;
}
38 changes: 24 additions & 14 deletions src/solver_cvodes.cpp
@@ -91,7 +91,7 @@ void CVodeSolver::init(const realtype t0, const AmiVector &x0,
solverWasCalledF = false;
forceReInitPostProcessF = false;
t = t0;
x.copy(x0);
x = x0;
int status;
if (getInitDone()) {
status = CVodeReInit(solverMemory.get(), t0, x.getNVector());
@@ -105,17 +105,21 @@

void CVodeSolver::sensInit1(const AmiVectorArray &sx0,
const AmiVectorArray & /*sdx0*/) const {
int status;
sx.copy(sx0);
if (getSensInitDone()) {
status = CVodeSensReInit(solverMemory.get(),
static_cast<int>(getSensitivityMethod()),
sx.getNVectorArray());
} else {
status = CVodeSensInit1(solverMemory.get(), nplist(),
static_cast<int>(getSensitivityMethod()),
fsxdot, sx.getNVectorArray());
setSensInitDone();
int status = CV_SUCCESS;
sx = sx0;
if (getSensitivityMethod() == SensitivityMethod::forward) {
if (getSensInitDone()) {
status = CVodeSensReInit(
solverMemory.get(),
static_cast<int>(getInternalSensitivityMethod()),
sx.getNVectorArray());
} else {
status =
CVodeSensInit1(solverMemory.get(), nplist(),
static_cast<int>(getInternalSensitivityMethod()),
fsxdot, sx.getNVectorArray());
setSensInitDone();
}
}
if (status != CV_SUCCESS)
throw CvodeException(status, "CVodeSensInit1");
@@ -126,7 +130,7 @@ void CVodeSolver::binit(const int which, const realtype tf,
const AmiVector & /*dxB0*/) const {
solverWasCalledB = false;
forceReInitPostProcessB = false;
xB.copy(xB0);
xB = xB0;
int status;
if (getInitDoneB(which)) {
status = CVodeReInitB(solverMemory.get(), which, tf, xB.getNVector());
@@ -140,7 +144,7 @@
}

void CVodeSolver::qbinit(const int which, const AmiVector &xQB0) const {
xQB.copy(xQB0);
xQB = xQB0;
int status;
if (getQuadInitDoneB(which)) {
status = CVodeQuadReInitB(solverMemory.get(), which, xQB.getNVector());
@@ -488,6 +492,12 @@ void CVodeSolver::reInitB(const int which, const realtype tB0,
resetState(cv_memB, xB.getNVector());
}

void CVodeSolver::sensToggleOff() const {
auto status = CVodeSensToggleOff(solverMemory.get());
if (status != CV_SUCCESS)
throw CvodeException(status, "CVodeSensToggleOff");
}

void CVodeSolver::quadReInitB(int which, const AmiVector &yQB0) const {
auto cv_memB =
static_cast<CVodeMem>(CVodeGetAdjCVodeBmem(solverMemory.get(), which));