Skip to content

Commit

Permalink
Merge pull request #172 from sebapersson/Bugfix_prior
Browse files Browse the repository at this point in the history
Fix bug in gradient for prior, test gradient, and test block Hessian
  • Loading branch information
sebapersson committed Feb 16, 2024
2 parents 536198d + 0a25f81 commit 3222155
Show file tree
Hide file tree
Showing 23 changed files with 1,208 additions and 129 deletions.
12 changes: 7 additions & 5 deletions ext/PEtabPlotsExtension/optimisation_trajectory_recipes.jl
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
# The possible types of plots available for PEtabOptimisationResult.
const plot_types = [:objective, :best_objective]
const plot_types_ms = [:objective,
:best_objective,
:waterfall,
:runtime_eval,
:parallel_coordinates]
const plot_types_ms = [
:objective,
:best_objective,
:waterfall,
:runtime_eval,
:parallel_coordinates
]

# Plots the objective function progression for a PEtabOptimisationResult.
@recipe function f(res::PEtabOptimisationResult{T}; plot_type = :best_objective) where {T}
Expand Down
7 changes: 5 additions & 2 deletions src/Derivatives/Gradient.jl
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@ function compute_gradient_autodiff!(gradient::Vector{Float64},
# We need to track a variable for whether the ODE system could be solved, as checking the
# retcode on the solution array is not enough. This is because with ForwardDiff some chunks
# can solve the ODE while others fail, so if we only check the final retcode we miss these cases
# We need to track a variable for whether the ODE system could be solved, as checking the
# retcode on the solution array is not enough. This is because with ForwardDiff some chunks
# can solve the ODE while others fail, so if we only check the final retcode we miss these cases
simulation_info.could_solve[1] = true

# Case where based on the original PEtab file read into Julia we do not have any parameter vectors fixated.
Expand Down Expand Up @@ -213,10 +216,10 @@ function compute_gradient_prior!(gradient::Vector{Float64},
θ::Vector{<:Real},
θ_indices::ParameterIndices,
prior_info::PriorInfo)::Nothing
_evalPriors = (θ_est) -> begin
_eval_priors = (θ_est) -> begin
θ_estT = transformθ(θ_est, θ_indices.θ_names, θ_indices)
return -1.0 * compute_priors(θ_est, θ_estT, θ_indices.θ_names, prior_info) # We work with -loglik
end
gradient .+= ForwardDiff.gradient(_evalPriors, θ)
gradient .+= ForwardDiff.gradient(_eval_priors, θ)
return nothing
end
5 changes: 0 additions & 5 deletions src/Objective/Priors.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,6 @@ function compute_priors(θ_parameter_scale::Vector{T},
θ_linear_scale::Vector{T},
θ_names::Vector{Symbol},
prior_info::PriorInfo)::T where {T <: Real}
if prior_info.has_priors == false
return 0.0
end

prior_value = 0.0
for (θ_name, logpdf) in prior_info.logpdf
iθ = findfirst(x -> x == θ_name, θ_names)
Expand All @@ -18,6 +14,5 @@ function compute_priors(θ_parameter_scale::Vector{T},
prior_value += logpdf(θ_linear_scale[iθ])
end
end

return prior_value
end
13 changes: 9 additions & 4 deletions src/PEtabODEProblem/Create.jl
Original file line number Diff line number Diff line change
Expand Up @@ -265,6 +265,8 @@ function PEtabODEProblem(petab_model::PEtabModel;
return measurement_info.simulated_values
end

# Computing nllh along with the gradient is needed for efficient Bayesian
# inference, as for example AdvancedHMC.jl needs both nllh and gradient
# Computing nllh along with the gradient is needed for efficient Bayesian
# inference, as for example AdvancedHMC.jl needs both nllh and gradient
# in its evaluations.
Expand Down Expand Up @@ -631,7 +633,9 @@ function create_gradient_function(which_method::Symbol,
prior_info,
cfg,
petab_ODE_cache,
exp_id_solve = [:all],
exp_id_solve = [
:all
],
split_over_conditions = split_over_conditions,
isremade = isremade)
end
Expand Down Expand Up @@ -810,8 +814,7 @@ function create_hessian_function(which_method::Symbol,

_chunksize = isnothing(chunksize) ? ForwardDiff.Chunk(θ_dynamic) :
ForwardDiff.Chunk(chunksize)
cfg = ForwardDiff.HessianConfig(compute_cost_θ_dynamic, θ_dynamic,
ForwardDiff.Chunk(chunksize))
cfg = ForwardDiff.HessianConfig(compute_cost_θ_dynamic, θ_dynamic, _chunksize)

_compute_hessian! = let compute_cost_θ_not_ODE = compute_cost_θ_not_ODE,
compute_cost_θ_dynamic = compute_cost_θ_dynamic,
Expand Down Expand Up @@ -990,7 +993,9 @@ function create_hessian_function(which_method::Symbol,
cfg,
cfg_not_solve_ode,
petab_ODE_cache,
exp_id_solve = [:all],
exp_id_solve = [
:all
],
reuse_sensitivities = reuse_sensitivities,
return_jacobian = return_jacobian,
split_over_conditions = split_over_conditions,
Expand Down
4 changes: 2 additions & 2 deletions src/Process_input/Julia_input.jl
Original file line number Diff line number Diff line change
Expand Up @@ -257,8 +257,8 @@ function parse_petab_measurements(petab_measurements::DataFrame,
simulation_conditions,
petab_parameters::Vector{PEtabParameter})::DataFrame
allowed_names = ["time", "obs_id", "observable_id", "noise_parameters", "measurement",
"simulation_id", "pre_equilibration_id", "pre_eq_id",
"observable_parameters"]
"simulation_id", "pre_equilibration_id", "pre_eq_id",
"observable_parameters"]
column_names = names(petab_measurements)
for name in column_names
if name ∉ allowed_names
Expand Down
2 changes: 1 addition & 1 deletion src/Process_input/Observables/h_sigma_derivatives.jl
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ function create_derivative_σ_h_file(model_name::String,
parameter_map, state_map, experimental_conditions)

# Dummy variables to keep the PEtab importer happy even though we are not providing any PEtab files
model_SBML = SBMLImporter.ModelSBML()
model_SBML = SBMLImporter.ModelSBML("")

∂h∂u_str, ∂h∂p_str = PEtab.create∂h∂_function(model_name, @__DIR__, model_state_names,
parameter_info, p_ode_problem_names,
Expand Down
6 changes: 3 additions & 3 deletions src/Process_input/Observables/u0_h_sigma.jl
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ function create_σ_h_u0_file(model_name::String,
parameter_map, state_map, experimental_conditions)

# Dummy variables to keep the PEtab importer happy even though we are not providing any PEtab files
model_SBML = SBMLImporter.ModelSBML()
model_SBML = SBMLImporter.ModelSBML("")

h_str = PEtab.create_h_function(model_name, @__DIR__, model_state_names, parameter_info,
p_ode_problem_names,
Expand Down Expand Up @@ -227,7 +227,7 @@ end
dir_model::String,
parameter_info::ParametersInfo,
p_ode_problem_names::Vector{String},
state_map,
state_map,
model_SBML;
inplace::Bool=true)
Expand Down Expand Up @@ -270,7 +270,7 @@ function create_u0_function(model_name::String,
# Write the formula for each initial condition to file
_model_state_names = [replace.(string.(state_map[i].first), "(t)" => "")
for i in eachindex(state_map)]
# If we create from a Julia model (e.g. Catalyst) this is not applicable and the step is skipped
# If we create from a Julia model (e.g. Catalyst) this is not applicable and the step is skipped
if !isempty(model_SBML.parameters)
model_state_names1 = filter(x -> x ∈ keys(model_SBML.species) &&
model_SBML.species[x].assignment_rule == false,
Expand Down
148 changes: 82 additions & 66 deletions src/Process_input/Table_input/Read_tables.jl
Original file line number Diff line number Diff line change
Expand Up @@ -75,14 +75,16 @@ function check_petab_files(experimental_conditions::CSV.File, measurements_data,
transformation_types = ["lin", "log", "log10"]
distribution_types = ["laplace", "normal"]
estimateTypes = [0, 1]
priorParameterTypes = ["uniform",
"normal",
"laplace",
"logNormal",
"logLaplace",
"parameterScaleUniform",
"parameterScaleNormal",
"parameterScaleLaplace"]
priorParameterTypes = [
"uniform",
"normal",
"laplace",
"logNormal",
"logLaplace",
"parameterScaleUniform",
"parameterScaleNormal",
"parameterScaleLaplace"
]

# Check experimental_conditions
columns_check = ["conditionId", "conditionName"]
Expand All @@ -106,74 +108,88 @@ function check_petab_files(experimental_conditions::CSV.File, measurements_data,
end

# Check measurements_data
columns_check = ["observableId",
"simulationConditionId",
"measurement",
"time",
"preequilibrationConditionId",
"observableParameters",
"noiseParameters",
"datasetId",
"replicateId"]
allowed_types = [string_types,
string_types,
number_types,
number_types,
string_types,
string_number_types,
string_number_types,
string_number_types,
string_number_types]
columns_check = [
"observableId",
"simulationConditionId",
"measurement",
"time",
"preequilibrationConditionId",
"observableParameters",
"noiseParameters",
"datasetId",
"replicateId"
]
allowed_types = [
string_types,
string_types,
number_types,
number_types,
string_types,
string_number_types,
string_number_types,
string_number_types,
string_number_types
]
required_columns = ["observableId", "simulationConditionId", "measurement", "time"]
check_df_columns(measurements_data, "measurements_data", columns_check, allowed_types,
required_columns)

# Check parameters_data
columns_check = ["parameterId",
"parameterScale",
"lowerBound",
"upperBound",
"nominalValue",
"estimate",
"parameterName",
"initializationPriorType",
"initializationPriorParameters",
"objectivePriorType",
"objectivePriorParameters"]
columns_check = [
"parameterId",
"parameterScale",
"lowerBound",
"upperBound",
"nominalValue",
"estimate",
"parameterName",
"initializationPriorType",
"initializationPriorParameters",
"objectivePriorType",
"objectivePriorParameters"
]
# Some models have missing parameters in their bounds even though it's mandatory, so we add Missing as an allowed DataType for these columns.
allowed_types = [string_types,
transformation_types,
number_types,
number_types,
number_types,
estimateTypes,
string_types,
priorParameterTypes,
string_types,
priorParameterTypes,
string_types]
required_columns = ["parameterId",
"parameterScale",
"lowerBound",
"upperBound",
"nominalValue",
"estimate"]
allowed_types = [
string_types,
transformation_types,
number_types,
number_types,
number_types,
estimateTypes,
string_types,
priorParameterTypes,
string_types,
priorParameterTypes,
string_types
]
required_columns = [
"parameterId",
"parameterScale",
"lowerBound",
"upperBound",
"nominalValue",
"estimate"
]
check_df_columns(parameters_data, "parameters_data", columns_check, allowed_types,
required_columns)

# Check observables_data
columns_check = ["observableId",
"observableFormula",
"noiseFormula",
"observableName",
"observableTransformation",
"noiseDistribution"]
allowed_types = [string_types,
string_types,
string_number_types,
string_types,
transformation_types,
distribution_types]
columns_check = [
"observableId",
"observableFormula",
"noiseFormula",
"observableName",
"observableTransformation",
"noiseDistribution"
]
allowed_types = [
string_types,
string_types,
string_number_types,
string_types,
transformation_types,
distribution_types
]
required_columns = ["observableId", "observableFormula", "noiseFormula"]
check_df_columns(observables_data, "observables_data", columns_check, allowed_types,
required_columns)
Expand Down
11 changes: 0 additions & 11 deletions test/Julia_import/Example.jl

This file was deleted.

Loading

0 comments on commit 3222155

Please sign in to comment.