V0.13.0 examples (#147)
* example updates

* rm jupyter content

* modified tests, added GaussianAdjoint

* rm available sensitivity test

* ...

* fixing FMIImport 1.0.6

* fixed juliacon23 example

* longer training for tests

* SciML code formatting

* minor modification to test net #2
ThummeTo committed Sep 11, 2024
1 parent 322f028 commit 32ee3db
Showing 38 changed files with 4,064 additions and 2,371 deletions.
14 changes: 14 additions & 0 deletions .github/workflows/Formatter.yml
@@ -0,0 +1,14 @@
+name: Format suggestions
+on:
+  pull_request:
+    # this argument is not required if you don't use the `suggestion-label` input
+    types: [ opened, reopened, synchronize, labeled, unlabeled ]
+jobs:
+  code-style:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: julia-actions/julia-format@v3
+        with:
+          version: '1' # Set `version` to '1.0.54' if you need to use JuliaFormatter.jl v1.0.54 (default: '1')
+          suggestion-label: 'format-suggest' # leave this unset or empty to show suggestions for all PRs
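
Note: the new workflow above posts SciML-style formatting suggestions on pull requests. The same check can be reproduced locally with JuliaFormatter.jl; a minimal sketch, assuming JuliaFormatter.jl (v1 or later) is installed in the active environment:

```julia
# Format the whole repository in SciML style, mirroring what the CI bot suggests.
using JuliaFormatter

format(".", SciMLStyle())  # returns true if everything was already formatted
```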

66 changes: 33 additions & 33 deletions Project.toml
@@ -1,33 +1,33 @@
name = "FMIFlux"
uuid = "fabad875-0d53-4e47-9446-963b74cae21f"
version = "0.13.0"

[deps]
Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
DifferentiableEigen = "73a20539-4e65-4dcb-a56d-dc20f210a01b"
FMIImport = "9fcbc62e-52a0-44e9-a616-1359a0008194"
FMISensitivity = "3e748fe5-cd7f-4615-8419-3159287187d2"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
Optim = "429524aa-4258-5aef-a3af-852621145aeb"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
ThreadPools = "b189fb0b-2eb5-4ed4-bc0c-d34c51242431"

[weakdeps]
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"

[extensions]
JLD2Ext = ["JLD2"]

[compat]
Colors = "0.12"
DifferentiableEigen = "0.2.0"
FMIImport = "1.0.0"
FMISensitivity = "0.2.0"
Flux = "0.9 - 0.14"
Optim = "1.6"
OrdinaryDiffEq = "6.0"
Statistics = "1"
ThreadPools = "2.1"
julia = "1.6"
name = "FMIFlux"
uuid = "fabad875-0d53-4e47-9446-963b74cae21f"
version = "0.13.0"

[deps]
Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
DifferentiableEigen = "73a20539-4e65-4dcb-a56d-dc20f210a01b"
FMIImport = "9fcbc62e-52a0-44e9-a616-1359a0008194"
FMISensitivity = "3e748fe5-cd7f-4615-8419-3159287187d2"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
Optim = "429524aa-4258-5aef-a3af-852621145aeb"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
ThreadPools = "b189fb0b-2eb5-4ed4-bc0c-d34c51242431"

[weakdeps]
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"

[extensions]
JLD2Ext = ["JLD2"]

[compat]
Colors = "0.12"
DifferentiableEigen = "0.2.0"
FMIImport = "1.0.6"
FMISensitivity = "0.2.0"
Flux = "0.9 - 0.14"
Optim = "1.6"
OrdinaryDiffEq = "6.0"
Statistics = "1"
ThreadPools = "2.1"
julia = "1.6"
2 changes: 1 addition & 1 deletion README.md
@@ -17,7 +17,7 @@ You can evaluate FMUs inside of your loss function.
[![Run PkgEval](https://github.com/ThummeTo/FMIFlux.jl/actions/workflows/Eval.yml/badge.svg)](https://github.com/ThummeTo/FMIFlux.jl/actions/workflows/Eval.yml)
[![Coverage](https://codecov.io/gh/ThummeTo/FMIFlux.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/ThummeTo/FMIFlux.jl)
[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
-[![FMIFlux Downloads](https://shields.io/endpoint?url=https://pkgs.genieframework.com/api/v1/badge/FMIFlux)](https://pkgs.genieframework.com?packages=FMIFlux)
+[![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle)

## How can I use FMIFlux.jl?

62 changes: 34 additions & 28 deletions docs/make.jl
@@ -3,36 +3,38 @@
# Licensed under the MIT license. See LICENSE file in the project root for details.
#

-import Pkg; Pkg.develop(path=joinpath(@__DIR__,"../../FMIFlux.jl"))
+import Pkg;
+Pkg.develop(path = joinpath(@__DIR__, "../../FMIFlux.jl"));
using Documenter, FMIFlux
using Documenter: GitHubActions

-makedocs(sitename="FMIFlux.jl",
-    format = Documenter.HTML(
-        collapselevel = 1,
-        sidebar_sitename = false,
-        edit_link = nothing,
-        size_threshold_ignore = [joinpath("examples","juliacon_2023.md")]
-    ),
-    warnonly=true,
-    pages= Any[
-        "Introduction" => "index.md"
-        "Examples" => [
-            "Overview" => "examples/overview.md"
-            "Simple CS-NeuralFMU" => "examples/simple_hybrid_CS.md"
-            "Simple ME-NeuralFMU" => "examples/simple_hybrid_ME.md"
-            "Growing Horizon ME-NeuralFMU" => "examples/growing_horizon_ME.md"
-            "JuliaCon 2023" => "examples/juliacon_2023.md"
-            "MDPI 2022" => "examples/mdpi_2022.md"
-            "Modelica Conference 2021" => "examples/modelica_conference_2021.md"
-            "Pluto Workshops" => "examples/workshops.md"
-        ]
-        "FAQ" => "faq.md"
-        "Library Functions" => "library.md"
-        "Related Publication" => "related.md"
-        "Contents" => "contents.md"
-    ]
-)
+makedocs(
+    sitename = "FMIFlux.jl",
+    format = Documenter.HTML(
+        collapselevel = 1,
+        sidebar_sitename = false,
+        edit_link = nothing,
+        size_threshold_ignore = [joinpath("examples", "juliacon_2023.md")],
+    ),
+    warnonly = true,
+    pages = Any[
+        "Introduction" => "index.md"
+        "Examples" => [
+            "Overview" => "examples/overview.md"
+            "Simple CS-NeuralFMU" => "examples/simple_hybrid_CS.md"
+            "Simple ME-NeuralFMU" => "examples/simple_hybrid_ME.md"
+            "Growing Horizon ME-NeuralFMU" => "examples/growing_horizon_ME.md"
+            "JuliaCon 2023" => "examples/juliacon_2023.md"
+            "MDPI 2022" => "examples/mdpi_2022.md"
+            "Modelica Conference 2021" => "examples/modelica_conference_2021.md"
+            "Pluto Workshops" => "examples/workshops.md"
+        ]
+        "FAQ" => "faq.md"
+        "Library Functions" => "library.md"
+        "Related Publication" => "related.md"
+        "Contents" => "contents.md"
+    ],
+)

function deployConfig()
github_repository = get(ENV, "GITHUB_REPOSITORY", "")
@@ -44,4 +44,8 @@ function deployConfig()
return GitHubActions(github_repository, github_event_name, github_ref)
end

-deploydocs(repo = "github.com/ThummeTo/FMIFlux.jl.git", devbranch = "main", deploy_config = deployConfig())
+deploydocs(
+    repo = "github.com/ThummeTo/FMIFlux.jl.git",
+    devbranch = "main",
+    deploy_config = deployConfig(),
+)
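
Note: a local documentation build can be approximated as follows. This is a sketch with assumptions: the checkout directory must be named `FMIFlux.jl` (because `make.jl` calls `Pkg.develop` with the relative path `../../FMIFlux.jl`), a `docs/Project.toml` environment is expected, and `deploydocs` should simply skip deployment outside CI.

```julia
# Run from the repository root; builds the docs into docs/build/.
import Pkg
Pkg.activate("docs")
include(joinpath("docs", "make.jl"))
```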
3 changes: 2 additions & 1 deletion examples/jupyter-src/.gitignore
@@ -1,2 +1,3 @@
params/
-*.png
+*.png
+*.gif
38 changes: 19 additions & 19 deletions examples/jupyter-src/juliacon_2023.ipynb
@@ -25,7 +25,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -65,7 +65,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -74,7 +74,7 @@
"using FMIFlux # for NeuralFMUs\n",
"using FMIZoo # a collection of demo models, including the VLDM\n",
"using FMIFlux.Flux # Machine Learning in Julia\n",
"using DifferentialEquations # for picking a NeuralFMU solver\n",
"using DifferentialEquations: Tsit5 # for picking a NeuralFMU solver\n",
"\n",
"import JLD2 # data format for saving/loading parameters\n",
"\n",
@@ -91,7 +91,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -108,7 +108,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -372,7 +372,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
@@ -408,7 +408,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
@@ -489,7 +489,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
@@ -552,7 +552,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
@@ -737,7 +737,7 @@
" params = FMIFlux.params(neuralFMU)\n",
"\n",
" # initialize the scheduler, keywords are passed to the NeuralFMU\n",
" initialize!(scheduler; parameters=data.params, p=params[1], showProgress=showProgress)\n",
" FMIFlux.initialize!(scheduler; parameters=data.params, p=params[1], showProgress=showProgress)\n",
" \n",
" # initialize Adam optimizer with our hyperparameters\n",
" optim = Adam(ETA, (BETA1, BETA2))\n",
@@ -747,16 +747,16 @@
" neuralFMU, # the neural FMU including the parameters to train\n",
" Iterators.repeated((), steps), # an iterator repeating `steps` times\n",
" optim; # the optimizer to train\n",
" gradient=:ForwardDiff, # currently, only ForwardDiff leads to good results for multi-event systems\n",
" chunk_size=32, # ForwardDiff chunk_size (=number of parameter estimations per run)\n",
" cb=() -> update!(scheduler), # update the scheduler after every step \n",
" gradient=:ReverseDiff, # ForwardDiff leads to good results for multi-event systems\n",
" chunk_size=32, # ForwardDiff chunk_size (=number of parameter estimations per run) - only if ForwardDiff is used\n",
" cb=() -> FMIFlux.update!(scheduler), # update the scheduler after every step \n",
" proceed_on_assert=true) # proceed, even if assertions are thrown, with the next step\n",
" \n",
" # the default execution mode\n",
" singleInstanceMode(fmu, false)\n",
"\n",
" # save our result parameters\n",
" fmiSaveParameters(neuralFMU, joinpath(@__DIR__, \"params\", \"$(ind).jld2\"))\n",
" FMIFlux.saveParameters(neuralFMU, joinpath(@__DIR__, \"params\", \"$(ind).jld2\"))\n",
" \n",
" # simulate the NeuralFMU on a validation trajectory\n",
" resultNFMU = neuralFMU(x0, (data_validation.consumption_t[1], data_validation.consumption_t[end]); parameters=data_validation.params, showProgress=showProgress, maxiters=1e7, saveat=data_validation.consumption_t)\n",
@@ -817,11 +817,11 @@
"neuralFMU = build_NFMU(fmu)\n",
"\n",
"# load parameters from hyperparameter optimization\n",
"loadParameters(neuralFMU, joinpath(@__DIR__, \"juliacon_2023.jld2\"))\n",
"FMIFlux.loadParameters(neuralFMU, joinpath(@__DIR__, \"juliacon_2023.jld2\"))\n",
"\n",
"# simulate and plot the NeuralFMU\n",
"resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n",
"resultFMU = fmiSimulate(fmu, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n",
"resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n",
"resultFMU = simulate(fmu, (tStart, tStop); parameters=data.params, showProgress=showProgress, saveat=tSave) \n",
"\n",
"# plot the NeuralFMU, original FMU and data (cumulative consumption)\n",
"fig = plot(resultNFMU; stateIndices=6:6, stateEvents=false, timeEvents=false, label=\"NeuralFMU\", ylabel=\"cumulative consumption [m/s]\")\n",
@@ -929,7 +929,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 33,
"metadata": {},
"outputs": [],
"source": [
@@ -957,7 +957,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
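Note: the notebook changes above migrate free-standing helper calls to qualified names (`fmiSaveParameters` to `FMIFlux.saveParameters`, `loadParameters` to `FMIFlux.loadParameters`, `fmiSimulate` to `simulate`). A minimal sketch of the updated parameter I/O pattern, where `neuralFMU` stands in for a trained model and the file path is hypothetical:

```julia
import JLD2   # loading JLD2 activates FMIFlux's JLD2 extension (see Project.toml)
using FMIFlux

path = joinpath(@__DIR__, "params", "demo.jld2")  # hypothetical path
FMIFlux.saveParameters(neuralFMU, path)           # save trained parameters
FMIFlux.loadParameters(neuralFMU, path)           # restore them later
```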
62 changes: 38 additions & 24 deletions examples/jupyter-src/juliacon_2023_distributedhyperopt.jl
@@ -13,34 +13,48 @@ using DistributedHyperOpt # add via `add "https://github.com/ThummeTo/Distribu
# ENV["JULIA_DEBUG"] = "DistributedHyperOpt"

nprocs()
-workers = addprocs(5)
+workers = addprocs(5)
@everywhere include(joinpath(@__DIR__, "workshop_module.jl"))

# creating paths for log files (logs), parameter sets (params) and hyperparameter plots (plots)
-for dir ∈ ("logs", "params", "plots")
+for dir ∈ ("logs", "params", "plots")
    path = joinpath(@__DIR__, dir)
    @info "Creating (if not already) path: $(path)"
    mkpath(path)
-end
-
-beta1 = 1.0 .- exp10.(LinRange(-4,-1,4))
-beta2 = 1.0 .- exp10.(LinRange(-6,-1,6))
-
-sampler = DistributedHyperOpt.Hyperband(;R=81, η=3, ressourceScale=1.0/81.0*NODE_Training.data.cumconsumption_t[end])
-optimization = DistributedHyperOpt.Optimization(NODE_Training.train!,
-    DistributedHyperOpt.Parameter("eta", (1e-5, 1e-2); type=:Log, samples=7, round_digits=5),
-    DistributedHyperOpt.Parameter("beta1", beta1),
-    DistributedHyperOpt.Parameter("beta2", beta2),
-    DistributedHyperOpt.Parameter("batchDur", (0.5, 20.0); samples=40, round_digits=1),
-    DistributedHyperOpt.Parameter("lastWeight", (0.1, 1.0); samples=10, round_digits=1),
-    DistributedHyperOpt.Parameter("schedulerID", [:Random, :Sequential, :LossAccumulation]),
-    DistributedHyperOpt.Parameter("loss", [:MSE, :MAE]) )
-DistributedHyperOpt.optimize(optimization;
-    sampler=sampler,
-    plot=true,
-    plot_ressources=true,
-    save_plot=joinpath(@__DIR__, "plots", "hyperoptim.png"),
-    redirect_worker_io_dir=joinpath(@__DIR__, "logs"))
-
-Plots.plot(optimization; size=(1024, 1024), ressources=true)
+end
+
+beta1 = 1.0 .- exp10.(LinRange(-4, -1, 4))
+beta2 = 1.0 .- exp10.(LinRange(-6, -1, 6))
+
+sampler = DistributedHyperOpt.Hyperband(;
+    R = 81,
+    η = 3,
+    ressourceScale = 1.0 / 81.0 * NODE_Training.data.cumconsumption_t[end],
+)
+optimization = DistributedHyperOpt.Optimization(
+    NODE_Training.train!,
+    DistributedHyperOpt.Parameter(
+        "eta",
+        (1e-5, 1e-2);
+        type = :Log,
+        samples = 7,
+        round_digits = 5,
+    ),
+    DistributedHyperOpt.Parameter("beta1", beta1),
+    DistributedHyperOpt.Parameter("beta2", beta2),
+    DistributedHyperOpt.Parameter("batchDur", (0.5, 20.0); samples = 40, round_digits = 1),
+    DistributedHyperOpt.Parameter("lastWeight", (0.1, 1.0); samples = 10, round_digits = 1),
+    DistributedHyperOpt.Parameter("schedulerID", [:Random, :Sequential, :LossAccumulation]),
+    DistributedHyperOpt.Parameter("loss", [:MSE, :MAE]),
+)
+DistributedHyperOpt.optimize(
+    optimization;
+    sampler = sampler,
+    plot = true,
+    plot_ressources = true,
+    save_plot = joinpath(@__DIR__, "plots", "hyperoptim.png"),
+    redirect_worker_io_dir = joinpath(@__DIR__, "logs"),
+)
+
+Plots.plot(optimization; size = (1024, 1024), ressources = true)
minimum, minimizer, ressource = DistributedHyperOpt.results(optimization)
(The remaining changed files of this commit were not rendered on this page.)
