Skip to content

Commit e766d40

Browse files
authored
v0.10.5 (#93)
* fix for documentation script / examples * modified tutorial * workshop enhanced * tutorial * final workshop * tutorial juliacon
1 parent f8f26eb commit e766d40

File tree

7 files changed

+2177
-236
lines changed

7 files changed

+2177
-236
lines changed

.github/workflows/Example.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ jobs:
4444
env:
4545
FILE: examples/src/${{ matrix.file-name }}.ipynb
4646
run: |
47-
jupyter nbconvert --ExecutePreprocessor.kernel_name="julia-1.8" --to notebook --inplace --execute ${{ env.FILE }}
47+
jupyter nbconvert --ExecutePreprocessor.kernel_name="julia-1.9" --to notebook --inplace --execute ${{ env.FILE }}
4848
jupyter nbconvert --to script ${{ env.FILE }}
4949
jupyter nbconvert --to markdown ${{ env.FILE }}
5050

Project.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
name = "FMIFlux"
22
uuid = "fabad875-0d53-4e47-9446-963b74cae21f"
3-
version = "0.10.4"
3+
version = "0.10.5"
44

55
[deps]
66
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
-606 Bytes
Loading

examples/src/.gitignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,3 @@
11
params/
2-
*.png
2+
*.png
3+
*.gif

examples/src/juliacon_2023.ipynb

Lines changed: 2108 additions & 163 deletions
Large diffs are not rendered by default.

examples/src/juliacon_2023_distributedhyperopt.jl

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,9 @@ nprocs()
1616
workers = addprocs(5)
1717
@everywhere include(joinpath(@__DIR__, "workshop_module.jl"))
1818

19-
# set the current hyperparameter optimization run
20-
@everywhere NODE_Training.HPRUN = 1
21-
@info "Run: $(NODE_Training.HPRUN)"
22-
2319
# creating paths for log files (logs), parameter sets (params) and hyperparameter plots (plots)
24-
for dir ∈ ("logs", "params", "plots", "results")
25-
path = joinpath(@__DIR__, dir, "$(NODE_Training.HPRUN)")
20+
for dir ∈ ("logs", "params", "plots")
21+
path = joinpath(@__DIR__, dir)
2622
@info "Creating (if not already) path: $(path)"
2723
mkpath(path)
2824
end
@@ -43,8 +39,8 @@ DistributedHyperOpt.optimize(optimization;
4339
sampler=sampler,
4440
plot=true,
4541
plot_ressources=true,
46-
save_plot=joinpath(@__DIR__, "plots", "$(NODE_Training.HPRUN)", "hyperoptim.png"),
47-
redirect_worker_io_dir=joinpath(@__DIR__, "logs", "$(NODE_Training.HPRUN)"))
42+
save_plot=joinpath(@__DIR__, "plots", "hyperoptim.png"),
43+
redirect_worker_io_dir=joinpath(@__DIR__, "logs"))
4844

4945
Plots.plot(optimization; size=(1024, 1024), ressources=true)
5046
minimum, minimizer, ressource = DistributedHyperOpt.results(optimization)

examples/src/juliacon_2023_helpers.jl

Lines changed: 61 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,32 @@
55
using LaTeXStrings
66

77
import FMIFlux: roundToLength
8-
import FMIZoo:movavg
8+
import FMIZoo: movavg
99

1010
import FMI: FMU2Solution
1111
import FMIZoo: VLDM, VLDM_Data
1212

13-
function plotANNError(neuralFMU::NeuralFMU, data::FMIZoo.VLDM_Data; reductionFactor::Int=10, field=:consumption, mov_avg::Int=100, filename=nothing)
13+
function fmiSingleInstanceMode(fmu::FMU2, mode::Bool)
14+
if mode
15+
# switch to a more efficient execution configuration, allocate only a single FMU instance, see:
16+
# https://thummeto.github.io/FMI.jl/dev/features/#Execution-Configuration
17+
fmu.executionConfig = FMI.FMIImport.FMU2_EXECUTION_CONFIGURATION_NOTHING
18+
c, _ = FMIFlux.prepareSolveFMU(fmu, nothing, fmu.type, true, false, false, false, true, data.params; x0=x0)
19+
else
20+
c = FMI.getCurrentComponent(fmu)
21+
# switch back to the default execution configuration, allocate a new FMU instance for every run, see:
22+
# https://thummeto.github.io/FMI.jl/dev/features/#Execution-Configuration
23+
fmu.executionConfig = FMI.FMIImport.FMU2_EXECUTION_CONFIGURATION_NO_RESET
24+
FMIFlux.finishSolveFMU(fmu, c, false, true)
25+
end
26+
return nothing
27+
end
28+
29+
function dataIndexForTime(t::Real)
30+
return 1+round(Int, t/dt)
31+
end
32+
33+
function plotEnhancements(neuralFMU::NeuralFMU, fmu::FMU2, data::FMIZoo.VLDM_Data; reductionFactor::Int=10, mov_avg::Int=100, filename=nothing)
1434
colorMin = 0
1535
colorMax = 0
1636
okregion = 0
@@ -19,7 +39,8 @@ function plotANNError(neuralFMU::NeuralFMU, data::FMIZoo.VLDM_Data; reductionFac
1939
tStart = data.consumption_t[1]
2040
tStop = data.consumption_t[end]
2141
x0 = FMIZoo.getStateVector(data, tStart)
22-
result = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=true, recordValues=:derivatives)
42+
resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=false, recordValues=:derivatives, saveat=data.consumption_t)
43+
resultFMU = fmiSimulate(fmu, (tStart, tStop); parameters=data.params, showProgress=false, recordValues=:derivatives, saveat=data.consumption_t)
2344

2445
# Finite differences for acceleration
2546
dt = data.consumption_t[2]-data.consumption_t[1]
@@ -28,40 +49,38 @@ function plotANNError(neuralFMU::NeuralFMU, data::FMIZoo.VLDM_Data; reductionFac
2849
acceleration_dev = (data.speed_dev[2:end] - data.speed_dev[1:end-1]) / dt
2950
acceleration_dev = [acceleration_dev..., 0.0]
3051

31-
ANNInputs = fmiGetSolutionValue(result, :derivatives) # collect([0.0, 0.0, 0.0, data.speed_val[i], acceleration_val[i], data.consumption_val[i]] for i in 1:length(data.consumption_t))
52+
ANNInputs = fmiGetSolutionValue(resultNFMU, :derivatives) # collect([0.0, 0.0, 0.0, data.speed_val[i], acceleration_val[i], data.consumption_val[i]] for i in 1:length(data.consumption_t))
3253
ANNInputs = collect([ANNInputs[1][i], ANNInputs[2][i], ANNInputs[3][i], ANNInputs[4][i], ANNInputs[5][i], ANNInputs[6][i]] for i in 1:length(ANNInputs[1]))
33-
ANNOutputs = fmiGetSolutionDerivative(result, 5:6; isIndex=true)
54+
55+
ANNOutputs = fmiGetSolutionDerivative(resultNFMU, 5:6; isIndex=true)
3456
ANNOutputs = collect([ANNOutputs[1][i], ANNOutputs[2][i]] for i in 1:length(ANNOutputs[1]))
3557

36-
ANN_error = nothing
37-
38-
if field == :consumption
39-
ANN_consumption = collect(o[2] for o in ANNOutputs)
40-
ANN_error = ANN_consumption - data.consumption_val
41-
ANN_error = collect(ANN_error[i] > 0.0 ? max(0.0, ANN_error[i]-data.consumption_dev[i]) : min(0.0, ANN_error[i]+data.consumption_dev[i]) for i in 1:length(data.consumption_t))
42-
43-
label = L"consumption [W]"
44-
colorMin=-610.0
45-
colorMax=610.0
46-
else # :acceleration
47-
ANN_acceleration = collect(o[1] for o in ANNOutputs)
48-
ANN_error = ANN_acceleration - acceleration_val
49-
ANN_error = collect(ANN_error[i] > 0.0 ? max(0.0, ANN_error[i]-acceleration_dev[i]) : min(0.0, ANN_error[i]+acceleration_dev[i]) for i in 1:length(data.consumption_t))
50-
51-
label = L"acceleration [m/s^2]"
52-
colorMin=-0.04
53-
colorMax=0.04
54-
end
58+
FMUOutputs = fmiGetSolutionDerivative(resultFMU, 5:6; isIndex=true)
59+
FMUOutputs = collect([FMUOutputs[1][i], FMUOutputs[2][i]] for i in 1:length(FMUOutputs[1]))
5560

61+
ANN_consumption = collect(o[2] for o in ANNOutputs)
62+
ANN_error = ANN_consumption - data.consumption_val
63+
ANN_error = collect(ANN_error[i] > 0.0 ? max(0.0, ANN_error[i]-data.consumption_dev[i]) : min(0.0, ANN_error[i]+data.consumption_dev[i]) for i in 1:length(data.consumption_t))
64+
65+
FMU_consumption = collect(o[2] for o in FMUOutputs)
66+
FMU_error = FMU_consumption - data.consumption_val
67+
FMU_error = collect(FMU_error[i] > 0.0 ? max(0.0, FMU_error[i]-data.consumption_dev[i]) : min(0.0, FMU_error[i]+data.consumption_dev[i]) for i in 1:length(data.consumption_t))
68+
69+
colorMin=-231.0
70+
colorMax=231.0
71+
72+
FMU_error = movavg(FMU_error, mov_avg)
5673
ANN_error = movavg(ANN_error, mov_avg)
74+
75+
ANN_error = ANN_error .- FMU_error
5776

5877
ANNInput_vel = collect(o[4] for o in ANNInputs)
5978
ANNInput_acc = collect(o[5] for o in ANNInputs)
6079
ANNInput_con = collect(o[6] for o in ANNInputs)
6180

6281
_max = max(ANN_error...)
6382
_min = min(ANN_error...)
64-
neutral = -colorMin/(colorMax-colorMin) # -_min/(_max-_min)
83+
neutral = 0.5
6584

6685
if _max > colorMax
6786
@warn "max value ($(_max)) is larger than colorMax ($(colorMax)) - values will be cut"
@@ -71,31 +90,27 @@ function plotANNError(neuralFMU::NeuralFMU, data::FMIZoo.VLDM_Data; reductionFac
7190
@warn "min value ($(_min)) is smaller than colorMin ($(colorMin)) - values will be cut"
7291
end
7392

74-
ANN_error = collect(min(max(e, colorMin), colorMax) for e in ANN_error)
75-
76-
@info "$(_min) $(_max) $(neutral)"
77-
7893
anim = @animate for ang in 0:5:360
7994
l = Plots.@layout [Plots.grid(3,1) r{0.85w}]
8095
fig = Plots.plot(layout=l, size=(1600,800), left_margin = 10Plots.mm, right_margin = 10Plots.mm, bottom_margin = 10Plots.mm)
81-
82-
colorgrad = cgrad([:orange, :white, :blue], [0.0, 0.5, 1.0]) # , scale = :log)
83-
96+
97+
colorgrad = cgrad([:green, :white, :red], [0.0, 0.5, 1.0]) # , scale = :log)
98+
8499
scatter!(fig[1], ANNInput_vel[1:reductionFactor:end], ANNInput_acc[1:reductionFactor:end],
85-
xlabel=L"velocity [m/s]", ylabel=L"acceleration [m/s^2]",
100+
xlabel="velocity [m/s]", ylabel="acceleration [m/s^2]",
86101
color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], label=:none, colorbar=:none) #
87-
102+
88103
scatter!(fig[2], ANNInput_acc[1:reductionFactor:end], ANNInput_con[1:reductionFactor:end],
89-
xlabel=L"acceleration [m/s^2]", ylabel=L"consumption [W]",
104+
xlabel="acceleration [m/s^2]", ylabel="consumption [W]",
90105
color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], label=:none, colorbar=:none) #
91-
106+
92107
scatter!(fig[3], ANNInput_vel[1:reductionFactor:end], ANNInput_con[1:reductionFactor:end],
93-
xlabel=L"velocity [m/s]", ylabel=L"consumption [W]",
108+
xlabel="velocity [m/s]", ylabel="consumption [W]",
94109
color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], label=:none, colorbar=:none) #
95-
110+
96111
scatter!(fig[4], ANNInput_vel[1:reductionFactor:end], ANNInput_acc[1:reductionFactor:end], ANNInput_con[1:reductionFactor:end],
97-
xlabel=L"velocity [m/s]", ylabel=L"acceleration [m/s^2]", zlabel=L"consumption [W]",
98-
color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], markersize=8, label=:none, camera=(ang,20), colorbar_title=" \n\n\n\n" * L"Δ" * label * " (smoothed)") #
112+
xlabel="velocity [m/s]", ylabel="acceleration [m/s^2]", zlabel="consumption [W]",
113+
color=colorgrad, zcolor=ANN_error[1:reductionFactor:end], markersize=8, label=:none, camera=(ang,20), colorbar_title=" \n\n\n\n" * L"ΔMAE" * " (smoothed)")
99114

100115
# draw invisible dummys to scale colorbar to fixed size
101116
for i in 1:3
@@ -110,7 +125,11 @@ function plotANNError(neuralFMU::NeuralFMU, data::FMIZoo.VLDM_Data; reductionFac
110125
end
111126
end
112127

113-
return gif(anim, filename; fps=10)
128+
if !isnothing(filename)
129+
return gif(anim, filename; fps=10)
130+
else
131+
return gif(anim; fps=10)
132+
end
114133
end
115134

116135
function plotCumulativeConsumption(solutionNFMU::FMU2Solution, solutionFMU::FMU2Solution, data::FMIZoo.VLDM_Data; range=(0.0,1.0), filename=nothing)
@@ -159,24 +178,4 @@ function simPlotCumulativeConsumption(cycle::Symbol, filename=nothing; kwargs...
159178
savefig(fig, filename)
160179
end
161180
return fig
162-
end
163-
164-
function checkMSE(cycle; init::Bool=false)
165-
166-
data = FMIZoo.VLDM(cycle)
167-
tStart = data.consumption_t[1]
168-
tStop = data.consumption_t[end]
169-
tSave = data.consumption_t
170-
171-
if init
172-
c = FMI.FMIImport.getCurrentComponent(fmu)
173-
FMI.FMIImport.fmi2SetFMUstate(c, batch[1].initialState)
174-
c.eventInfo = deepcopy(batch[1].initialEventInfo)
175-
c.t = batch[1].tStart
176-
end
177-
resultNFMU = neuralFMU(x0, (tStart, tStop); parameters=data.params, showProgress=true, maxiters=1e7, saveat=tSave)
178-
179-
mse_NFMU = FMIFlux.Losses.mse_dev(data.cumconsumption_val, fmiGetSolutionState(resultNFMU, 6; isIndex=true), data.cumconsumption_dev)
180-
181-
return mse_NFMU
182-
end
181+
end

0 commit comments

Comments
 (0)