From fbf96c5c1611e5ab1d878685cea6a1d525bc92b7 Mon Sep 17 00:00:00 2001
From: iburakov
Date: Wed, 23 Aug 2023 17:44:52 +0000
Subject: [PATCH] Try to make baseline model less prone to overfitting

Training history showed that (proper) val metrics (evaluated on a
different synthesis tree) behave better with this model version. A clear
val_loss minimum is now seen (and I'm overtraining the model).
---
 ml/synthesis/src/components/model_generation/models.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/ml/synthesis/src/components/model_generation/models.py b/ml/synthesis/src/components/model_generation/models.py
index 6f1d5dd29..8a75cda6a 100644
--- a/ml/synthesis/src/components/model_generation/models.py
+++ b/ml/synthesis/src/components/model_generation/models.py
@@ -5,16 +5,16 @@ def create_baseline_model(input_shape) -> Model:
     model = Sequential(
         [
             layers.InputLayer(input_shape=input_shape),
-            layers.Dense(128, activation="relu"),
-            layers.Dense(64, activation="relu"),
-            layers.Dense(64, activation="relu"),
-            layers.Dense(32, activation="relu"),
+            layers.Dense(64, activation="relu", kernel_regularizer="l2"),
+            layers.Dropout(0.5),
+            layers.Dense(16, activation="relu"),
+            layers.Dense(16, activation="relu"),
             layers.Dense(1),
         ],
     )
     model.compile(
-        optimizer=optimizers.Adam(learning_rate=1e-3),
+        optimizer=optimizers.Adam(learning_rate=1e-4),
         loss="mse",
         metrics=["mae"],
     )