diff --git a/experiments/experiment.jl b/experiments/experiment.jl
index f3388c04723f4b99bb5dc2db51e976250ebd5292..baf73b49845f7109dc29195860254bca3791fd37 100644
--- a/experiments/experiment.jl
+++ b/experiments/experiment.jl
@@ -91,7 +91,7 @@ function run_experiment(exper::Experiment; save_output::Bool=true, only_models::
 
     # Model tuning:
     if TUNE_MODEL
-        mach = tune_model(exper)
+        mach = tune_mlp(exper)
         return mach
     end
 
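Note on the hunk above: with the rename, the tuning short-circuit in `run_experiment` now calls `tune_mlp`. A minimal usage sketch (not repo code); `load_experiment` is a hypothetical stand-in for however an `Experiment` is constructed elsewhere in this repo:

    # Usage sketch: with the global tuning flag set, `run_experiment`
    # short-circuits into the renamed `tune_mlp` and returns the machine.
    TUNE_MODEL = true
    exper = load_experiment()      # hypothetical helper returning an `Experiment`
    mach = run_experiment(exper)   # internally: `mach = tune_mlp(exper)`
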
diff --git a/experiments/jobscripts/tuning/generators/tabular.sh b/experiments/jobscripts/tuning/generators/tabular.sh
index 8cf5e9666b41cdcb0091109a2bcf5f14a18970c2..6769aebdea7ea30c70d635b4b649ce7055a73d5c 100644
--- a/experiments/jobscripts/tuning/generators/tabular.sh
+++ b/experiments/jobscripts/tuning/generators/tabular.sh
@@ -2,10 +2,10 @@
 
 #SBATCH --job-name="Grid-search Tabular (ECCCo)"
 #SBATCH --time=06:00:00
-#SBATCH --ntasks=1000
+#SBATCH --ntasks=100
 #SBATCH --cpus-per-task=1
 #SBATCH --partition=compute
-#SBATCH --mem-per-cpu=4GB
+#SBATCH --mem-per-cpu=8GB
 #SBATCH --account=research-eemcs-insy
 #SBATCH --mail-type=END     # Set mail type to 'END' to receive a mail when the job finishes. 
 
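Note on the resource change above: with `--cpus-per-task=1`, the total memory request drops from 1000 × 4 GB = 4 TB to 100 × 8 GB = 800 GB, i.e. a tenth as many workers, each with twice the memory per task.
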
diff --git a/experiments/mnist.jl b/experiments/mnist.jl
index 12cc30f3223c32d81390a571ef65947b0aa44fa4..965b68c2148e81a0c2ab53d8797ff8605a25a454 100644
--- a/experiments/mnist.jl
+++ b/experiments/mnist.jl
@@ -28,7 +28,7 @@ add_models = Dict(
 # Parameter choices:
 params = (
     n_individuals=N_IND_SPECIFIED ? N_IND : 10,
-    builder=default_builder(n_hidden=128, n_layers=2, activation=Flux.swish),
+    builder=default_builder(n_hidden=128, n_layers=1, activation=Flux.swish),
     𝒟x=Uniform(-1.0, 1.0),
     α=[1.0, 1.0, 1e-2],
     sampling_batch_size=10,
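For intuition, a rough Flux equivalent of the architecture the updated builder parameters describe (one hidden swish layer of 128 units instead of two); `default_builder` is defined elsewhere in this repo, so the exact layer layout is an assumption:

    using Flux

    # Approximate MLP implied by
    # `default_builder(n_hidden=128, n_layers=1, activation=Flux.swish)`:
    n_in, n_out = 28 * 28, 10           # MNIST input/output dimensions
    model = Chain(
        Dense(n_in => 128, Flux.swish), # single hidden layer (n_layers=1)
        Dense(128 => n_out),            # linear output layer
    )
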
diff --git a/experiments/model_tuning.jl b/experiments/model_tuning.jl
index 41efa91cb3d2c8d4052a2e0ef6980ab1e214774b..dfabfbaa0111dd98e0082675f0272b20a2fa38bf 100644
--- a/experiments/model_tuning.jl
+++ b/experiments/model_tuning.jl
@@ -6,11 +6,11 @@ Output path for tuned model.
 tuned_model_path(exper::Experiment) = joinpath(exper.output_path, "tuned_model")
 
 """
-    tune_model(exper::Experiment; kwargs...)
+    tune_mlp(exper::Experiment; kwargs...)
 
 Tunes MLP in place and saves the tuned model to disk.
 """
-function tune_model(exper::Experiment; kwargs...)
+function tune_mlp(exper::Experiment; kwargs...)
     if !(is_multi_processed(exper) && MPI.Comm_rank(exper.parallelizer.comm) != 0)
         @info "Tuning models."
         # Output path:
@@ -28,7 +28,7 @@ function tune_model(exper::Experiment; kwargs...)
         X, y, _ = prepare_data(exper::Experiment)
         # Tune model:
         measure = collect(values(exper.model_measures))
-        mach = tune_model(model, X, y; tuning_params=exper.model_tuning_params, measure=measure, kwargs...)
+        mach = tune_mlp(model, X, y; tuning_params=exper.model_tuning_params, measure=measure, kwargs...)
         # Machine is still on GPU, save CPU version of model:
         best_results = fitted_params(mach)
         Serialization.serialize(joinpath(model_tuning_path, "$(exper.save_name)_best_mlp.jls"), best_results)
@@ -39,11 +39,11 @@ function tune_model(exper::Experiment; kwargs...)
 end
 
 """
-    tune_model(mod::Supervised, X, y; tuning_params::NamedTuple, kwargs...)
+    tune_mlp(mod::Supervised, X, y; tuning_params::NamedTuple, kwargs...)
 
 Tunes a model by performing a grid search over the parameters specified in `tuning_params`.
 """
-function tune_model(
+function tune_mlp(
     model::Supervised, X, y; 
     tuning_params::NamedTuple,
     measure::Vector=MODEL_MEASURE_VEC,
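A hedged usage sketch of this second `tune_mlp` method, following the signature in the hunk above; the parameter grid and measures below are illustrative placeholders, not the repo's actual `tuning_params` or `MODEL_MEASURE_VEC`:

    using MLJ

    # `model`, `X`, `y` as produced by the model setup and `prepare_data`
    # in the first method above; grid values here are hypothetical.
    tuning_params = (
        epochs=[50, 100],
        batch_size=[32, 128],
    )
    mach = tune_mlp(model, X, y;
        tuning_params=tuning_params,
        measure=[log_loss, accuracy],
    )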