diff --git a/experiments/mnist.jl b/experiments/mnist.jl
index a3fbbaae3a3c8dc98b59c8e1825946e46227272a..627e1d25121ad2985a2481f157e5bb4bc5abb689 100644
--- a/experiments/mnist.jl
+++ b/experiments/mnist.jl
@@ -13,7 +13,7 @@ test_data = load_mnist_test()
 
 # Additional models:
 add_models = Dict(
-    :lenet5 => lenet5,
+    "LeNet-5" => lenet5,
 )
 
 # Default builder:
diff --git a/experiments/models/additional_models.jl b/experiments/models/additional_models.jl
index a5b354d6dcc94d63febada6dec42cffe9470f757..f6ed7451615c7e32bf72076756be65eb2abf3684 100644
--- a/experiments/models/additional_models.jl
+++ b/experiments/models/additional_models.jl
@@ -21,7 +21,7 @@ function MLJFlux.build(b::LeNetBuilder, rng, n_in, n_out)
 	k, c1, c2 = b.filter_size, b.channels1, b.channels2
 	mod(k, 2) == 1 || error("`filter_size` must be odd. ")
     p = div(k - 1, 2) # padding to preserve image size on convolution:
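+    # e.g. with stride 1: W_out = W - k + 2p + 1, so p = (k - 1) / 2 keeps W_out = W (k = 5 gives p = 2)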
-    preproc(x) = reshape(x, (_n_in, _n_in, 1, :))
 
     # Model:
 	front = Flux.Chain(
@@ -37,7 +37,8 @@ function MLJFlux.build(b::LeNetBuilder, rng, n_in, n_out)
         Dense(120, 84, relu),
         Dense(84, n_out),
     )
-    chain = Flux.Chain(preproc, front, back)
+    chain = Flux.Chain(ECCCo.ToConv(_n_in), front, back)
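+    # illustrative sanity check (assuming 28×28 MNIST inputs): chain(rand(Float32, 784, 32)) returns an (n_out, 32) array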
 
 	return chain
 end
diff --git a/src/utils.jl b/src/utils.jl
index 968fb26e150d04fcfbc2ddccd9f6635074997ca4..87c4d0a73fea4adb5a8fbc61eca5fd2a839d6b93 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -1,5 +1,35 @@
+"""
+    pre_process(x; noise=0.03f0)
+
+Helper function that adds Gaussian noise with standard deviation `noise` to the input `x`.
+"""
 function pre_process(x; noise::Float32=0.03f0)
     ϵ = Float32.(randn(size(x)) * noise)
     x += ϵ
     return x
+end
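+
+# Illustrative usage of `pre_process` (the 28×28 shape here is just an example):
+#
+#     x = rand(Float32, 28, 28)
+#     x_noisy = pre_process(x; noise=0.01f0)
+#     size(x_noisy) == size(x)  # true; only the values are perturbed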
+
+"A simple functor to convert a vector to a convolutional layer."
+struct ToConv
+    n_in::Int
+end
+
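+# Illustrative usage (assumes 28×28 MNIST-style inputs; the batch size is arbitrary):
+#
+#     x = rand(Float32, 28 * 28, 32)          # 32 flattened images
+#     size(ToConv(28)(x)) == (28, 28, 1, 32)  # true: WHCN layout for Flux conv layers
+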
+"""
+    (f::ToConv)(x)
+
+Reshape the flattened input `x` into a 4-D array of size `(n_in, n_in, 1, batch)`, the WHCN layout expected by Flux convolutional layers.
+"""
+function (f::ToConv)(x)
+    return reshape(x, (f.n_in, f.n_in, 1, :))
 end
\ No newline at end of file