From f6fee64990bb9bd10c40745cf134b71f1b80d426 Mon Sep 17 00:00:00 2001
From: Michael Abbott <32575566+mcabbott@users.noreply.github.com>
Date: Sun, 8 Jan 2023 10:31:46 -0500
Subject: [PATCH 1/3] export rand32, zeros32, ones32

---
 src/Flux.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Flux.jl b/src/Flux.jl
index 3b4b4b9237..fa1fa26307 100644
--- a/src/Flux.jl
+++ b/src/Flux.jl
@@ -24,7 +24,7 @@ export Chain, Dense, Embedding, Maxout, SkipConnection, Parallel, PairwiseFusion
        AdaptiveMaxPool, AdaptiveMeanPool, GlobalMaxPool, GlobalMeanPool, MaxPool, MeanPool,
        Dropout, AlphaDropout, LayerNorm, BatchNorm, InstanceNorm, GroupNorm,
        Upsample, PixelShuffle,
-       fmap, cpu, gpu, f32, f64,
+       fmap, cpu, gpu, f32, f64, rand32, zeros32, ones32,
        testmode!, trainmode!
 
 include("optimise/Optimise.jl")

From bcb82438b4b644ac21c8fa4018664493b14a01fd Mon Sep 17 00:00:00 2001
From: Michael Abbott <32575566+mcabbott@users.noreply.github.com>
Date: Sun, 8 Jan 2023 10:33:18 -0500
Subject: [PATCH 2/3] Update train.jl

---
 src/train.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/train.jl b/src/train.jl
index a18b1db59e..90bec2534b 100644
--- a/src/train.jl
+++ b/src/train.jl
@@ -27,7 +27,7 @@ It differs from `Optimisers.setup` in that it:
 
 # Example
 ```jldoctest
-julia> model = Dense(2=>1, leakyrelu; init=Flux.ones32);
+julia> model = Dense(2=>1, leakyrelu; init=ones32);
 
 julia> opt_state = Flux.setup(Momentum(0.1), model)  # this encodes the optimiser and its state
 (weight = Leaf(Momentum{Float64}(0.1, 0.9), Float32[0.0 0.0]), bias = Leaf(Momentum{Float64}(0.1, 0.9), Float32[0.0]), σ = ())

From 57cdbe65317ff0a1c83f2495e843d68ddb575a8b Mon Sep 17 00:00:00 2001
From: Michael Abbott <32575566+mcabbott@users.noreply.github.com>
Date: Sun, 8 Jan 2023 10:34:52 -0500
Subject: [PATCH 3/3] also randn32

---
 src/Flux.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Flux.jl b/src/Flux.jl
index fa1fa26307..afa47f6fc0 100644
--- a/src/Flux.jl
+++ b/src/Flux.jl
@@ -24,7 +24,7 @@ export Chain, Dense, Embedding, Maxout, SkipConnection, Parallel, PairwiseFusion
        AdaptiveMaxPool, AdaptiveMeanPool, GlobalMaxPool, GlobalMeanPool, MaxPool, MeanPool,
        Dropout, AlphaDropout, LayerNorm, BatchNorm, InstanceNorm, GroupNorm,
        Upsample, PixelShuffle,
-       fmap, cpu, gpu, f32, f64, rand32, zeros32, ones32,
+       fmap, cpu, gpu, f32, f64, rand32, randn32, zeros32, ones32,
        testmode!, trainmode!
 
 include("optimise/Optimise.jl")
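
Note (not part of the patches above): a minimal usage sketch of what these exports enable, assuming a Flux build that includes all three commits. `rand32`, `randn32`, `zeros32` and `ones32` are Flux's Float32 convenience initialisers, now usable without the `Flux.` prefix:

```julia
using Flux

# Float32 array constructors, newly exported from src/Flux.jl:
zeros32(3, 4)    # 3×4 Matrix{Float32}, all zero
ones32(3)        # 3-element Vector{Float32}, all one
rand32(2, 2)     # uniform random Float32 entries
randn32(2, 2)    # standard-normal Float32 entries

# As in the updated jldoctest, they can be passed as layer initialisers:
model = Dense(2 => 1, leakyrelu; init = ones32)
```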