
Commit 1502e34

Author: Documenter.jl (committed)
Message: build based on 2518ce0
1 parent 9d49afc commit 1502e34

File tree

18 files changed (+1711, -1777 lines)


previews/PR262/examples/Thermal_Generation_Dispatch_Example/index.html

Lines changed: 149 additions & 149 deletions
Large diffs are not rendered by default.

previews/PR262/examples/autotuning-ridge/index.html

Lines changed: 159 additions & 159 deletions
Large diffs are not rendered by default.

previews/PR262/examples/chainrules_unit/index.html

Lines changed: 523 additions & 523 deletions
Large diffs are not rendered by default.

previews/PR262/examples/custom-relu.jl

Lines changed: 8 additions & 15 deletions
@@ -32,7 +32,7 @@ function matrix_relu(
     @variable(model, x[1:layer_size, 1:batch_size] >= 0)
     @objective(model, Min, x[:]'x[:] - 2y[:]'x[:])
     optimize!(model)
-    return value.(x)
+    return Float32.(value.(x))
 end
 
 # Define the reverse differentiation rule, for the function we defined above.
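
The `Float32.` conversion deserves a note: Flux initializes layer parameters as `Float32`, while JuMP's `value.` returns `Float64`, and the pre-change build (see the rendered diff below) warned "Layer with Float32 parameters got Float64 input" on every training step. A minimal sketch of the effect, assuming only that Flux is installed; the layer and input here are illustrative, not from the commit:

using Flux

layer = Dense(10 => 10)       # Flux weights default to Float32
x64 = rand(Float64, 10, 5)    # a Float64 input, like `value.(x)` would be
layer(x64)                    # works, but warns and converts on every call
layer(Float32.(x64))          # converting once, as the diff now does, avoids that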
@@ -76,13 +76,13 @@ m = Flux.Chain(
 
 N = 1000 # batch size
 ## Preprocessing train data
-imgs = MLDatasets.MNIST.traintensor(1:N)
-labels = MLDatasets.MNIST.trainlabels(1:N)
+imgs = MLDatasets.MNIST(; split = :train).features[:, :, 1:N]
+labels = MLDatasets.MNIST(; split = :train).targets[1:N]
 train_X = float.(reshape(imgs, size(imgs, 1) * size(imgs, 2), N)) # stack images
 train_Y = Flux.onehotbatch(labels, 0:9);
 ## Preprocessing test data
-test_imgs = MLDatasets.MNIST.testtensor(1:N)
-test_labels = MLDatasets.MNIST.testlabels(1:N)
+test_imgs = MLDatasets.MNIST(; split = :test).features[:, :, 1:N]
+test_labels = MLDatasets.MNIST(; split = :test).targets[1:N];
 test_X = float.(reshape(test_imgs, size(test_imgs, 1) * size(test_imgs, 2), N))
 test_Y = Flux.onehotbatch(test_labels, 0:9);

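The data-loading change tracks the MLDatasets 0.7 interface that the old build's deprecation warnings point to: a split is constructed once and its `features` / `targets` fields are indexed, instead of calling `traintensor` / `trainlabels` with a range. A hedged sketch of the new access pattern (the slice size is illustrative):

using MLDatasets

train = MLDatasets.MNIST(; split = :train)
imgs = train.features[:, :, 1:100]    # 28×28×100 array of pixel values
labels = train.targets[1:100]         # 100-element vector of digits 0-9
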
@@ -97,19 +97,12 @@ dataset = repeated((train_X, train_Y), epochs);
 
 # ## Network training
 
 # training loss function, Flux optimizer
-custom_loss(x, y) = Flux.crossentropy(m(x), y)
-opt = Flux.Adam()
-evalcb = () -> @show(custom_loss(train_X, train_Y))
+custom_loss(m, x, y) = Flux.crossentropy(m(x), y)
+opt = Flux.setup(Flux.Adam(), m)
 
 # Train to optimize network parameters
 
-@time Flux.train!(
-    custom_loss,
-    Flux.params(m),
-    dataset,
-    opt,
-    cb = Flux.throttle(evalcb, 5),
-);
+@time Flux.train!(custom_loss, m, dataset, opt);
 
 # Although our custom implementation takes time, it is able to reach similar
 # accuracy as the usual ReLU function implementation.
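
The training rewrite follows Flux's move from implicit parameters to explicit optimiser state: the loss now takes the model as its first argument, `Flux.setup` builds a per-parameter state tree, and `Flux.train!` threads both through the data, replacing `Flux.params` and the callback keyword. A minimal runnable sketch of the same pattern on a toy model (model, data, and sizes are illustrative, not from the example):

using Flux

m = Flux.Chain(Flux.Dense(4 => 10, Flux.relu), Flux.Dense(10 => 2))
loss(m, x, y) = Flux.crossentropy(Flux.softmax(m(x)), y)

x = rand(Float32, 4, 8)                  # 8 samples, 4 features each
y = Flux.onehotbatch(rand(0:1, 8), 0:1)  # one-hot targets

opt = Flux.setup(Flux.Adam(), m)         # explicit optimiser state
Flux.train!(loss, m, [(x, y)], opt)      # one pass over the data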

previews/PR262/examples/custom-relu/index.html

Lines changed: 8 additions & 37 deletions
@@ -17,7 +17,7 @@
     @variable(model, x[1:layer_size, 1:batch_size] >= 0)
     @objective(model, Min, x[:]'x[:] - 2y[:]'x[:])
     optimize!(model)
-    return value.(x)
+    return Float32.(value.(x))
 end
 Output: matrix_relu (generic function with 1 method)
 
 Define the reverse differentiation rule, for the function we defined above.
 
 function ChainRulesCore.rrule(::typeof(matrix_relu), y::Matrix{T}) where {T}
     model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))
     pv = matrix_relu(y; model = model)
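
Two notes on the snippet above (truncated by the diff context). The QP being solved, min x'x - 2y'x subject to x >= 0, is the projection of y onto the nonnegative orthant, so `matrix_relu` computes max.(y, 0) through the solver. And the `rrule` follows the standard ChainRulesCore contract: return the primal value together with a pullback that maps output cotangents to input cotangents. A self-contained toy instance of that contract (not the page's DiffOpt-based pullback, which differentiates through the solver):

using ChainRulesCore

square_all(y) = y .^ 2

function ChainRulesCore.rrule(::typeof(square_all), y)
    x = square_all(y)  # primal value, returned alongside the pullback
    # The pullback maps dl/dx back to dl/dy = 2y .* dl/dx; NoTangent()
    # fills the slot for the function argument itself.
    square_all_pullback(dl_dx) = (NoTangent(), 2 .* y .* dl_dx)
    return x, square_all_pullback
end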
@@ -53,44 +53,15 @@
     NNlib.softmax,
 ) # Total: 4 arrays, 7_960 parameters, 31.297 KiB.
 
 Prepare data
 
 N = 1000 # batch size
 # Preprocessing train data
-imgs = MLDatasets.MNIST.traintensor(1:N)
-labels = MLDatasets.MNIST.trainlabels(1:N)
+imgs = MLDatasets.MNIST(; split = :train).features[:, :, 1:N]
+labels = MLDatasets.MNIST(; split = :train).targets[1:N]
 train_X = float.(reshape(imgs, size(imgs, 1) * size(imgs, 2), N)) # stack images
 train_Y = Flux.onehotbatch(labels, 0:9);
 # Preprocessing test data
-test_imgs = MLDatasets.MNIST.testtensor(1:N)
-test_labels = MLDatasets.MNIST.testlabels(1:N)
+test_imgs = MLDatasets.MNIST(; split = :test).features[:, :, 1:N]
+test_labels = MLDatasets.MNIST(; split = :test).targets[1:N];
 test_X = float.(reshape(test_imgs, size(test_imgs, 1) * size(test_imgs, 2), N))
 test_Y = Flux.onehotbatch(test_labels, 0:9);
-Output:
-┌ Warning: MNIST.traintensor() is deprecated, use `MNIST(split=:train).features` instead.
-└ @ MLDatasets ~/.julia/packages/MLDatasets/0MkOE/src/datasets/vision/mnist.jl:157
-┌ Warning: MNIST.trainlabels() is deprecated, use `MNIST(split=:train).targets` instead.
-└ @ MLDatasets ~/.julia/packages/MLDatasets/0MkOE/src/datasets/vision/mnist.jl:173
-┌ Warning: MNIST.testtensor() is deprecated, use `MNIST(split=:test).features` instead.
-└ @ MLDatasets ~/.julia/packages/MLDatasets/0MkOE/src/datasets/vision/mnist.jl:165
-┌ Warning: MNIST.testlabels() is deprecated, use `MNIST(split=:test).targets` instead.
-└ @ MLDatasets ~/.julia/packages/MLDatasets/0MkOE/src/datasets/vision/mnist.jl:180
 
 Define input data. The original data is repeated `epochs` times because `Flux.train!` only loops through the data set once.
 
 epochs = 50 # ~1 minute (i7 8th gen with 16gb RAM)
 # epochs = 100 # leads to 77.8% in about 2 minutes
 dataset = repeated((train_X, train_Y), epochs);
 
 Network training
 
 Training loss function, Flux optimizer:
-custom_loss(x, y) = Flux.crossentropy(m(x), y)
-opt = Flux.Adam()
-evalcb = () -> @show(custom_loss(train_X, train_Y))
-Output: #11 (generic function with 1 method)
+custom_loss(m, x, y) = Flux.crossentropy(m(x), y)
+opt = Flux.setup(Flux.Adam(), m)
+Output: (layers = ((weight = Leaf(Adam(0.001, (0.9, 0.999), 1.0e-8), (Float32[0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0], Float32[0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0], (0.9, 0.999))), bias = Leaf(Adam(0.001, (0.9, 0.999), 1.0e-8), (Float32[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], Float32[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], (0.9, 0.999))), σ = ()), (), (weight = Leaf(Adam(0.001, (0.9, 0.999), 1.0e-8), (Float32[0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0], Float32[0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0], (0.9, 0.999))), bias = Leaf(Adam(0.001, (0.9, 0.999), 1.0e-8), (Float32[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], Float32[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], (0.9, 0.999))), σ = ()), ()),)
 
 Train to optimize network parameters:
-@time Flux.train!(
-    custom_loss,
-    Flux.params(m),
-    dataset,
-    opt,
-    cb = Flux.throttle(evalcb, 5),
-);
-Output:
-┌ Warning: Layer with Float32 parameters got Float64 input.
-│ The input will be converted, but any earlier layers may be very slow.
-│ layer = Dense(10 => 10) # 110 parameters
-│ summary(x) = "10×1000 Matrix{Float64}"
-└ @ Flux ~/.julia/packages/Flux/hiqg1/src/layers/stateless.jl:60
-custom_loss(train_X, train_Y) = 2.355365f0
-custom_loss(train_X, train_Y) = 2.2240443f0
-custom_loss(train_X, train_Y) = 2.1510334f0
-custom_loss(train_X, train_Y) = 2.0600805f0
-custom_loss(train_X, train_Y) = 1.9604436f0
-custom_loss(train_X, train_Y) = 1.8702683f0
-custom_loss(train_X, train_Y) = 1.7790897f0
-custom_loss(train_X, train_Y) = 1.691865f0
-custom_loss(train_X, train_Y) = 1.610134f0
-custom_loss(train_X, train_Y) = 1.5316879f0
-106.215850 seconds (76.76 M allocations: 4.763 GiB, 1.44% gc time, 0.71% compilation time)
+@time Flux.train!(custom_loss, m, dataset, opt);
+Output: 103.973062 seconds (72.91 M allocations: 4.475 GiB, 1.44% gc time, 0.71% compilation time)
 
 Although our custom implementation takes time, it is able to reach similar accuracy as the usual ReLU function implementation.
 
 Accuracy results
 
 Average of correct guesses:
 accuracy(x, y) = Statistics.mean(Flux.onecold(m(x)) .== Flux.onecold(y));
 Training accuracy: accuracy(train_X, train_Y) -> 0.562
 Test accuracy: accuracy(test_X, test_Y) -> 0.478
 Note that the accuracy is low due to simplified training. It is possible to increase the number of samples `N`, the number of epochs `epochs`, and the connectivity `inner`.
 
 This page was generated using Literate.jl.
-This document was generated with Documenter.jl version 0.27.25 on Saturday 28 December 2024 09:26. Using Julia version 1.11.2.
+This document was generated with Documenter.jl version 0.27.25 on Tuesday 14 January 2025 06:11. Using Julia version 1.11.2.
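
For reference, the accuracy helper's mechanics: `Flux.onecold` inverts `Flux.onehotbatch`, mapping each column back to the position (or label) of its largest entry, so the mean of the elementwise comparison is the fraction of correct guesses. A small illustration with made-up scores:

using Flux, Statistics

y_true = Flux.onehotbatch([3, 1, 4], 0:9)  # 10×3 one-hot targets
scores = rand(Float32, 10, 3)              # stand-in network outputs
mean(Flux.onecold(scores, 0:9) .== Flux.onecold(y_true, 0:9))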

previews/PR262/examples/matrix-inversion-manual/index.html

Lines changed: 1 addition & 1 deletion
@@ -68,4 +68,4 @@
     0.0 * index(x[1]) - 1.0, # to indicate the direction vector to get directional derivatives
 )
 
 Note that `0.0 * index(x[1])` is used to make its type satisfy `typeof(0.0 * index(x[1]) - 1.0) <: MOI.AbstractScalarFunction`. To indicate a different direction for the directional derivative, replace `0.0 * index(x[1]) - 1.0` with an expression of the form `dG*x - dh`, where `dG` and `dh` are the elements of the direction vectors along the `G` and `h` axes, respectively.
 
 Compute derivatives:
 DiffOpt.forward_differentiate!(model)
 
 Query derivative:
 dx = MOI.get.(model, DiffOpt.ForwardVariablePrimal(), x)
 Output: 2-element Vector{Float64}:
          0.2500000038571342
          0.7500000115714025
 
 This page was generated using Literate.jl.
-This document was generated with Documenter.jl version 0.27.25 on Saturday 28 December 2024 09:26. Using Julia version 1.11.2.
+This document was generated with Documenter.jl version 0.27.25 on Tuesday 14 January 2025 06:11. Using Julia version 1.11.2.
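
To make the `dG*x - dh` recipe concrete: the example's `0.0 * index(x[1]) - 1.0` encodes the direction (dG = 0, dh = 1), i.e. the sensitivity of the solution to shifting the constraint's right-hand side. A hedged sketch of the full sequence, assuming `model`, a constraint reference `con`, and variables `x` are set up as earlier on the page; only the expression passed to `MOI.set` changes between directions:

MOI.set(
    model,
    DiffOpt.ForwardConstraintFunction(),
    con,                          # hypothetical constraint reference
    0.0 * index(x[1]) - 1.0,      # replace with dG*x - dh for other directions
)
DiffOpt.forward_differentiate!(model)                     # propagate the direction
dx = MOI.get.(model, DiffOpt.ForwardVariablePrimal(), x)  # directional derivative of x*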

0 commit comments