Neural network #1188
@@ -1,16 +1,17 @@
-use std::{iter::repeat, ops::Not};
+use std::{iter::{repeat, zip}, ops::Not};

 use futures::future::{try_join, try_join4, try_join5};

+// `Itertools` is needed for the `.tuples()` call in `neural_network` below.
+use itertools::Itertools;
+
 use crate::{
     error::Error,
     ff::boolean::Boolean,
     protocol::{
-        basics::mul::SecureMul, boolean::step::ThirtyTwoBitStep, context::Context,
+        basics::mul::SecureMul,
+        boolean::{
+            step::{ThirtyTwoBitStep, TwoHundredFiftySixBitOpStep},
+            NBitStep,
+        },
+        context::Context,
         BooleanProtocols, RecordId,
     },
     secret_sharing::{replicated::semi_honest::AdditiveShare, BitDecomposed, FieldSimd},
 };
+use super::{addition_sequential::integer_add, multiplication::integer_mul};

 async fn a_times_b_and_not_b<C, const N: usize>(
     ctx: &C,
@@ -158,18 +159,96 @@ where | |
| ])) | ||
| } | ||
|
|
||
| // Sigmoid( | ||
| // Sum(i = 1..N, neuron(i) in last layer activation times edge weight connecting that neuron to this) | ||
| // ) | ||
| // | ||
|
|
||
| // for i in 0..M-1 // For going through all layers | ||
| // for j in 0..N-1 // Current layer | ||
| // for k in 0..N-1 // For previous layer | ||
| // neuron(i*N + j) += neuron((i-1)*N + k) * edge_weight(neuron((i)*N + j), neuron((i-1)*N + k)) | ||
|
|
||
| // M' neurons wide and here M is M'/N, L layers tall | ||
| pub async fn neural_network<C, S, const M: usize, const N: usize, const MTimesN: usize>( | ||
| ctx: C, | ||
| last_layer_neurons: &[BitDecomposed<AdditiveShare<Boolean, N>>; M], | ||
|
||
| edge_weights: &[BitDecomposed<AdditiveShare<Boolean, N>>; M], | ||
|
||
| ) -> Result<BitDecomposed<AdditiveShare<Boolean, N>>, Error> | ||
| where | ||
| C: Context, | ||
| S: NBitStep, | ||
| Boolean: FieldSimd<N>, | ||
| AdditiveShare<Boolean, N>: BooleanProtocols<C, N>, | ||
| Boolean: FieldSimd<M>, | ||
| AdditiveShare<Boolean, M>: BooleanProtocols<C, M>, | ||
|
||
| { | ||
| // use super::step::MultiplicationStep as Step; | ||
| // for each layer we get M*M vector of edge_weights | ||
| let mut mults = ctx.parallel_join(zip(edge_weights.iter(), last_layer_neurons).enumerate().map(|(i, (edge_weight, neuron))| { | ||
|
||
| let ctx = ctx.narrow(&TwoHundredFiftySixBitOpStep::Bit(i)); | ||
| async move { | ||
| integer_mul::<_, S, N>( | ||
| ctx, | ||
| RecordId::FIRST, | ||
| &edge_weight, | ||
| &neuron, | ||
| ) | ||
| .await | ||
| } | ||
| })).await?; | ||
|
|
||
| let mut num = 0; | ||
| while mults.len() > 1 { | ||
| // Add each of the mults amongst themselves | ||
| for (a, b) in mults.iter().tuples() { | ||
| let (add_result, _) = integer_add::<_, S, N>( | ||
| ctx.narrow(&TwoHundredFiftySixBitOpStep::Bit(M+num)), | ||
| RecordId::from(num), | ||
| &a, | ||
| &b, | ||
| ) | ||
| .await?; | ||
| mults.push(add_result); | ||
| num += 1; | ||
| } | ||
|
|
||
| } | ||
|
||
| // now add the last N elements in 1 BitDecomposed | ||
| let mut one_cell = mults[0]; | ||
| while one_cell.len() > 1 { | ||
| let (left, right) = one_cell.split_at((one_cell.len()/2).try_into().unwrap()); | ||
| (one_cell, _) = integer_add::<_, S, N>( | ||
| ctx.narrow(&TwoHundredFiftySixBitOpStep::Bit(M+num)), | ||
| RecordId::FIRST, | ||
| &left, | ||
| &right, | ||
| ) | ||
| .await?; | ||
| num += 1; | ||
| } | ||
|
||
| sigmoid::<_, N>( | ||
| ctx.narrow(&TwoHundredFiftySixBitOpStep::Bit(M+num)), | ||
| RecordId::FIRST, | ||
| &one_cell, | ||
| ) | ||
| .await | ||
| } | ||
 #[cfg(all(test, unit_test))]
 mod test {
     use std::num::TryFromIntError;

     use crate::{
         ff::{boolean_array::BA8, U128Conversions},
-        protocol::{context::Context, ipa_prf::boolean_ops::sigmoid::sigmoid, RecordId},
-        secret_sharing::{BitDecomposed, SharedValue, TransposeFrom},
+        protocol::{
+            boolean::step::DefaultBitStep, context::Context,
+            ipa_prf::boolean_ops::sigmoid::sigmoid, RecordId,
+        },
+        secret_sharing::{
+            replicated::semi_honest::AdditiveShare, BitDecomposed, SharedValue, TransposeFrom,
+        },
         test_executor::run,
         test_fixture::{Reconstruct, Runner, TestWorld},
     };

+    use super::neural_network;
+
     fn piecewise_linear_sigmoid_approximation(x: i128) -> Result<u128, TryFromIntError> {
         Ok(match x {
             i128::MIN..=-113 => 0,

@@ -237,4 +316,33 @@ mod test {
             }
         });
     }

+    #[test]
+    #[allow(clippy::cast_precision_loss)]
+    fn semi_honest_neural_network() {
+        run(|| async move {
+            let world = TestWorld::default();
+
+            let edge_weights = (0..256).map(|i| BA8::truncate_from(u128::try_from(i).unwrap()));
+            let prev_neurons = (0..256).map(|i| BA8::truncate_from(u128::try_from(i).unwrap()));
+            world
+                .upgraded_semi_honest(
+                    (edge_weights, prev_neurons),
+                    |ctx, (edge_weights, prev_neurons)| async move {
+                        // Transpose the 256 shared BA8 values into bit-decomposed,
+                        // 256-lane vectors, then reuse the same vector for all 8 chunks.
+                        let edge_weights = BitDecomposed::transposed_from(&edge_weights).unwrap();
+                        let prev_neurons = BitDecomposed::transposed_from(&prev_neurons).unwrap();
+                        let edge_weights: [_; 8] = std::array::from_fn(|_| edge_weights.clone());
+                        let prev_neurons: [_; 8] = std::array::from_fn(|_| prev_neurons.clone());
+                        let _result = neural_network::<_, DefaultBitStep, 8, 256, 2048>(
+                            ctx.set_total_records(1),
+                            &prev_neurons,
+                            &edge_weights,
+                        )
+                        .await
+                        .unwrap();
+
+                        // Vec::transposed_from(&_result).unwrap()
+                    },
+                )
+                .await;
+        });
+    }
 }
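
The comment block above `neural_network` describes, per output neuron, a sigmoid applied to the weighted sum of the previous layer's activations. For reference, here is a minimal plaintext (non-MPC) sketch of that computation; it is illustrative only, not part of this PR, and `clear_neuron` is a hypothetical name:

// Plaintext reference: one output neuron = sigmoid(dot(previous activations, incoming weights)).
// The protocol above is intended to compute the same quantity over secret-shared,
// bit-decomposed integers, with the sigmoid replaced by a piecewise-linear approximation.
fn clear_neuron(prev_activations: &[f64], incoming_weights: &[f64]) -> f64 {
    let weighted_sum: f64 = prev_activations
        .iter()
        .zip(incoming_weights)
        .map(|(a, w)| a * w)
        .sum();
    1.0 / (1.0 + (-weighted_sum).exp())
}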
Review comment: Are these comments in sync with the code?
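
On the summation structure inside `neural_network`: the loops reduce the M product vectors by repeatedly adding adjacent pairs, and `num` advances the narrowed step and record counters so each addition gets a distinct context. A minimal plaintext sketch of that pairwise-reduction pattern over plain integers, illustrative only (`pairwise_sum` is a hypothetical name):

use itertools::Itertools;

// Repeatedly add adjacent pairs until a single value remains; with the MPC adder this keeps
// the number of sequential additions logarithmic in the number of terms. `.tuples()` drops a
// trailing element when the length is odd, so the leftover is carried into the next round.
// Assumes `terms` is non-empty.
fn pairwise_sum(mut terms: Vec<u64>) -> u64 {
    while terms.len() > 1 {
        let mut next: Vec<u64> = terms.iter().tuples().map(|(a, b)| a + b).collect();
        if terms.len() % 2 == 1 {
            next.push(*terms.last().unwrap());
        }
        terms = next;
    }
    terms[0]
}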