diff --git a/Hacl.fst.config.json b/Hacl.fst.config.json index 24880e48bb..2ee225e4c0 100644 --- a/Hacl.fst.config.json +++ b/Hacl.fst.config.json @@ -19,6 +19,7 @@ "specs/drbg", "specs/frodo", "specs/tests/p256", + "code/gf128", "code/hash", "code/hmac", "code/hkdf", diff --git a/Makefile b/Makefile index 5d8a8c7ac3..b7f122097a 100644 --- a/Makefile +++ b/Makefile @@ -698,7 +698,9 @@ INTRINSIC_FLAGS = \ -add-include 'Hacl_SHA2_Vec256.c:"libintvector.h"' \ \ -add-include 'Hacl_Hash_Blake2b_Simd256:"libintvector.h"' \ - -add-include 'Hacl_MAC_Poly1305_Simd256:"libintvector.h"' + -add-include 'Hacl_MAC_Poly1305_Simd256:"libintvector.h"' \ + \ + -add-include 'Hacl_Gf128_NI:"libintvector.h"' # Disabled for distributions that don't include code based on intrinsics. INTRINSIC_INT_FLAGS = \ @@ -760,7 +762,8 @@ BUNDLE_FLAGS =\ $(INTTYPES_128_BUNDLE) \ $(RSAPSS_BUNDLE) \ $(FFDHE_BUNDLE) \ - $(LEGACY_BUNDLE) + $(LEGACY_BUNDLE) \ + $(GF128_BUNDLE) DEFAULT_FLAGS = \ $(HAND_WRITTEN_LIB_FLAGS) \ @@ -772,6 +775,8 @@ DEFAULT_FLAGS = \ $(REQUIRED_FLAGS) \ $(TARGET_H_INCLUDE) +IGNORE_GF128_BUNDLE = -bundle Hacl.Impl.Gf128.*,Hacl.Gf128.* + # WASM distribution # ----------------- # @@ -830,6 +835,9 @@ dist/wasm/Makefile.basic: POLY_BUNDLE = \ -bundle 'Hacl.Poly1305_128,Hacl.Poly1305_256,Hacl.Impl.Poly1305.*' \ -bundle 'Hacl.Streaming.Poly1305_128,Hacl.Streaming.Poly1305_256' +# Disabling GF128 +dist/wasm/Makefile.basic: GF128_BUNDLE = $(IGNORE_GF128_BUNDLE) + dist/wasm/Makefile.basic: CTR_BUNDLE = dist/wasm/Makefile.basic: RSAPSS_BUNDLE = -bundle Hacl.RSAPSS,Hacl.Impl.RSAPSS.*,Hacl.Impl.RSAPSS dist/wasm/Makefile.basic: FFDHE_BUNDLE = -bundle Hacl.FFDHE,Hacl.Impl.FFDHE.*,Hacl.Impl.FFDHE diff --git a/Makefile.common b/Makefile.common index 4ea25040f4..05dc1d1afe 100644 --- a/Makefile.common +++ b/Makefile.common @@ -225,6 +225,7 @@ BIGNUM_BUNDLE= \ -bundle Hacl.Bignum.Base,Hacl.Bignum.Addition,Hacl.Bignum.Convert,Hacl.Bignum.Lib,Hacl.Bignum.Multiplication[rename=Hacl_Bignum_Base] \ -static-header Hacl.Bignum.Base,Hacl.Bignum.Addition,Hacl.Bignum.Convert,Hacl.Bignum.Lib,Hacl.Bignum.Multiplication \ -bundle Hacl.Bignum,Hacl.Bignum.*[rename=Hacl_Bignum] +GF128_BUNDLE=-bundle Hacl.Gf128.NI=Hacl.Impl.Gf128.FieldNI # 3. OCaml diff --git a/Makefile.include b/Makefile.include index 07cba37dc9..57e8e1a95e 100644 --- a/Makefile.include +++ b/Makefile.include @@ -14,7 +14,8 @@ VALE_BUNDLES=\ LIB_DIR = $(HACL_HOME)/lib SPECS_DIR = $(HACL_HOME)/specs $(addprefix $(HACL_HOME)/specs/,lemmas tests drbg frodo tests/p256) CODE_DIRS = $(addprefix $(HACL_HOME)/code/,hash hmac hkdf drbg hpke sha3 sha2-mb ecdsap256 poly1305 streaming \ - blake2 chacha20 chacha20poly1305 curve25519 tests ed25519 salsa20 nacl-box meta frodo fallback bignum rsapss ffdhe k256) + blake2 chacha20 chacha20poly1305 curve25519 tests ed25519 salsa20 nacl-box meta frodo fallback bignum rsapss ffdhe k256 \ + gf128) EVERCRYPT_DIRS = $(addprefix $(HACL_HOME)/providers/,evercrypt evercrypt/fst test test/vectors) # Vale dirs also include directories that only contain .vaf files # (for a in $(find vale -iname '*.fst' -or -iname '*.fsti' -or -iname '*.vaf'); do dirname $a; done) | sort | uniq diff --git a/code/gf128/AUTHORS.md b/code/gf128/AUTHORS.md new file mode 100644 index 0000000000..f5d703ec9c --- /dev/null +++ b/code/gf128/AUTHORS.md @@ -0,0 +1 @@ +This code was primarily written by Karthikeyan Bhargavan (INRIA). 
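For readers wiring the new bundle into a C project: KaRaMeL flattens Hacl.Gf128.NI and Hacl.Impl.Gf128.FieldNI into a single Hacl_Gf128_NI.c/.h pair (per the GF128_BUNDLE above), and under its usual Hacl_<Module>_<name> convention the one-shot GHASH entry point declared by ghash_st (tag, length, text, key) should surface roughly as sketched below. The prototype is an assumption inferred from that convention, not a copy of the generated header, so check the shipped Hacl_Gf128_NI.h before relying on it:

#include <stdint.h>
#include <stdio.h>

/* Assumed prototype, following KaRaMeL's Hacl_<Module>_<name> naming for
   ghash_st (tag:block, len:size_t, text:lbuffer uint8 len, key:block);
   verify against the generated Hacl_Gf128_NI.h before use. */
extern void Hacl_Gf128_NI_ghash(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);

int main(void)
{
  uint8_t key[16]  = { 0x80 };  /* arbitrary 128-bit hash key       */
  uint8_t text[32] = { 0x42 };  /* two full 16-byte blocks of input */
  uint8_t tag[16];

  Hacl_Gf128_NI_ghash(tag, (uint32_t)sizeof text, text, key);

  for (int i = 0; i < 16; i++)
    printf("%02x", tag[i]);
  printf("\n");
  return 0;
}

Since this is the NI instantiation (vec_t U128 built on carry-less multiply), the resulting object requires CPU support for PCLMULQDQ-style intrinsics, which is also why the WASM distribution swaps in IGNORE_GF128_BUNDLE above.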
diff --git a/code/gf128/Hacl.Gf128.NI.fst b/code/gf128/Hacl.Gf128.NI.fst new file mode 100644 index 0000000000..7121fac6a4 --- /dev/null +++ b/code/gf128/Hacl.Gf128.NI.fst @@ -0,0 +1,51 @@ +module Hacl.Gf128.NI + +open FStar.HyperStack +open FStar.HyperStack.All + +open Lib.IntTypes +open Lib.Buffer +open Lib.IntVector + +open Hacl.Impl.Gf128.Fields +open Hacl.Impl.Gf128.Generic + +module ST = FStar.HyperStack.ST +module Vec = Hacl.Spec.GF128.Vec + +#reset-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 1" + +inline_for_extraction noextract +let gcm_ctx_elem = vec_t U128 1 +inline_for_extraction noextract +let gcm_ctx_len = 5ul + +inline_for_extraction noextract +let gcm_ctx_elem_zero = vec_zero U128 1 +inline_for_extraction noextract +let gcm_ctx = lbuffer gcm_ctx_elem gcm_ctx_len + + +[@CInline] +let gcm_init : gf128_init_st Vec.NI = + gf128_init #Vec.NI + + +[@CInline] +let gcm_update_blocks: gf128_update_st Vec.NI = + gf128_update #Vec.NI + + +[@CInline] +let gcm_update_padded: gf128_update_st Vec.NI = + gcm_update_blocks + + +[@CInline] +let gcm_emit: gf128_emit_st Vec.NI = + gf128_emit #Vec.NI + + +[@CInline] +let ghash: ghash_st Vec.NI = + ghash #Vec.NI diff --git a/code/gf128/Hacl.Impl.Gf128.FieldNI.fst b/code/gf128/Hacl.Impl.Gf128.FieldNI.fst new file mode 100644 index 0000000000..8bb88966c4 --- /dev/null +++ b/code/gf128/Hacl.Impl.Gf128.FieldNI.fst @@ -0,0 +1,281 @@ +module Hacl.Impl.Gf128.FieldNI + +open FStar.HyperStack +open FStar.HyperStack.All +open FStar.Mul + +open Lib.IntTypes +open Lib.Buffer +open Lib.IntVector + +open Hacl.Spec.GF128.Poly_s + +module ST = FStar.HyperStack.ST +module LSeq = Lib.Sequence +module BSeq = Lib.ByteSequence + +module S = Spec.GF128 +module GF = Spec.GaloisField +module Vec = Hacl.Spec.GF128.Vec +module Lemmas = Hacl.Spec.GF128.Lemmas + +include Hacl.Spec.Gf128.FieldNI + +#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0" + +inline_for_extraction noextract +let vec128_zero = vec_zero U128 1 +noextract +let zero = GF.zero #S.gf128 + +let felem = lbuffer vec128 1ul +let felem4 = lbuffer vec128 4ul +let precomp = lbuffer vec128 4ul + +let block = lbuffer uint8 16ul +let block4 = lbuffer uint8 64ul + +unfold noextract +let op_String_Access #a #len = LSeq.index #a #len + + +noextract +let feval (h:mem) (f:felem) : GTot Vec.elem = + let f = as_seq h f in + (vec_v f.[0]).[0] + + +noextract +let feval4 (h:mem) (f:felem4) : GTot Vec.elem4 = + let f = as_seq h f in + let f0 = (vec_v f.[0]).[0] in + let f1 = (vec_v f.[1]).[0] in + let f2 = (vec_v f.[2]).[0] in + let f3 = (vec_v f.[3]).[0] in + LSeq.create4 f0 f1 f2 f3 + + +inline_for_extraction noextract +val create_felem: unit -> + StackInline felem + (requires fun h -> True) + (ensures fun h0 f h1 -> + stack_allocated f h0 h1 (LSeq.create 1 vec128_zero) /\ + feval h1 f == zero) + +let create_felem () = create 1ul vec128_zero + + +inline_for_extraction noextract +val copy_felem: f1:felem -> f2:felem -> + Stack unit + (requires fun h -> live h f1 /\ live h f2 /\ disjoint f1 f2) + (ensures fun h0 _ h1 -> modifies1 f1 h0 h1 /\ + as_seq h1 f1 == as_seq h0 f2) + +let copy_felem f1 f2 = + let h0 = ST.get () in + f1.(0ul) <- f2.(0ul); + let h1 = ST.get () in + LSeq.eq_intro (as_seq h1 f1) (as_seq h0 f2) + + +inline_for_extraction noextract +val felem_set_zero: f:felem -> + Stack unit + (requires fun h -> live h f) + (ensures fun h0 _ h1 -> modifies1 f h0 h1 /\ + feval h1 f == zero) + +let felem_set_zero f = f.(0ul) <- vec128_zero + + +inline_for_extraction noextract +val create_felem4: unit -> + 
StackInline felem4 + (requires fun h -> True) + (ensures fun h0 f h1 -> + stack_allocated f h0 h1 (LSeq.create 4 vec128_zero) /\ + feval4 h1 f == LSeq.create 4 zero) + +let create_felem4 () = + let f = create 4ul vec128_zero in + let h1 = ST.get () in + LSeq.eq_intro (feval4 h1 f) (LSeq.create 4 (u128 0)); + f + + +inline_for_extraction noextract +val load_felem: + x:felem + -> y:lbuffer uint8 16ul -> + Stack unit + (requires fun h -> live h x /\ live h y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval h1 x == S.encode (as_seq h0 y)) + +let load_felem x y = + let h0 = ST.get () in + x.(size 0) <- vec_load_be U128 1 y; + BSeq.index_uints_from_bytes_be #U128 #SEC #1 (as_seq h0 y) 0 + + +inline_for_extraction noextract +val load_felem4: + x:felem4 + -> y:block4 -> + Stack unit + (requires fun h -> live h x /\ live h y /\ disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval4 h1 x == Vec.encode4 (as_seq h0 y)) + +let load_felem4 x y = + let h0 = ST.get () in + x.(size 0) <- vec_load_be U128 1 (sub y (size 0) (size 16)); + BSeq.index_uints_from_bytes_be #U128 #SEC #1 (LSeq.sub (as_seq h0 y) 0 16) 0; + x.(size 1) <- vec_load_be U128 1 (sub y (size 16) (size 16)); + BSeq.index_uints_from_bytes_be #U128 #SEC #1 (LSeq.sub (as_seq h0 y) 16 16) 0; + x.(size 2) <- vec_load_be U128 1 (sub y (size 32) (size 16)); + BSeq.index_uints_from_bytes_be #U128 #SEC #1 (LSeq.sub (as_seq h0 y) 32 16) 0; + x.(size 3) <- vec_load_be U128 1 (sub y (size 48) (size 16)); + BSeq.index_uints_from_bytes_be #U128 #SEC #1 (LSeq.sub (as_seq h0 y) 48 16) 0 + + +inline_for_extraction noextract +val store_felem: + x:lbuffer uint8 16ul + -> y:felem -> + Stack unit + (requires fun h -> live h x /\ live h y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + as_seq h1 x == S.store_elem (feval h0 y)) + +let store_felem x y = + let h0 = ST.get () in + vec_store_be x y.(size 0); + let ul = vec_v (as_seq h0 y).[0] in + FStar.Classical.forall_intro (BSeq.index_uints_to_bytes_be #U128 #SEC #1 ul); + LSeq.eq_intro (BSeq.uints_to_bytes_be #U128 #SEC #1 ul) (BSeq.uint_to_bytes_be ul.[0]) + + +val fadd: + x: felem + -> y: felem -> + Stack unit + (requires fun h -> live h x /\ live h y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval h1 x == GF.fadd #S.gf128 (feval h0 x) (feval h0 y)) + +[@CInline] +let fadd x y = + x.(size 0) <- cl_add x.(size 0) y.(size 0) + + +val fadd4: + x:felem4 + -> y:felem4 -> + Stack unit + (requires fun h -> live h x /\ live h y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval4 h1 x == Vec.fadd4 (feval4 h0 x) (feval4 h0 y)) + +[@CInline] +let fadd4 x y = + let (x0, x1, x2, x3) = (x.(0ul), x.(1ul), x.(2ul), x.(3ul)) in + let (y0, y1, y2, y3) = (y.(0ul), y.(1ul), y.(2ul), y.(3ul)) in + let h0 = ST.get () in + x.(size 0) <- cl_add x0 y0; + x.(size 1) <- cl_add x1 y1; + x.(size 2) <- cl_add x2 y2; + x.(size 3) <- cl_add x3 y3; + let h1 = ST.get () in + LSeq.eq_intro (feval4 h1 x) (Vec.fadd4 (feval4 h0 x) (feval4 h0 y)) + + +val fmul: + x:felem + -> y:felem -> + Stack unit + (requires fun h -> live h x /\ live h y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval h1 x == GF.fmul_be #S.gf128 (feval h0 x) (feval h0 y)) + +[@ CInline] +let fmul x y = + let xe = x.(size 0) in + let ye = y.(size 0) in + let (hi,lo) = clmul_wide xe ye in + let lo = gf128_reduce hi lo in + x.(size 0) <- lo; + gf128_clmul_wide_reduce_lemma xe ye + + +val load_precompute_r: + pre:precomp + -> key:block -> + Stack unit + (requires fun h -> live h pre /\ live h key /\ disjoint pre key) + (ensures fun h0 _ 
h1 -> modifies1 pre h0 h1 /\ + (let r = S.load_elem (as_seq h0 key) in + feval h1 (gsub pre 3ul 1ul) == r /\ + feval4 h1 pre == Vec.load_precompute_r r)) + +[@CInline] +let load_precompute_r pre key = + let r4 = sub pre (size 0) (size 1) in + let r3 = sub pre (size 1) (size 1) in + let r2 = sub pre (size 2) (size 1) in + let r1 = sub pre (size 3) (size 1) in + + load_felem r1 key; + copy_felem r4 r1; + copy_felem r3 r1; + copy_felem r2 r1; + fmul r2 r1; + fmul r3 r2; + fmul r4 r3 + +inline_for_extraction noextract +val fadd_acc4: + x:felem4 + -> acc:felem -> + Stack unit + (requires fun h -> + live h x /\ live h acc /\ disjoint x acc) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval4 h1 x == Vec.fadd4 (LSeq.create4 (feval h0 acc) zero zero zero) (feval4 h0 x)) + +let fadd_acc4 x acc = + let h0 = ST.get () in + x.(0ul) <- cl_add acc.(0ul) x.(0ul); + let h1 = ST.get () in + Lemmas.add_identity (feval h0 (gsub x 1ul 1ul)); + Lemmas.add_identity (feval h0 (gsub x 2ul 1ul)); + Lemmas.add_identity (feval h0 (gsub x 3ul 1ul)); + LSeq.eq_intro (feval4 h1 x) (Vec.fadd4 (LSeq.create4 (feval h0 acc) zero zero zero) (feval4 h0 x)) + + +val normalize4: + acc:felem + -> x:felem4 + -> pre:precomp -> + Stack unit + (requires fun h -> live h acc /\ live h x /\ live h pre) + (ensures fun h0 _ h1 -> modifies1 acc h0 h1 /\ + feval h1 acc == Vec.normalize4 (feval4 h0 pre) (feval4 h0 x)) + +[@CInline] +let normalize4 acc x pre = + let x1 = x.(size 0) in + let x2 = x.(size 1) in + let x3 = x.(size 2) in + let x4 = x.(size 3) in + let y1 = pre.(size 0) in + let y2 = pre.(size 1) in + let y3 = pre.(size 2) in + let y4 = pre.(size 3) in + + let (hi,lo) = clmul_wide4 x1 x2 x3 x4 y1 y2 y3 y4 in + let lo = gf128_reduce hi lo in + acc.(size 0) <- lo; + gf128_clmul_wide4_reduce_lemma x1 x2 x3 x4 y1 y2 y3 y4 diff --git a/code/gf128/Hacl.Impl.Gf128.Fields.fst b/code/gf128/Hacl.Impl.Gf128.Fields.fst new file mode 100644 index 0000000000..42846a9ac7 --- /dev/null +++ b/code/gf128/Hacl.Impl.Gf128.Fields.fst @@ -0,0 +1,294 @@ +module Hacl.Impl.Gf128.Fields + +open FStar.HyperStack +open FStar.HyperStack.All + +open Lib.IntTypes +open Lib.IntVector +open Lib.Buffer + +open Hacl.Spec.GF128.Vec + +module ST = FStar.HyperStack.ST +module LSeq = Lib.Sequence + +module S = Spec.GF128 +module GF = Spec.GaloisField + + +#reset-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 1" + +noextract +type field_spec = gf128_spec + +inline_for_extraction noextract +let elem_t (s:field_spec) = + match s with + | NI -> vec_t U128 1 + +inline_for_extraction noextract +let elem_zero (s:field_spec) : elem_t s = + match s with + | NI -> vec_zero U128 1 + +inline_for_extraction noextract +let felem_len (s:field_spec) = + match s with + | NI -> 1ul + +inline_for_extraction noextract +let felem4_len (s:field_spec) = + match s with + | NI -> 4ul + +inline_for_extraction noextract +let precomp_len (s:field_spec) = + match s with + | NI -> 4ul + +inline_for_extraction noextract +let felem (s:field_spec) = lbuffer (elem_t s) (felem_len s) + +inline_for_extraction noextract +let felem4 (s:field_spec) = lbuffer (elem_t s) (felem4_len s) + +inline_for_extraction noextract +let precomp (s:field_spec) = lbuffer (elem_t s) (precomp_len s) + +inline_for_extraction noextract +type block = lbuffer uint8 16ul + +inline_for_extraction noextract +type block4 = lbuffer uint8 64ul + +noextract +val feval: #s:field_spec -> h:mem -> e:felem s -> GTot elem +let feval #s h e = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.feval h e + +noextract +val feval4: 
#s:field_spec -> h:mem -> e:felem4 s -> GTot elem4 +let feval4 #s h e = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.feval4 h e + +noextract +let zero = GF.zero #S.gf128 + + +noextract +val get_r1: #s:field_spec -> h:mem -> p:precomp s -> GTot S.elem +let get_r1 #s h pre = + match s with + | NI -> feval h (gsub pre 3ul 1ul) + + +noextract +val get_r4321: #s:field_spec -> h:mem -> p:precomp s -> GTot elem4 +let get_r4321 #s h pre = + match s with + | NI -> feval4 h pre + + +noextract +val precomp_inv_t: #s:field_spec -> h:mem -> pre:precomp s -> Type0 +let precomp_inv_t #s h pre = + match s with + | NI -> get_r4321 h pre == load_precompute_r (get_r1 h pre) + + +inline_for_extraction noextract +val create_felem: s:field_spec -> + StackInline (felem s) + (requires fun h -> True) + (ensures fun h0 f h1 -> + stack_allocated f h0 h1 (LSeq.create (v (felem_len s)) (elem_zero s)) /\ + feval h1 f == zero) + +let create_felem s = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.create_felem () + + +inline_for_extraction noextract +val copy_felem: + #s:field_spec + -> f1:felem s + -> f2:felem s -> + Stack unit + (requires fun h -> live h f1 /\ live h f2 /\ disjoint f1 f2) + (ensures fun h0 _ h1 -> modifies1 f1 h0 h1 /\ + as_seq h1 f1 == as_seq h0 f2) + +let copy_felem #s f1 f2 = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.copy_felem f1 f2 + + +inline_for_extraction noextract +val felem_set_zero: + #s:field_spec + -> f:felem s -> + Stack unit + (requires fun h -> live h f) + (ensures fun h0 _ h1 -> modifies1 f h0 h1 /\ + feval h1 f == zero) + +let felem_set_zero #s f = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.felem_set_zero f + + +inline_for_extraction noextract +val create_felem4: s:field_spec -> + StackInline (felem4 s) + (requires fun h -> True) + (ensures fun h0 f h1 -> + stack_allocated f h0 h1 (LSeq.create (v (felem4_len s)) (elem_zero s)) /\ + feval4 h1 f == LSeq.create 4 zero) + +let create_felem4 s = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.create_felem4 () + + +inline_for_extraction noextract +val load_felem: + #s:field_spec + -> x:felem s + -> y:block -> + Stack unit + (requires fun h -> live h x /\ live h y /\ disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval h1 x == S.encode (as_seq h0 y)) + +let load_felem #s x y = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.load_felem x y + + +inline_for_extraction noextract +val load_felem4: + #s:field_spec + -> x:felem4 s + -> y:block4 -> + Stack unit + (requires fun h -> live h x /\ live h y /\ disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval4 h1 x == encode4 (as_seq h0 y)) + +let load_felem4 #s x y = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.load_felem4 x y + + +inline_for_extraction noextract +val store_felem: + #s:field_spec + -> x:block + -> y:felem s -> + Stack unit + (requires fun h -> live h x /\ live h y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + as_seq h1 x == S.store_elem (feval h0 y)) + +let store_felem #s x y = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.store_felem x y + + +inline_for_extraction noextract +val load_precompute_r: + #s:field_spec + -> pre:precomp s + -> key:block -> + Stack unit + (requires fun h -> live h pre /\ live h key /\ disjoint pre key) + (ensures fun h0 _ h1 -> modifies1 pre h0 h1 /\ + get_r1 h1 pre == S.load_elem (as_seq h0 key) /\ + precomp_inv_t h1 pre) + +let load_precompute_r #s pre key = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.load_precompute_r pre key + + +inline_for_extraction noextract +val fadd: + 
#s:field_spec + -> x:felem s + -> y:felem s -> + Stack unit + (requires fun h -> live h x /\ live h y /\ eq_or_disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval h1 x == GF.fadd #S.gf128 (feval h0 x) (feval h0 y)) + +let fadd #s x y = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.fadd x y + + +inline_for_extraction noextract +val fadd4: + #s:field_spec + -> x:felem4 s + -> y:felem4 s -> + Stack unit + (requires fun h -> live h x /\ live h y /\ eq_or_disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval4 h1 x == fadd4 (feval4 h0 x) (feval4 h0 y)) + +let fadd4 #s x y = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.fadd4 x y + + +inline_for_extraction noextract +val fmul: + #s:field_spec + -> x:felem s + -> y:felem s -> + Stack unit + (requires fun h -> live h x /\ live h y /\ eq_or_disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval h1 x == GF.fmul_be #S.gf128 (feval h0 x) (feval h0 y)) + +let fmul #s x y = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.fmul x y + + +inline_for_extraction noextract +val fadd_acc4: + #s:field_spec + -> x:felem4 s + -> acc:felem s -> + Stack unit + (requires fun h -> + live h x /\ live h acc /\ disjoint x acc) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval4 h1 x == Hacl.Spec.GF128.Vec.fadd4 (LSeq.create4 (feval h0 acc) zero zero zero) (feval4 h0 x)) + +let fadd_acc4 #s x acc = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.fadd_acc4 x acc + + +inline_for_extraction noextract +val normalize4: + #s:field_spec + -> acc:felem s + -> x:felem4 s + -> y:precomp s -> + Stack unit + (requires fun h -> + live h acc /\ live h x /\ live h y /\ + disjoint acc x /\ disjoint acc y /\ disjoint x y /\ + precomp_inv_t h y) + (ensures fun h0 _ h1 -> modifies2 x acc h0 h1 /\ + feval h1 acc == normalize4 (get_r4321 h0 y) (feval4 h0 x)) + +let normalize4 #s acc x pre = + match s with + | NI -> Hacl.Impl.Gf128.FieldNI.normalize4 acc x pre diff --git a/code/gf128/Hacl.Impl.Gf128.Generic.fst b/code/gf128/Hacl.Impl.Gf128.Generic.fst new file mode 100644 index 0000000000..62ee704f35 --- /dev/null +++ b/code/gf128/Hacl.Impl.Gf128.Generic.fst @@ -0,0 +1,387 @@ +module Hacl.Impl.Gf128.Generic + +open FStar.HyperStack +open FStar.HyperStack.All + +open Lib.IntTypes +open Lib.Buffer + +open Hacl.Impl.Gf128.Fields + +module ST = FStar.HyperStack.ST +module LSeq = Lib.Sequence + +module S = Spec.GF128 +module GF = Spec.GaloisField +module Vec = Hacl.Spec.GF128.Vec + +friend Lib.LoopCombinators + + +#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0" + +let _: squash (inversion Vec.gf128_spec) = allow_inversion Vec.gf128_spec + +let as_get_acc #s h ctx = feval h (gsub ctx 0ul (felem_len s)) + +let as_get_r #s h ctx = feval h (gsub ctx (felem4_len s) (felem_len s)) + +inline_for_extraction noextract +let get_acc #s (ctx:gcm_ctx s) : Stack (felem s) + (requires fun h -> live h ctx) + (ensures fun h0 acc h1 -> h0 == h1 /\ live h1 acc /\ acc == gsub ctx 0ul (felem_len s)) + = sub ctx 0ul (felem_len s) + +inline_for_extraction noextract +let get_r #s (ctx:gcm_ctx s) : Stack (felem s) + (requires fun h -> live h ctx) + (ensures fun h0 acc h1 -> h0 == h1 /\ live h1 acc /\ acc == gsub ctx (felem4_len s) (felem_len s)) + = sub ctx (felem4_len s) (felem_len s) + +inline_for_extraction noextract +let get_precomp #s (ctx:gcm_ctx s) : Stack (precomp s) + (requires fun h -> live h ctx) + (ensures fun h0 acc h1 -> h0 == h1 /\ live h1 acc /\ acc == gsub ctx (felem_len s) (precomp_len s)) + = sub ctx (felem_len s) (precomp_len s) + +let 
state_inv_t #s h ctx = + let pre = gsub ctx (felem_len s) (precomp_len s) in + precomp_inv_t h pre + + +inline_for_extraction noextract +val encode: + #s:field_spec + -> x:felem s + -> y:block -> + Stack unit + (requires fun h -> live h x /\ live h y /\ disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval h1 x == S.encode (as_seq h0 y)) + +let encode #s x y = load_felem x y + + +inline_for_extraction noextract +val encode4: + #s:field_spec + -> x:felem4 s + -> y:block4 -> + Stack unit + (requires fun h -> live h x /\ live h y /\ disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval4 h1 x == Vec.encode4 (as_seq h0 y)) + +let encode4 #s x y = load_felem4 x y + + +inline_for_extraction noextract +val decode: + #s:field_spec + -> x:block + -> y:felem s -> + Stack unit + (requires fun h -> live h x /\ live h y /\ disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + as_seq h1 x == S.decode (feval #s h0 y)) + +let decode #s x y = store_felem x y + + +inline_for_extraction noextract +val encode_last: + #s:field_spec + -> x:felem s + -> len:size_t{v len < 16} + -> y:lbuffer uint8 len -> + Stack unit + (requires fun h -> live h x /\ live h y /\ disjoint x y) + (ensures fun h0 _ h1 -> modifies1 x h0 h1 /\ + feval h1 x == S.encode_last (v len) (as_seq h0 y)) + +let encode_last #s x len y = + push_frame(); + let b = create 16ul (u8 0) in + update_sub b 0ul len y; + encode x b; + pop_frame() + + +inline_for_extraction noextract +val gf128_update1: + #s:field_spec + -> acc:felem s + -> x:block + -> r:felem s -> + Stack unit + (requires fun h -> + live h x /\ live h r /\ live h acc /\ + disjoint acc r /\ disjoint acc x) + (ensures fun h0 _ h1 -> modifies1 acc h0 h1 /\ + feval h1 acc == S.gf128_update1 (feval h0 r) (as_seq h0 x) (feval h0 acc)) + +let gf128_update1 #s acc x r = + push_frame(); + let elem = create_felem s in + encode elem x; + fadd acc elem; + fmul acc r; + pop_frame() + + +inline_for_extraction noextract +val gf128_update_last: + #s:field_spec + -> acc:felem s + -> len:size_t{0 < v len /\ v len < 16} + -> x:lbuffer uint8 len + -> r:felem s -> + Stack unit + (requires fun h -> + live h x /\ live h r /\ live h acc /\ + disjoint acc x /\ disjoint acc r) + (ensures fun h0 _ h1 -> modifies1 acc h0 h1 /\ + feval h1 acc == S.gf128_update_last (feval h0 r) (v len) (as_seq h0 x) (feval h0 acc)) + +let gf128_update_last #s acc len x r = + push_frame(); + let elem = create_felem s in + encode_last elem len x; + fadd acc elem; + fmul acc r; + pop_frame() + + +inline_for_extraction noextract +val gf128_update_scalar_f: + #s:field_spec + -> r:felem s + -> nb:size_t + -> len:size_t{v nb == v len / 16} + -> text:lbuffer uint8 len + -> i:size_t{v i < v nb} + -> acc:felem s -> + Stack unit + (requires fun h -> + live h r /\ live h acc /\ live h text /\ + disjoint acc r /\ disjoint acc text) + (ensures fun h0 _ h1 -> modifies1 acc h0 h1 /\ + feval h1 acc == LSeq.repeat_blocks_f #uint8 #S.elem 16 (as_seq h0 text) + (S.gf128_update1 (feval h0 r)) (v nb) (v i) (feval h0 acc)) + +let gf128_update_scalar_f #s r nb len text i acc = + let tb = sub text (i *. 
16ul) 16ul in + gf128_update1 #s acc tb r + + +#push-options "--max_fuel 1" + +inline_for_extraction noextract +val gf128_update_scalar: + #s:field_spec + -> acc:felem s + -> r:felem s + -> len:size_t + -> text:lbuffer uint8 len -> + Stack unit + (requires fun h -> + live h acc /\ live h r /\ live h text /\ + disjoint acc r /\ disjoint acc text) + (ensures fun h0 _ h1 -> modifies1 acc h0 h1 /\ + feval h1 acc == S.gf128_update (as_seq h0 text) (feval h0 acc) (feval h0 r)) + +let gf128_update_scalar #s acc r len text = + let nb = len /. 16ul in + let rem = len %. 16ul in + let h0 = ST.get () in + + LSeq.lemma_repeat_blocks #uint8 #S.elem 16 (as_seq h0 text) + (S.gf128_update1 (feval h0 r)) + (S.gf128_update_last (feval h0 r)) + (feval h0 acc); + + [@ inline_let] + let spec_fh h0 = + LSeq.repeat_blocks_f 16 (as_seq h0 text) + (S.gf128_update1 (feval h0 r)) (v nb) in + + [@ inline_let] + let inv h (i:nat{i <= v nb}) = + modifies1 acc h0 h /\ + live h acc /\ live h r /\ live h text /\ + disjoint acc r /\ disjoint acc text /\ + feval h acc == Lib.LoopCombinators.repeati i (spec_fh h0) (feval h0 acc) in + + Lib.Loops.for (size 0) nb inv + (fun i -> + Lib.LoopCombinators.unfold_repeati (v nb) (spec_fh h0) (feval h0 acc) (v i); + gf128_update_scalar_f #s r nb len text i acc); + + let h1 = ST.get () in + assert (feval h1 acc == Lib.LoopCombinators.repeati (v nb) (spec_fh h0) (feval h0 acc)); + + if rem >. 0ul then ( + let last = sub text (nb *! 16ul) rem in + as_seq_gsub h1 text (nb *! 16ul) rem; + assert (disjoint acc last); + gf128_update_last #s acc rem last r) +#pop-options + +inline_for_extraction noextract +val gf128_update_multi_add_mul_f: + #s:field_spec + -> pre:precomp s + -> nb:size_t + -> len:size_t{v nb == v len / 64 /\ 0 < v len /\ v len % 64 = 0} + -> text:lbuffer uint8 len + -> b4:felem4 s + -> i:size_t{v i < v nb} + -> acc:felem s -> + Stack unit + (requires fun h -> + live h pre /\ live h acc /\ live h text /\ live h b4 /\ + disjoint acc pre /\ disjoint acc text /\ disjoint b4 acc /\ + disjoint b4 text /\ disjoint b4 pre /\ + precomp_inv_t h pre) + (ensures fun h0 _ h1 -> + modifies2 acc b4 h0 h1 /\ precomp_inv_t h1 pre /\ + feval h1 acc == LSeq.repeat_blocks_f #uint8 #S.elem 64 (as_seq h0 text) + (Vec.gf128_update4_add_mul (get_r4321 h0 pre)) (v nb) (v i) (feval h0 acc)) + +let gf128_update_multi_add_mul_f #s pre nb len text b4 i acc = + let tb = sub text (i *! 64ul) 64ul in + encode4 b4 tb; + fadd_acc4 b4 acc; + normalize4 acc b4 pre + + +#push-options "--z3rlimit 100 --max_fuel 1" +inline_for_extraction noextract +val gf128_update_multi_add_mul: + #s:field_spec + -> acc:felem s + -> pre:precomp s + -> len:size_t{0 < v len /\ v len % 64 = 0} + -> text:lbuffer uint8 len -> + Stack unit + (requires fun h -> + live h acc /\ live h pre /\ live h text /\ + disjoint acc pre /\ disjoint acc text /\ + precomp_inv_t h pre) + (ensures fun h0 _ h1 -> + modifies1 acc h0 h1 /\ precomp_inv_t h1 pre /\ + feval h1 acc == Vec.gf128_update_multi_add_mul (as_seq h0 text) (feval h0 acc) (get_r1 h0 pre)) + +let gf128_update_multi_add_mul #s acc pre len text = + push_frame (); + let b4 = create_felem4 s in + let nb = len /. 
64ul in + + let h0 = ST.get () in + + LSeq.lemma_repeat_blocks_multi #uint8 #Vec.elem 64 (as_seq h0 text) + (Vec.gf128_update4_add_mul (get_r4321 h0 pre)) (feval h0 acc); + + [@ inline_let] + let spec_fh h0 = + LSeq.repeat_blocks_f 64 (as_seq h0 text) + (Vec.gf128_update4_add_mul (get_r4321 h0 pre)) (v nb) in + + [@ inline_let] + let inv h (i:nat{i <= v nb}) = + modifies2 acc b4 h0 h /\ + live h pre /\ live h acc /\ live h text /\ live h b4 /\ + disjoint acc pre /\ disjoint acc text /\ disjoint b4 acc /\ + disjoint b4 text /\ disjoint b4 pre /\ + precomp_inv_t h pre /\ + feval h acc == Lib.LoopCombinators.repeati i (spec_fh h0) (feval h0 acc) in + + Lib.Loops.for (size 0) nb inv + (fun i -> + Lib.LoopCombinators.unfold_repeati (v nb) (spec_fh h0) (feval h0 acc) (v i); + gf128_update_multi_add_mul_f #s pre nb len text b4 i acc); + + let h1 = ST.get () in + assert (feval h1 acc == Lib.LoopCombinators.repeati (v nb) (spec_fh h0) (feval h0 acc)); + pop_frame () +#pop-options + +inline_for_extraction noextract +val gf128_update_multi: + #s:field_spec + -> acc:felem s + -> pre:precomp s + -> len:size_t{0 < v len /\ v len % 64 = 0} + -> text:lbuffer uint8 len -> + Stack unit + (requires fun h -> + live h acc /\ live h pre /\ live h text /\ + disjoint acc pre /\ disjoint acc text /\ + precomp_inv_t h pre) + (ensures fun h0 _ h1 -> + modifies1 acc h0 h1 /\ precomp_inv_t h1 pre /\ + feval h1 acc == Vec.gf128_update_multi (as_seq h0 text) (feval h0 acc) (get_r1 h0 pre)) + +let gf128_update_multi #s acc pre len text = + gf128_update_multi_add_mul acc pre len text + + +#push-options "--z3rlimit 100" +inline_for_extraction noextract +val gf128_update_vec: + #s:field_spec + -> acc:felem s + -> pre:precomp s + -> len:size_t + -> text:lbuffer uint8 len -> + Stack unit + (requires fun h -> + live h acc /\ live h pre /\ live h text /\ + disjoint acc pre /\ disjoint acc text /\ + precomp_inv_t h pre) + (ensures fun h0 _ h1 -> + modifies1 acc h0 h1 /\ precomp_inv_t h1 pre /\ + feval h1 acc == Vec.gf128_update_vec (as_seq h0 text) (feval h0 acc) (get_r1 h0 pre)) + +let gf128_update_vec #s acc pre len text = + let len0 = len /. 64ul *! 64ul in + let t0 = sub text 0ul len0 in + if (len0 >. 0ul) then gf128_update_multi #s acc pre len0 t0; + + let len1 = len -! len0 in + let t1 = sub text len0 len1 in + let r1 = sub pre (3ul *! felem_len s) (felem_len s) in + gf128_update_scalar #s acc r1 len1 t1 +#pop-options + + +let gf128_init #s ctx key = + let acc = get_acc ctx in + let pre = get_precomp ctx in + + felem_set_zero acc; + load_precompute_r pre key + + +let gf128_update #s ctx len text = + let acc = get_acc ctx in + let pre = get_precomp ctx in + let h0 = ST.get () in + gf128_update_vec #s acc pre len text; + let h1 = ST.get () in + Hacl.Spec.GF128.Equiv.gf128_update_vec_eq_lemma (as_seq h0 text) (as_get_acc h0 ctx) (as_get_r h0 ctx); + assert (as_get_acc h1 ctx == S.gf128_update (as_seq h0 text) (as_get_acc h0 ctx) (as_get_r h0 ctx)) + + +let gf128_emit #s tag ctx = + let acc = get_acc ctx in + decode tag acc + + +let ghash #s tag len text key = + push_frame(); + let ctx : gcm_ctx s = create (felem_len s +! 
precomp_len s) (elem_zero s) in + gf128_init ctx key; + gf128_update ctx len text; + gf128_emit tag ctx; + pop_frame() diff --git a/code/gf128/Hacl.Impl.Gf128.Generic.fsti b/code/gf128/Hacl.Impl.Gf128.Generic.fsti new file mode 100644 index 0000000000..eabab8c17f --- /dev/null +++ b/code/gf128/Hacl.Impl.Gf128.Generic.fsti @@ -0,0 +1,88 @@ +module Hacl.Impl.Gf128.Generic + +open FStar.HyperStack +open FStar.HyperStack.All + +open Lib.IntTypes +open Lib.Buffer + +open Hacl.Impl.Gf128.Fields + +module S = Spec.GF128 + + +#set-options "--z3rlimit 50 --max_fuel 0" + +inline_for_extraction noextract +let gcm_ctx (s:field_spec) = lbuffer (elem_t s) (felem_len s +! precomp_len s) + +noextract +val as_get_acc: #s:field_spec -> h:mem -> ctx:gcm_ctx s -> GTot S.elem +noextract +val as_get_r: #s:field_spec -> h:mem -> ctx:gcm_ctx s -> GTot S.elem +noextract +val state_inv_t: #s:field_spec -> h:mem -> ctx:gcm_ctx s -> Type0 + +inline_for_extraction noextract +let gf128_init_st (s:field_spec) = + ctx:gcm_ctx s + -> key:block -> + Stack unit + (requires fun h -> + live h ctx /\ live h key /\ disjoint ctx key) + (ensures fun h0 _ h1 -> + modifies1 ctx h0 h1 /\ state_inv_t h1 ctx /\ + (as_get_acc h1 ctx, as_get_r h1 ctx) == S.gf128_init (as_seq h0 key)) + + +inline_for_extraction noextract +val gf128_init: #s:field_spec -> gf128_init_st s + + +inline_for_extraction noextract +let gf128_update_st (s:field_spec) = + ctx:gcm_ctx s + -> len:size_t + -> text:lbuffer uint8 len -> + Stack unit + (requires fun h -> + live h ctx /\ live h text /\ disjoint ctx text /\ + state_inv_t h ctx) + (ensures fun h0 _ h1 -> + modifies1 ctx h0 h1 /\ state_inv_t h1 ctx /\ + as_get_acc h1 ctx == S.gf128_update (as_seq h0 text) (as_get_acc h0 ctx) (as_get_r h0 ctx)) +//as_get_acc h1 ctx == Vec.gf128_update_vec s (as_seq h0 text) (as_get_acc h0 ctx) (as_get_r h0 ctx) + + +inline_for_extraction noextract +val gf128_update: #s:field_spec -> gf128_update_st s + + +inline_for_extraction noextract +let gf128_emit_st (s:field_spec) = + tag:lbuffer uint8 16ul + -> ctx:gcm_ctx s -> + Stack unit + (requires fun h -> live h ctx /\ live h tag /\ disjoint ctx tag) + (ensures fun h0 _ h1 -> modifies1 tag h0 h1 /\ + as_seq h1 tag == S.decode (as_get_acc h0 ctx)) + + +inline_for_extraction noextract +val gf128_emit: #s:field_spec -> gf128_emit_st s + + +inline_for_extraction noextract +let ghash_st (s:field_spec) = + tag:block + -> len:size_t + -> text:lbuffer uint8 len + -> key:block -> + Stack unit + (requires fun h -> live h tag /\ live h text /\ live h key) + (ensures fun h0 _ h1 -> modifies1 tag h0 h1 /\ + as_seq h1 tag == S.gmul (as_seq h0 text) (as_seq h0 key)) + + +inline_for_extraction noextract +val ghash: #s:field_spec -> ghash_st s diff --git a/code/gf128/Hacl.Spec.GF128.Equiv.fst b/code/gf128/Hacl.Spec.GF128.Equiv.fst new file mode 100644 index 0000000000..e51343c352 --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.Equiv.fst @@ -0,0 +1,122 @@ +module Hacl.Spec.GF128.Equiv + +open FStar.Mul +open Lib.IntTypes +open Lib.Sequence +open Lib.ByteSequence +open Lib.IntVector + +open Spec.GaloisField +open Hacl.Spec.GF128.Lemmas + +module Loops = Lib.LoopCombinators +module PLoops = Lib.Sequence.Lemmas +module S = Spec.GF128 + +include Hacl.Spec.GF128.Vec +include Lib.Vec.Lemmas + + +#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0" + +val gf128_update_multi_add_mul_lemma_loop: + r:elem + -> text:bytes{length text % 64 = 0} + -> acc0:elem + -> i:nat{i < length text / 64} + -> acc:elem -> + Lemma + (let pre = load_precompute_r r in + 
let len = length text in + + let f = S.gf128_update1 r in + let f_vec = gf128_update4_add_mul pre in + let repeat_bf_sc = repeat_blocks_f 16 text f (len / 16) in + let repeat_bf_vec = repeat_blocks_f 64 text f_vec (len / 64) in + + repeat_bf_vec i acc == Loops.repeat_right (4 * i) (4 * i + 4) (Loops.fixed_a elem) repeat_bf_sc acc) + +let gf128_update_multi_add_mul_lemma_loop r text acc0 i acc = + let pre = load_precompute_r r in + let len = length text in + + let f = S.gf128_update1 r in + let f_vec = gf128_update4_add_mul pre in + let repeat_bf_sc = repeat_blocks_f 16 text f (len / 16) in + let repeat_bf_vec = repeat_blocks_f 64 text f_vec (len / 64) in + + let acc1 = repeat_bf_vec i acc in + let b0 = S.encode (Seq.slice text (i * 64) (i * 64 + 16)) in + let b1 = S.encode (Seq.slice text (i * 64 + 16) (i * 64 + 32)) in + let b2 = S.encode (Seq.slice text (i * 64 + 32) (i * 64 + 48)) in + let b3 = S.encode (Seq.slice text (i * 64 + 48) (i * 64 + 64)) in + + let acc_vec1 = load_acc (Seq.slice text (i * 64) (i * 64 + 64)) acc in + assert (acc1 == (acc +% b0) *% pre.[0] +% (zero +% b1) *% pre.[1] +% (zero +% b2) *% pre.[2] +% (zero +% b3) *% pre.[3]); + + let acc2 = Loops.repeat_right (4 * i) (4 * i + 4) (Loops.fixed_a elem) repeat_bf_sc acc in + Loops.unfold_repeat_right (4 * i) (4 * i + 4) (Loops.fixed_a elem) repeat_bf_sc acc (4 * i + 3); + Loops.unfold_repeat_right (4 * i) (4 * i + 4) (Loops.fixed_a elem) repeat_bf_sc acc (4 * i + 2); + Loops.unfold_repeat_right (4 * i) (4 * i + 4) (Loops.fixed_a elem) repeat_bf_sc acc (4 * i + 1); + Loops.unfold_repeat_right (4 * i) (4 * i + 4) (Loops.fixed_a elem) repeat_bf_sc acc (4 * i); + Loops.eq_repeat_right (4 * i) (4 * i + 4) (Loops.fixed_a elem) repeat_bf_sc acc; + + assert (acc2 == ((((acc +% b0) *% r +% b1) *% r +% b2) *% r +% b3) *% r); + gf128_update_multi_mul_add_lemma_load_acc_aux acc b0 b1 b2 b3 pre.[3]; + assert (acc2 == acc1) + + +val gf128_update_multi_add_mul_lemma: text:bytes{length text % 64 = 0} -> acc0:elem -> r:elem -> Lemma + (let pre = load_precompute_r r in + repeat_blocks_multi #uint8 #elem 64 text (gf128_update4_add_mul pre) acc0 == + repeat_blocks_multi #uint8 #elem 16 text (S.gf128_update1 r) acc0) + +let gf128_update_multi_add_mul_lemma text acc0 r = + let len = length text in + let pre = load_precompute_r r in + + let f = S.gf128_update1 r in + let f_vec = gf128_update4_add_mul pre in + let repeat_bf_sc = repeat_blocks_f 16 text f (len / 16) in + let repeat_bf_vec = repeat_blocks_f 64 text f_vec (len / 64) in + + let acc1 = repeat_blocks_multi #uint8 #elem 64 text f_vec acc0 in + lemma_repeat_blocks_multi #uint8 #elem 64 text f_vec acc0; + assert (acc1 == Loops.repeati (len / 64) repeat_bf_vec acc0); + + FStar.Classical.forall_intro_2 (gf128_update_multi_add_mul_lemma_loop r text acc0); + Lib.Vec.Lemmas.lemma_repeati_vec 4 (len / 64) (fun x -> x) repeat_bf_sc repeat_bf_vec acc0; + assert (acc1 == Loops.repeati (len / 16) repeat_bf_sc acc0); + lemma_repeat_blocks_multi #uint8 #elem 16 text f acc0 + +val gf128_update_multi_lemma: + text:bytes{64 <= length text /\ length text % 64 = 0} + -> acc0:elem + -> r:elem -> + Lemma + (gf128_update_multi text acc0 r == + repeat_blocks_multi #uint8 #elem 16 text (S.gf128_update1 r) acc0) + +let gf128_update_multi_lemma text acc0 r = + gf128_update_multi_add_mul_lemma text acc0 r + +val gf128_update_vec_eq_lemma: + text:bytes + -> acc0:elem + -> r:elem -> + Lemma (gf128_update_vec text acc0 r == S.gf128_update text acc0 r) + +let gf128_update_vec_eq_lemma text acc0 r = + let len = 
length text in + let len0 = len / 64 * 64 in + + assert (len0 % 64 = 0); + assert (len0 % 16 = 0); + let t0 = Seq.slice text 0 len0 in + let f = S.gf128_update1 r in + let l = S.gf128_update_last r in + + if len0 > 0 then begin + gf128_update_multi_lemma t0 acc0 r; + PLoops.repeat_blocks_split #uint8 #elem 16 len0 text f l acc0 end + else () diff --git a/code/gf128/Hacl.Spec.GF128.Lemmas.fst b/code/gf128/Hacl.Spec.GF128.Lemmas.fst new file mode 100644 index 0000000000..6349cbf548 --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.Lemmas.fst @@ -0,0 +1,124 @@ +module Hacl.Spec.GF128.Lemmas + +open FStar.Mul +open Lib.IntTypes +open Spec.GaloisField + +open FStar.Tactics +open FStar.Algebra.CommMonoid +open FStar.Tactics.CanonCommSemiring + +friend Lib.IntTypes + +let add_identity a = + FStar.UInt.logxor_commutative #128 (v a) (v #U128 #SEC (zero #S.gf128)); + FStar.UInt.logxor_lemma_1 #128 (v a); + v_extensionality (zero ^. a) a + +let mul_identity a = admit() + +let add_associativity a b c = + FStar.UInt.logxor_associative #128 (v a) (v b) (v c); + v_extensionality ((a ^. b) ^. c) (a ^. (b ^. c)) + +let add_commutativity a b = + FStar.UInt.logxor_commutative #128 (v a) (v b); + v_extensionality (a ^. b) (b ^. a) + +let mul_associativity a b c = admit() + +let mul_commutativity a b = admit() + + +[@canon_attr] +let elem_add_cm : cm elem = + CM zero ( +% ) add_identity add_associativity add_commutativity + +[@canon_attr] +let elem_mul_cm : cm elem = + CM one ( *% ) mul_identity mul_associativity mul_commutativity + +val mul_add_distr: distribute_left_lemma elem elem_add_cm elem_mul_cm +let mul_add_distr a b c = admit() + +val mul_zero_l: mult_zero_l_lemma elem elem_add_cm elem_mul_cm +let mul_zero_l a = admit() + +[@canon_attr] +let elem_cr : cr elem = admit() //CR elem_add_cm elem_mul_cm mul_add_distr mul_zero_l + +let gf128_semiring () : Tac unit = canon_semiring elem_cr + + +let gf128_update_multi_mul_add_lemma_load_acc_aux a0 b0 b1 b2 b3 r = + admit(); + add_identity b1; + add_identity b2; + add_identity b3; + assert ( + (a0 +% b0) *% (r *% (r *% (r *% r))) +% b1 *% (r *% (r *% r)) +% b2 *% (r *% r) +% b3 *% r == + ((((a0 +% b0) *% r +% b1) *% r +% b2) *% r +% b3) *% r) + by (gf128_semiring ()) + + +let gf128_update_multi_mul_add_lemma_loop_aux a0 a1 a2 a3 b0 b1 b2 b3 r = + admit(); + assert ( + (a0 *% (r *% (r *% (r *% r))) +% b0) *% (r *% (r *% (r *% r))) +% + (a1 *% (r *% (r *% (r *% r))) +% b1) *% (r *% (r *% r)) +% + (a2 *% (r *% (r *% (r *% r))) +% b2) *% (r *% r) +% + (a3 *% (r *% (r *% (r *% r))) +% b3) *% r == + ((((a0 *% (r *% (r *% (r *% r))) +% + a1 *% (r *% (r *% r)) +% + a2 *% (r *% r) +% + a3 *% r +% b0) *% r +% b1) *% r +% b2) *% r +% b3) *% r) + by (gf128_semiring ()) + + +module BV = FStar.BitVector + +let to_vec128 (x:uint128) : BV.bv_t 128 = UInt.to_vec #128 (v x) +let to_vec64 (x:uint64) : BV.bv_t 64 = UInt.to_vec #64 (v x) + +val to_vec128_lemma : x:elem_s -> Lemma + (to_vec128 (to_elem x) == Seq.append (to_vec64 x.[1]) (to_vec64 x.[0])) +let to_vec128_lemma x = + UInt.append_lemma (to_vec64 x.[1]) (to_vec64 x.[0]) + +val logxor_vec_lemma: x:elem_s -> y:elem_s -> Lemma + (BV.logxor_vec (to_vec128 (to_elem x)) (to_vec128 (to_elem y)) == + Seq.append (BV.logxor_vec (to_vec64 x.[1]) (to_vec64 y.[1])) (BV.logxor_vec (to_vec64 x.[0]) (to_vec64 y.[0]))) +let logxor_vec_lemma x y = + to_vec128_lemma x; + to_vec128_lemma y; + Seq.lemma_eq_intro + (BV.logxor_vec (to_vec128 (to_elem x)) (to_vec128 (to_elem y))) + (Seq.append (BV.logxor_vec (to_vec64 x.[1]) (to_vec64 y.[1])) 
(BV.logxor_vec (to_vec64 x.[0]) (to_vec64 y.[0]))) + +let logxor_s_lemma x y = + assert (to_vec128 (to_elem x ^. to_elem y) == BV.logxor_vec (to_vec128 (to_elem x)) (to_vec128 (to_elem y))); + logxor_vec_lemma x y; + assert (to_vec128 (to_elem x ^. to_elem y) == Seq.append (BV.logxor_vec (to_vec64 x.[1]) (to_vec64 y.[1])) (BV.logxor_vec (to_vec64 x.[0]) (to_vec64 y.[0]))); + let lp = logxor_s x y in + assert (to_vec64 lp.[0] == BV.logxor_vec (to_vec64 x.[0]) (to_vec64 y.[0])); + assert (to_vec64 lp.[1] == BV.logxor_vec (to_vec64 x.[1]) (to_vec64 y.[1])); + to_vec128_lemma lp + +val logand_vec_lemma: x:elem_s -> y:elem_s -> Lemma + (BV.logand_vec (to_vec128 (to_elem x)) (to_vec128 (to_elem y)) == + Seq.append (BV.logand_vec (to_vec64 x.[1]) (to_vec64 y.[1])) (BV.logand_vec (to_vec64 x.[0]) (to_vec64 y.[0]))) +let logand_vec_lemma x y = + to_vec128_lemma x; + to_vec128_lemma y; + Seq.lemma_eq_intro + (BV.logand_vec (to_vec128 (to_elem x)) (to_vec128 (to_elem y))) + (Seq.append (BV.logand_vec (to_vec64 x.[1]) (to_vec64 y.[1])) (BV.logand_vec (to_vec64 x.[0]) (to_vec64 y.[0]))) + +let logand_s_lemma x y = + assert (to_vec128 (to_elem x &. to_elem y) == BV.logand_vec (to_vec128 (to_elem x)) (to_vec128 (to_elem y))); + logand_vec_lemma x y; + assert (to_vec128 (to_elem x &. to_elem y) == Seq.append (BV.logand_vec (to_vec64 x.[1]) (to_vec64 y.[1])) (BV.logand_vec (to_vec64 x.[0]) (to_vec64 y.[0]))); + let lp = logand_s x y in + assert (to_vec64 lp.[0] == BV.logand_vec (to_vec64 x.[0]) (to_vec64 y.[0])); + assert (to_vec64 lp.[1] == BV.logand_vec (to_vec64 x.[1]) (to_vec64 y.[1])); + to_vec128_lemma lp diff --git a/code/gf128/Hacl.Spec.GF128.Lemmas.fsti b/code/gf128/Hacl.Spec.GF128.Lemmas.fsti new file mode 100644 index 0000000000..7877df184b --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.Lemmas.fsti @@ -0,0 +1,80 @@ +module Hacl.Spec.GF128.Lemmas + +open FStar.Mul +open Spec.GaloisField + +module S = Spec.GF128 + + +#set-options "--z3rlimit 30 --max_fuel 0 --max_ifuel 0" + +let elem = S.elem +let zero : elem = zero +let one : elem = one + +let ( +% ) (a b:elem) : elem = fadd #S.gf128 a b +let ( *% ) (a b:elem) : elem = fmul_be #S.gf128 a b + +val add_identity: a:elem -> Lemma (zero +% a == a) + +val mul_identity: a:elem -> Lemma (one *% a == a) + +val add_associativity: a:elem -> b:elem -> c:elem -> + Lemma (a +% b +% c == a +% (b +% c)) + +val add_commutativity: a:elem -> b:elem -> Lemma (a +% b == b +% a) + +val mul_associativity: a:elem -> b:elem -> c:elem -> + Lemma (a *% b *% c == a *% (b *% c)) + +val mul_commutativity: a:elem -> b:elem -> Lemma (a *% b == b *% a) + + + +val gf128_update_multi_mul_add_lemma_load_acc_aux: + a0:elem -> b0:elem -> b1:elem -> b2:elem -> b3:elem + -> r:elem -> + Lemma + ((a0 +% b0) *% (r *% (r *% (r *% r))) +% (zero +% b1) *% (r *% (r *% r)) +% + (zero +% b2) *% (r *% r) +% (zero +% b3) *% r == + ((((a0 +% b0) *% r +% b1) *% r +% b2) *% r +% b3) *% r) + + +val gf128_update_multi_mul_add_lemma_loop_aux: + a0:elem -> a1:elem -> a2:elem -> a3:elem + -> b0:elem -> b1:elem -> b2:elem -> b3:elem + -> r:elem -> + Lemma + ((a0 *% (r *% (r *% (r *% r))) +% b0) *% (r *% (r *% (r *% r))) +% + (a1 *% (r *% (r *% (r *% r))) +% b1) *% (r *% (r *% r)) +% + (a2 *% (r *% (r *% (r *% r))) +% b2) *% (r *% r) +% + (a3 *% (r *% (r *% (r *% r))) +% b3) *% r == + ((((a0 *% (r *% (r *% (r *% r))) +% + a1 *% (r *% (r *% r)) +% + a2 *% (r *% r) +% + a3 *% r +% b0) *% r +% b1) *% r +% b2) *% r +% b3) *% r) + + +open Lib.Sequence +open Lib.IntTypes + +let elem_s = lseq uint64 2 + +let 
to_elem (x:elem_s) : elem = + mk_int #U128 (v x.[0] + v x.[1] * pow2 64) + +let logxor_s (x:elem_s) (y:elem_s) : elem_s = + let r0 = x.[0] ^. y.[0] in + let r1 = x.[1] ^. y.[1] in + Lib.Sequence.create2 r0 r1 + +val logxor_s_lemma: x:elem_s -> y:elem_s -> Lemma + (to_elem (logxor_s x y) == to_elem x ^. to_elem y) + +let logand_s (x:elem_s) (y:elem_s) : elem_s = + let r0 = x.[0] &. y.[0] in + let r1 = x.[1] &. y.[1] in + Lib.Sequence.create2 r0 r1 + +val logand_s_lemma: x:elem_s -> y:elem_s -> Lemma + (to_elem (logand_s x y) == (to_elem x &. to_elem y)) diff --git a/code/gf128/Hacl.Spec.GF128.PolyLemmas.fst b/code/gf128/Hacl.Spec.GF128.PolyLemmas.fst new file mode 100644 index 0000000000..9dcd0f8801 --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.PolyLemmas.fst @@ -0,0 +1,308 @@ +module Hacl.Spec.GF128.PolyLemmas + +open FStar.Mul +open FStar.UInt +open FStar.Seq +open FStar.BitVector +open FStar.Math.Lemmas + +open Lib.Sequence +open Lib.IntTypes +open Lib.IntVector + +open Vale.Math.Poly2_s +open Vale.Math.Poly2 +open Vale.Math.Poly2.Bits_s +open Vale.Math.Poly2.Bits +open Vale.Math.Poly2.Lemmas + +module U = FStar.UInt +module T = Lib.IntTypes +module V = Lib.IntVector +module P = Vale.Math.Poly2_s + +let lemma_of_to_uint_128 a = + reveal_defs (); + lemma_reverse_define_all (); + lemma_equal a (of_uint 128 (to_uint 128 a)) + +let lemma_to_of_vec128 q = + lemma_create_index_vec_w1 q + +let lemma_of_to_vec128 a = + () + +let lemma_vec128_zero () = + lemma_bitwise_all (); + Vale.Arch.TypesNative.lemma_zero_nth 128; + lemma_equal P.zero (of_uint 128 0); + () + +let lemma_vec128_ones () = + lemma_bitwise_all (); + lemma_equal (ones 128) (of_uint 128 0xffffffffffffffffffffffffffffffff); + () + +let lemma_add128 a b = + lemma_bitwise_all (); + logxor_spec (mk_int #U128 #SEC (to_uint 128 a)) (mk_int #U128 #SEC (to_uint 128 b)); + assert (of_vec128 (to_vec128 a ^| to_vec128 b) == + of_uint 128 (U.logxor (to_uint 128 a) (to_uint 128 b))); //OBSERVE + lemma_equal (P.add a b) (of_vec128 (to_vec128 a ^| to_vec128 b)); + () + +let lemma_add_vec128 a b = + lemma_add128 (of_vec128 a) (of_vec128 b); + () + +let lemma_and128 a b = + lemma_bitwise_all (); + logand_spec (mk_int #U128 #SEC (to_uint 128 a)) (mk_int #U128 #SEC (to_uint 128 b)); + assert (of_vec128 (to_vec128 a &| to_vec128 b) == + of_uint 128 (U.logand (to_uint 128 a) (to_uint 128 b))); //OBSERVE + lemma_equal (poly_and a b) (of_vec128 (to_vec128 a &| to_vec128 b)); + () + +let lemma_and_vec128 a b = + lemma_and128 (of_vec128 a) (of_vec128 b); + () + +#push-options "--z3rlimit 20" +let lemma_vec128_double_shift a = + lemma_bitwise_all (); + assert (of_vec128 (to_vec128 a <<| 64ul) == + of_uint 128 (U.shift_left (to_uint 128 a) 64)); + lemma_equal (P.shift (mask a 64) 64) (of_vec128 (to_vec128 a <<| 64ul)); + lemma_mask_is_mod a 64; + lemma_shift_is_mul (P.mod a (monomial 64)) 64; + assert (of_vec128 (to_vec128 a >>| 64ul) == + of_uint 128 (U.shift_right (to_uint 128 a) 64)); + lemma_equal (P.shift a (-64)) (of_vec128 (to_vec128 a >>| 64ul)); + lemma_shift_is_div a 64; + () +#pop-options + +val lemma_eq_poly_uint_mod_64 (a:poly) (p:FStar.UInt.uint_t 128) : Lemma + (requires degree a <= 127 /\ a == of_uint 128 p) + (ensures mod a (monomial 64) == of_uint 128 ((to_uint 128 a) % pow2 64)) + +#push-options "--z3rlimit 20" +let lemma_eq_poly_uint_mod_64 a p = + let h = to_vec p in + let l = U.shift_left p 64 in + let l_h = to_vec l in + let r = U.shift_right (U.shift_left p 64) 64 in + let r_h = to_vec r in + slice_right_lemma #128 h 64; + eq_elim 
#bool #64 (slice #bool #128 l_h 0 64) (slice #bool #128 h 64 128); + eq_elim #bool #64 (slice #bool #128 r_h 0 64) (zero_vec #64); + eq_elim #bool #64 (slice #bool #128 r_h 64 128) (slice #bool #128 l_h 0 64); + from_vec_propriety #128 r_h 64; + assert (0 * pow2 64 == 0); // Speed up verifying process + assert (r == p % pow2 64); + lemma_bitwise_all (); + lemma_mask_is_mod a 64; + lemma_equal (P.mod a (monomial 64)) (of_uint 128 (p % pow2 64)); + () +#pop-options + +#push-options "--z3rlimit 50 --max_fuel 1" +val lemma_eq_poly_uint_shift_right (a:poly) (p:U.uint_t 128) (s:nat{s <= 64}) : Lemma + (requires degree a <= 127 /\ a == of_uint 128 p) + (ensures P.shift a (-s) == of_uint 128 (p / pow2 s)) + +let lemma_eq_poly_uint_shift_right a p s = + reveal_defs (); + lemma_bitwise_all (); + lemma_equal (P.shift a (-s)) (of_uint 128 (U.shift_right p s)); + () +#pop-options + +val lemma_eq_poly_uint_shift_left (a:poly) (p:U.uint_t 128) (s:nat{s <= 64}) : Lemma + (requires degree a <= 63 /\ a == of_uint 128 p) + (ensures P.shift a s == of_uint 128 ((p * pow2 s) % pow2 128)) + +let lemma_eq_poly_uint_shift_left a p s = + lemma_bitwise_all (); + lemma_equal (P.shift a s) (of_uint 128 (U.shift_left p s)); + () + +val lemma_eq_poly_uint_add (a b:poly) (p1 p2:U.uint_t 128) : Lemma + (requires degree a <= 127 /\ degree b <= 127 /\ + a == of_uint 128 p1 /\ b == of_uint 128 p2) + (ensures P.add a b == of_uint 128 (U.logxor p1 p2)) + +let lemma_eq_poly_uint_add a b p1 p2 = + lemma_bitwise_all (); + lemma_equal (P.add a b) (of_uint 128 (U.logxor p1 p2)); + () + +val to_vec_mod_pow2: #n:size_nat -> a:U.uint_t n -> m:pos -> i:nat{n - m <= i /\ i < n} -> + Lemma (requires (a % pow2 m == 0)) + (ensures (index #bool #n (to_vec a) i == false)) + [SMTPat (index #bool #n (to_vec #n a) i); SMTPat (pow2 m)] +let rec to_vec_mod_pow2 #n a m i = + if i = n - 1 then + begin + lemma_index_app2 (to_vec #(n - 1) (a / 2)) (Seq.create 1 (a % 2 = 1)) i; + mod_mult_exact a 2 (pow2 (m - 1)) + end + else + begin + lemma_index_app1 (to_vec #(n - 1) (a / 2)) (Seq.create 1 (a % 2 = 1)) i; + assert (index #bool #n (to_vec a) i == index #bool #(n - 1) (to_vec #(n - 1) (a / 2)) i); + mod_pow2_div2 a m; + to_vec_mod_pow2 #(n - 1) (a / 2) (m - 1) i + end + +val to_vec_lt_pow2: #n:size_nat -> a:U.uint_t n -> m:nat -> i:nat{i < n - m} -> + Lemma (requires (a < pow2 m)) + (ensures (index #bool #n (to_vec a) i == false)) + [SMTPat (index #bool #n (to_vec #n a) i); SMTPat (pow2 m)] +let rec to_vec_lt_pow2 #n a m i = + if n = 0 then () + else + if m = 0 then + assert (a == U.zero n) + else + begin + lemma_index_app1 (to_vec #(n - 1) (a / 2)) (Seq.create 1 (a % 2 = 1)) i; + assert (index #bool #n (to_vec a) i == index #bool #(n - 1) (to_vec #(n - 1) (a / 2)) i); + to_vec_lt_pow2 #(n - 1) (a / 2) (m - 1) i + end + +val logxor_disjoint: #n:size_nat -> a:U.uint_t n -> b:U.uint_t n -> m:pos{m < n} -> + Lemma (requires (a % pow2 m == 0 /\ b < pow2 m)) + (ensures (U.logxor #n a b == a + b)) + +#push-options "--z3rlimit 20 --max_fuel 0" +let logxor_disjoint #n a b m = + assert (forall (i:nat{n - m <= i /\ i < n}).{:pattern (index #bool #n (to_vec a) i)} + index #bool #n (to_vec a) i == false); + assert (forall (i:nat{i < n - m}).{:pattern (index #bool #n (to_vec b) i)} + index #bool #n (to_vec b) i == false); + Seq.lemma_split (logxor_vec (to_vec a) (to_vec b)) (n - m); + assert (forall (i:nat). (n - m <= i /\ i < n) ==> + index #bool #n (to_vec a) i == Seq.index (Lib.Sequence.to_seq #bool #n (to_vec a)) i); + assert (forall (i:nat). 
(i < n - m) ==> + index #bool #n (to_vec b) i == Seq.index (Lib.Sequence.to_seq #bool #n (to_vec b)) i); + Seq.lemma_eq_intro + (logxor_vec (to_vec a) (to_vec b)) + (append (slice #bool #n (to_vec a) 0 (n - m)) (slice #bool #n (to_vec b) (n - m) n)); + append_lemma #(n - m) #m (slice #bool #n (to_vec a) 0 (n - m)) (slice #bool #n (to_vec b) (n - m) n); + slice_left_lemma #n (to_vec a) (n - m); + div_exact_r a (pow2 m); + assert (from_vec #(n - m) (slice #bool #n (to_vec a) 0 (n - m)) * pow2 m == a); + slice_right_lemma #n (to_vec b) m; + small_modulo_lemma_1 b (pow2 m); + assert (from_vec #m (slice #bool #n (to_vec b) (n - m) n) == b) +#pop-options + +#push-options "--z3rlimit 150 --max_fuel 1" +let lemma_vec128_shift_left_64 p s = + vec_cast_uint128 p; + vec_cast_2_uint64 (vec_shift_left (V.cast U64 2 p) s); + eq_elim (map (shift_left_i s) (vec_v (V.cast U64 2 p))) (create2 + (shift_left_i s (to_u64 (index (vec_v p) 0))) + (shift_left_i s (to_u64 (T.shift_right (index (vec_v p) 0) 64ul))) + ); + reveal_vec_v_1 p; + reveal_vec_v_1 (V.cast U128 1 (vec_shift_left (V.cast U64 2 p) s)); + + let n = monomial 64 in + let a = of_vec128 p in + let a0 = P.mod (P.shift (P.mod a n) (v s)) n in + let a1 = P.mod (P.shift (P.div a n) (v s)) n in + let p0 = (((to_uint 128 a) % pow2 64) * pow2 (v s)) % pow2 64 in + let p1 = ((((to_uint 128 a) / pow2 64) % pow2 64) * pow2 (v s)) % pow2 64 in + let t = T.add + (T.shift_left (to_u128 (shift_left_i s (to_u64 (T.shift_right #U128 #SEC p 64ul)))) 64ul) + (to_u128 (shift_left_i s (to_u64 #U128 #SEC p))) in + assert (t == V.cast U128 1 (vec_shift_left (V.cast U64 2 p) s)); + reveal_vec_v_1 #U128 t; + assert (of_vec128 t == of_uint 128 (p1 * pow2 64 + p0)); + + lemma_eq_poly_uint_shift_right a (to_uint 128 a) 64; + lemma_div_lt (to_uint 128 a) 128 64; + small_mod ((to_uint 128 a) / pow2 64) (pow2 64); + lemma_shift_is_div a 64; + lemma_eq_poly_uint_shift_left (P.div a n) (((to_uint 128 a) / pow2 64) % pow2 64) (v s); + lemma_eq_poly_uint_mod_64 (P.shift (P.div a n) (v s)) + (((((to_uint 128 a) / pow2 64) % pow2 64) * pow2 (v s)) % pow2 128); + pow2_modulo_modulo_lemma_1 + ((((to_uint 128 a) / pow2 64) % pow2 64) * pow2 (v s)) 64 128; + lemma_eq_poly_uint_shift_left (P.mod (P.shift (P.div a n) (v s)) n) + (((((to_uint 128 a) / pow2 64) % pow2 64) * pow2 (v s)) % pow2 64) 64; + pow2_multiplication_modulo_lemma_2 + (((((to_uint 128 a) / pow2 64) % pow2 64) * pow2 (v s)) % pow2 64) 128 64; + pow2_modulo_modulo_lemma_1 ((((to_uint 128 a) / pow2 64) % pow2 64) * pow2 (v s)) 64 64; + assert (P.shift a1 64 == of_uint 128 (p1 * pow2 64)); //OBSERVE + + lemma_eq_poly_uint_mod_64 a (to_uint 128 a); + lemma_eq_poly_uint_shift_left (P.mod a n) + ((to_uint 128 a) % pow2 64) (v s); + lemma_eq_poly_uint_mod_64 (P.shift (P.mod a n) (v s)) + ((((to_uint 128 a) % pow2 64) * pow2 (v s)) % pow2 128); + pow2_modulo_modulo_lemma_1 + (((to_uint 128 a) % pow2 64) * pow2 (v s)) 64 128; + assert (a0 == of_uint 128 p0); //OBSERVE + + lemma_eq_poly_uint_add (P.shift a1 64) a0 (p1 * pow2 64) p0; + lemma_mod_lt (((to_uint 128 a) % pow2 64) * pow2 (v s)) (pow2 64); + multiple_modulo_lemma (((((to_uint 128 a) / pow2 64) % pow2 64) * pow2 (v s)) % pow2 64) (pow2 64); + logxor_disjoint #128 (p1 * pow2 64) p0 64; + assert (P.add (P.shift a1 64) a0 == of_uint 128 (p1 * pow2 64 + p0)); + () +#pop-options + +#push-options "--z3rlimit 150 --max_fuel 1" +let lemma_vec128_shift_right_64 p s = + vec_cast_uint128 p; + vec_cast_2_uint64 (vec_shift_right (V.cast U64 2 p) s); + eq_elim (map (shift_right_i s) 
(vec_v (V.cast U64 2 p))) (create2 + (shift_right_i s (to_u64 (index (vec_v p) 0))) + (shift_right_i s (to_u64 (T.shift_right (index (vec_v p) 0) 64ul))) + ); + reveal_vec_v_1 p; + reveal_vec_v_1 (V.cast U128 1 (vec_shift_right (V.cast U64 2 p) s)); + + let n = monomial 64 in + let a = of_vec128 p in + let a0 = P.shift (P.mod a n) (-(v s)) in + let a1 = P.shift (P.div a n) (-(v s)) in + let p0 = ((to_uint 128 a) % pow2 64) / pow2 (v s) in + let p1 = (((to_uint 128 a) / pow2 64) % pow2 64) / pow2 (v s) in + let t = T.add + (T.shift_left (to_u128 (shift_right_i s (to_u64 (T.shift_right #U128 #SEC p 64ul)))) 64ul) + (to_u128 (shift_right_i s (to_u64 #U128 #SEC p))) in + assert (t == V.cast U128 1 (vec_shift_right (V.cast U64 2 p) s)); + reveal_vec_v_1 #U128 t; + assert (of_vec128 t == of_uint 128 (p1 * pow2 64 + p0)); + + lemma_eq_poly_uint_shift_right a (to_uint 128 a) 64; + lemma_div_lt (to_uint 128 a) 128 64; + small_mod ((to_uint 128 a) / pow2 64) (pow2 64); + lemma_shift_is_div a 64; + lemma_eq_poly_uint_shift_right (P.div a n) (((to_uint 128 a) / pow2 64) % pow2 64) (v s); + lemma_eq_poly_uint_shift_left (P.shift (P.div a n) (-(v s))) + ((((to_uint 128 a) / pow2 64) % pow2 64) / pow2 (v s)) 64; + lemma_mod_lt ((to_uint 128 a) / pow2 64) (pow2 64); + lemma_div_lt (((to_uint 128 a) / pow2 64) % pow2 64) 64 (v s); + small_mod ((((to_uint 128 a) / pow2 64) % pow2 64) / pow2 (v s)) (pow2 (64 - (v s))); + pow2_multiplication_modulo_lemma_2 + ((((to_uint 128 a) / pow2 64) % pow2 64) / pow2 (v s)) 128 64; + pow2_modulo_modulo_lemma_2 ((((to_uint 128 a) / pow2 64) % pow2 64) / pow2 (v s)) 64 (64 - (v s)); + assert (P.shift a1 64 == of_uint 128 (p1 * pow2 64)); //OBSERVE + + lemma_eq_poly_uint_mod_64 a (to_uint 128 a); + lemma_eq_poly_uint_shift_right (P.mod a n) + ((to_uint 128 a) % pow2 64) (v s); + assert (a0 == of_uint 128 p0); //OBSERVE + + lemma_eq_poly_uint_add (P.shift a1 64) a0 (p1 * pow2 64) p0; + lemma_mod_lt (((to_uint 128 a) % pow2 64) / pow2 (v s)) (pow2 64); + multiple_modulo_lemma ((((to_uint 128 a) / pow2 64) % pow2 64) / pow2 (v s)) (pow2 64); + logxor_disjoint #128 (p1 * pow2 64) p0 64; + assert (P.add (P.shift a1 64) a0 == of_uint 128 (p1 * pow2 64 + p0)); + () +#pop-options diff --git a/code/gf128/Hacl.Spec.GF128.PolyLemmas.fsti b/code/gf128/Hacl.Spec.GF128.PolyLemmas.fsti new file mode 100644 index 0000000000..b7f7c8a80e --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.PolyLemmas.fsti @@ -0,0 +1,76 @@ +module Hacl.Spec.GF128.PolyLemmas + +open FStar.Mul + +open Lib.IntTypes +open Lib.IntVector + +open Vale.Math.Poly2_s +open Vale.Math.Poly2 +open Vale.Math.Poly2.Bits_s +open Vale.Math.Poly2.Lemmas + +open Hacl.Spec.GF128.Poly_s + +val lemma_of_to_uint_128 (a:poly) : Lemma + (requires degree a < 128) + (ensures of_uint 128 (to_uint 128 a) == a) + [SMTPat (of_uint 128 (to_uint 128 a))] + +val lemma_to_of_vec128 (q:vec128) : Lemma + (ensures to_vec128 (of_vec128 q) == q) + [SMTPat (to_vec128 (of_vec128 q))] + +val lemma_of_to_vec128 (a:poly) : Lemma + (requires degree a < 128) + (ensures of_vec128 (to_vec128 a) == a) + [SMTPat (of_vec128 (to_vec128 a))] + +val lemma_vec128_zero (_:unit) : Lemma + (of_vec128 (uint_to_vec128 0) == zero /\ uint_to_vec128 0 == to_vec128 zero) + +val lemma_vec128_ones (_:unit) : Lemma + (let q = (uint_to_vec128 0xffffffffffffffffffffffffffffffff) in + of_vec128 q == ones 128 /\ q == to_vec128 (ones 128)) + +val lemma_add128 (a b:poly) : Lemma + (requires degree a <= 127 /\ degree b <= 127) + (ensures to_vec128 (add a b) == to_vec128 a ^| to_vec128 b) 
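Concretely, lemma_add128 (and lemma_add_vec128 just below) record that addition in GF(2)[x] is coefficient-wise XOR, with no carries to propagate. A small stand-alone C sketch of the same fact, using the two-uint64 view of a 128-bit value that elem_s uses (to_elem x = x.[0] + x.[1] * 2^64); the names here are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* A 128-bit value over GF(2), split like elem_s:
   lo = bits 0..63, hi = bits 64..127. */
typedef struct { uint64_t lo, hi; } poly128;

/* GF(2)[x] addition is lane-wise XOR; this mirrors lemma_add128:
   to_vec128 (add a b) == to_vec128 a ^| to_vec128 b. */
static poly128 poly_add(poly128 a, poly128 b)
{
  poly128 r = { a.lo ^ b.lo, a.hi ^ b.hi };  /* no carries in GF(2) */
  return r;
}

int main(void)
{
  poly128 a = { 0x3, 0x0 };
  poly128 b = { 0x6, 0x0 };
  poly128 c = poly_add(a, b);   /* lanes XOR independently: c.lo == 0x5 */
  printf("%016llx%016llx\n", (unsigned long long) c.hi, (unsigned long long) c.lo);
  return 0;
}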
+ +val lemma_add_vec128 (a b:vec128) : Lemma + (ensures add (of_vec128 a) (of_vec128 b) == of_vec128 (a ^| b)) + +val lemma_and128 (a b:poly) : Lemma + (requires degree a <= 127 /\ degree b <= 127) + (ensures to_vec128 (poly_and a b) == (to_vec128 a &| to_vec128 b)) + +val lemma_and_vec128 (a b:vec128) : Lemma + (ensures poly_and (of_vec128 a) (of_vec128 b) == of_vec128 (a &| b)) + +val lemma_vec128_double_shift (a:poly) : Lemma + (requires degree a <= 127) + (ensures ( + let q = to_vec128 a in + q <<| 64ul == to_vec128 (mul (mod a (monomial 64)) (monomial 64)) /\ + q >>| 64ul == to_vec128 (div a (monomial 64)) + )) + +val lemma_vec128_shift_left_64 (p:vec128) (s:shiftval U64) : Lemma + (ensures ( + let n = monomial 64 in + let a = of_vec128 p in + let a0 = mod (shift (mod a n) (v s)) n in + let a1 = mod (shift (div a n) (v s)) n in + cast U128 1 (vec_shift_left (cast U64 2 p) s) == + to_vec128 (add (shift a1 64) a0) + )) + +val lemma_vec128_shift_right_64 (p:vec128) (s:shiftval U64) : Lemma + (ensures ( + let n = monomial 64 in + let a = of_vec128 p in + let a0 = shift (mod a n) (-(v s)) in + let a1 = shift (div a n) (-(v s)) in + cast U128 1 (vec_shift_right (cast U64 2 p) s) == + to_vec128 (add (shift a1 64) a0) + )) diff --git a/code/gf128/Hacl.Spec.GF128.PolyLemmas_helpers.fst b/code/gf128/Hacl.Spec.GF128.PolyLemmas_helpers.fst new file mode 100644 index 0000000000..35b7d4085c --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.PolyLemmas_helpers.fst @@ -0,0 +1,666 @@ +module Hacl.Spec.GF128.PolyLemmas_helpers + +open FStar.Mul + +open Vale.Math.Poly2.Bits + +let lemma_mul_h_shift_right a h = + lemma_bitwise_all (); + calc (==) { + a *. h; + == {lemma_equal h (monomial 63 +. monomial 62 +. monomial 57)} + a *. (monomial 63 +. monomial 62 +. monomial 57); + == {lemma_mul_distribute_right a (monomial 63 +. monomial 62) (monomial 57)} + (a *. (monomial 63 +. monomial 62)) +. (a *. monomial 57); + == {lemma_mul_distribute_right a (monomial 63) (monomial 62)} + (a *. monomial 63) +. (a *. monomial 62) +. (a *. monomial 57); + == { + lemma_shift_is_mul a 63; + lemma_shift_is_mul a 62; + lemma_shift_is_mul a 57 + } + (shift a 63) +. (shift a 62) +. (shift a 57); + } + +let lemma_mul_h_shift_right_mod a h = + calc (==) { + (a *. h) %. monomial 64; + == {lemma_mul_h_shift_right a h} + (shift a 63 +. shift a 62 +. shift a 57) %. monomial 64; + == {lemma_mod_distribute (shift a 63 +. shift a 62) (shift a 57) (monomial 64)} + (shift a 63 +. shift a 62) %. monomial 64 +. shift a 57 %. monomial 64; + == {lemma_mod_distribute (shift a 63) (shift a 62) (monomial 64)} + shift a 63 %. monomial 64 +. shift a 62 %. monomial 64 +. shift a 57 %. monomial 64; + } + +let lemma_mul_h_shift_left a h = + lemma_bitwise_all (); + lemma_shift_is_div (a *. h) 64; + lemma_mul_h_shift_right a h; + lemma_equal (shift (shift a 63 +. shift a 62 +. shift a 57) (-64)) + (shift (shift a 63) (-64) +. shift (shift a 62) (-64) +. shift (shift a 57) (-64)); + lemma_shift_shift a 63 (-64); + lemma_shift_shift a 62 (-64); + lemma_shift_shift a 57 (-64) + +let lemma_mul_h a h = + lemma_div_mod (a *. h) (monomial 64); + lemma_mul_h_shift_right a h; + lemma_mul_h_shift_left a h + +val lemma_h_2 (h:poly) : Lemma + (requires h == reverse gf128_low_shift 63) + (ensures h *. h == monomial 126 +. monomial 124 +. monomial 114) + +let lemma_h_2 h = + lemma_bitwise_all (); + calc (==) { + h *. h; + == {lemma_equal h (monomial 63 +. monomial 62 +. monomial 57)} + (monomial 63 +. monomial 62 +. monomial 57) *. 
h; + == {lemma_mul_distribute_left (monomial 63 +. monomial 62) (monomial 57) h} + (monomial 63 +. monomial 62) *. h +. (monomial 57) *. h; + == {lemma_mul_distribute_left (monomial 63) (monomial 62) h} + (monomial 63) *. h +. (monomial 62) *. h +. (monomial 57) *. h; + == {lemma_equal h (monomial 63 +. monomial 62 +. monomial 57)} + (monomial 63) *. (monomial 63 +. monomial 62 +. monomial 57) +. + (monomial 62) *. (monomial 63 +. monomial 62 +. monomial 57) +. + (monomial 57) *. (monomial 63 +. monomial 62 +. monomial 57); + == { + lemma_mul_distribute_right (monomial 63) (monomial 63 +. monomial 62) (monomial 57); + lemma_mul_distribute_right (monomial 62) (monomial 63 +. monomial 62) (monomial 57); + lemma_mul_distribute_right (monomial 57) (monomial 63 +. monomial 62) (monomial 57) + } + (monomial 63 *. (monomial 63 +. monomial 62) +. monomial 63 *. monomial 57) +. + (monomial 62 *. (monomial 63 +. monomial 62) +. monomial 62 *. monomial 57) +. + (monomial 57 *. (monomial 63 +. monomial 62) +. monomial 57 *. monomial 57); + == { + lemma_mul_distribute_right (monomial 63) (monomial 63) (monomial 62); + lemma_mul_distribute_right (monomial 62) (monomial 63) (monomial 62); + lemma_mul_distribute_right (monomial 57) (monomial 63) (monomial 62) + } + (monomial 63 *. monomial 63 +. monomial 63 *. monomial 62 +. monomial 63 *. monomial 57) +. + (monomial 62 *. monomial 63 +. monomial 62 *. monomial 62 +. monomial 62 *. monomial 57) +. + (monomial 57 *. monomial 63 +. monomial 57 *. monomial 62 +. monomial 57 *. monomial 57); + == { + lemma_mul_monomials 63 63; lemma_mul_monomials 63 62; lemma_mul_monomials 63 57; + lemma_mul_monomials 62 63; lemma_mul_monomials 62 62; lemma_mul_monomials 62 57; + lemma_mul_monomials 57 63; lemma_mul_monomials 57 62; lemma_mul_monomials 57 57 + } + (monomial 126 +. monomial 125 +. monomial 120) +. + (monomial 125 +. monomial 124 +. monomial 119) +. + (monomial 120 +. monomial 119 +. monomial 114); + == { + lemma_equal ((monomial 126 +. monomial 125 +. monomial 120) +. + (monomial 125 +. monomial 124 +. monomial 119) +. + (monomial 120 +. monomial 119 +. monomial 114)) (monomial 126 +. monomial 124 +. monomial 114) + } + monomial 126 +. monomial 124 +. monomial 114; + } + +let lemma_mul_h_2_zero a h = + reveal_defs (); + calc (==) { + (((a *. h) %. monomial 64) *. h) %. monomial 64; + == {lemma_mod_mul_mod (a *. h) (monomial 64) h} + ((a *. h) *. h) %. monomial 64; + == {lemma_mul_associate a h h} + (a *. (h *. h)) %. monomial 64; + == {lemma_mod_mul_mod_right a (h *. h) (monomial 64)} + (a *. ((h *. h) %. monomial 64)) %. monomial 64; + == {lemma_h_2 h} + (a *. ((monomial 126 +. monomial 124 +. monomial 114) %. monomial 64)) %. monomial 64; + == {lemma_mod_distribute (monomial 126 +. monomial 124) (monomial 114) (monomial 64)} + (a *. ((monomial 126 +. monomial 124) %. monomial 64 +. monomial 114 %. monomial 64)) %. monomial 64; + == {lemma_mod_distribute (monomial 126) (monomial 124) (monomial 64)} + (a *. (monomial 126 %. monomial 64 +. monomial 124 %. monomial 64 +. monomial 114 %. monomial 64)) %. monomial 64; + == { + lemma_mul_monomials 62 64; + lemma_add_zero (monomial 126); + lemma_div_mod_unique ((monomial 126) +. zero) (monomial 64) (monomial 62) zero; + // --> monomial 126 %. monomial 64 == zero + lemma_mul_monomials 60 64; + lemma_add_zero (monomial 124); + lemma_div_mod_unique ((monomial 124) +. zero) (monomial 64) (monomial 60) zero; + // --> monomial 124 %. 
monomial 64 == zero + lemma_mul_monomials 50 64; + lemma_add_zero (monomial 114); + lemma_div_mod_unique ((monomial 114) +. zero) (monomial 64) (monomial 50) zero + // --> monomial 114 %. monomial 64 == zero + } + (a *. (zero +. zero +. zero)) %. monomial 64; + == {lemma_add_zero zero} + (a *. zero) %. monomial 64; + == {lemma_mul_zero a} + zero; + } + +let lemma_shift_left_cancel a a0 a1 = + reveal_defs (); + calc (==) { + shift (((shift a1 64) +. a0) %. monomial 64) 64; + == {lemma_shift_is_mul a1 64} + shift ((a1 *. monomial 64 +. a0) %. monomial 64) 64; + == {lemma_mod_distribute (a1 *. monomial 64) a0 (monomial 64)} + shift ((a1 *. monomial 64) %. monomial 64 +. a0 %. monomial 64) 64; + == {lemma_mod_small a0 (monomial 64)} + shift ((a1 *. monomial 64) %. monomial 64 +. a0) 64; + == {lemma_mod_mul_mod_right a1 (monomial 64) (monomial 64)} + shift ((a1 *. (mod (monomial 64) (monomial 64))) %. monomial 64 +. a0) 64; + == {lemma_mod_cancel (monomial 64)} + shift ((a1 *. zero) %. monomial 64 +. a0) 64; + == {lemma_mul_zero a1; lemma_add_zero_left a0} + shift a0 64; + } + +let lemma_shift_right_cancel a a0 a1 = + lemma_shift_is_div a 64; + lemma_shift_is_mul a1 64; + lemma_div_mod_unique a (monomial 64) a1 a0 + +let lemma_add_left_shift a0 a1 b = + lemma_bitwise_all (); + calc (==) { + (shift a1 64) +. a0 +. (shift b 64); + == {lemma_add_commute (shift a1 64) a0} + a0 +. (shift a1 64) +. (shift b 64); + == {lemma_add_associate a0 (shift a1 64) (shift b 64)} + a0 +. ((shift a1 64) +. (shift b 64)); + == {lemma_shift_is_mul a1 64; lemma_shift_is_mul b 64} + a0 +. (a1 *. monomial 64 +. b *. monomial 64); + == {lemma_mul_distribute_left a1 b (monomial 64)} + a0 +. (a1 +. b) *. monomial 64; + == {lemma_shift_is_mul (a1 +. b) 64} + a0 +. (shift (a1 +. b) 64); + == {lemma_add_commute a0 (shift (a1 +. b) 64)} + (shift (a1 +. b) 64) +. a0; + } + +let lemma_add_left_shift_double a0 a1 b0 b1 = + lemma_bitwise_all (); + calc (==) { + (shift a1 64) +. a0 +. (shift b1 64 +. b0); + == {lemma_add_associate ((shift a1 64) +. a0) (shift b1 64) b0} + ((shift a1 64) +. a0 +. shift b1 64) +. b0; + == {lemma_add_commute (shift a1 64) a0} + (a0 +. (shift a1 64) +. shift b1 64) +. b0; + == {lemma_add_associate a0 (shift a1 64) (shift b1 64)} + (a0 +. (shift a1 64 +. shift b1 64)) +. b0; + == {lemma_shift_is_mul a1 64; lemma_shift_is_mul b1 64} + (a0 +. (a1 *. monomial 64 +. b1 *. monomial 64)) +. b0; + == {lemma_mul_distribute_left a1 b1 (monomial 64)} + (a0 +. (a1 +. b1) *. monomial 64) +. b0; + == {lemma_shift_is_mul (a1 +. b1) 64} + (a0 +. (shift (a1 +. b1) 64)) +. b0; + == {lemma_add_commute a0 (shift (a1 +. b1) 64)} + ((shift (a1 +. b1) 64) +. a0) +. b0; + == {lemma_add_associate (shift (a1 +. b1) 64) a0 b0} + (shift (a1 +. b1) 64) +. (a0 +. b0); + } + +val lemma_mul_128_x (a:poly) : Lemma + (requires degree a <= 127) + (ensures ( + let n = monomial 64 in + let nn = monomial 128 in + let l0_r63 = shift (a %. n) (-63) in + let l1_r63 = shift (a /. n) (-63) in + let l0_l1 = (shift (a %. n) 1) %. n in + let l1_l1 = (shift (a /. n) 1) %. n in + a *. monomial 1 == l1_r63 *. nn +. (l1_l1 +. l0_r63) *. n +. l0_l1 + )) + +let lemma_mul_128_x a = + let n = monomial 64 in + let nn = monomial 128 in + let l0_r63 = shift (a %. n) (-63) in + let l1_r63 = shift (a /. n) (-63) in + let l0_l1 = (shift (a %. n) 1) %. n in + let l1_l1 = (shift (a /. n) 1) %. n in + calc (==) { + a *. monomial 1; + == {lemma_div_mod a n} + ((a /. n) *. n +. (a %. n)) *. monomial 1; + == {lemma_mul_distribute_left ((a /. n) *. n) (a %. 
n) (monomial 1)} + ((a /. n) *. n) *. monomial 1 +. (a %. n) *. monomial 1; + == {lemma_shift_is_mul (a %. n) 1} + ((a /. n) *. n) *. monomial 1 +. (shift (a %. n) 1); + == {lemma_div_mod (shift (a %. n) 1) n} + ((a /. n) *. n) *. monomial 1 +. + (((shift (a %. n) 1) /. n) *. n +. ((shift (a %. n) 1) %. n)); + == {lemma_shift_is_div (shift (a %. n) 1) 64} + ((a /. n) *. n) *. monomial 1 +. + ((shift (shift (a %. n) 1) (-64)) *. n +. ((shift (a %. n) 1) %. n)); + == {lemma_shift_shift (a %. n) 1 (-64)} + ((a /. n) *. n) *. monomial 1 +. (l0_r63 *. n +. l0_l1); + == { + lemma_mul_commute (a /. n) n; + lemma_mul_associate n (a /. n) (monomial 1); + lemma_mul_commute n ((a /. n) *. monomial 1) + } + ((a /. n) *. monomial 1) *. n +. (l0_r63 *. n +. l0_l1); + == {lemma_shift_is_mul (a /. n) 1} + (shift (a /. n) 1) *. n +. (l0_r63 *. n +. l0_l1); + == {lemma_div_mod (shift (a /. n) 1) n} + (((shift (a /. n) 1) /. n) *. n +. ((shift (a /. n) 1) %. n)) *. n +. + (l0_r63 *. n +. l0_l1); + == {lemma_mul_distribute_left (((shift (a /. n) 1) /. n) *. n) ((shift (a /. n) 1) %. n) n} + (((shift (a /. n) 1) /. n) *. n *. n +. ((shift (a /. n) 1) %. n) *. n) +. + (l0_r63 *. n +. l0_l1); + == {lemma_shift_is_div (shift (a /. n) 1) 64} + ((shift (shift (a /. n) 1) (-64)) *. n *. n +. ((shift (a /. n) 1) %. n) *. n) +. + (l0_r63 *. n +. l0_l1); + == {lemma_shift_shift (a /. n) 1 (-64)} + ((shift (a /. n) (-63)) *. n *. n +. ((shift (a /. n) 1) %. n) *. n) +. + (l0_r63 *. n +. l0_l1); + == {lemma_mul_associate (shift (a /. n) (-63)) n n; lemma_mul_monomials 64 64} + l1_r63 *. nn +. l1_l1 *. n +. (l0_r63 *. n +. l0_l1); + == {lemma_add_associate (l1_r63 *. nn +. l1_l1 *. n) (l0_r63 *. n) l0_l1} + (l1_r63 *. nn +. l1_l1 *. n +. l0_r63 *. n) +. l0_l1; + == {lemma_add_associate (l1_r63 *. nn) (l1_l1 *. n) (l0_r63 *. n)} + l1_r63 *. nn +. (l1_l1 *. n +. l0_r63 *. n) +. l0_l1; + == {lemma_mul_distribute_left l1_l1 l0_r63 n} + l1_r63 *. nn +. (l1_l1 +. l0_r63) *. n +. l0_l1; + } + +let lemma_mul_x hi lo = + let n = monomial 64 in + let nn = monomial 128 in + let l0_r63 = shift (lo %. n) (-63) in + let l1_r63 = shift (lo /. n) (-63) in + let l0_l1 = (shift (lo %. n) 1) %. n in + let l1_l1 = (shift (lo /. n) 1) %. n in + let h0_r63 = shift (hi %. n) (-63) in + let h1_r63 = shift (hi /. n) (-63) in + let h0_l1 = (shift (hi %. n) 1) %. n in + let h1_l1 = (shift (hi /. n) 1) %. n in + calc (==) { + shift (hi *. nn +. lo) 1; + == {lemma_shift_is_mul (hi *. nn +. lo) 1} + (hi *. nn +. lo) *. monomial 1; + == {lemma_mul_distribute_left (hi *. nn) lo (monomial 1)} + hi *. nn *. monomial 1 +. lo *. monomial 1; + == { + lemma_mul_commute hi nn; + lemma_mul_associate nn hi (monomial 1); + lemma_mul_commute nn (hi *. monomial 1) + } + hi *. monomial 1 *. nn +. lo *. monomial 1; + == {lemma_mul_128_x hi; lemma_mul_128_x lo} + (h1_r63 *. nn +. (h1_l1 +. h0_r63) *. n +. h0_l1) *. nn +. + (l1_r63 *. nn +. (l1_l1 +. l0_r63) *. n +. l0_l1); + == { + lemma_shift_is_div hi 64; + lemma_shift_shift hi (-64) (-63); + lemma_shift_degree hi (-127); + lemma_degree_negative h1_r63 + } + (zero *. nn +. (h1_l1 +. h0_r63) *. n +. h0_l1) *. nn +. + (l1_r63 *. nn +. (l1_l1 +. l0_r63) *. n +. l0_l1); + == { + lemma_mul_commute zero nn; + lemma_mul_zero nn; + lemma_add_zero_left ((h1_l1 +. h0_r63) *. n) + } + ((h1_l1 +. h0_r63) *. n +. h0_l1) *. nn +. + (l1_r63 *. nn +. (l1_l1 +. l0_r63) *. n +. l0_l1); + == {lemma_add_associate (l1_r63 *. nn) ((l1_l1 +. l0_r63) *. n) l0_l1} + ((h1_l1 +. h0_r63) *. n +. h0_l1) *. nn +. + (l1_r63 *. nn +. ((l1_l1 +. 
l0_r63) *. n +. l0_l1)); + == { + lemma_add_associate (((h1_l1 +. h0_r63) *. n +. h0_l1) *. nn) + (l1_r63 *. nn) ((l1_l1 +. l0_r63) *. n +. l0_l1) + } + (((h1_l1 +. h0_r63) *. n +. h0_l1) *. nn +. l1_r63 *. nn) +. + ((l1_l1 +. l0_r63) *. n +. l0_l1); + == {lemma_mul_distribute_left ((h1_l1 +. h0_r63) *. n +. h0_l1) l1_r63 nn} + ((((h1_l1 +. h0_r63) *. n +. h0_l1) +. l1_r63) *. nn) +. + ((l1_l1 +. l0_r63) *. n +. l0_l1); + == {lemma_add_associate ((h1_l1 +. h0_r63) *. n) h0_l1 l1_r63} + ((h1_l1 +. h0_r63) *. n +. (h0_l1 +. l1_r63)) *. nn +. + ((l1_l1 +. l0_r63) *. n +. l0_l1); + == { + lemma_shift_is_mul (h1_l1 +. h0_r63) 64; + lemma_shift_is_mul (l1_l1 +. l0_r63) 64 + } + (shift (h1_l1 +. h0_r63) 64 +. (h0_l1 +. l1_r63)) *. nn +. + (shift (l1_l1 +. l0_r63) 64 +. l0_l1); + } + +let lemma_reduce_helper a h = + let n = monomial 64 in + let y_10c = swap a 64 +. (mask a 64) *. h in + calc (==) { + swap a 64 +. (mask a 64) *. h; + == {lemma_mask_is_mod a 64; lemma_shift_is_div a 64} + (shift (a %. n) 64 +. a /. n) +. (a %. n) *. h; + == {lemma_div_mod ((a %. n) *. h) n} + (shift (a %. n) 64 +. a /. n) +. + ((((a %. n) *. h) /. n) *. n +. ((a %. n) *. h) %. n); + == {lemma_shift_is_mul (((a %. n) *. h) /. n) 64} + (shift (a %. n) 64 +. a /. n) +. + (shift (((a %. n) *. h) /. n) 64 +. ((a %. n) *. h) %. n); + == {lemma_add_left_shift_double (a /. n) (a %. n) + (((a %. n) *. h) %. n) (((a %. n) *. h) /. n)} + shift (a %. n +. ((a %. n) *. h) /. n) 64 +. + (a /. n +. ((a %. n) *. h) %. n); + }; + calc (==) { + swap y_10c 64; + == {lemma_mask_is_mod y_10c 64; lemma_shift_is_div y_10c 64} + shift (y_10c %. n) 64 +. y_10c /. n; + == { + lemma_shift_is_mul (a %. n +. ((a %. n) *. h) /. n) 64; + lemma_div_mod_unique y_10c n (a %. n +. ((a %. n) *. h) /. n) + (a /. n +. ((a %. n) *. h) %. n) + } + shift (a /. n +. ((a %. n) *. h) %. n) 64 +. + (a %. n +. ((a %. n) *. h) /. n); + }; + calc (==) { + mask y_10c 64 *. h; + == {lemma_mask_is_mod y_10c 64} + (y_10c %. n) *. h; + == { + lemma_shift_is_mul (a %. n +. ((a %. n) *. h) /. n) 64; + lemma_div_mod_unique y_10c n (a %. n +. ((a %. n) *. h) /. n) + (a /. n +. ((a %. n) *. h) %. n) + } + (a /. n +. ((a %. n) *. h) %. n) *. h; + == { + lemma_div_mod ((a /. n +. ((a %. n) *. h) %. n) *. h) n; + lemma_shift_is_mul (((a /. n +. ((a %. n) *. h) %. n) *. h) /. n) 64 + } + shift (((a /. n +. ((a %. n) *. h) %. n) *. h) /. n) 64 +. + ((a /. n +. ((a %. n) *. h) %. n) *. h) %. n; + == { + lemma_mul_distribute_left (a /. n) (((a %. n) *. h) %. n) h; + lemma_mod_distribute ((a /. n) *. h) ((((a %. n) *. h) %. n) *. h) n + } + shift (((a /. n +. ((a %. n) *. h) %. n) *. h) /. n) 64 +. + (((a /. n) *. h) %. n +. ((((a %. n) *. h) %. n) *. h) %. n); + == { + lemma_mul_h_2_zero (a %. n) h; + lemma_add_zero_right (((a /. n) *. h) %. n) + } + shift (((a /. n +. ((a %. n) *. h) %. n) *. h) /. n) 64 +. + ((a /. n) *. h) %. n; + }; + calc (==) { + swap y_10c 64 +. mask y_10c 64 *. h; + == {lemma_add_left_shift_double (a %. n +. ((a %. n) *. h) /. n) + (a /. n +. ((a %. n) *. h) %. n) + (((a /. n) *. h) %. n) + (((a /. n +. ((a %. n) *. h) %. n) *. h) /. n)} + shift ((a /. n +. ((a %. n) *. h) %. n) +. + (((a /. n +. ((a %. n) *. h) %. n) *. h) /. n)) 64 +. + ((a %. n +. ((a %. n) *. h) /. n) +. ((a /. n) *. h) %. n); + == {lemma_add_associate (a %. n) (((a %. n) *. h) /. n) + (((a /. n) *. h) %. n)} + shift ((a /. n +. ((a %. n) *. h) %. n) +. + (((a /. n +. ((a %. n) *. h) %. n) *. h) /. n)) 64 +. + (a %. n +. (((a %. n) *. h) /. n +. ((a /. n) *. h) %. 
n)); + } + +open Vale.AES.GF128 + +let lemma_reduce_rev_helper a0 a1 h n = + let c = reverse (shift h (-1)) (n - 1) in + let y_10c = swap a0 64 +. (mask a0 64) *. c in + reveal_defs (); + lemma_mask_is_mod zero 64; + lemma_add_zero_right a0; + lemma_add_zero_right a1; + lemma_add_commute (swap y_10c 64) a1; + lemma_add_associate a1 (swap y_10c 64) (mask y_10c 64 *. c); + lemma_add_commute a1 ((swap y_10c 64) +. (mask y_10c 64 *. c)); + lemma_reduce_rev a0 zero a1 h n; + () + +let lemma_gf128_mul_accum z0 z1 a b c d = + admit() + +val lemma_add_helper1 (ac bc_m ad_m z_ac z_bc_m z_ad_m:poly) : + Lemma (ensures ( + (z_ac +. ac) +. + (z_bc_m +. bc_m) +. + (z_ad_m +. ad_m) == + (z_ac +. z_bc_m +. z_ad_m) +. (ac +. bc_m +. ad_m))) + +let lemma_add_helper1 ac bc_m ad_m z_ac z_bc_m z_ad_m = + calc (==) { + (z_ac +. ac) +. + (z_bc_m +. bc_m) +. + (z_ad_m +. ad_m); + == { + lemma_add_associate (z_ac +. ac) (z_bc_m +. bc_m) (z_ad_m +. ad_m); + lemma_add_associate (z_bc_m +. bc_m) (z_ad_m) (ad_m); + lemma_add_commute (z_bc_m) (bc_m); + lemma_add_associate (bc_m) (z_bc_m) (z_ad_m); + lemma_add_associate (z_ac +. ac) (bc_m +. (z_bc_m +. z_ad_m)) (ad_m) + } + (z_ac +. ac) +. + (bc_m +. (z_bc_m +. z_ad_m)) +. + ad_m; + == { + lemma_add_commute (bc_m) (z_bc_m +. z_ad_m); + lemma_add_associate (z_ac +. ac) (z_bc_m +. z_ad_m) (bc_m); + lemma_add_commute z_ac ac; + lemma_add_associate ac z_ac (z_bc_m +. z_ad_m); + lemma_add_associate z_ac (z_bc_m) (z_ad_m) + } + (ac +. (z_ac +. z_bc_m +. z_ad_m)) +. + bc_m +. + ad_m; + == { + lemma_add_commute ac (z_ac +. z_bc_m +. z_ad_m); + lemma_add_associate (z_ac +. z_bc_m +. z_ad_m) ac (bc_m); + lemma_add_associate (z_ac +. z_bc_m +. z_ad_m) (ac +. bc_m) (ad_m) + } + (z_ac +. z_bc_m +. z_ad_m) +. (ac +. bc_m +. ad_m); + } + +val lemma_add_helper2 (a b c:poly) : + Lemma (ensures (a +. b +. c == b +. c +. a)) + +let lemma_add_helper2 a b c = + lemma_add_commute b c; + lemma_add_associate c b a; + lemma_add_commute c (b +. a); + lemma_add_commute b a + +val lemma_add_helper3 (a b c d:poly) : + Lemma (ensures (a +. b +. (c +. d) == a +. c +. (b +. d))) + +let lemma_add_helper3 a b c d = + lemma_add_associate (a +. b) c d; + lemma_add_commute a b; + lemma_add_associate b a c; + lemma_add_commute b (a +. c); + lemma_add_associate (a +. c) b d + +val lemma_shift_helper (z_a a:poly) : + Lemma (ensures (shift (z_a +. a) 128 == shift z_a 128 +. shift a 128)) + +let lemma_shift_helper z_a a = + lemma_shift_is_mul_right (z_a +. a) 128; + lemma_mul_distribute_left z_a a (monomial 128); + lemma_shift_is_mul_right z_a 128; + lemma_shift_is_mul_right a 128 + +val lemma_add_div_dist (ac bc ad z_ac z_bc z_ad:poly) : + Lemma (ensures ( + let m = monomial 64 in + (z_ac +. ac) +. + (z_bc +. bc) /. m +. + (z_ad +. ad) /. m == + (z_ac +. z_bc /. m +. z_ad /. m) +. (ac +. bc /. m +. ad /. m))) + +let lemma_add_div_dist ac bc ad z_ac z_bc z_ad = + let m = monomial 64 in + Vale.AES.GHash_BE.lemma_div_distribute z_bc bc m; + Vale.AES.GHash_BE.lemma_div_distribute z_ad ad m; + lemma_add_helper1 ac (bc /. m) (ad /. m) z_ac (z_bc /. m) (z_ad /. m) + +val lemma_add_mod_mul_dist (bd bc ad z_bd z_bc z_ad:poly) : + Lemma (ensures ( + let m = monomial 64 in + ((z_bc +. bc) %. m) *. m +. + ((z_ad +. ad) %. m) *. m +. + (z_bd +. bd) == + ((z_bc %. m) *. m +. (z_ad %. m) *. m +. z_bd) +. + ((bc %. m) *. m +. (ad %. m) *. m +. bd))) + +let lemma_add_mod_mul_dist bd bc ad z_bd z_bc z_ad = + let m = monomial 64 in + let bc_m = (bc %. m) *. m in + let ad_m = (ad %. m) *. m in + let z_bc_m = (z_bc %. m) *. 
m in + let z_ad_m = (z_ad %. m) *. m in + lemma_mod_distribute z_bc bc m; + lemma_mod_distribute z_ad ad m; + lemma_mul_distribute_left (z_bc %. m) (bc %. m) m; + lemma_mul_distribute_left (z_ad %. m) (ad %. m) m; + lemma_add_helper1 bd bc_m ad_m z_bd z_bc_m z_ad_m; + lemma_add_helper2 (z_bd +. bd) (((z_bc +. bc) %. m) *. m) + (((z_ad +. ad) %. m) *. m); + lemma_add_helper2 z_bd z_bc_m z_ad_m; + lemma_add_helper2 bd bc_m ad_m + +let lemma_gf128_mul_4 a0 b0 c0 d0 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3 = + let m = monomial 64 in + (* first poly *) + let ab0 = a0 *. m +. b0 in + let cd0 = c0 *. m +. d0 in + let ac0 = a0 *. c0 in + let ad0 = a0 *. d0 in + let bc0 = b0 *. c0 in + let bd0 = b0 *. d0 in + (* second poly *) + let ab1 = a1 *. m +. b1 in + let cd1 = c1 *. m +. d1 in + let ac1 = a1 *. c1 in + let ad1 = a1 *. d1 in + let bc1 = b1 *. c1 in + let bd1 = b1 *. d1 in + (* third poly *) + let ab2 = a2 *. m +. b2 in + let cd2 = c2 *. m +. d2 in + let ac2 = a2 *. c2 in + let ad2 = a2 *. d2 in + let bc2 = b2 *. c2 in + let bd2 = b2 *. d2 in + (* fourth poly *) + let ab3 = a3 *. m +. b3 in + let cd3 = c3 *. m +. d3 in + let ac3 = a3 *. c3 in + let ad3 = a3 *. d3 in + let bc3 = b3 *. c3 in + let bd3 = b3 *. d3 in + (* accums *) + let z_ac3 = ac0 +. ac1 +. ac2 in + let z_bc3 = bc0 +. bc1 +. bc2 in + let z_ad3 = ad0 +. ad1 +. ad2 in + let z_bd3 = bd0 +. bd1 +. bd2 in + let z_ac2 = ac0 +. ac1 in + let z_bc2 = bc0 +. bc1 in + let z_ad2 = ad0 +. ad1 in + let z_bd2 = bd0 +. bd1 in + (* lemmas *) + calc (==) { + shift ((z_ac3 +. ac3) +. + (z_bc3 +. bc3) /. m +. + (z_ad3 +. ad3) /. m) 128 +. + (((z_bc3 +. bc3) %. m) *. m +. + ((z_ad3 +. ad3) %. m) *. m +. + (z_bd3 +. bd3)); + == { + lemma_add_div_dist ac3 bc3 ad3 z_ac3 z_bc3 z_ad3; + lemma_add_mod_mul_dist bd3 bc3 ad3 z_bd3 z_bc3 z_ad3 + } + shift ((z_ac3 +. z_bc3 /. m +. z_ad3 /. m) +. + (ac3 +. bc3 /. m +. ad3 /. m)) 128 +. + (((z_bc3 %. m) *. m +. (z_ad3 %. m) *. m +. z_bd3) +. + ((bc3 %. m) *. m +. (ad3 %. m) *. m +. bd3)); + == { + lemma_shift_helper (z_ac3 +. z_bc3 /. m +. z_ad3 /. m) + (ac3 +. bc3 /. m +. ad3 /. m); + lemma_add_helper3 + (shift ((z_ac3 +. z_bc3 /. m +. z_ad3 /. m)) 128) + (shift (ac3 +. bc3 /. m +. ad3 /. m) 128) + ((z_bc3 %. m) *. m +. (z_ad3 %. m) *. m +. z_bd3) + ((bc3 %. m) *. m +. (ad3 %. m) *. m +. bd3) + } + shift ((z_ac3 +. z_bc3 /. m +. z_ad3 /. m)) 128 +. + ((z_bc3 %. m) *. m +. (z_ad3 %. m) *. m +. z_bd3) +. + ((shift (ac3 +. bc3 /. m +. ad3 /. m) 128) +. + ((bc3 %. m) *. m +. (ad3 %. m) *. m +. bd3)); + == {lemma_gf128_mul a3 b3 c3 d3 64} + shift (((z_ac2 +. ac2) +. (z_bc2 +. bc2) /. m +. (z_ad2 +. ad2) /. m)) 128 +. + (((z_bc2 +. bc2) %. m) *. m +. ((z_ad2 +. ad2) %. m) *. m +. (z_bd2 +. bd2)) +. + (ab3 *. cd3); + == { + lemma_add_div_dist ac2 bc2 ad2 z_ac2 z_bc2 z_ad2; + lemma_add_mod_mul_dist bd2 bc2 ad2 z_bd2 z_bc2 z_ad2 + } + (shift ((z_ac2 +. z_bc2 /. m +. z_ad2 /. m) +. + (ac2 +. bc2 /. m +. ad2 /. m)) 128 +. + (((z_bc2 %. m) *. m +. (z_ad2 %. m) *. m +. z_bd2) +. + ((bc2 %. m) *. m +. (ad2 %. m) *. m +. bd2))) +. + (ab3 *. cd3); + == { + lemma_shift_helper (z_ac2 +. z_bc2 /. m +. z_ad2 /. m) + (ac2 +. bc2 /. m +. ad2 /. m); + lemma_add_helper3 + (shift ((z_ac2 +. z_bc2 /. m +. z_ad2 /. m)) 128) + (shift (ac2 +. bc2 /. m +. ad2 /. m) 128) + ((z_bc2 %. m) *. m +. (z_ad2 %. m) *. m +. z_bd2) + ((bc2 %. m) *. m +. (ad2 %. m) *. m +. bd2) + } + (shift ((z_ac2 +. z_bc2 /. m +. z_ad2 /. m)) 128 +. + ((z_bc2 %. m) *. m +. (z_ad2 %. m) *. m +. z_bd2) +. + ((shift (ac2 +. bc2 /. m +. ad2 /. m) 128) +. + ((bc2 %. m) *. m +. 
(ad2 %. m) *. m +. bd2))) +. + (ab3 *. cd3); + == {lemma_gf128_mul a2 b2 c2 d2 64} + (shift (((ac0 +. ac1) +. (bc0 +. bc1) /. m +. (ad0 +. ad1) /. m)) 128 +. + (((bc0 +. bc1) %. m) *. m +. ((ad0 +. ad1) %. m) *. m +. (bd0 +. bd1)) +. + (ab2 *. cd2)) +. + (ab3 *. cd3); + == { + lemma_add_div_dist ac1 bc1 ad1 ac0 bc0 ad0; + lemma_add_mod_mul_dist bd1 bc1 ad1 bd0 bc0 ad0 + } + ((shift ((ac0 +. bc0 /. m +. ad0 /. m) +. + (ac1 +. bc1 /. m +. ad1 /. m)) 128 +. + (((bc0 %. m) *. m +. (ad0 %. m) *. m +. bd0) +. + ((bc1 %. m) *. m +. (ad1 %. m) *. m +. bd1))) +. + (ab2 *. cd2)) +. + (ab3 *. cd3); + == { + lemma_shift_helper (ac0 +. bc0 /. m +. ad0 /. m) + (ac1 +. bc1 /. m +. ad1 /. m); + lemma_add_helper3 + (shift ((ac0 +. bc0 /. m +. ad0 /. m)) 128) + (shift (ac1 +. bc1 /. m +. ad1 /. m) 128) + ((bc0 %. m) *. m +. (ad0 %. m) *. m +. bd0) + ((bc1 %. m) *. m +. (ad1 %. m) *. m +. bd1) + } + ((shift ((ac0 +. bc0 /. m +. ad0 /. m)) 128 +. + ((bc0 %. m) *. m +. (ad0 %. m) *. m +. bd0) +. + ((shift (ac1 +. bc1 /. m +. ad1 /. m) 128) +. + ((bc1 %. m) *. m +. (ad1 %. m) *. m +. bd1))) +. + (ab2 *. cd2)) +. + (ab3 *. cd3); + == { + lemma_gf128_mul a1 b1 c1 d1 64; + lemma_gf128_mul a0 b0 c0 d0 64 + } + (ab0 *. cd0) +. (ab1 *. cd1) +. (ab2 *. cd2) +. (ab3 *. cd3); + } + +let lemma_add_helper_m a b c d p = + lemma_add_associate p a b; + lemma_add_associate p (a +. b) c; + lemma_add_associate p (a +. b +. c) d; + lemma_add_commute p (a +. b +. c +. d) diff --git a/code/gf128/Hacl.Spec.GF128.PolyLemmas_helpers.fsti b/code/gf128/Hacl.Spec.GF128.PolyLemmas_helpers.fsti new file mode 100644 index 0000000000..e44ca1d9de --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.PolyLemmas_helpers.fsti @@ -0,0 +1,164 @@ +module Hacl.Spec.GF128.PolyLemmas_helpers + +open FStar.Mul + +open Vale.Math.Poly2_s +open Vale.Math.Poly2 +open Vale.Math.Poly2.Bits_s +open Vale.Math.Poly2.Lemmas +open Vale.AES.GF128_s + +// of_fun 8 (fun (i:nat) -> i = 0 || i = 1 || i = 2 || i = 7) +let gf128_low : poly = gf128_modulus_low_terms + +// of_fun 8 (fun (i:nat) -> i = 0 || i = 1 || i = 6) +let gf128_low_shift : poly = shift gf128_modulus_low_terms (-1) + +val lemma_mul_h_shift_right (a h:poly) : Lemma + (requires degree a <= 63 /\ h == reverse gf128_low_shift 63) + (ensures a *. h == shift a 63 +. shift a 62 +. shift a 57) + +val lemma_mul_h_shift_right_mod (a h:poly) : Lemma + (requires degree a <= 63 /\ h == reverse gf128_low_shift 63) + (ensures (a *. h) %. monomial 64 == + shift a 63 %. monomial 64 +. shift a 62 %. monomial 64 +. shift a 57 %. monomial 64) + +val lemma_mul_h_shift_left (a h:poly) : Lemma + (requires degree a <= 63 /\ h == reverse gf128_low_shift 63) + (ensures (a *. h) /. monomial 64 == shift a (-1) +. shift a (-2) +. shift a (-7)) + +val lemma_mul_h (a h:poly) : Lemma + (requires degree a <= 63 /\ h == reverse gf128_low_shift 63) + (ensures a *. h == (shift a (-1) +. shift a (-2) +. shift a (-7)) *. monomial 64 +. + (shift a 63 +. shift a 62 +. shift a 57) %. monomial 64) + +val lemma_mul_h_2_zero (a h:poly) : Lemma + (requires degree a <= 63 /\ h == reverse gf128_low_shift 63) + (ensures (((a *. h) %. monomial 64) *. h) %. monomial 64 == zero) + +val lemma_shift_left_cancel (a a0 a1:poly) : Lemma + (requires degree a <= 127 /\ degree a0 <= 63 /\ + degree a1 <= 63 /\ a == (shift a1 64) +. a0) + (ensures shift (a %. monomial 64) 64 == shift a0 64) + +val lemma_shift_right_cancel (a a0 a1:poly) : Lemma + (requires degree a <= 127 /\ degree a0 <= 63 /\ + degree a1 <= 63 /\ a == (shift a1 64) +. 
a0) + (ensures shift a (-64) == a1) + +val lemma_add_left_shift (a0 a1 b:poly) : Lemma + (requires degree a0 <= 63 /\ degree a1 <= 63 /\ degree b <= 63) + (ensures (shift a1 64) +. a0 +. (shift b 64) == (shift (a1 +. b) 64) +. a0) + +val lemma_add_left_shift_double (a0 a1 b0 b1:poly) : Lemma + (requires degree a0 <= 63 /\ degree a1 <= 63 /\ + degree b0 <= 63 /\ degree b1 <= 63) + (ensures (shift a1 64) +. a0 +. (shift b1 64 +. b0) == + (shift (a1 +. b1) 64) +. (a0 +. b0)) + +val lemma_mul_x (hi lo:poly) : Lemma + (requires degree hi <= 126 /\ degree lo <= 127) + (ensures ( + let n = monomial 64 in + let nn = monomial 128 in + let l0_r63 = shift (lo %. n) (-63) in + let l1_r63 = shift (lo /. n) (-63) in + let l0_l1 = (shift (lo %. n) 1) %. n in + let l1_l1 = (shift (lo /. n) 1) %. n in + let h0_r63 = shift (hi %. n) (-63) in + let h0_l1 = (shift (hi %. n) 1) %. n in + let h1_l1 = (shift (hi /. n) 1) %. n in + shift (hi *. nn +. lo) 1 == + (shift (h1_l1 +. h0_r63) 64 +. (h0_l1 +. l1_r63)) *. nn +. + (shift (l1_l1 +. l0_r63) 64 +. l0_l1) + )) + +val lemma_reduce_helper (a h:poly) : Lemma + (requires degree a <= 127 /\ h == reverse gf128_low_shift 63) + (ensures ( + let n = monomial 64 in + let y_10c = swap a 64 +. (mask a 64) *. h in + swap y_10c 64 +. mask y_10c 64 *. h == + (shift ((a /. n +. ((a %. n) *. h) %. n) +. + (((a /. n +. ((a %. n) *. h) %. n) *. h) /. n)) 64 +. + (a %. n +. (((a %. n) *. h) /. n +. ((a /. n) *. h) %. n))) + )) + +val lemma_reduce_rev_helper (a0 a1 h:poly) (n:pos) : Lemma + (requires + n == 64 /\ + degree a0 < n + n /\ + degree a1 < n + n /\ + degree (monomial (n + n) +. h) == n + n /\ + degree h < n /\ + h.[0] + ) + (ensures ( + let nn = n + n in + let mm = monomial nn in + let m = monomial n in + let g = mm +. h in + let c = reverse (shift h (-1)) (n - 1) in + let y_10c = swap a0 64 +. (mask a0 64) *. c in + let a = a0 +. shift a1 128 in + let x = reverse a (nn + nn - 1) in + reverse (x %. g) (nn - 1) == swap y_10c 64 +. mask y_10c 64 *. c +. a1 + )) + +val lemma_gf128_mul_accum (z0 z1 a b c d:poly) (n:nat) : Lemma + (ensures ( + let z = shift z1 (n + n) +. z0 in + let m = monomial n in + let ab = a *. m +. b in + let cd = c *. m +. d in + let ac = a *. c in + let ad = a *. d in + let bc = b *. c in + let bd = b *. d in + z +. ab *. cd == + shift (z1 +. ac +. bc /. m +. ad /. m) (n + n) +. + (z0 +. (bc %. m) *. m +. (ad %. m) *. m +. bd) + )) + +val lemma_gf128_mul_4 (a0 b0 c0 d0 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3:poly) : + Lemma (ensures ( + let m = monomial 64 in + (* first poly *) + let ab0 = a0 *. m +. b0 in + let cd0 = c0 *. m +. d0 in + let ac0 = a0 *. c0 in + let ad0 = a0 *. d0 in + let bc0 = b0 *. c0 in + let bd0 = b0 *. d0 in + (* second poly *) + let ab1 = a1 *. m +. b1 in + let cd1 = c1 *. m +. d1 in + let ac1 = a1 *. c1 in + let ad1 = a1 *. d1 in + let bc1 = b1 *. c1 in + let bd1 = b1 *. d1 in + (* third poly *) + let ab2 = a2 *. m +. b2 in + let cd2 = c2 *. m +. d2 in + let ac2 = a2 *. c2 in + let ad2 = a2 *. d2 in + let bc2 = b2 *. c2 in + let bd2 = b2 *. d2 in + (* fourth poly *) + let ab3 = a3 *. m +. b3 in + let cd3 = c3 *. m +. d3 in + let ac3 = a3 *. c3 in + let ad3 = a3 *. d3 in + let bc3 = b3 *. c3 in + let bd3 = b3 *. d3 in + (ab0 *. cd0) +. (ab1 *. cd1) +. (ab2 *. cd2) +. (ab3 *. cd3) == + shift ((ac0 +. ac1 +. ac2 +. ac3) +. + (bc0 +. bc1 +. bc2 +. bc3) /. m +. + (ad0 +. ad1 +. ad2 +. ad3) /. m) 128 +. + (((bc0 +. bc1 +. bc2 +. bc3) %. m) *. m +. + ((ad0 +. ad1 +. ad2 +. ad3) %. m) *. m +. + (bd0 +. bd1 +. bd2 +. 
bd3)) + )) + +val lemma_add_helper_m (a b c d p:poly) : + Lemma (ensures (p +. a +. b +. c +. d == (a +. b +. c +. d) +. p)) diff --git a/code/gf128/Hacl.Spec.GF128.Poly_s.fst b/code/gf128/Hacl.Spec.GF128.Poly_s.fst new file mode 100644 index 0000000000..97618adecf --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.Poly_s.fst @@ -0,0 +1,46 @@ +module Hacl.Spec.GF128.Poly_s + +open FStar.UInt + +open Lib.IntTypes +open Lib.IntVector + +open Vale.Math.Poly2_s +open Vale.Math.Poly2.Bits_s +open Vale.Math.Poly2 + +inline_for_extraction noextract +let vec128 = vec_t U128 1 + +inline_for_extraction noextract +let uint_to_vec128 (s:UInt.uint_t 128) : vec128 = + vec_t_v (Lib.Sequence.create 1 (mk_int s)) + +inline_for_extraction noextract +let vec128_to_uint (s:vec128) : UInt.uint_t 128 = + v (Lib.Sequence.index (vec_v s) 0) + +inline_for_extraction noextract +let to_vec128 (a:poly{degree a < 128}) : vec128 = + uint_to_vec128 (to_uint 128 a) + +inline_for_extraction noextract +let of_vec128 (u:vec128) : a:poly{degree a < 128} = + reveal_defs (); + of_uint 128 (vec128_to_uint u) + +assume val vec_clmul_lo_lo_lemma: x:vec128 -> y:vec128 -> Lemma + (of_vec128 (vec_clmul_lo_lo x y) == + mul (mod (of_vec128 x) (monomial 64)) (mod (of_vec128 y) (monomial 64))) + +assume val vec_clmul_hi_lo_lemma: x:vec128 -> y:vec128 -> Lemma + (of_vec128 (vec_clmul_hi_lo x y) == + mul (shift (of_vec128 x) (-64)) (mod (of_vec128 y) (monomial 64))) + +assume val vec_clmul_lo_hi_lemma: x:vec128 -> y:vec128 -> Lemma + (of_vec128 (vec_clmul_lo_hi x y) == + mul (mod (of_vec128 x) (monomial 64)) (shift (of_vec128 y) (-64))) + +assume val vec_clmul_hi_hi_lemma: x:vec128 -> y:vec128 -> Lemma + (of_vec128 (vec_clmul_hi_hi x y) == + mul (shift (of_vec128 x) (-64)) (shift (of_vec128 y) (-64))) diff --git a/code/gf128/Hacl.Spec.GF128.Vec.fst b/code/gf128/Hacl.Spec.GF128.Vec.fst new file mode 100644 index 0000000000..1335b23fe0 --- /dev/null +++ b/code/gf128/Hacl.Spec.GF128.Vec.fst @@ -0,0 +1,69 @@ +module Hacl.Spec.GF128.Vec + +open FStar.Mul +open Lib.IntTypes +open Lib.Sequence +open Lib.ByteSequence +open Lib.IntVector + +open Spec.GaloisField + +module Scalar = Spec.GF128 + +type gf128_spec = + | NI + +#reset-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0" + +let _: squash (inversion gf128_spec) = allow_inversion gf128_spec + +let elem = Scalar.elem +let gf128 = Scalar.gf128 + +let elem2 = lseq elem 2 + +let elem4 = lseq elem 4 +let fadd4 (a:elem4) (b:elem4) : elem4 = map2 fadd a b +let fmul4 (a:elem4) (b:elem4) : elem4 = map2 fmul_be a b + +let load_elem4 (b:lbytes 64) : elem4 = + let b1 = load_felem_be #gf128 (sub b 0 16) in + let b2 = load_felem_be #gf128 (sub b 16 16) in + let b3 = load_felem_be #gf128 (sub b 32 16) in + let b4 = load_felem_be #gf128 (sub b 48 16) in + create4 b1 b2 b3 b4 + +let encode4 (w:lbytes 64) : Tot elem4 = load_elem4 w + +let load_acc (text:lbytes 64) (acc:elem): elem4 = + fadd4 (create4 acc zero zero zero) (encode4 text) + +let normalize4 (pre:elem4) (acc:elem4) : elem = + let a = fmul4 acc pre in + fadd (fadd (fadd a.[0] a.[1]) a.[2]) a.[3] + +let load_precompute_r (r:elem) : elem4 = + let r2 = r `fmul_be` r in + let r3 = r `fmul_be` r2 in + let r4 = r `fmul_be` r3 in + create4 r4 r3 r2 r + +let gf128_update4_add_mul (pre:elem4) (b:lbytes 64) (acc:elem) : Tot elem = + let acc = load_acc b acc in + normalize4 pre acc + +let gf128_update_multi_add_mul (text:bytes{0 < length text /\ length text % 64 = 0}) (acc:elem) (r:elem) : elem = + let pre = load_precompute_r r in + repeat_blocks_multi #uint8 
#elem 64 text (gf128_update4_add_mul pre) acc + +let gf128_update_multi (text:bytes{0 < length text /\ length text % 64 = 0}) (acc:elem) (r:elem) : elem = + gf128_update_multi_add_mul text acc r + +let gf128_update_vec (text:bytes) (acc:elem) (r:elem) : Tot elem = + let len = length text in + let len0 = len / 64 * 64 in + let t0 = Seq.slice text 0 len0 in + let acc = if len0 > 0 then gf128_update_multi t0 acc r else acc in + + let t1 = Seq.slice text len0 len in + Scalar.gf128_update t1 acc r diff --git a/code/gf128/Hacl.Spec.Gf128.FieldNI.fst b/code/gf128/Hacl.Spec.Gf128.FieldNI.fst new file mode 100644 index 0000000000..3041b6a646 --- /dev/null +++ b/code/gf128/Hacl.Spec.Gf128.FieldNI.fst @@ -0,0 +1,573 @@ +module Hacl.Spec.Gf128.FieldNI + +open FStar.Mul + +open Lib.IntTypes +open Lib.IntVector +open Lib.Sequence + +open Vale.Math.Poly2_s +open Vale.Math.Poly2 +open Vale.Math.Poly2.Lemmas +open Vale.AES.GF128 +open Vale.AES.GHash_BE + +open Hacl.Spec.GF128.Poly_s +open Hacl.Spec.GF128.PolyLemmas +open Hacl.Spec.GF128.PolyLemmas_helpers + +module S = Spec.GF128 +module GF = Spec.GaloisField +module Vec = Hacl.Spec.GF128.Vec + +#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0" + +inline_for_extraction noextract +let to_elem (s:vec128) : S.elem = + index (vec_v s) 0 + +inline_for_extraction noextract +let to_elem4 (x1:vec128) (x2:vec128) (x3:vec128) (x4:vec128) : Vec.elem4 = + create4 (index (vec_v x1) 0) (index (vec_v x2) 0) (index (vec_v x3) 0) (index (vec_v x4) 0) + +inline_for_extraction +let cl_add (x:vec128) (y:vec128) : Tot vec128 = vec_xor x y + +val lemma_cl_add (x:vec128) (y:vec128) : Lemma + (ensures cl_add x y == to_vec128 (add (of_vec128 x) (of_vec128 y))) + +let lemma_cl_add x y = + lemma_add_vec128 x y; + () + +inline_for_extraction +let clmul_wide (x:vec128) (y:vec128) : Tot (vec128 & vec128) = + let lo = vec_clmul_lo_lo x y in + let m1 = vec_clmul_hi_lo x y in + let m2 = vec_clmul_lo_hi x y in + let hi = vec_clmul_hi_hi x y in + let m1 = cl_add m1 m2 in + let m2 = vec_shift_left m1 64ul in + let m1 = vec_shift_right m1 64ul in + let lo = cl_add lo m2 in + let hi = cl_add hi m1 in + (hi,lo) + +val lemma_clmul_wide (x:vec128) (y:vec128) : Lemma + (ensures ( + let (hi, lo) = clmul_wide x y in + mul (of_vec128 x) (of_vec128 y) == add (shift (of_vec128 hi) 128) (of_vec128 lo) + )) + +let lemma_clmul_wide x y = + (* vec128 variables *) + let lo = vec_clmul_lo_lo x y in + let m1 = vec_clmul_hi_lo x y in + let m2 = vec_clmul_lo_hi x y in + let hi = vec_clmul_hi_hi x y in + let m12 = cl_add m1 m2 in + let m12_0 = vec_shift_left m12 64ul in + let m12_1 = vec_shift_right m12 64ul in + let lo_m = cl_add lo m12_0 in + let hi_m = cl_add hi m12_1 in + (* poly variables *) + let ab = of_vec128 x in + let cd = of_vec128 y in + let n = monomial 64 in + let a = div ab n in + let b = mod ab n in + let c = div cd n in + let d = mod cd n in + let ac = mul a c in + let ad = mul a d in + let bc = mul b c in + let bd = mul b d in + (* vec128 lemmas *) + vec_clmul_lo_lo_lemma x y; + vec_clmul_hi_lo_lemma x y; + vec_clmul_lo_hi_lemma x y; + vec_clmul_hi_hi_lemma x y; + lemma_cl_add m1 m2; + lemma_vec128_double_shift (of_vec128 m12); + lemma_cl_add lo m12_0; + lemma_cl_add hi m12_1; + (* poly lemmas *) + lemma_shift_is_div ab 64; + lemma_shift_is_div cd 64; + lemma_div_distribute ad bc n; + lemma_add_commute (div ad n) (div bc n); + lemma_add_associate ac (div bc n) (div ad n); + lemma_mod_distribute ad bc n; + lemma_mul_commute (add (mod ad n) (mod bc n)) n; + lemma_mul_distribute n (mod ad n) 
(mod bc n); + lemma_mul_commute n (mod ad n); + lemma_mul_commute n (mod bc n); + lemma_add_commute (mul (mod ad n) n) (mul (mod bc n) n); + lemma_add_commute bd (add (mul (mod bc n) n) (mul (mod ad n) n)); + assert (add (add ac (div bc n)) (div ad n) == of_vec128 hi_m); //OBSERVE + assert (add (add (mul (mod bc n) n) (mul (mod ad n) n)) bd == of_vec128 lo_m); //OBSERVE + lemma_split_define ab 64; + lemma_split_define cd 64; + lemma_gf128_mul a b c d 64; + () + +inline_for_extraction +let clmul_wide4 + (x1:vec128) (x2:vec128) (x3:vec128) (x4:vec128) + (y1:vec128) (y2:vec128) (y3:vec128) (y4:vec128): Tot (vec128 & vec128) = + + let lo1 = vec_clmul_lo_lo x1 y1 in + let lo2 = vec_clmul_lo_lo x2 y2 in + let lo3 = vec_clmul_lo_lo x3 y3 in + let lo4 = vec_clmul_lo_lo x4 y4 in + let lo = cl_add lo1 lo2 in + let lo = cl_add lo lo3 in + let lo = cl_add lo lo4 in + + let m1 = vec_clmul_hi_lo x1 y1 in + let m2 = vec_clmul_hi_lo x2 y2 in + let m3 = vec_clmul_hi_lo x3 y3 in + let m4 = vec_clmul_hi_lo x4 y4 in + let m = cl_add m1 m2 in + let m = cl_add m m3 in + let m = cl_add m m4 in + + let m1 = vec_clmul_lo_hi x1 y1 in + let m2 = vec_clmul_lo_hi x2 y2 in + let m3 = vec_clmul_lo_hi x3 y3 in + let m4 = vec_clmul_lo_hi x4 y4 in + let m = cl_add m m1 in + let m = cl_add m m2 in + let m = cl_add m m3 in + let m = cl_add m m4 in + + let hi1 = vec_clmul_hi_hi x1 y1 in + let hi2 = vec_clmul_hi_hi x2 y2 in + let hi3 = vec_clmul_hi_hi x3 y3 in + let hi4 = vec_clmul_hi_hi x4 y4 in + let hi = cl_add hi1 hi2 in + let hi = cl_add hi hi3 in + let hi = cl_add hi hi4 in + + let m1 = vec_shift_left m 64ul in + let m2 = vec_shift_right m 64ul in + let lo = cl_add lo m1 in + let hi = cl_add hi m2 in + (hi, lo) + +val lemma_clmul_wide4 + (x1:vec128) (x2:vec128) (x3:vec128) (x4:vec128) + (y1:vec128) (y2:vec128) (y3:vec128) (y4:vec128) : Lemma + (ensures ( + let (hi, lo) = clmul_wide4 x1 x2 x3 x4 y1 y2 y3 y4 in + add (add (add + (mul (of_vec128 x1) (of_vec128 y1)) + (mul (of_vec128 x2) (of_vec128 y2))) + (mul (of_vec128 x3) (of_vec128 y3))) + (mul (of_vec128 x4) (of_vec128 y4)) == + add (shift (of_vec128 hi) 128) (of_vec128 lo) + )) + +let lemma_clmul_wide4 x1 x2 x3 x4 y1 y2 y3 y4 = + (* vec128 variables *) + let lo1 = vec_clmul_lo_lo x1 y1 in + let lo2 = vec_clmul_lo_lo x2 y2 in + let lo3 = vec_clmul_lo_lo x3 y3 in + let lo4 = vec_clmul_lo_lo x4 y4 in + let lo_12 = cl_add lo1 lo2 in + let lo_13 = cl_add lo_12 lo3 in + let lo_14 = cl_add lo_13 lo4 in + + let m1 = vec_clmul_hi_lo x1 y1 in + let m2 = vec_clmul_hi_lo x2 y2 in + let m3 = vec_clmul_hi_lo x3 y3 in + let m4 = vec_clmul_hi_lo x4 y4 in + let m_12 = cl_add m1 m2 in + let m_13 = cl_add m_12 m3 in + let m_14 = cl_add m_13 m4 in + + let m1' = vec_clmul_lo_hi x1 y1 in + let m2' = vec_clmul_lo_hi x2 y2 in + let m3' = vec_clmul_lo_hi x3 y3 in + let m4' = vec_clmul_lo_hi x4 y4 in + let m' = cl_add m_14 m1' in + let m_12' = cl_add m' m2' in + let m_13' = cl_add m_12' m3' in + let m_14' = cl_add m_13' m4' in + + let hi1 = vec_clmul_hi_hi x1 y1 in + let hi2 = vec_clmul_hi_hi x2 y2 in + let hi3 = vec_clmul_hi_hi x3 y3 in + let hi4 = vec_clmul_hi_hi x4 y4 in + let hi_12 = cl_add hi1 hi2 in + let hi_13 = cl_add hi_12 hi3 in + let hi_14 = cl_add hi_13 hi4 in + + let m_14_0 = vec_shift_left m_14' 64ul in + let m_14_1 = vec_shift_right m_14' 64ul in + let lo_m = cl_add lo_14 m_14_0 in + let hi_m = cl_add hi_14 m_14_1 in + (* poly variables *) + let n = monomial 64 in + let ab0 = of_vec128 x1 in + let cd0 = of_vec128 y1 in + let a0 = div ab0 n in + let b0 = mod ab0 n in + let c0 = 
div cd0 n in + let d0 = mod cd0 n in + let ac0 = mul a0 c0 in + let ad0 = mul a0 d0 in + let bc0 = mul b0 c0 in + let bd0 = mul b0 d0 in + + let ab1 = of_vec128 x2 in + let cd1 = of_vec128 y2 in + let a1 = div ab1 n in + let b1 = mod ab1 n in + let c1 = div cd1 n in + let d1 = mod cd1 n in + let ac1 = mul a1 c1 in + let ad1 = mul a1 d1 in + let bc1 = mul b1 c1 in + let bd1 = mul b1 d1 in + + let ab2 = of_vec128 x3 in + let cd2 = of_vec128 y3 in + let a2 = div ab2 n in + let b2 = mod ab2 n in + let c2 = div cd2 n in + let d2 = mod cd2 n in + let ac2 = mul a2 c2 in + let ad2 = mul a2 d2 in + let bc2 = mul b2 c2 in + let bd2 = mul b2 d2 in + + let ab3 = of_vec128 x4 in + let cd3 = of_vec128 y4 in + let a3 = div ab3 n in + let b3 = mod ab3 n in + let c3 = div cd3 n in + let d3 = mod cd3 n in + let ac3 = mul a3 c3 in + let ad3 = mul a3 d3 in + let bc3 = mul b3 c3 in + let bd3 = mul b3 d3 in + (* vec128 lemmas *) + vec_clmul_lo_lo_lemma x1 y1; + vec_clmul_lo_lo_lemma x2 y2; + vec_clmul_lo_lo_lemma x3 y3; + vec_clmul_lo_lo_lemma x4 y4; + vec_clmul_hi_lo_lemma x1 y1; + vec_clmul_hi_lo_lemma x2 y2; + vec_clmul_hi_lo_lemma x3 y3; + vec_clmul_hi_lo_lemma x4 y4; + vec_clmul_lo_hi_lemma x1 y1; + vec_clmul_lo_hi_lemma x2 y2; + vec_clmul_lo_hi_lemma x3 y3; + vec_clmul_lo_hi_lemma x4 y4; + vec_clmul_hi_hi_lemma x1 y1; + vec_clmul_hi_hi_lemma x2 y2; + vec_clmul_hi_hi_lemma x3 y3; + vec_clmul_hi_hi_lemma x4 y4; + lemma_cl_add lo1 lo2; + lemma_cl_add lo_12 lo3; + lemma_cl_add lo_13 lo4; + lemma_cl_add m1 m2; + lemma_cl_add m_12 m3; + lemma_cl_add m_13 m4; + lemma_cl_add m_14 m1'; + lemma_cl_add m' m2'; + lemma_cl_add m_12' m3'; + lemma_cl_add m_13' m4'; + lemma_cl_add hi1 hi2; + lemma_cl_add hi_12 hi3; + lemma_cl_add hi_13 hi4; + lemma_vec128_double_shift (of_vec128 m_14'); + lemma_cl_add lo_14 m_14_0; + lemma_cl_add hi_14 m_14_1; + (* poly lemmas *) + lemma_shift_is_div ab0 64; + lemma_shift_is_div ab1 64; + lemma_shift_is_div ab2 64; + lemma_shift_is_div ab3 64; + lemma_shift_is_div cd0 64; + lemma_shift_is_div cd1 64; + lemma_shift_is_div cd2 64; + lemma_shift_is_div cd3 64; + let bc_p = add (add (add bc0 bc1) bc2) bc3 in + let ad_p = add (add (add ad0 ad1) ad2) ad3 in + let m_p = add (add (add (add ad_p bc0) bc1) bc2) bc3 in + let hi_p = add (add (add ac0 ac1) ac2) ac3 in + let lo_p = add (add (add bd0 bd1) bd2) bd3 in + lemma_add_commute lo_p (mul (mod m_p n) n); + assert (add hi_p (div m_p n) == of_vec128 hi_m); + assert (add (mul (mod m_p n) n) lo_p == of_vec128 lo_m); + lemma_add_helper_m bc0 bc1 bc2 bc3 ad_p; + assert (m_p == add bc_p ad_p); + Vale.AES.GHash_BE.lemma_div_distribute bc_p ad_p n; + lemma_add_associate (add (add (add ac0 ac1) ac2) ac3) (div bc_p n) (div ad_p n); + lemma_mod_distribute bc_p ad_p n; + lemma_mul_distribute_left (mod bc_p n) (mod ad_p n) n; + lemma_split_define ab0 64; + lemma_split_define ab1 64; + lemma_split_define ab2 64; + lemma_split_define ab3 64; + lemma_split_define cd0 64; + lemma_split_define cd1 64; + lemma_split_define cd2 64; + lemma_split_define cd3 64; + lemma_gf128_mul_4 a0 b0 c0 d0 a1 b1 c1 d1 a2 b2 c2 d2 a3 b3 c3 d3; + () + + +// inline_for_extraction +// let vec128_shift_left_bits (x:vec128) (y:size_t): Tot vec128 = +// if (y %. size 8 =. size 0) then +// vec128_shift_left x y +// else if (y <. size 64) then +// let x1 = vec128_shift_right64 x (size 64 -. y) in +// let x2 = vec128_shift_left64 x y in +// let x3 = vec128_shift_left x1 (size 64) in +// let x4 = vec128_or x3 x2 in +// x4 +// else +// let x1 = vec128_shift_left64 x (y -. 
size 64) in +// let x2 = vec128_shift_left x1 (size 64) in +// x2 + + +// inline_for_extraction +// let vec128_shift_right_bits (x:vec128) (y:size_t) : Tot vec128 = +// if (y %. size 8 =. size 0) then +// vec128_shift_right x y +// else if (y <. size 64) then +// let x1 = vec128_shift_left64 x (size 64 -. y) in +// let x2 = vec128_shift_right64 x y in +// let x3 = vec128_shift_right x1 (size 64) in +// let x4 = vec128_or x3 x2 in +// x4 +// else +// let x1 = vec128_shift_right64 x (y -. size 64) in +// let x2 = vec128_shift_right x1 (size 64) in +// x2 + + +inline_for_extraction +let gf128_reduce (hi:vec128) (lo:vec128) : Tot vec128 = + (* LEFT SHIFT [hi:lo] by 1 *) + let lo1 = cast U128 1 (vec_shift_right (cast U64 2 lo) 63ul) in + let lo2 = vec_shift_left lo1 64ul in + let lo3 = cast U128 1 (vec_shift_left (cast U64 2 lo) 1ul) in + let lo3 = vec_xor lo3 lo2 in + + let hi1 = cast U128 1 (vec_shift_right (cast U64 2 hi) 63ul) in + let hi1 = vec_shift_left hi1 64ul in + let hi2 = cast U128 1 (vec_shift_left (cast U64 2 hi) 1ul) in + let hi2 = vec_xor hi2 hi1 in + + let lo1 = cast U128 1 (vec_shift_right (cast U64 2 lo) 63ul) in + let lo1 = vec_shift_right lo1 64ul in + let hi2 = vec_xor hi2 lo1 in + + let lo = lo3 in + let hi = hi2 in +(* + let lo1 = vec128_shift_right_bits lo (size 127) in + let lo = vec128_shift_left_bits lo (size 1) in + let hi = vec128_shift_left_bits hi (size 1) in + let hi = vec128_xor hi lo1 in +*) + (* LEFT SHIFT [x0:0] BY 63,62,57 and xor with [x1:x0] *) + let lo1 = cast U128 1 (vec_shift_left (cast U64 2 lo) 63ul) in + let lo2 = cast U128 1 (vec_shift_left (cast U64 2 lo) 62ul) in + let lo3 = cast U128 1 (vec_shift_left (cast U64 2 lo) 57ul) in + let lo1 = vec_xor lo1 lo2 in + let lo1 = vec_xor lo1 lo3 in + let lo2 = vec_shift_right lo1 64ul in + let lo3 = vec_shift_left lo1 64ul in + let lo = vec_xor lo lo3 in + let lo' = lo2 in + + (* RIGHT SHIFT [x1:x0] BY 1,2,7 and xor with [x1:x0] *) + let lo1 = cast U128 1 (vec_shift_right (cast U64 2 lo) 1ul) in + let lo2 = cast U128 1 (vec_shift_right (cast U64 2 lo) 2ul) in + let lo3 = cast U128 1 (vec_shift_right (cast U64 2 lo) 7ul) in + let lo1 = vec_xor lo1 lo2 in + let lo1 = vec_xor lo1 lo3 in + + let lo1 = vec_xor lo1 lo' in + let lo = vec_xor lo lo1 in + + let lo = vec_xor lo hi in + lo + +val lemma_gf128_reduce (h:vec128) (l:vec128) : Lemma + (requires degree (of_vec128 h) <= 126 /\ degree (of_vec128 l) <= 127) + (ensures ( + let mm = monomial 128 in + let g = add mm gf128_low in + let a = shift (add (mul (of_vec128 h) mm) (of_vec128 l)) 1 in + let x = reverse a 255 in + gf128_reduce h l == to_vec128 (reverse (mod x g) 127) + )) + +let lemma_gf128_reduce h l = + let n = monomial 64 in + let nn = monomial 128 in + let g = reverse gf128_low_shift 63 in + let lo = of_vec128 l in + let hi = of_vec128 h in + let l0_r63 = shift (mod lo n) (-63) in + let l1_r63 = shift (div lo n) (-63) in + let l0_l1 = mod (shift (mod lo n) 1) n in + let l1_l1 = mod (shift (div lo n) 1) n in + let h0_r63 = shift (mod hi n) (-63) in + let h1_r63 = shift (div hi n) (-63) in + let h0_l1 = mod (shift (mod hi n) 1) n in + let h1_l1 = mod (shift (div hi n) 1) n in + + lemma_vec128_shift_right_64 l 63ul; + let lo1 = add (shift l1_r63 64) l0_r63 in + lemma_vec128_double_shift lo1; + lemma_shift_is_mul (mod lo1 n) 64; + lemma_shift_left_cancel lo1 l0_r63 l1_r63; + let lo2 = shift l0_r63 64 in + lemma_vec128_shift_left_64 l 1ul; + let lo3 = add (shift l1_l1 64) l0_l1 in + lemma_cl_add (to_vec128 lo3) (to_vec128 lo2); + lemma_add_left_shift l0_l1 
l1_l1 l0_r63; + let lo3 = add (shift (add l1_l1 l0_r63) 64) l0_l1 in + + lemma_vec128_shift_right_64 h 63ul; + let hi1 = add (shift h1_r63 64) h0_r63 in + lemma_vec128_double_shift hi1; + lemma_shift_is_mul (mod hi1 n) 64; + lemma_shift_left_cancel hi1 h0_r63 h1_r63; + let hi1 = shift h0_r63 64 in + lemma_vec128_shift_left_64 h 1ul; + let hi2 = add (shift h1_l1 64) h0_l1 in + lemma_cl_add (to_vec128 hi2) (to_vec128 hi1); + lemma_add_left_shift h0_l1 h1_l1 h0_r63; + let hi2 = add (shift (add h1_l1 h0_r63) 64) h0_l1 in + + lemma_shift_is_div lo1 64; + lemma_shift_right_cancel lo1 l0_r63 l1_r63; + let lo1 = l1_r63 in + lemma_cl_add (to_vec128 hi2) (to_vec128 lo1); + lemma_add_associate (shift (add h1_l1 h0_r63) 64) h0_l1 l1_r63; + let hi2 = add (shift (add h1_l1 h0_r63) 64) (add h0_l1 l1_r63) in + + let lo = lo3 in + let hi = hi2 in + + lemma_mul_x (of_vec128 h) (of_vec128 l); + assert (shift (add (mul (of_vec128 h) nn) (of_vec128 l)) 1 == add (mul hi nn) lo); //OBSERVE + + let l0_l63 = mod (shift (mod lo n) 63) n in + let l1_l63 = mod (shift (div lo n) 63) n in + let l0_l62 = mod (shift (mod lo n) 62) n in + let l1_l62 = mod (shift (div lo n) 62) n in + let l0_l57 = mod (shift (mod lo n) 57) n in + let l1_l57 = mod (shift (div lo n) 57) n in + lemma_vec128_shift_left_64 (to_vec128 lo) 63ul; + lemma_vec128_shift_left_64 (to_vec128 lo) 62ul; + lemma_vec128_shift_left_64 (to_vec128 lo) 57ul; + let lo1 = add (shift l1_l63 64) l0_l63 in + let lo2 = add (shift l1_l62 64) l0_l62 in + let lo3 = add (shift l1_l57 64) l0_l57 in + lemma_cl_add (to_vec128 lo1) (to_vec128 lo2); + lemma_add_left_shift_double l0_l63 l1_l63 l0_l62 l1_l62; + let lo1 = add (shift (add l1_l63 l1_l62) 64) (add l0_l63 l0_l62) in + lemma_cl_add (to_vec128 lo1) (to_vec128 lo3); + lemma_add_left_shift_double (add l0_l63 l0_l62) (add l1_l63 l1_l62) l0_l57 l1_l57; + lemma_mul_h_shift_right_mod (mod lo n) g; + lemma_mul_h_shift_right_mod (div lo n) g; + let lo1 = add (shift (mod (mul (div lo n) g) n) 64) + (mod (mul (mod lo n) g) n) in + lemma_vec128_double_shift lo1; + lemma_shift_is_div lo1 64; + lemma_shift_right_cancel lo1 (mod (mul (mod lo n) g) n) + (mod (mul (div lo n) g) n); + let lo2 = mod (mul (div lo n) g) n in + lemma_shift_is_mul (mod lo1 n) 64; + lemma_shift_left_cancel lo1 (mod (mul (mod lo n) g) n) + (mod (mul (div lo n) g) n); + let lo3 = shift (mod (mul (mod lo n) g) n) 64 in + lemma_div_mod lo n; + lemma_shift_is_mul (div lo n) 64; + lemma_cl_add (to_vec128 lo) (to_vec128 lo3); + lemma_add_left_shift (mod lo n) (div lo n) (mod (mul (mod lo n) g) n); + + let l_o = lo in + let lo = add (shift (add (div lo n) (mod (mul (mod lo n) g) n)) 64) + (mod lo n) in + let lo' = lo2 in + + let l0_r1 = shift (mod lo n) (-1) in + let l1_r1 = shift (div lo n) (-1) in + let l0_r2 = shift (mod lo n) (-2) in + let l1_r2 = shift (div lo n) (-2) in + let l0_r7 = shift (mod lo n) (-7) in + let l1_r7 = shift (div lo n) (-7) in + lemma_vec128_shift_right_64 (to_vec128 lo) 1ul; + lemma_vec128_shift_right_64 (to_vec128 lo) 2ul; + lemma_vec128_shift_right_64 (to_vec128 lo) 7ul; + let lo1 = add (shift l1_r1 64) l0_r1 in + let lo2 = add (shift l1_r2 64) l0_r2 in + let lo3 = add (shift l1_r7 64) l0_r7 in + lemma_cl_add (to_vec128 lo1) (to_vec128 lo2); + lemma_add_left_shift_double l0_r1 l1_r1 l0_r2 l1_r2; + let lo1 = add (shift (add l1_r1 l1_r2) 64) (add l0_r1 l0_r2) in + lemma_cl_add (to_vec128 lo1) (to_vec128 lo3); + lemma_add_left_shift_double (add l0_r1 l0_r2) (add l1_r1 l1_r2) l0_r7 l1_r7; + lemma_mul_h_shift_left (mod lo n) g; + 
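// by lemma_mul_h_shift_left, shift a (-1) +. shift a (-2) +. shift a (-7) equals (a *. g) /. monomial 64; the rewrite is applied to each 64-bit half of lo +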
lemma_mul_h_shift_left (div lo n) g; + let lo1 = add (shift (div (mul (div lo n) g) n) 64) + (div (mul (mod lo n) g) n) in + + lemma_cl_add (to_vec128 lo1) (to_vec128 lo'); + lemma_add_associate (shift (div (mul (div lo n) g) n) 64) + (div (mul (mod lo n) g) n) (mod (mul (div l_o n) g) n); + let lo1 = add (shift (div (mul (div lo n) g) n) 64) + (add (div (mul (mod lo n) g) n) (mod (mul (div l_o n) g) n)) in + lemma_div_mod lo n; + lemma_shift_is_mul (div lo n) 64; + lemma_cl_add (to_vec128 lo) (to_vec128 lo1); + lemma_add_left_shift_double (mod lo n) (div lo n) + (add (div (mul (mod lo n) g) n) (mod (mul (div l_o n) g) n)) + (div (mul (div lo n) g) n); + lemma_shift_is_mul (add (div l_o n) (mod (mul (mod l_o n) g) n)) 64; + lemma_div_mod_unique lo n (add (div l_o n) (mod (mul (mod l_o n) g) n)) (mod l_o n); + let lo = add (shift (add (add (div l_o n) (mod (mul (mod l_o n) g) n)) + (div (mul (add (div l_o n) (mod (mul (mod l_o n) g) n)) g) n)) 64) + (add (mod l_o n) + (add (div (mul (mod l_o n) g) n) (mod (mul (div l_o n) g) n))) in + + lemma_reduce_helper l_o g; + let y_10c = add (swap l_o 64) (mul (mask l_o 64) g) in + assert (lo == add (swap y_10c 64) (mul (mask y_10c 64) g)); //OBSERVE + + lemma_add_commute (mul hi nn) l_o; + lemma_shift_is_mul hi 128; + lemma_cl_add (to_vec128 lo) (to_vec128 hi); + + lemma_gf128_degree (); + lemma_reduce_rev_helper l_o hi gf128_low 64; + let lo = add (add (swap y_10c 64) (mul (mask y_10c 64) g)) hi in + let a = shift (add (mul (of_vec128 h) nn) (of_vec128 l)) 1 in + let x = reverse a 255 in + assert (lo == reverse (mod x (add nn gf128_low)) 127); //OBSERVE + () + +val gf128_clmul_wide_reduce_lemma: x:vec128 -> y:vec128 -> Lemma + (let (hi, lo) = clmul_wide x y in + to_elem (gf128_reduce hi lo) == GF.fmul_be #S.gf128 (to_elem x) (to_elem y)) +let gf128_clmul_wide_reduce_lemma x y = admit() + + +val gf128_clmul_wide4_reduce_lemma: + x1:vec128 -> x2:vec128 -> x3:vec128 -> x4:vec128 + -> y1:vec128 -> y2:vec128 -> y3:vec128 -> y4:vec128 -> Lemma + (let (hi, lo) = clmul_wide4 x1 x2 x3 x4 y1 y2 y3 y4 in + to_elem (gf128_reduce hi lo) == Vec.normalize4 (to_elem4 y1 y2 y3 y4) (to_elem4 x1 x2 x3 x4)) +let gf128_clmul_wide4_reduce_lemma x1 x2 x3 x4 y1 y2 y3 y4 = admit() diff --git a/code/gf128/Makefile b/code/gf128/Makefile new file mode 100644 index 0000000000..610778082b --- /dev/null +++ b/code/gf128/Makefile @@ -0,0 +1,44 @@ +HACL_HOME=$(realpath ../..) + +# CUSTOMIZE HERE: determine what is the main target of this Makefile, e.g. a C +# test, a Low* test, or just a binary archive (like libcurve.a). +all: \ + dist/gf128-precomp-test.exe \ + dist/gf128-ni-test.exe + +test: all + dist/gf128-ni-test.exe + dist/gf128-precomp-test.exe + +# Defines rules for producing .checked, .krml, .depend, etc. +include ../../Makefile.local + +CFLAGS += -I$(HACL_HOME)/lib/c -mavx -mavx2 -mpclmul +export CFLAGS + +# CUSTOMIZE HERE: how to produce binary objects +# An archive with all the compiled code in this directory. +dist/libgf128.a: dist/Makefile.basic + $(MAKE) -C dist -f Makefile.basic + +GF128_PRECOMP_BUNDLE=-bundle Hacl.Gf128.PreComp=Hacl.Impl.Gf128.FieldPreComp +GF128_NI_BUNDLE=-bundle Hacl.Gf128.NI=Hacl.Impl.Gf128.FieldNI + +# Note: POLY_BUNDLE is found in Makefile.common -- shared definition. 
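+# Roughly, each -bundle Api=Impl flag above groups the field implementation with its public interface into a single generated C unit in dist/ (see the $(KRML) invocation below), so only the interface entry points stay visible.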
+dist/Makefile.basic: $(filter-out %/prims.krml,$(ALL_KRML_FILES)) + $(KRML) $^ -o libgf128.a $(BASE_FLAGS) $(GF128_PRECOMP_BUNDLE) $(GF128_NI_BUNDLE) \ + -tmpdir dist \ + -ccopts -std=gnu11,-g,-O3 \ + -add-include '"libintvector.h"' \ + -add-include '' \ + -skip-compilation + +dist/gf128-precomp-test.exe: $(HACL_HOME)/tests/gf128-precomp-test.o dist/libgf128.a + +dist/gf128-ni-test.exe: $(HACL_HOME)/tests/gf128-ni-test.o dist/libgf128.a + +%.exe: + $(CC) $(CFLAGS) $^ -o $@ + +clean-c: + $(MAKE) -C dist/ -f Makefile.basic clean diff --git a/lib/Lib.IntVector.fst b/lib/Lib.IntVector.fst index c9b1a87f72..989eba9b05 100644 --- a/lib/Lib.IntVector.fst +++ b/lib/Lib.IntVector.fst @@ -22,8 +22,16 @@ let vec_t t w = let reveal_vec_1 _ = () +let vec_t_v #t #w x = admit() + let vec_v #t #w x = admit() +let lemma_of_vec_t #t #w f = admit() + +let lemma_of_vec_v_t #t #w f = admit() + +let lemma_create_index_vec_w1 #t f = admit() + let vecv_extensionality #t #w f1 f2 = admit() let reveal_vec_v_1 #t f = admit() @@ -324,70 +332,8 @@ let vec_interleave_high_n_lemma_uint64_4_2 v1 v2 = admit() let vec_shift_right_uint128_small2 v1 s = admit() -(* Generic Permutations: Possible on Intel, but not on ARM. - So we comment this out and only leave interleaving and rotate_lanes functions in the API *) -(* -inline_for_extraction -val vec_permute2: #t:v_inttype -> v1:vec_t t 2 - -> i1:vec_index 2 -> i2:vec_index 2 -> - vec_t t 2 - -inline_for_extraction -val vec_permute2_lemma: #t:v_inttype -> v1:vec_t t 2 - -> i1:vec_index 2 -> i2:vec_index 2 -> - Lemma (ensures (vec_v (vec_permute2 v1 i1 i2) == create2 (vec_v v1).[v i1] (vec_v v1).[v i2])) - [SMTPat (vec_v (vec_permute2 v1 i1 i2))] - - -inline_for_extraction -val vec_permute4: #t:v_inttype -> v1:vec_t t 4 - -> i1:vec_index 4 -> i2:vec_index 4 -> i3:vec_index 4 -> i4:vec_index 4 -> - vec_t t 4 - -inline_for_extraction -val vec_permute4_lemma: #t:v_inttype -> v1:vec_t t 4 - -> i1:vec_index 4 -> i2:vec_index 4 -> i3:vec_index 4 -> i4:vec_index 4 -> - Lemma (ensures (vec_v (vec_permute4 v1 i1 i2 i3 i4) == create4 (vec_v v1).[v i1] (vec_v v1).[v i2] (vec_v v1).[v i3] (vec_v v1).[v i4])) - [SMTPat (vec_v (vec_permute4 v1 i1 i2 i3 i4))] - -inline_for_extraction -val vec_permute8: #t:v_inttype -> v1:vec_t t 8 - -> i1:vec_index 8 -> i2:vec_index 8 -> i3:vec_index 8 -> i4:vec_index 8 - -> i5:vec_index 8 -> i6:vec_index 8 -> i7:vec_index 8 -> i8:vec_index 8 -> - v2:vec_t t 8{vec_v v2 == create8 (vec_v v1).[v i1] (vec_v v1).[v i2] (vec_v v1).[v i3] (vec_v v1).[v i4] - (vec_v v1).[v i5] (vec_v v1).[v i6] (vec_v v1).[v i7] (vec_v v1).[v i8]} - -inline_for_extraction -val vec_permute16: #t:v_inttype -> v1:vec_t t 16 - -> i1:vec_index 16 -> i2:vec_index 16 -> i3:vec_index 16 -> i4:vec_index 16 - -> i5:vec_index 16 -> i6:vec_index 16 -> i7:vec_index 16 -> i8:vec_index 16 - -> i9:vec_index 16 -> i10:vec_index 16 -> i11:vec_index 16 -> i12:vec_index 16 - -> i13:vec_index 16 -> i14:vec_index 16 -> i15:vec_index 16 -> i16:vec_index 16 -> - v2:vec_t t 16{let vv1 = vec_v v1 in - vec_v v2 == create16 vv1.[v i1] vv1.[v i2] vv1.[v i3] vv1.[v i4] - vv1.[v i5] vv1.[v i6] vv1.[v i7] vv1.[v i8] - vv1.[v i9] vv1.[v i10] vv1.[v i11] vv1.[v i12] - vv1.[v i13] vv1.[v i14] vv1.[v i15] vv1.[v i16]} - -inline_for_extraction -val vec_permute32: #t:v_inttype -> v1:vec_t t 32 - -> i1:vec_index 16 -> i2:vec_index 16 -> i3:vec_index 16 -> i4:vec_index 16 - -> i5:vec_index 16 -> i6:vec_index 16 -> i7:vec_index 16 -> i8:vec_index 16 - -> i9:vec_index 16 -> i10:vec_index 16 -> i11:vec_index 16 -> 
i12:vec_index 16 - -> i13:vec_index 16 -> i14:vec_index 16 -> i15:vec_index 16 -> i16:vec_index 16 - -> i17:vec_index 16 -> i18:vec_index 16 -> i19:vec_index 16 -> i20:vec_index 16 - -> i21:vec_index 16 -> i22:vec_index 16 -> i23:vec_index 16 -> i24:vec_index 16 - -> i25:vec_index 16 -> i26:vec_index 16 -> i27:vec_index 16 -> i28:vec_index 16 - -> i29:vec_index 16 -> i30:vec_index 16 -> i31:vec_index 16 -> i32:vec_index 16 -> - v2:vec_t t 32{let vv1 = vec_v v1 in - vec_v v2 == create32 vv1.[v i1] vv1.[v i2] vv1.[v i3] vv1.[v i4] - vv1.[v i5] vv1.[v i6] vv1.[v i7] vv1.[v i8] - vv1.[v i9] vv1.[v i10] vv1.[v i11] vv1.[v i12] - vv1.[v i13] vv1.[v i14] vv1.[v i15] vv1.[v i16] - vv1.[v i17] vv1.[v i18] vv1.[v i19] vv1.[v i20] - vv1.[v i21] vv1.[v i22] vv1.[v i23] vv1.[v i24] - vv1.[v i25] vv1.[v i26] vv1.[v i27] vv1.[v i28] - vv1.[v i29] vv1.[v i30] vv1.[v i31] vv1.[v i32]} +let vec_cast_uint128 v1 = admit() +let vec_cast_2_uint64 v1 = admit() let vec_permute2 #t v i1 i2 = match t with @@ -406,7 +352,6 @@ let vec_permute4_lemma #t v i1 i2 i3 i4 = () let vec_permute8 #t v i1 i2 i3 i4 i5 i6 i7 i8 = admit() let vec_permute16 #t = admit() let vec_permute32 #t = admit() -*) let vec_rotate_right_lanes (#t:v_inttype) (#w:width) (x:vec_t t w) (y:rotval t) = match t,w with diff --git a/lib/Lib.IntVector.fsti b/lib/Lib.IntVector.fsti index 3d3200aaaf..37bf61130c 100644 --- a/lib/Lib.IntVector.fsti +++ b/lib/Lib.IntVector.fsti @@ -19,9 +19,25 @@ val reveal_vec_1: t:v_inttype -> Lemma (requires t <> U128) (ensures vec_t t 1 == sec_int_t t) +inline_for_extraction +val vec_t_v: #t:v_inttype -> #w:width -> vec_v_t t w -> vec_t t w + inline_for_extraction val vec_v: #t:v_inttype -> #w:width -> vec_t t w -> vec_v_t t w +val lemma_of_vec_t: #t:v_inttype -> #w:width -> f:vec_t t w -> Lemma + (ensures vec_t_v (vec_v f) == f) + [SMTPat (vec_t_v #t #w (vec_v #t #w f))] + +val lemma_of_vec_v_t: #t:v_inttype -> #w:width -> f:vec_v_t t w -> Lemma + (ensures vec_v (vec_t_v f) == f) + [SMTPat (vec_v #t #w (vec_t_v #t #w f))] + +val lemma_create_index_vec_w1: #t:v_inttype -> f:vec_t t 1 -> Lemma +  (ensures +    (let x:uint_t t SEC = mk_int (v (index (vec_v f) 0)) in +    create 1 x == vec_v f)) + val vecv_extensionality: #t:v_inttype -> #w:width -> f1:vec_t t w -> f2:vec_t t w -> Lemma (requires vec_v f1 == vec_v f2) (ensures f1 == f2) @@ -243,6 +259,77 @@ val vec_shift_right_uint128_small2: v1:vec_t U64 4 -> s:shiftval U128{uint_v s % (((vec_v v1).[2] >>. s) |. ((vec_v v1).[3] <<. (64ul -! s))) ((vec_v v1).[3] >>. s)) +val vec_cast_uint128: v1:vec_t U128 1 -> Lemma + (vec_v (cast U64 2 v1) == create2 + (to_u64 (vec_v v1).[0]) + (to_u64 ((vec_v v1).[0] >>. 64ul))) + +val vec_cast_2_uint64: v1:vec_t U64 2 -> Lemma + (vec_v (cast U128 1 v1) == create 1 + (((to_u128 (vec_v v1).[1]) <<. 64ul) +! 
(to_u128 (vec_v v1).[0]))) + +inline_for_extraction +val vec_permute2: #t:v_inttype -> v1:vec_t t 2 + -> i1:vec_index 2 -> i2:vec_index 2 -> + vec_t t 2 + +inline_for_extraction +val vec_permute2_lemma: #t:v_inttype -> v1:vec_t t 2 + -> i1:vec_index 2 -> i2:vec_index 2 -> + Lemma (ensures (vec_v (vec_permute2 v1 i1 i2) == Lib.Sequence.create2 (vec_v v1).[v i1] (vec_v v1).[v i2])) + [SMTPat (vec_v (vec_permute2 v1 i1 i2))] + + +inline_for_extraction +val vec_permute4: #t:v_inttype -> v1:vec_t t 4 + -> i1:vec_index 4 -> i2:vec_index 4 -> i3:vec_index 4 -> i4:vec_index 4 -> + vec_t t 4 + +inline_for_extraction +val vec_permute4_lemma: #t:v_inttype -> v1:vec_t t 4 + -> i1:vec_index 4 -> i2:vec_index 4 -> i3:vec_index 4 -> i4:vec_index 4 -> + Lemma (ensures (vec_v (vec_permute4 v1 i1 i2 i3 i4) == Lib.Sequence.create4 (vec_v v1).[v i1] (vec_v v1).[v i2] (vec_v v1).[v i3] (vec_v v1).[v i4])) + [SMTPat (vec_v (vec_permute4 v1 i1 i2 i3 i4))] + +inline_for_extraction +val vec_permute8: #t:v_inttype -> v1:vec_t t 8 + -> i1:vec_index 8 -> i2:vec_index 8 -> i3:vec_index 8 -> i4:vec_index 8 + -> i5:vec_index 8 -> i6:vec_index 8 -> i7:vec_index 8 -> i8:vec_index 8 -> + v2:vec_t t 8{vec_v v2 == Lib.Sequence.create8 (vec_v v1).[v i1] (vec_v v1).[v i2] (vec_v v1).[v i3] (vec_v v1).[v i4] + (vec_v v1).[v i5] (vec_v v1).[v i6] (vec_v v1).[v i7] (vec_v v1).[v i8]} + +inline_for_extraction +val vec_permute16: #t:v_inttype -> v1:vec_t t 16 + -> i1:vec_index 16 -> i2:vec_index 16 -> i3:vec_index 16 -> i4:vec_index 16 + -> i5:vec_index 16 -> i6:vec_index 16 -> i7:vec_index 16 -> i8:vec_index 16 + -> i9:vec_index 16 -> i10:vec_index 16 -> i11:vec_index 16 -> i12:vec_index 16 + -> i13:vec_index 16 -> i14:vec_index 16 -> i15:vec_index 16 -> i16:vec_index 16 -> + v2:vec_t t 16{let vv1 = vec_v v1 in + vec_v v2 == Lib.Sequence.create16 vv1.[v i1] vv1.[v i2] vv1.[v i3] vv1.[v i4] + vv1.[v i5] vv1.[v i6] vv1.[v i7] vv1.[v i8] + vv1.[v i9] vv1.[v i10] vv1.[v i11] vv1.[v i12] + vv1.[v i13] vv1.[v i14] vv1.[v i15] vv1.[v i16]} + +inline_for_extraction +val vec_permute32: #t:v_inttype -> v1:vec_t t 32 + -> i1:vec_index 16 -> i2:vec_index 16 -> i3:vec_index 16 -> i4:vec_index 16 + -> i5:vec_index 16 -> i6:vec_index 16 -> i7:vec_index 16 -> i8:vec_index 16 + -> i9:vec_index 16 -> i10:vec_index 16 -> i11:vec_index 16 -> i12:vec_index 16 + -> i13:vec_index 16 -> i14:vec_index 16 -> i15:vec_index 16 -> i16:vec_index 16 + -> i17:vec_index 16 -> i18:vec_index 16 -> i19:vec_index 16 -> i20:vec_index 16 + -> i21:vec_index 16 -> i22:vec_index 16 -> i23:vec_index 16 -> i24:vec_index 16 + -> i25:vec_index 16 -> i26:vec_index 16 -> i27:vec_index 16 -> i28:vec_index 16 + -> i29:vec_index 16 -> i30:vec_index 16 -> i31:vec_index 16 -> i32:vec_index 16 -> + v2:vec_t t 32{let vv1 = vec_v v1 in + vec_v v2 == Lib.Sequence.create32 vv1.[v i1] vv1.[v i2] vv1.[v i3] vv1.[v i4] + vv1.[v i5] vv1.[v i6] vv1.[v i7] vv1.[v i8] + vv1.[v i9] vv1.[v i10] vv1.[v i11] vv1.[v i12] + vv1.[v i13] vv1.[v i14] vv1.[v i15] vv1.[v i16] + vv1.[v i17] vv1.[v i18] vv1.[v i19] vv1.[v i20] + vv1.[v i21] vv1.[v i22] vv1.[v i23] vv1.[v i24] + vv1.[v i25] vv1.[v i26] vv1.[v i27] vv1.[v i28] + vv1.[v i29] vv1.[v i30] vv1.[v i31] vv1.[v i32]} + inline_for_extraction val vec_rotate_right_lanes: #t:v_inttype -> #w:width -> v1:vec_t t w -> s:size_t{v s <= w} -> vec_t t w diff --git a/lib/Lib.Sequence.fst b/lib/Lib.Sequence.fst index f3d3011bfa..562a81fa20 100644 --- a/lib/Lib.Sequence.fst +++ b/lib/Lib.Sequence.fst @@ -399,3 +399,21 @@ let create16 #a x0 x1 x2 x3 x4 x5 x6 x7 x8 
x9 x10 x11 x12 x13 x14 x15 = let create16_lemma #a x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 = Seq.elim_of_list [x0; x1; x2; x3; x4; x5; x6; x7; x8; x9; x10; x11; x12; x13; x14; x15] + +let create32 #a + x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 + x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 = + let l = [ + x0; x1; x2; x3; x4; x5; x6; x7; x8; x9; x10; x11; x12; x13; x14; x15; + x16; x17; x18; x19; x20; x21; x22; x23; x24; x25; x26; x27; x28; x29; x30; x31 + ] in + assert_norm (List.Tot.length l = 32); + createL l + +let create32_lemma #a +x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 +x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 = + Seq.elim_of_list [ + x0; x1; x2; x3; x4; x5; x6; x7; x8; x9; x10; x11; x12; x13; x14; x15; + x16; x17; x18; x19; x20; x21; x22; x23; x24; x25; x26; x27; x28; x29; x30; x31 + ] diff --git a/lib/Lib.Sequence.fsti b/lib/Lib.Sequence.fsti index 92b324db28..620b7cd658 100644 --- a/lib/Lib.Sequence.fsti +++ b/lib/Lib.Sequence.fsti @@ -589,3 +589,25 @@ val create16_lemma: #a:Type s.[8] == x8 /\ s.[9] == x9 /\ s.[10] == x10 /\ s.[11] == x11 /\ s.[12] == x12 /\ s.[13] == x13 /\ s.[14] == x14 /\ s.[15] == x15) [SMTPat (create16 #a x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15)] + +val create32: #a:Type + -> x0:a -> x1:a -> x2:a -> x3:a -> x4:a -> x5:a -> x6:a -> x7:a + -> x8:a -> x9:a -> x10:a -> x11:a -> x12:a -> x13:a -> x14:a -> x15:a + -> x16:a -> x17:a -> x18:a -> x19:a -> x20:a -> x21:a -> x22:a -> x23:a + -> x24:a -> x25:a -> x26:a -> x27:a -> x28:a -> x29:a -> x30:a -> x31:a -> lseq a 32 + +val create32_lemma: #a:Type + -> x0:a -> x1:a -> x2:a -> x3:a -> x4:a -> x5:a -> x6:a -> x7:a + -> x8:a -> x9:a -> x10:a -> x11:a -> x12:a -> x13:a -> x14:a -> x15:a + -> x16:a -> x17:a -> x18:a -> x19:a -> x20:a -> x21:a -> x22:a -> x23:a + -> x24:a -> x25:a -> x26:a -> x27:a -> x28:a -> x29:a -> x30:a -> x31:a -> + Lemma (let s = create32 x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 in + s.[0] == x0 /\ s.[1] == x1 /\ s.[2] == x2 /\ s.[3] == x3 /\ + s.[4] == x4 /\ s.[5] == x5 /\ s.[6] == x6 /\ s.[7] == x7 /\ + s.[8] == x8 /\ s.[9] == x9 /\ s.[10] == x10 /\ s.[11] == x11 /\ + s.[12] == x12 /\ s.[13] == x13 /\ s.[14] == x14 /\ s.[15] == x15 /\ + s.[16] == x16 /\ s.[17] == x17 /\ s.[18] == x18 /\ s.[19] == x19 /\ + s.[20] == x20 /\ s.[21] == x21 /\ s.[22] == x22 /\ s.[23] == x23 /\ + s.[24] == x24 /\ s.[25] == x25 /\ s.[26] == x26 /\ s.[27] == x27 /\ + s.[28] == x28 /\ s.[29] == x29 /\ s.[30] == x30 /\ s.[31] == x31) + [SMTPat (create32 #a x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31)] diff --git a/specs/Spec.GF128.fst b/specs/Spec.GF128.fst new file mode 100644 index 0000000000..9110739783 --- /dev/null +++ b/specs/Spec.GF128.fst @@ -0,0 +1,68 @@ +module Spec.GF128 + +open FStar.Mul +open Lib.IntTypes +open Lib.Sequence +open Lib.ByteSequence + +open Spec.GaloisField + +//GF128 Field: to load bits in little-endian order set the parameters as follows +//let irred = u128 0x87 +//let load_elem (b:lbytes 16) : elem = load_felem_le #gf128 b +//let store_elem (e:elem) : lbytes 16 = store_felem_le #gf128 e +//and use `fmul` instead of `fmul_be` + +let irred = mk_int #U128 #SEC 0xE1000000000000000000000000000000 +let gf128 = gf U128 irred +let elem = felem gf128 +let load_elem (b:lbytes 16) : elem = load_felem_be #gf128 b +let store_elem (e:elem) : lbytes 16 = 
store_felem_be #gf128 e + +(* GCM types and specs *) +let size_block : size_nat = 16 +let size_key : size_nat = 16 + +type block = lbytes size_block +type tag = lbytes size_block +type key = lbytes size_key + +/// Specification + +let encode (w:lbytes size_block) : Tot elem = load_elem w + +let encode_last (len:size_nat{len < size_block}) (w:lbytes len) : Tot elem = + let b = create size_block (u8 0) in + let b = update_slice b 0 len w in + load_elem b + +let decode (e:elem) : Tot block = store_elem e + +let gf128_init (h:lbytes size_block) : Tot (elem & elem) = + let r = load_elem h in + zero, r + +let gf128_update1 (r:elem) (b:lbytes size_block) (acc:elem) : Tot elem = + (acc `fadd` encode b) `fmul_be` r + +let gf128_finish (s:key) (acc:elem) : Tot tag = + decode (acc `fadd` load_elem s) + +let gf128_update_last (r:elem) (l:size_nat{l < size_block}) (b:lbytes l) (acc:elem) = + if l = 0 then acc else (acc `fadd` encode_last l b) `fmul_be` r + +let gf128_update (text:bytes) (acc:elem) (r:elem) : Tot elem = + repeat_blocks #uint8 #elem size_block text + (gf128_update1 r) + (gf128_update_last r) + acc + +let gmul (text:bytes) (h:lbytes size_block) : Tot tag = + let acc, r = gf128_init h in + let acc = gf128_update text acc r in + decode acc + +let gmac (text:bytes) (h:lbytes size_block) (k:key) : Tot tag = + let acc, r = gf128_init h in + let acc = gf128_update text acc r in + gf128_finish k acc
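
Note for reviewers: Spec.GF128.fst above fixes the GCM convention used throughout the patch: blocks are loaded big-endian (load_felem_be), multiplied with fmul_be, and a trailing partial block is zero-padded before absorption. The Python sketch below is a minimal executable model of that convention, convenient for differential testing against the extracted C code. It is illustrative only and not part of the patch; the names gf128_mul, ghash and gmac are ours, and the shift-and-add multiply is the reference algorithm from NIST SP 800-38D.

    # Illustrative Python model of Spec.GF128 (not part of this change).
    # A field element is a 128-bit int in GCM bit order: the most significant
    # bit of the big-endian byte encoding is the coefficient of x^0.

    R = 0xE1000000000000000000000000000000   # same constant as `irred` above

    def gf128_mul(x: int, y: int) -> int:
        """Shift-and-add multiply in GF(2^128), as in NIST SP 800-38D."""
        z, v = 0, x
        for i in range(128):
            if (y >> (127 - i)) & 1:          # bits of y, most significant first
                z ^= v
            v = (v >> 1) ^ R if v & 1 else v >> 1
        return z

    def ghash(text: bytes, h: bytes) -> bytes:
        """Models gmul: folds gf128_update1 over zero-padded 16-byte blocks."""
        r = int.from_bytes(h, 'big')          # gf128_init
        acc = 0
        for i in range(0, len(text), 16):
            block = text[i:i + 16].ljust(16, b'\x00')   # encode / encode_last
            acc = gf128_mul(acc ^ int.from_bytes(block, 'big'), r)
        return acc.to_bytes(16, 'big')        # decode

    def gmac(text: bytes, h: bytes, k: bytes) -> bytes:
        """Models gmac: ghash followed by gf128_finish (add the key block)."""
        acc = int.from_bytes(ghash(text, h), 'big') ^ int.from_bytes(k, 'big')
        return acc.to_bytes(16, 'big')

On an empty message this returns sixteen zero bytes, matching gmul on zero blocks, since gf128_update_last leaves the accumulator untouched when l = 0.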
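Relatedly, the clmul reduction lemmas earlier in the patch (gf128_clmul_wide_reduce_lemma and gf128_clmul_wide4_reduce_lemma) are left as admit(). The bit-reversal identity underlying them can at least be smoke-tested numerically: multiplying in GCM bit order is the same as bit-reversing both operands, performing a plain carry-less multiply, reducing modulo x^128 + x^7 + x^2 + x + 1, and reversing the result. The following randomized check reuses gf128_mul from the sketch above; rev128, clmul and polymod are again our names, not repository functions.

    # Randomized sanity check (not a proof) of the bit-reversed clmul identity
    # behind gf128_clmul_wide_reduce_lemma; reuses gf128_mul defined above.
    import random

    P = (1 << 128) | 0x87    # x^128 + x^7 + x^2 + x + 1, LSB-first representation

    def rev128(x: int) -> int:
        """Reverse the bit order of a 128-bit value."""
        return int(f'{x:0128b}'[::-1], 2)

    def clmul(a: int, b: int) -> int:
        """Carry-less multiply: integer multiplication with XOR instead of add."""
        z = 0
        while b:
            if b & 1:
                z ^= a
            a <<= 1
            b >>= 1
        return z

    def polymod(a: int, p: int) -> int:
        """Carry-less remainder of a modulo p."""
        while a.bit_length() >= p.bit_length():
            a ^= p << (a.bit_length() - p.bit_length())
        return a

    for _ in range(1000):
        x, y = random.getrandbits(128), random.getrandbits(128)
        assert gf128_mul(x, y) == rev128(polymod(clmul(rev128(x), rev128(y)), P))

This exercises only the multiply-then-reduce shape of the lemma on concrete values; it models neither clmul_wide's 64-bit limb decomposition nor the normalize4 variant, and it is no substitute for discharging the admits.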