diff --git a/proof/.gitignore b/proof/.gitignore index e54f987..4122c31 100644 --- a/proof/.gitignore +++ b/proof/.gitignore @@ -2,3 +2,7 @@ *.ec.out *.eco .ci +*~ +\#*\# +*.elc +.\#*
diff --git a/proof/Makefile b/proof/Makefile index a02c9ad..cb39e77 100644 --- a/proof/Makefile +++ b/proof/Makefile @@ -86,7 +86,7 @@ CI_ALL_FILES := $(shell find $(PROOF) -name '*.log') $(CI_ERROR_FILES) reporter: $(JLOG) "Extraction from Jasmin to EasyCrypt status" src/ *_s.ec $(CI_REMOVE_OK_LOGS) $(JLOG) "Checking EasyCrypt - extracted files status" proof/ *_s.ec.out $(CI_REMOVE_OK_LOGS) - #$(JLOG) "Checking EasyCrypt - all files status" proof/ *.ec.out $(CI_REMOVE_OK_LOGS) + $(JLOG) "Checking EasyCrypt - all files status" proof/ *.ec.out $(CI_REMOVE_OK_LOGS) logs: $(LOGS) @@ -114,7 +114,7 @@ all: $(MAKE) distclean $(MAKE) -C $(SRC) extract-to-easycrypt $(MAKE) check-extracted - #$(MAKE) check-all + $(MAKE) check-all $(MAKE) reporter $(MAKE) err
diff --git a/proof/README.md b/proof/README.md
new file mode 100644
index 0000000..3251e74
--- /dev/null
+++ b/proof/README.md
@@ -0,0 +1,63 @@
+# Formosa-25519 proof
+Correctness proof for the reference and optimised 4-limb implementations in Jasmin.
+
+Note that some lemmas are left with the tactic `admit`, which means that they are not proven here but assumed to be correct. All `admit`s have been proven in Cryptoline; including those proofs here is future work.
+
+## Overview
+### Common files
+The files in the `common/` folder provide various lemmas used throughout the correctness proofs.
+
+#### Generic files
+- **W64limbs.ec** includes lemmas that lay the foundation for implementations using limbs of unsigned 64-bit words, such as digit representation, addition and redundant limbs. No dependencies.
+- **EClib.ec** includes lemmas relating to the implementation of elliptic curve cryptography, such as packing/unpacking. Depends on W64limbs.
+- **Zp_25519** includes lemmas relating to the finite field of size 2^255 - 19, such as congruence over said field and reduction. Depends on both EClib and W64limbs.
+- **Zp_limbs** includes lemmas concerning the implementation of the 4-limb representation (called `valRep4` in these proofs; see the sketch below). Some miscellaneous lemmas concerning, for example, valid pointers are also present. Depends on the above three files.
+
+Naturally, not all the lemmas in these files are used, but they are kept because they allow the proofs to be adapted efficiently when the software (e.g. Jasmin or EasyCrypt) or the implementation changes.
+
+#### Curve25519 files
+The logic behind these files is that a specification of the various "core" operations used in X25519 is defined first (`Curve25519_Specs.ec`). From these, various other related operations and lemmas are defined in another file (`Curve25519_Operations`).
+
+From these two files, we can write an abstract specification of scalar multiplication in EasyCrypt, which will be shown to be equivalent to the implementation (`Curve25519_Procedures`). In this file, we also prove that the specification conforms to the aforementioned operations (i.e., the specification is proven to be correct as long as the defined operations are correct).
+
+Finally, `Curve25519_PHoare` contains probabilistic Hoare statements relating to the specification.
+
+From this, we have the foundation needed to prove the correctness of both the reference and optimised implementations of scalar multiplication.
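+
+For orientation, the correctness statements relate the abstract field operations to the extracted Jasmin code through the 4-limb representation mentioned above. The following is a minimal sketch of that glue, mirroring the `valRep4`/`inzpRep4` operators and the shape of a typical equivalence statement in this development; the module and procedure names (`MHop2`, `M._fe64_add_rrs`) are illustrative only and may differ from the current file layout:
+
+```
+(* a field element represented as four unsigned 64-bit limbs *)
+type Rep4 = W64.t Array4.t.
+
+(* integer value of the limbs: x.[0] + 2^64 * x.[1] + 2^128 * x.[2] + 2^192 * x.[3] *)
+op valRep4 (x : Rep4) : int = val_limbs64 (to_list x).
+
+(* its image in the field Z/(2^255 - 19) *)
+op inzpRep4 (x : Rep4) : zp = inzp (valRep4 x).
+
+(* shape of a correctness statement: the extracted 4-limb addition agrees
+   with the abstract specification under the representation map *)
+equiv eq_h4_add : MHop2.add ~ M._fe64_add_rrs:
+  f{1} = inzpRep4 f{2} /\ g{1} = inzpRep4 g{2} ==> res{1} = inzpRep4 res{2}.
+```
+
+Only the statement shape is shown here; the actual lemmas and their proofs live in the correctness proof files.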
+
+Note that the dependency chain (where x <- y indicates that y depends on x) is:
+
+```
+Curve25519_Specs <- Curve25519_Operations <- Curve25519_Procedures <- Curve25519_PHoare
+```
+
+and all of these depend on the generic files mentioned above.
+
+
+### Correctness proofs
+The correctness proofs are similar for both implementations, so unless stated otherwise, all explanations apply to both. Each lemma corresponds to a step in the X25519 implementation, and the lemmas are presented and proven in the same order. This order is:
+
+0. Arithmetic, such as addition, subtraction, multiplication and squaring.
+1. Decoding the scalar value.
+2. Decoding the `u` coordinate.
+3. Obtaining the `i`-th bit of a 4-limb representation of a 256-bit word.
+4. Conditional swapping.
+5. Adding and doubling an elliptic curve point.
+6. Montgomery ladder step.
+7. Montgomery ladder (elliptic curve point multiplication).
+8. Iterated squaring.
+9. Inverting an elliptic curve point.
+10. Encoding of the resulting elliptic curve point (includes reduction mod p).
+11. Scalar multiplication.
+
+Together, these lemmas show that all computations are correct with respect to the abstract specifications.
+
+### Note on admitted lemmas
+The following lemmas are left as `admit`, but have been proven in Cryptoline (not yet available in this repository):
+
+1. `h_add_rrs_ref4` and `h_add_rrs_mulx` (correctness of the 4-limb addition implementation).
+2. `h_sub_rrs_ref4` and `h_sub_rrs_mulx` (correctness of the 4-limb subtraction implementation).
+3. `h_mul_a24_ref4` and `h_mul_a24_mulx` (correctness of the 4-limb multiplication-by-a-constant implementation).
+4. `h_mul_rss_ref4` and `h_mul_rsr_mulx` (correctness of the 4-limb multiplication implementation).
+5. `h_mul_pp_ref4` (correctness of the 4-limb multiplication implementation).
+6. `h_sqr_rs_ref4` and `h_sqr_rr_mulx` (correctness of the 4-limb squaring implementation).
+7. `h_sqr_p_ref4` (correctness of the 4-limb squaring-with-a-constant implementation).
diff --git a/proof/crypto_scalarmult/curve25519/Curve25519_Hop1.ec b/proof/crypto_scalarmult/curve25519/Curve25519_Hop1.ec deleted file mode 100644 index 14f581f..0000000 --- a/proof/crypto_scalarmult/curve25519/Curve25519_Hop1.ec +++ /dev/null @@ -1,414 +0,0 @@ -require import Bool List Int IntExtra IntDiv CoreMap Real Zp. -from Jasmin require import JModel. -require import Curve25519_Spec. -import Zp ZModpRing. - -(** generic stuff **) - -search ZModpRing.exp. - -(* exp exp *) -lemma expE (z : zp) (e1 e2 : int) : 0 <= e1 /\ 0 <= e2 => - ZModpRing.exp (ZModpRing.exp z e1) e2 = - ZModpRing.exp z (e1*e2). -proof. -admit. (**TODO**) -qed. - -(* returns the first 2 elements of the input triple *) -op select_tuple_12 (t : ('a * 'a) * ('a * 'a) * 'c) = (t.`1, t.`2). - -(* if the third element is true then the first 2 elements are swapped *) -(* - this op returns the first 2 elements in the correct order *) -op reconstruct_tuple (t : ('a * 'a) * ('a * 'a) * bool) = - if t.`3 - then swap_tuple (select_tuple_12 t) - else select_tuple_12 t. - -lemma eq_reconstruct_select_tuple (t : (('a * 'a) * ('a * 'a) * bool)) : - t.`3 = false => - select_tuple_12 t = reconstruct_tuple t. -proof. - rewrite /reconstruct_tuple /select_tuple_12. - by move => ? /#. -qed.
- -(* similar to foldl_in_eq -- the proof is basically the same -- defined in JMemory *) -(* - foldl_in_eq states that any 2 foldl's are the same if the functions are equiv *) -(* - we will need to prove that + that the state a2 have a relational invariant r *) -lemma foldl_in_eq_r (f1 : 'a1 -> 'b -> 'a1) - (f2 : 'a2 -> 'b -> 'a2) - (s : 'b list) - (a2 : 'a2) - (r : 'a2 -> 'a1) : -(forall a2 b, b \in s => f1 (r a2) b = r (f2 a2 b)) => foldl f1 (r a2) s = r (foldl f2 a2 s). -proof. - move: s a2. elim. - by move => a2. - move => x l hrec a2 /= hin. rewrite hin. - by left. - rewrite hrec //; move => ? ? h; rewrite hin. - by right. - by trivial. -qed. - -(** step1: add_and_double = add_and_double1 : reordered to match implementation **) -op add_and_double1 (qx : zp) (nqs : (zp * zp) * (zp * zp)) = - let x1 = qx in - let (x2, z2) = nqs.`1 in - let (x3, z3) = nqs.`2 in - let t0 = x2 + (- z2) in - let x2 = x2 + z2 in - let t1 = x3 + (- z3) in - let z2 = x3 + z3 in - let z3 = x2 * t1 in - let z2 = z2 * t0 in - let t2 = x2 * x2 in - let t1 = t0 * t0 in - let x3 = z3 + z2 in - let z2 = z3 + (- z2) in - let x2 = t2 * t1 in - let t0 = t2 + (- t1) in - let z2 = z2 * z2 in - let z3 = t0 * (inzp 121665) in - let x3 = x3 * x3 in - let t2 = t2 + z3 in - let z3 = x1 * z2 in - let z2 = t0 * t2 - in ((x2,z2), (x3,z3)). - -lemma eq_add_and_double1 (qx : zp) (nqs : (zp * zp) * (zp * zp)) : - add_and_double qx nqs = add_and_double1 qx nqs. -proof. - rewrite /add_and_double /add_and_double1. - simplify => /#. -qed. - -op montgomery_ladder1(init : zp, k : W256.t) = - let nqs0 = ((Zp.one,Zp.zero),(init,Zp.one)) in - foldl (fun (nqs : (zp * zp) * (zp * zp)) ctr => - if ith_bit k ctr - then swap_tuple (add_and_double1 init (swap_tuple(nqs))) - else add_and_double1 init nqs) nqs0 (rev (iota_ 0 255)). - -(* lemma: montgomery_ladder = montgomery_ladder1 *) -lemma eq_montgomery_ladder1 (init : zp) (k : W256.t) : - montgomery_ladder init k = montgomery_ladder1 init k. -proof. - rewrite /montgomery_ladder /montgomery_ladder1 /=. - apply foldl_in_eq. - move => nqs ctr inlist => /=. - case (ith_bit k ctr). - by move => ?; rewrite /swap_tuple /#. - by move => ?; rewrite /swap_tuple /#. -qed. - -(** step 2: isolate foldl function and introduce reconstruct tuple **) -op montgomery_ladder2_step(k : W256.t, init : zp, nqs : (zp * zp) * (zp * zp), ctr : int) = - if ith_bit k ctr - then swap_tuple(add_and_double1 init (swap_tuple(nqs))) - else add_and_double1 init nqs. - -op montgomery_ladder2(init : zp, k : W256.t) = - let nqs0 = reconstruct_tuple ((Zp.one,Zp.zero),(init,Zp.one),false) in - foldl (montgomery_ladder2_step k init) nqs0 (rev (iota_ 0 255)). - -(* lemma: montgomery_ladder1 = montgomery_ladder2 *) -lemma eq_montgomery_ladder2 (init : zp) (k : W256.t) : - montgomery_ladder1 init k = montgomery_ladder2 init k. -proof. - rewrite /montgomery_ladder1 /montgomery_ladder2 /reconstruct_tuple /select_tuple_12. - rewrite /montgomery_ladder2_step. - by simplify. -qed. - -(** step 3: extend the state to contain an additional bit stating if the state is swapped **) -op cswap( t : ('a * 'a) * ('a * 'a), b : bool ) = - if b - then swap_tuple t - else t. - -op montgomery_ladder3_step(k : W256.t, init : zp, nqs : (zp * zp) * (zp * zp) * bool, ctr : int) = - let nqs = cswap (select_tuple_12 nqs) (nqs.`3 ^^ (ith_bit k ctr)) in - let nqs = add_and_double1 init nqs in - (nqs.`1, nqs.`2, (ith_bit k ctr)). 
- -op montgomery_ladder3(init : zp, k : W256.t) = - let nqs0 = ((Zp.one,Zp.zero),(init,Zp.one),false) in - foldl (montgomery_ladder3_step k init) nqs0 (rev (iota_ 0 255)). - -lemma eq_montgomery_ladder3_reconstruct (init : zp) (k: W256.t) : - montgomery_ladder2 init k = reconstruct_tuple (montgomery_ladder3 init k). -proof. - rewrite /montgomery_ladder2 /montgomery_ladder3 //=. - apply foldl_in_eq_r. - move => ? ? ?. - rewrite /reconstruct_tuple /montgomery_ladder2_step /montgomery_ladder3_step. - rewrite /swap_tuple /select_tuple_12 /cswap. - simplify => /#. -qed. - -(* lemma: if the first bit of k is 0, which will be because of decodeScalar25519, *) -(* then montgomery_ladder2 = select_tuple_12 montgomery_ladder3 *) -lemma eq_montgomery_ladder3 (init : zp) (k: W256.t) : - k.[0] = false => - montgomery_ladder2 init k = select_tuple_12 (montgomery_ladder3 init k). -proof. - move => hkf. - have tbf : (montgomery_ladder3 init k).`3 = false. (*third bit false*) - rewrite /montgomery_ladder3 /montgomery_ladder3_step /select_tuple_12 /cswap /ith_bit. - by simplify rev. - have seqr : select_tuple_12 (montgomery_ladder3 init k) = (*select eq reconstruct*) - reconstruct_tuple (montgomery_ladder3 init k). - by apply /eq_reconstruct_select_tuple /tbf. - rewrite seqr. - by apply eq_montgomery_ladder3_reconstruct. -qed. - -(** step 4: montgomery_ladder = select_tuple_12 montgomery_ladder3 **) -lemma eq_montgomery_ladder123 (init : zp) (k: W256.t) : - k.[0] = false => - montgomery_ladder init k = select_tuple_12 (montgomery_ladder3 init k). -proof. - move => hkf. - by rewrite eq_montgomery_ladder1 eq_montgomery_ladder2 eq_montgomery_ladder3. -qed. - -(** step 5: introduce reordering in encode point **) -(* - we split invert in 3 parts to make the proof faster *) -op invert_p_p1(z1 : zp) : (zp*zp) = - let z2 = exp z1 2 in - let z8 = exp z2 (2*2) in - let z9 = z1 * z8 in - let z11 = z2 * z9 in - let z22 = exp z11 2 in - let z_5_0 = z9 * z22 in - (z_5_0, z11). - -op invert_p_p2(z_5_0 : zp) : zp = - let z_10_5 = ZModpRing.exp z_5_0 (2^5) in - let z_10_0 = z_10_5 * z_5_0 in - let z_20_10 = ZModpRing.exp z_10_0 (2^10) in - let z_20_0 = z_20_10 * z_10_0 in - let z_40_20 = ZModpRing.exp z_20_0 (2^20) in - let z_40_0 = z_40_20 * z_20_0 in - let z_50_10 = ZModpRing.exp z_40_0 (2^10) in - let z_50_0 = z_50_10 * z_10_0 in - z_50_0. - -op invert_p_p3(z_50_0 z11 : zp) : zp = - let z_100_50 = ZModpRing.exp z_50_0 (2^50) in - let z_100_0 = z_100_50 * z_50_0 in - let z_200_100 = ZModpRing.exp z_100_0 (2^100) in - let z_200_0 = z_200_100 * z_100_0 in - let z_250_50 = ZModpRing.exp z_200_0 (2^50) in - let z_250_0 = z_250_50 * z_50_0 in - let z_255_5 = ZModpRing.exp z_250_0 (2^5) in - let z_255_21 = z_255_5 * z11 in - z_255_21. - -op invert_p(z1 : zp) : zp = - let (z_5_0, z11) = invert_p_p1 z1 in - let z_50_0 = invert_p_p2 z_5_0 in - let z_255_21 = invert_p_p3 z_50_0 z11 in - z_255_21 axiomatized by invert_pE. - -lemma eq_invert_p (z1: zp) : - invert_p z1 = ZModpRing.exp z1 (p-2). -proof. -rewrite invert_pE. -(*invert_p1*) -rewrite /invert_p_p1 /= expE //=. - cut -> : invert_p_p3 (invert_p_p2 (z1 * exp z1 8 * - exp (exp z1 2 * (z1 * exp z1 8)) 2)) - (exp z1 2 * (z1 * exp z1 8)) = - invert_p_p3 (invert_p_p2 (exp z1 (2^5 - 2^0))) (exp z1 11). - smt(expE exprS exprD). -(*invert_p2*) -rewrite /invert_p_p2 //=. 
- cut -> : invert_p_p3 (exp (exp (exp - (exp (exp z1 31) 32 * exp z1 31) 1024 * - (exp (exp z1 31) 32 * exp z1 31)) 1048576 * - (exp (exp (exp z1 31) 32 * exp z1 31) 1024 * - (exp (exp z1 31) 32 * exp z1 31))) 1024 * - (exp (exp z1 31) 32 * exp z1 31)) (exp z1 11) = - invert_p_p3 (exp z1 (2^50 - 2^0)) (exp z1 11). - smt(expE exprS exprD). -(*invert_p3*) -rewrite /invert_p_p3 //= pE //=. -smt(expE exprS exprD). -qed. - -(* now we define invert as one op and prove it equiv to exp z1 (p-2) *) -op invert0(z1 : zp) : zp = - let z2 = ZModpRing.exp z1 2 in - let z8 = ZModpRing.exp z2 (2*2) in - let z9 = z1 * z8 in - let z11 = z2 * z9 in - let z22 = ZModpRing.exp z11 2 in - let z_5_0 = z9 * z22 in - let z_10_5 = ZModpRing.exp z_5_0 (2^5) in - let z_10_0 = z_10_5 * z_5_0 in - let z_20_10 = ZModpRing.exp z_10_0 (2^10) in - let z_20_0 = z_20_10 * z_10_0 in - let z_40_20 = ZModpRing.exp z_20_0 (2^20) in - let z_40_0 = z_40_20 * z_20_0 in - let z_50_10 = ZModpRing.exp z_40_0 (2^10) in - let z_50_0 = z_50_10 * z_10_0 in - let z_100_50 = ZModpRing.exp z_50_0 (2^50) in - let z_100_0 = z_100_50 * z_50_0 in - let z_200_100 = ZModpRing.exp z_100_0 (2^100) in - let z_200_0 = z_200_100 * z_100_0 in - let z_250_50 = ZModpRing.exp z_200_0 (2^50) in - let z_250_0 = z_250_50 * z_50_0 in - let z_255_5 = ZModpRing.exp z_250_0 (2^5) in - let z_255_21 = z_255_5 * z11 in - z_255_21 axiomatized by invert0E. - -lemma eq_invert0 (z1 : zp) : - invert0 z1 = invert_p z1. -proof. - rewrite invert0E invert_pE /invert_p_p1 /invert_p_p2 /invert_p_p3 //. -qed. - -lemma eq_invert0p (z1 : zp) : - invert0 z1 = ZModpRing.exp z1 (p-2). -proof. - rewrite eq_invert0 eq_invert_p //. -qed. - -op sqr(z : zp) : zp = - ZModpRing.exp z 2. - -op it_sqr(e : int, z : zp) : zp = - ZModpRing.exp z (2^e). - -op it_sqr1(e : int, z : zp) : zp = - foldl (fun (z' : zp) _ => exp z' 2) z (iota_ 0 e). - -lemma eq_it_sqr1 (e : int, z : zp) : - 0 <= e => - it_sqr1 e z = it_sqr e z. -proof. - move : e. - rewrite /it_sqr1 /it_sqr. elim. by simplify; smt(expr1). - move => i ige0 hin. - rewrite iotaSr // -cats1 foldl_cat hin /= expE /=. smt(gt0_pow2). - congr. clear hin. - rewrite powS // mulzC //. -qed. - -op invert1(z1 : zp) : zp = - let t0 = sqr z1 in (* z1^2 *) - let t1 = sqr t0 in (* z1^4 *) - let t1 = sqr t1 in (* z1^8 *) - let t1 = z1 * t1 in (* z1^9 *) - let t0 = t0 * t1 in (* z1^11 *) - let t2 = sqr t0 in (* z1^22 *) - let t1 = t1 * t2 in (* z_5_0 *) - let t2 = sqr t1 in (* z_10_5 *) - let t2 = it_sqr 4 t2 in - let t1 = t1 * t2 in (* z_10_0 *) - let t2 = it_sqr 10 t1 in (* z_20_10 *) - let t2 = t1 * t2 in (* z_20_0 *) - let t3 = it_sqr 20 t2 in (* z_40_20 *) - let t2 = t2 * t3 in (* z_40_0 *) - let t2 = it_sqr 10 t2 in (* z_50_10 *) - let t1 = t1 * t2 in (* z_50_0 *) - let t2 = it_sqr 50 t1 in (* z_100_50 *) - let t2 = t1 * t2 in (* z_100_0 *) - let t3 = it_sqr 100 t2 in (* z_200_100 *) - let t2 = t2 * t3 in (* z_200_0 *) - let t2 = it_sqr 50 t2 in (* z_250_50 *) - let t1 = t1 * t2 in (* z_250_0 *) - let t1 = it_sqr 4 t1 in (* z_255_5 *) - let t1 = sqr t1 in - let t1 = t0 * t1 in - t1 axiomatized by invert1E. - -lemma eq_invert1 (z1: zp) : - invert1 z1 = invert0 z1. -proof. - rewrite invert1E invert0E /= /it_sqr /sqr /=. - smt(exprS exprD expE). -qed. 
- -(** split invert2 in 3 parts : jump from it_sqr to it_sqr1 **) - -op invert2(z1 : zp) : zp = - let t0 = sqr z1 in (* z1^2 *) - let t1 = sqr t0 in (* z1^4 *) - let t1 = sqr t1 in (* z1^8 *) - let t1 = z1 * t1 in (* z1^9 *) - let t0 = t0 * t1 in (* z1^11 *) - let t2 = sqr t0 in (* z1^22 *) - let t1 = t1 * t2 in (* z_5_0 *) - let t2 = sqr t1 in (* z_10_5 *) - let t2 = it_sqr1 4 t2 in - let t1 = t1 * t2 in (* z_10_0 *) - let t2 = it_sqr1 10 t1 in (* z_20_10 *) - let t2 = t1 * t2 in (* z_20_0 *) - let t3 = it_sqr1 20 t2 in (* z_40_20 *) - let t2 = t2 * t3 in (* z_40_0 *) - let t2 = it_sqr1 10 t2 in (* z_50_10 *) - let t1 = t1 * t2 in (* z_50_0 *) - let t2 = it_sqr1 50 t1 in (* z_100_50 *) - let t2 = t1 * t2 in (* z_100_0 *) - let t3 = it_sqr1 100 t2 in (* z_200_100 *) - let t2 = t2 * t3 in (* z_200_0 *) - let t2 = it_sqr1 50 t2 in (* z_250_50 *) - let t1 = t1 * t2 in (* z_250_0 *) - let t1 = it_sqr1 4 t1 in (* z_255_5 *) - let t1 = sqr t1 in - let t1 = t0 * t1 in - t1 axiomatized by invert2E. - -lemma eq_invert2 (z1: zp) : - invert2 z1 = invert1 z1. -proof. - rewrite invert2E invert1E. smt(eq_it_sqr1). -qed. - -lemma eq_invert210p (z1: zp) : - invert2 z1 = ZModpRing.exp z1 (p-2). -proof. -rewrite eq_invert2 eq_invert1 eq_invert0p //. -qed. - -(* now we define an alternative version of encodePoint *) -op encodePoint1 (q: zp * zp) : W256.t = - let qi = invert2 q.`2 in - let q = q.`1 * qi in - W256.of_int (asint q) axiomatized by encodePoint1E. - -lemma eq_encodePoint1 (q: zp * zp) : - encodePoint1 q = encodePoint q. -proof. - rewrite encodePoint1E encodePointE. simplify. congr. - rewrite eq_invert210p //. -qed. - -(** step 6: scalarmult with updated montgomery_ladder3 **) -op scalarmult1 (k:W256.t) (u:W256.t) : W256.t = - let k = decodeScalar25519 k in - let u = decodeUCoordinate u in - let r = montgomery_ladder3 u k in - encodePoint1 (r.`1) axiomatized by scalarmult1E. - -hint simplify scalarmult1E. - -(* lemma scalarmult = scalarmult1 *) -lemma eq_scalarmult1 (k:W256.t) (u:W256.t) : - scalarmult1 k u = scalarmult k u. -proof. - simplify. - pose du := decodeUCoordinate u. - pose dk := decodeScalar25519 k. - rewrite eq_encodePoint1. - congr. - have kb0f : (dk).[0] = false. (* k bit 0 false *) - rewrite /dk /decodeScalar25519 //. - have ml123 : montgomery_ladder du dk = select_tuple_12 (montgomery_ladder3 du dk). - move : kb0f. apply eq_montgomery_ladder123. - rewrite ml123 /select_tuple_12 //. -qed. diff --git a/proof/crypto_scalarmult/curve25519/Curve25519_Hop2.ec b/proof/crypto_scalarmult/curve25519/Curve25519_Hop2.ec deleted file mode 100644 index 8a47353..0000000 --- a/proof/crypto_scalarmult/curve25519/Curve25519_Hop2.ec +++ /dev/null @@ -1,488 +0,0 @@ -require import Bool List Int IntExtra IntDiv CoreMap Real Zp. -from Jasmin require import JModel. -require import Curve25519_Spec. -require import Curve25519_Hop1. -import Zp ZModpRing Curve25519_Spec Curve25519_Hop1. 
- -module MHop2 = { - - (* h = f + g *) - proc add(f g : zp) : zp = - { - var h: zp; - h <- f + g; - return h; - } - - (* h = f - g *) - proc sub(f g : zp) : zp = - { - var h: zp; - h <- f - g; - return h; - } - - (* h = f * a24 *) - proc mul_a24 (f : zp, a24 : int) : zp = - { - var h: zp; - h <- f * (inzp a24); - return h; - } - - (* h = f * g *) - proc mul (f g : zp) : zp = - { - var h : zp; - h <- f * g; - return h; - } - - (* h = f * f *) - proc sqr (f : zp) : zp = - { - var h : zp; - (*h <- f * f;*) - h <- exp f 2; - return h; - } - - (* iterated sqr *) - proc it_sqr (i : int, f : zp) : zp = - { - var h : zp; - h <- witness; - - h <@ sqr(f); - i <- i - 1; - f <@ sqr(h); - i <- i - 1; - - while (0 < i) { - h <@ sqr(f); - i <- i - 1; - f <@ sqr(h); - i <- i - 1; - } - - return f; - } - - (* f ** 2**255-19-2 *) - proc invert (z1' : zp) : zp = - { - var t0 : zp; - var t1 : zp; - var t2 : zp; - var t3 : zp; - - t0 <- witness; - t1 <- witness; - t2 <- witness; - t3 <- witness; - - t0 <@ sqr (z1'); - t1 <@ sqr (t0); - t1 <@ sqr (t1); - t1 <@ mul (z1', t1); - t0 <@ mul (t0, t1); - t2 <@ sqr (t0); - t1 <@ mul (t1, t2); - t2 <@ sqr (t1); - t2 <@ it_sqr (4, t2); - t1 <@ mul (t1, t2); - t2 <@ it_sqr (10, t1); - t2 <@ mul (t1, t2); - t3 <@ it_sqr (20, t2); - t2 <@ mul (t2, t3); - t2 <@ it_sqr (10, t2); - t1 <@ mul (t1, t2); - t2 <@ it_sqr (50, t1); - t2 <@ mul (t1, t2); - t3 <@ it_sqr (100, t2); - t2 <@ mul (t2, t3); - t2 <@ it_sqr (50, t2); - t1 <@ mul (t1, t2); - t1 <@ it_sqr (4, t1); - t1 <@ sqr (t1); - t1 <@ mul (t0, t1); - return t1; - } - - proc cswap (x2 z2 x3 z3 : zp, toswap : bool) : zp * zp * zp * zp = - { - if(toswap) - { (x2,z2,x3,z3) = (x3,z3,x2,z2); } - else - { (x2,z2,x3,z3) = (x2,z2,x3,z3); } - return (x2,z2,x3,z3); - } - - proc ith_bit (k' : W256.t, ctr : int) : bool = - { - return k'.[ctr]; - } - - proc decode_scalar_25519 (k' : W256.t) : W256.t = - { - k'.[0] <- false; - k'.[1] <- false; - k'.[2] <- false; - k'.[255] <- false; - k'.[254] <- true; - return k'; - } - - proc decode_u_coordinate (u' : W256.t) : zp = - { - var u'' : zp; - (* last bit of u is cleared but that can be introduced at the same time as arrays *) - u'' <- inzp ( to_uint u' ); - return u''; - } - - proc init_points (init : zp) : zp * zp * zp * zp = - { - var x2 : zp; - var z2 : zp; - var x3 : zp; - var z3 : zp; - - x2 <- witness; - x3 <- witness; - z2 <- witness; - z3 <- witness; - - x2 <- Zp.one; - z2 <- Zp.zero; - x3 <- init; - z3 <- Zp.one; - - return (x2, z2, x3, z3); - } - - proc add_and_double (init x2 z2 x3 z3 : zp) : zp * zp * zp * zp = - { - var t0 : zp; - var t1 : zp; - var t2 : zp; - t0 <- witness; - t1 <- witness; - t2 <- witness; - t0 <@ sub (x2, z2); - x2 <@ add (x2, z2); - t1 <@ sub (x3, z3); - z2 <@ add (x3, z3); - z3 <@ mul (x2, t1); - z2 <@ mul (z2, t0); - t2 <@ sqr (x2); - t1 <@ sqr (t0); - x3 <@ add (z3, z2); - z2 <@ sub (z3, z2); - x2 <@ mul (t2, t1); - t0 <@ sub (t2, t1); - z2 <@ sqr (z2); - z3 <@ mul_a24 (t0, 121665); - x3 <@ sqr (x3); - t2 <@ add (t2, z3); - z3 <@ mul (init, z2); - z2 <@ mul (t0, t2); - return (x2, z2, x3, z3); - } - - proc montgomery_ladder_step (k' : W256.t, - init' x2 z2 x3 z3 : zp, - swapped : bool, - ctr' : int) : zp * zp * zp * zp * bool = - { - var bit : bool; - var toswap : bool; - bit <@ ith_bit (k', ctr'); - toswap <- swapped; - toswap <- (toswap ^^ bit); - (x2, z2, x3, z3) <@ cswap (x2, z2, x3, z3, toswap); - swapped <- bit; - (x2, z2, x3, z3) <@ add_and_double (init', x2, z2, x3, z3); - return (x2, z2, x3, z3, swapped); - } - - proc montgomery_ladder (init' 
: zp, k' : W256.t) : zp * zp * zp * zp = - { - var x2 : zp; - var z2 : zp; - var x3 : zp; - var z3 : zp; - var ctr : int; - var swapped : bool; - x2 <- witness; - x3 <- witness; - z2 <- witness; - z3 <- witness; - (x2, z2, x3, z3) <@ init_points (init'); - ctr <- 254; - swapped <- false; - while (0 <= ctr) - { (x2, z2, x3, z3, swapped) <@ - montgomery_ladder_step (k', init', x2, z2, x3, z3, swapped, ctr); - ctr <- ctr - 1; - } - return (x2, z2, x3, z3); - } - - proc encode_point (x2 z2 : zp) : W256.t = - { - var r : zp; - r <- witness; - z2 <@ invert (z2); - r <@ mul (x2, z2); - (* no need to 'freeze' or 'tobytes' r in this spec *) - return (W256.of_int (asint r)); - } - - proc scalarmult (k' u' : W256.t) : W256.t = - { - var u'' : zp; - var x2 : zp; - var z2 : zp; - var x3 : zp; - var z3 : zp; - var r : W256.t; - - r <- witness; - x2 <- witness; - x3 <- witness; - z2 <- witness; - z3 <- witness; - - k' <@ decode_scalar_25519 (k'); - u'' <@ decode_u_coordinate (u'); - (x2, z2, x3, z3) <@ montgomery_ladder (u'', k'); - r <@ encode_point (x2, z2); - return r; - } -}. - -(** step 1 : decode_scalar_25519 **) -lemma eq_h2_decode_scalar_25519 k: - hoare [ MHop2.decode_scalar_25519 : k' = k - ==> res = decodeScalar25519 k]. -proof. - proc; wp; rewrite /decodeScalar25519 /=; skip. - move => _ hk; rewrite hk //. -qed. - -(** step 2 : decode_u_coordinate **) -lemma eq_h2_decode_u_coordinate u: - hoare [ MHop2.decode_u_coordinate : u' = u - ==> res = decodeUCoordinate u]. -proof. - proc; wp; rewrite /decode_u_coordinate /=; skip. - move => _ hu; rewrite hu //. -qed. - -(** step 3 : ith_bit **) -lemma eq_h2_ith_bit (k : W256.t) i: - hoare [MHop2.ith_bit : k' = k /\ ctr = i ==> res = ith_bit k i]. -proof. - proc. rewrite /ith_bit. skip => />. -qed. - -(** step 4 : cswap **) -lemma eq_h2_cswap (t : (zp * zp) * (zp * zp) ) b: - hoare [MHop2.cswap : x2 = (t.`1).`1 /\ - z2 = (t.`1).`2 /\ - x3 = (t.`2).`1 /\ - z3 = (t.`2).`2 /\ - toswap = b - ==> ((res.`1, res.`2),(res.`3, res.`4)) = cswap t b]. -proof. - by proc; wp; skip; simplify => /#. -qed. - -(** step 5 : add_and_double **) -lemma eq_h2_add_and_double (qx : zp) (nqs : (zp * zp) * (zp * zp)): - hoare [MHop2.add_and_double : init = qx /\ - x2 = nqs.`1.`1 /\ - z2 = nqs.`1.`2 /\ - x3 = nqs.`2.`1 /\ - z3 = nqs.`2.`2 - ==> ((res.`1, res.`2),(res.`3, res.`4)) = add_and_double1 qx nqs]. -proof. - proc; inline *; wp; skip. - rewrite /add_and_double1 /=. smt(expr2). -qed. - -(** step 6 : montgomery_ladder_step **) -lemma eq_h2_montgomery_ladder_step (k : W256.t) - (init : zp) - (nqs : (zp * zp) * (zp * zp) * bool) - (ctr : int) : - hoare [MHop2.montgomery_ladder_step : k' = k /\ - init' = init /\ - x2 = nqs.`1.`1 /\ - z2 = nqs.`1.`2 /\ - x3 = nqs.`2.`1 /\ - z3 = nqs.`2.`2 /\ - swapped = nqs.`3 /\ - ctr' = ctr - ==> ((res.`1, res.`2),(res.`3, res.`4),res.`5) = - montgomery_ladder3_step k init nqs ctr]. -proof. - proc => /=. - ecall (eq_h2_add_and_double init (cswap (select_tuple_12 nqs) (nqs.`3 ^^ (ith_bit k ctr)))). - wp. - ecall (eq_h2_cswap (select_tuple_12 nqs) (nqs.`3 ^^ (ith_bit k ctr))). - wp. - ecall (eq_h2_ith_bit k ctr). auto. - rewrite /montgomery_ladder3_step => /#. -qed. - -(** step 7 : montgomery_ladder **) -lemma unroll_ml3s k init nqs (ctr : int) : (** unroll montgomery ladder 3 step **) - 0 <= ctr => - foldl (montgomery_ladder3_step k init) - nqs - (rev (iota_ 0 (ctr+1))) - = - foldl (montgomery_ladder3_step k init) - (montgomery_ladder3_step k init nqs ctr) - (rev (iota_ 0 (ctr))). -proof. -move => ctrge0. 
-rewrite 2!foldl_rev iotaSr //= -cats1 foldr_cat => /#. -qed. - -lemma eq_h2_montgomery_ladder (init : zp) - (k : W256.t) : - hoare [MHop2.montgomery_ladder : init' = init /\ - k.[0] = false /\ - k' = k - ==> ((res.`1, res.`2),(res.`3,res.`4)) = - select_tuple_12 (montgomery_ladder3 init k)]. -proof. -proc. - inline MHop2.init_points. sp. simplify. - rewrite /montgomery_ladder3. - - while (foldl (montgomery_ladder3_step k' init') - ((Zp.one, Zp.zero), (init, Zp.one), false) - (rev (iota_ 0 255)) - = - foldl (montgomery_ladder3_step k' init') - ((x2,z2), (x3,z3), swapped) - (rev (iota_ 0 (ctr+1))) - ). - wp. - ecall (eq_h2_montgomery_ladder_step k' init' ((x2,z2),(x3,z3),swapped) ctr). - skip. simplify. - move => &hr [?] ? ? ?. smt(unroll_ml3s). - skip. move => &hr [?] [?] [?] [?] [?] [?] [?] [?] [?] [?] [?] [?] [?] ?. subst. - split; first by done. - move => ? ? ? ? ? ? ?. - have _ : rev (iota_ 0 (ctr0 + 1)) = []; smt(iota0). -qed. - -(** step 8 : iterated square **) -lemma it_sqr1_m2_exp4 (e : int) (z : zp) : - 0 <= e - 2 => it_sqr1 e z = it_sqr1 (e-2) (exp (exp z 2) 2). -proof. - rewrite expE // /= => ?. - rewrite !eq_it_sqr1. smt(). trivial. - rewrite /it_sqr (*expE*). - (* directly rewriting expE takes too long *) - have ee : exp (exp z 4) (2 ^ (e - 2)) = exp z (2^2 * 2 ^ (e - 2)). smt(expE). - rewrite ee. congr. - rewrite pow_add //. -qed. - -lemma it_sqr1_0 (e : int) (z : zp) : - 0 = e => it_sqr1 e z = z. -proof. - move => ?. - rewrite eq_it_sqr1. smt(). - rewrite /it_sqr. subst. simplify. - rewrite expr1 //. -qed. - -lemma eq_h2_it_sqr (e : int) (z : zp) : - hoare[MHop2.it_sqr : - i = e && 2 <= i && i %% 2 = 0 && f = z - ==> - res = it_sqr1 e z]. -proof. - proc. inline MHop2.sqr. simplify. - while ( 0 <= i && i %% 2 = 0 && it_sqr1 e z = it_sqr1 i f). - wp. skip. - - (** alternative version with progress **) - (* - progress. smt(). smt(). smt(it_sqr1_m2_exp4). - wp. skip. progress. smt(). smt(). smt(it_sqr1_m2_exp4). - smt(it_sqr1_0). - *) - - move => &hr [[?]] [?] hin ?. simplify. - split; first by smt(). move => ?. - split; first by smt(). move => ?. - rewrite hin. move : H2. apply it_sqr1_m2_exp4. - wp. skip. - move => &hr [?] [?] [?] ?. simplify. - split. - split; first by smt(). move => ?. - split; first by smt(). move => ?. - subst. move : H3. apply it_sqr1_m2_exp4. - move => ? ? ? [?] [?] ->. subst. - have ieq0 : i0 = 0. smt(). - rewrite it_sqr1_0 /#. -qed. - -(** step 9 : invert **) -lemma eq_h2_invert (z : zp) : - hoare[MHop2.invert : z1' = z ==> res = invert2 z]. -proof. - proc. - inline MHop2.sqr MHop2.mul. wp. - ecall (eq_h2_it_sqr 4 t1). wp. - ecall (eq_h2_it_sqr 50 t2). wp. - ecall (eq_h2_it_sqr 100 t2). wp. - ecall (eq_h2_it_sqr 50 t1). wp. - ecall (eq_h2_it_sqr 10 t2). wp. - ecall (eq_h2_it_sqr 20 t2). wp. - ecall (eq_h2_it_sqr 10 t1). wp. - ecall (eq_h2_it_sqr 4 t2). wp. - skip. simplify. - move => &hr ?. - move=> ? ->. move=> ? ->. - move=> ? ->. move=> ? ->. - move=> ? ->. move=> ? ->. - move=> ? ->. move=> ? ->. - rewrite invert2E /sqr /= H /#. -qed. - -(** step 10 : encode point **) -lemma eq_h2_encode_point (q : zp * zp) : - hoare[MHop2.encode_point : x2 = q.`1 /\ z2 = q.`2 ==> res = encodePoint1 q]. -proof. - proc. inline MHop2.mul. wp. sp. - ecall (eq_h2_invert z2). - skip. simplify. - move => &hr [?] [?] ? ?. move=> ->. - rewrite encodePoint1E /= H0 H1 //. -qed. - -(** step 11 : scalarmult **) -lemma eq_h2_scalarmult (k u : W256.t) : - hoare[MHop2.scalarmult : k' = k /\ u' = u ==> res = scalarmult k u]. -proof. - rewrite -eq_scalarmult1. - proc. sp. 
- ecall (eq_h2_encode_point (x2,z2)). simplify. - ecall (eq_h2_montgomery_ladder u'' k'). simplify. - ecall (eq_h2_decode_u_coordinate u'). simplify. - ecall (eq_h2_decode_scalar_25519 k'). simplify. - skip. - move => &hr [?] [?] [?] [?] [?] [?] ?. - move=> ? -> ? ->. split. - by rewrite /decodeScalar25519 /=. - move=> ? ? ? ? -> => /#. -qed. diff --git a/proof/crypto_scalarmult/curve25519/Curve25519_Hop3.ec b/proof/crypto_scalarmult/curve25519/Curve25519_Hop3.ec deleted file mode 100644 index c728079..0000000 --- a/proof/crypto_scalarmult/curve25519/Curve25519_Hop3.ec +++ /dev/null @@ -1,153 +0,0 @@ -require import Bool List Int IntExtra IntDiv CoreMap Real Zp. -from Jasmin require import JModel. -require import Curve25519_Spec. -require import Curve25519_Hop1. -require import Curve25519_Hop2. - -import Zp ZModpRing Curve25519_Spec Curve25519_Hop1 Curve25519_Hop2. - -(** step 1 : decode_scalar_25519 **) -lemma ill_decode_scalar_25519 : islossless MHop2.decode_scalar_25519. -proof. islossless. qed. - -lemma eq_h3_decode_scalar_25519 k: - phoare [ MHop2.decode_scalar_25519 : k' = k - ==> res = decodeScalar25519 k] = 1%r. -proof. by conseq ill_decode_scalar_25519 (eq_h2_decode_scalar_25519 k). qed. - -(** step 2 : decode_u_coordinate **) -lemma ill_decode_u_coordinate : islossless MHop2.decode_u_coordinate. -proof. islossless. qed. - -lemma eq_h3_decode_u_coordinate u: - phoare [ MHop2.decode_u_coordinate : u' = u - ==> res = decodeUCoordinate u] = 1%r. -proof. by conseq ill_decode_u_coordinate (eq_h2_decode_u_coordinate u). qed. - -(** step 3 : ith_bit **) -lemma ill_ith_bit : islossless MHop2.ith_bit. -proof. islossless. qed. - -lemma eq_h3_ith_bit (k : W256.t) i: - phoare [MHop2.ith_bit : k' = k /\ ctr = i ==> res = ith_bit k i] = 1%r. -proof. by conseq ill_ith_bit (eq_h2_ith_bit k i). qed. - -(** step 4 : cswap **) -lemma ill_cswap : islossless MHop2.cswap. -proof. islossless. qed. - -lemma eq_h3_cswap (t : (zp * zp) * (zp * zp) ) b: - phoare [MHop2.cswap : x2 = (t.`1).`1 /\ - z2 = (t.`1).`2 /\ - x3 = (t.`2).`1 /\ - z3 = (t.`2).`2 /\ - toswap = b - ==> ((res.`1, res.`2),(res.`3, res.`4)) = cswap t b] = 1%r. -proof. by conseq ill_cswap (eq_h2_cswap t b). qed. - -(** step 5 : add_and_double **) -lemma ill_add_and_double : islossless MHop2.add_and_double. -proof. islossless. qed. - -lemma eq_h3_add_and_double (qx : zp) (nqs : (zp * zp) * (zp * zp)): - phoare [MHop2.add_and_double : init = qx /\ - x2 = nqs.`1.`1 /\ - z2 = nqs.`1.`2 /\ - x3 = nqs.`2.`1 /\ - z3 = nqs.`2.`2 - ==> ((res.`1, res.`2),(res.`3, res.`4)) = add_and_double1 qx nqs] = 1%r. -proof. by conseq ill_add_and_double (eq_h2_add_and_double qx nqs). qed. - -(** step 6 : montgomery_ladder_step **) -lemma ill_montgomery_ladder_step : islossless MHop2.montgomery_ladder_step. -proof. islossless. qed. - -lemma eq_h3_montgomery_ladder_step (k : W256.t) - (init : zp) - (nqs : (zp * zp) * (zp * zp) * bool) - (ctr : int) : - phoare [MHop2.montgomery_ladder_step : k' = k /\ - init' = init /\ - x2 = nqs.`1.`1 /\ - z2 = nqs.`1.`2 /\ - x3 = nqs.`2.`1 /\ - z3 = nqs.`2.`2 /\ - swapped = nqs.`3 /\ - ctr' = ctr - ==> ((res.`1, res.`2),(res.`3, res.`4),res.`5) = - montgomery_ladder3_step k init nqs ctr] = 1%r. -proof. by conseq ill_montgomery_ladder_step (eq_h2_montgomery_ladder_step k init nqs ctr). qed. - -(** step 7 : montgomery_ladder **) -lemma ill_montgomery_ladder : islossless MHop2.montgomery_ladder. -proof. - islossless. while true (ctr+1). move => ?. wp. simplify. - call(_:true ==> true). islossless. skip; smt(). - skip; smt(). -qed. 
- -lemma eq_h3_montgomery_ladder (init : zp) - (k : W256.t) : - phoare [MHop2.montgomery_ladder : init' = init /\ - k.[0] = false /\ - k' = k - ==> ((res.`1, res.`2),(res.`3,res.`4)) = - select_tuple_12 (montgomery_ladder3 init k)] = 1%r. -proof. by conseq ill_montgomery_ladder (eq_h2_montgomery_ladder init k). qed. - -(** step 8 : iterated square **) -lemma ill_it_sqr : islossless MHop2.it_sqr. -proof. - islossless. while true i. move => ?. wp. - inline MHop2.sqr. wp. skip. smt(). - skip. smt(). -qed. - -lemma eq_h3_it_sqr (e : int) (z : zp) : - phoare[MHop2.it_sqr : - i = e && 2 <= i && i %% 2 = 0 && f = z - ==> - res = it_sqr1 e z] = 1%r. -proof. by conseq ill_it_sqr (eq_h2_it_sqr e z). qed. - -(** step 9 : invert **) -lemma ill_invert : islossless MHop2.invert. -proof. - proc. - inline MHop2.sqr MHop2.mul. - wp; sp. call(_: true ==> true). apply ill_it_sqr. - wp; sp. call(_: true ==> true). apply ill_it_sqr. - wp; sp. call(_: true ==> true). apply ill_it_sqr. - wp; sp. call(_: true ==> true). apply ill_it_sqr. - wp; sp. call(_: true ==> true). apply ill_it_sqr. - wp; sp. call(_: true ==> true). apply ill_it_sqr. - wp; sp. call(_: true ==> true). apply ill_it_sqr. - wp; sp. call(_: true ==> true). apply ill_it_sqr. - skip. trivial. -qed. - -lemma eq_h3_invert (z : zp) : - phoare[MHop2.invert : z1' = z ==> res = invert2 z] = 1%r. -proof. by conseq ill_invert (eq_h2_invert z). qed. - -(** step 10 : encode point **) -lemma ill_encode_point : islossless MHop2.encode_point. -proof. - proc. inline MHop2.mul. wp; sp. call(_: true ==> true). apply ill_invert. - skip. trivial. -qed. - -lemma eq_h3_encode_point (q : zp * zp) : - phoare[MHop2.encode_point : x2 = q.`1 /\ z2 = q.`2 ==> res = encodePoint1 q] = 1%r. -proof. by conseq ill_encode_point (eq_h2_encode_point q). qed. - -(** step 11 : scalarmult **) -lemma ill_scalarmult : islossless MHop2.scalarmult. -proof. - proc. sp. - call(_: true ==> true). apply ill_encode_point. - call(_: true ==> true). apply ill_montgomery_ladder. - call(_: true ==> true). apply ill_decode_u_coordinate. - call(_: true ==> true). apply ill_decode_scalar_25519. - skip. trivial. -qed. diff --git a/proof/crypto_scalarmult/curve25519/Curve25519_Hop4.ec b/proof/crypto_scalarmult/curve25519/Curve25519_Hop4.ec deleted file mode 100644 index 7bfb272..0000000 --- a/proof/crypto_scalarmult/curve25519/Curve25519_Hop4.ec +++ /dev/null @@ -1,365 +0,0 @@ -require import AllCore Bool List Int IntExtra IntDiv CoreMap Real Zp. -from Jasmin require import JModel. -require import Curve25519_Spec. -require import Curve25519_Hop1. -require import Curve25519_Hop2. -require import Curve25519_Hop3. -require import Curve25519_smulx. -import Zp ZModpRing. -import Curve25519_Spec Curve25519_Hop1 Curve25519_Hop2 Curve25519_Hop3. -import Curve25519_smulx. - -require import Array4 Array8. -require import W64limbs. - -(** representation : move to another file/use rep3/5 **) -type Rep4 = W64.t Array4.t. -op valRep4 (x : Rep4) : int = val_limbs64 (to_list x). -op inzpRep4 (x : Rep4) : zp = inzp (valRep4 x) axiomatized by inzpRep4E. -abbrev zpcgrRep4 (x : Rep4) (z : int) : bool = zpcgr (valRep4 x) z. -(** ************************************* **) - -(** step 0 : add sub mul sqr **) -equiv eq_h4_add : MHop2.add ~ M._fe64_add_rrs: - f{1} = inzpRep4 f{2} /\ - g{1} = inzpRep4 g{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_sub : MHop2.sub ~ M._fe64_sub_rrs: - f{1} = inzpRep4 f{2} /\ - g{1} = inzpRep4 gs{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. 
- -equiv eq_h4_mul_a24 : MHop2.mul_a24 ~ M._fe64_mul_a24: - f{1} = inzpRep4 fs{2} /\ - a24{1} = to_uint a24{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_mul : MHop2.mul ~ M._fe64_mul_rsr: - f{1} = inzpRep4 fs{2} /\ - g{1} = inzpRep4 g{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_sqr : MHop2.sqr ~ M._fe64_sqr_rr: - f{1} = inzpRep4 f{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -(** step 0.5 : transitivity stuff **) -equiv eq_h4_add_ssr : MHop2.add ~ M._fe64_add_ssr: - f{1} = inzpRep4 fs{2} /\ - g{1} = inzpRep4 g{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_add_sss : MHop2.add ~ M._fe64_add_sss: - f{1} = inzpRep4 fs{2} /\ - g{1} = inzpRep4 gs{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_sub_ssr : MHop2.sub ~ M._fe64_sub_ssr: - f{1} = inzpRep4 fs{2} /\ - g{1} = inzpRep4 g{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_sub_sss : MHop2.sub ~ M._fe64_sub_sss: - f{1} = inzpRep4 fs{2} /\ - g{1} = inzpRep4 gs{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_mul_a24_ss : MHop2.mul_a24 ~ M._fe64_mul_a24_ss: - f{1} = inzpRep4 fs{2} /\ - a24{1} = to_uint a24{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_mul_rss : MHop2.mul ~ M._fe64_mul_rss: - f{1} = inzpRep4 fs{2} /\ - g{1} = inzpRep4 gs{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_mul_ssr : MHop2.mul ~ M._fe64_mul_ssr: - f{1} = inzpRep4 fs{2} /\ - g{1} = inzpRep4 g{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_mul_sss : MHop2.mul ~ M._fe64_mul_sss: - f{1} = inzpRep4 fs{2} /\ - g{1} = inzpRep4 gs{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_sqr_rs : MHop2.sqr ~ M._fe64_sqr_rs: - f{1} = inzpRep4 fs{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -equiv eq_h4_sqr_ss : MHop2.sqr ~ M._fe64_sqr_ss: - f{1} = inzpRep4 fs{2} - ==> - res{1} = inzpRep4 res{2}. -proof. -proc. -admit. -qed. - -(** step 1 : decode_scalar_25519 **) -equiv eq_h4_decode_scalar_25519 : - MHop2.decode_scalar_25519 ~ M.decode_scalar_25519: - true ==> true. -proof. -admit. -qed. - -(** step 2 : decode_u_coordinate **) -equiv eq_h4_decode_u_coordinate : - MHop2.decode_u_coordinate ~ M.decode_u_coordinate: - true ==> true. -proof. -admit. -qed. - -(** step 3 : ith_bit **) -equiv eq_h4_ith_bit : - MHop2.ith_bit ~ M.ith_bit: - true ==> true. -proof. -admit. -qed. - -(** step 4 : cswap **) -equiv eq_h4_cswap : - MHop2.cswap ~ M._fe64_cswap: - x2{1} = inzpRep4 x2{2} /\ - z2{1} = inzpRep4 z2r{2} /\ - x3{1} = inzpRep4 x3{2} /\ - z3{1} = inzpRep4 z3{2} /\ - b2i toswap{1} = to_uint toswap{2} - ==> - res{1}.`1 = inzpRep4 res{2}.`1 /\ - res{1}.`2 = inzpRep4 res{2}.`2 /\ - res{1}.`3 = inzpRep4 res{2}.`3 /\ - res{1}.`4 = inzpRep4 res{2}.`4. -proof. -proc. -do 4! unroll for{2} ^while. -case: (toswap{1}). - rcondt {1} 1 => //. wp => /=; skip. - move => &1 &2 [#] 4!->> ??. - have mask_set : (set0_64.`6 - toswap{2}) = W64.onew. rewrite /set0_64 /=. smt(@W64). - rewrite !mask_set /=. - have lxor1 : forall (x1 x2:W64.t), x1 `^` (x2 `^` x1) = x2. - move=> *. rewrite xorwC -xorwA xorwK xorw0 //. - have lxor2 : forall (x1 x2:W64.t), x1 `^` (x1 `^` x2) = x2. - move=> *. rewrite xorwA xorwK xor0w //. - rewrite !lxor1 !lxor2. - split. congr. apply Array4.ext_eq. smt(@Array4). - split. congr. apply Array4.ext_eq. smt(@Array4). 
- split. congr. apply Array4.ext_eq. smt(@Array4). - congr. apply Array4.ext_eq. smt(@Array4). - rcondf {1} 1 => //. wp => /=; skip. - move => &1 &2 [#] 4!->> ??. - have mask_not_set : (set0_64.`6 - toswap{2}) = W64.zero. smt(@W64). - rewrite !mask_not_set !andw0 !xorw0. - smt(@Array4). -qed. - -(** step 5 : add_and_double **) -equiv eq_h4_add_and_double : - MHop2.add_and_double ~ M.add_and_double: - init{1} = inzpRep4 init{2} /\ - x2{1} = inzpRep4 x2{2} /\ - z2{1} = inzpRep4 z2r{2} /\ - x3{1} = inzpRep4 x3{2} /\ - z3{1} = inzpRep4 z3{2} - ==> - res{1}.`1 = inzpRep4 res{2}.`1 /\ - res{1}.`2 = inzpRep4 res{2}.`2 /\ - res{1}.`3 = inzpRep4 res{2}.`3 /\ - res{1}.`4 = inzpRep4 res{2}.`4. -proof. -proc => /=. - call eq_h4_mul_rss. - call eq_h4_mul_sss. - call eq_h4_add_sss. - call eq_h4_sqr_ss. - call eq_h4_mul_a24_ss. - call eq_h4_sqr_ss. - call eq_h4_sub_ssr. - call eq_h4_mul_ssr. - call eq_h4_sub_sss. - call eq_h4_add_sss. - call eq_h4_sqr_rs. - call eq_h4_sqr_ss. - call eq_h4_mul_sss. - call eq_h4_mul_sss. - call eq_h4_add_sss. - call eq_h4_sub_sss. - call eq_h4_add_ssr. - call eq_h4_sub_ssr. - wp. done. -qed. - -(** step 6 : montgomery_ladder_step **) -equiv eq_h4_montgomery_ladder_step : - MHop2.montgomery_ladder_step ~ M.montgomery_ladder_step: - true ==> true. -proof. -admit. -qed. - -(** step 7 : montgomery_ladder **) -equiv eq_h4_montgomery_ladder : - MHop2.montgomery_ladder ~ M.montgomery_ladder : - true ==> true. -proof. -admit. -qed. - -(** step 8 : iterated square **) -equiv eq_h4_it_sqr : - MHop2.it_sqr ~ M._fe64_it_sqr: - f{1} = inzpRep4 f{2} /\ - i{1} = to_uint i{2} /\ - i{1} <= W64.modulus /\ - 2 <= i{1} /\ - i{1} %% 2 = 0 - ==> - res{1} = inzpRep4 res{2}.`2. -proof. -proc. - while (f{1} = inzpRep4 f{2} /\ - i{1} = to_uint i{2} /\ - i{1} <= W64.modulus /\ - 0 <= i{1} /\ - i{1} %% 2 = 0 /\ - zf{2} = (i{2} = W64.zero)). - swap 2 3 3. wp. conseq(_: _ ==> f{1} = inzpRep4 f{2}). - move=> &1 &2 [#] ????? ->> ?? ??? /=. - rewrite /DEC_64 /rflags_of_aluop_nocf64 /ZF_of_w64 => /=. - progress. - smt(@W64). move : H1; smt(). smt(). smt(). smt(@W64). smt(@W64). - by do 2! call eq_h4_sqr; skip; done. - swap 3 4 4. wp. conseq(_: _ ==> f{1} = inzpRep4 f{2}). - move=> &1 &2 [#] /= ->> ->> ??? ?? ->> /=. - rewrite /DEC_64 /rflags_of_aluop_nocf64 /ZF_of_w64 => /=. - progress. - smt(@W64). move : H1; smt(). smt(). smt(). smt(@W64). smt(@W64). - by do 2! call eq_h4_sqr; wp; skip; done. -qed. - -(** step 9 : invert **) -equiv eq_h4_invert : - MHop2.invert ~ M._fe64_invert : - z1'{1} = inzpRep4 f{2} - ==> res{1} = inzpRep4 res{2}. -proof. -proc. - call eq_h4_mul. - call eq_h4_sqr. - call eq_h4_it_sqr. wp. - call eq_h4_mul. - call eq_h4_it_sqr. wp. - call eq_h4_mul. - call eq_h4_it_sqr. wp. - call eq_h4_mul. - call eq_h4_it_sqr. wp. - call eq_h4_mul. - call eq_h4_it_sqr. wp. - call eq_h4_mul. - call eq_h4_it_sqr. wp. - call eq_h4_mul. - call eq_h4_it_sqr. wp. - call eq_h4_mul. wp. - call eq_h4_it_sqr. wp. - call eq_h4_sqr. wp. - call eq_h4_mul. - call eq_h4_sqr. wp. - call eq_h4_mul. wp. - call eq_h4_mul. - call eq_h4_sqr. - call eq_h4_sqr. wp. - call eq_h4_sqr. wp. - done. -qed. - -(** step 10 : encode point **) -equiv eq_h4_encode_point : - MHop2.encode_point ~ M.encode_point: - true ==> true. -proof. -admit. -qed. - -(** step 11 : scalarmult **) -equiv eq_h4_scalarmult : - MHop2.scalarmult ~ M._x25519_scalarmult: - true ==> true. -proof. -admit. -qed. 
diff --git a/proof/crypto_scalarmult/curve25519/Curve25519_Spec.ec b/proof/crypto_scalarmult/curve25519/Curve25519_Spec.ec deleted file mode 100644 index fcf5330..0000000 --- a/proof/crypto_scalarmult/curve25519/Curve25519_Spec.ec +++ /dev/null @@ -1,57 +0,0 @@ -require import List Int IntExtra IntDiv CoreMap Real. -require import Zp. -import Zp. - -from Jasmin require import JModel. - -op decodeScalar25519 (k:W256.t) = - let k = k.[0 <- false] in - let k = k.[1 <- false] in - let k = k.[2 <- false] in - let k = k.[255 <- false] in - let k = k.[254 <- true ] in - k. - -op decodeUCoordinate (u:W256.t) = inzp (to_uint u). - -op add_and_double (qx : zp) (nqs : (zp * zp) * (zp * zp)) = - let x_1 = qx in - let (x_2, z_2) = nqs.`1 in - let (x_3, z_3) = nqs.`2 in - let a = x_2 + z_2 in - let aa = a * a in - let b = x_2 + (- z_2) in - let bb = b*b in - let e = aa + (- bb) in - let c = x_3 + z_3 in - let d = x_3 + (- z_3) in - let da = d * a in - let cb = c * b in - let x_3 = (da + cb)*(da + cb) in - let z_3 = x_1 * ((da + (- cb))*(da + (- cb))) in - let x_2 = aa * bb in - let z_2 = e * (aa + (inzp 121665 * e)) in - ((x_2,z_2), (x_3,z_3)). - -op swap_tuple( t : ('a * 'a) * ('a * 'a) ) = (t.`2, t.`1). - -op ith_bit(k : W256.t, i : int) = k.[i]. - -op montgomery_ladder(init : zp, k : W256.t) = - let nqs0 = ((Zp.one,Zp.zero),(init,Zp.one)) in - foldl (fun (nqs : (zp * zp) * (zp * zp)) ctr => - if ith_bit k ctr - then swap_tuple (add_and_double init (swap_tuple(nqs))) - else add_and_double init nqs) nqs0 (rev (iota_ 0 255)). - -op encodePoint (q: zp * zp) : W256.t = - let q = q.`1 * (ZModpRing.exp q.`2 (p - 2)) in - W256.of_int (asint q) axiomatized by encodePointE. - -op scalarmult (k:W256.t) (u:W256.t) : W256.t = - let k = decodeScalar25519 k in - let u = decodeUCoordinate u in - let r = montgomery_ladder u k in - encodePoint (r.`1) axiomatized by scalarmultE. - -hint simplify scalarmultE. diff --git a/proof/crypto_scalarmult/curve25519/EClib.ec b/proof/crypto_scalarmult/curve25519/EClib.ec deleted file mode 100644 index 9a21692..0000000 --- a/proof/crypto_scalarmult/curve25519/EClib.ec +++ /dev/null @@ -1,194 +0,0 @@ -(* TODO: same file as proof/crypto_onetimeauth/poly1305/EClib.ec *) -(* merge these files: find a common place for these *) - -require import List Int IntDiv IntExtra CoreMap. - -from Jasmin require import JModel. - -lemma ltr_pmul2 x1 x2 y1 y2: - 0 <= x1 => 0 <= x2 => x1 < y1 => x2 < y2 => x1 * x2 < y1 * y2. -proof. -case: (x1 <= 0) => *; first smt(). -have HH1: 0 < x1 by smt(). -apply (ltz_trans (x1 * y2)). - by rewrite (StdOrder.IntOrder.ltr_pmul2l _ HH1). -have HH2: 0 < y2 by smt(). -by rewrite (StdOrder.IntOrder.ltr_pmul2r _ HH2). -qed. - -lemma divzU a b q r: - 0 <= r < `|b|%Int => a = b*q+r => q=a%/b. -proof. -move=> *. -have ?:= divz_eq a b. -have [??] := euclideU b q (a%/b) r (a%%b) _ _ _ => //. - by rewrite mulzC -H0 {1}H1. -smt(modz_ge0 ltz_mod). -qed. - -lemma divz_div a b c: - 0 0 a %/ b %/ c = a %/ (b * c). -proof. -move=> *. -apply (divzU _ _ _ (b*((a%/b)%%c) + a %% b)). - apply bound_abs; split. - smt(StdOrder.IntOrder.mulr_ge0 addz_ge0 modz_ge0). - move => *. - apply (StdOrder.IntOrder.ltr_le_trans (b * (a %/ b %% c) + b)). - rewrite StdOrder.IntOrder.ltr_add2l; smt(modz_cmp). - have ->: b * (a %/ b %% c) + b = b * (a %/ b %% c + 1) by smt(). - have -> := (StdOrder.IntOrder.ler_pmul2l b _) => //. - have := modz_cmp (a%/b) c _; smt(). -rewrite {1}(divz_eq a b) addzA; congr. -rewrite mulzA -mulzDr mulzC; congr. -by rewrite {1}(divz_eq (a%/b) c); ring. -qed. 
- -lemma modz_minus x d: - (d <= x < 2*d)%Int => x %% d = x-d. -proof. -move=> *; have {1}->: x = x-d+d by smt(). -rewrite -modzDm modzz /= modz_mod modz_small //. -by apply bound_abs; smt(). -qed. - -lemma iota_split len2 n len: - 0 <= len2 <= len => - iota_ n len = iota_ n len2 ++ iota_ (n+len2) (len-len2). -proof. -move=> H; have E: len = len2 + (len - len2) by smt(). -by rewrite {1} E iota_add // /#. -qed. - -(* obs: remove the "0<=x" requirnment of [pow_Mle] *) -lemma pow_Mle' x y: (x <= y => 2^x <= 2^y)%Int. -proof. -case: (0 <= x) => *. - by apply pow_Mle. -case: (1 <= y) => *; last first. - by rewrite !powNeg /#. -by rewrite powNeg; smt(gt0_pow2). -qed. - - -(* MOVE TO... ??? *) - -require import W64limbs. - -(* different views on datatypes *) -lemma of_int2u64 i0 i1: - pack2 [ W64.of_int i0; W64.of_int i1] = W128.of_int (i0 %% W64.modulus + W64.modulus * i1). -proof. -rewrite W2u64.of_uint_pack2 /=; congr; congr; split. - apply W64.word_modeqP; congr. - by rewrite !of_uintK mulzC modzMDr !modz_mod. -rewrite mulzC divzMDr //. -have ->:i0 %% 18446744073709551616 %/ 18446744073709551616 = 0 by smt(modz_cmp divz_eq0). -by rewrite !of_intE modz_mod. -qed. - -lemma to_uint_unpack2u64 w: - W128.to_uint w = val_digits W64.modulus (map W64.to_uint (W2u64.to_list w)). -proof. -have [? /= ?]:= W128.to_uint_cmp w. -rewrite /val_digits /=. -do 2! (rewrite bits64_div 1:// /=). -rewrite !of_uintK /=. -have P: forall x, - x = x %% 18446744073709551616 + 18446744073709551616 * (x %/ 18446744073709551616). - by move=> x; rewrite {1}(divz_eq x 18446744073709551616) /=; ring. -rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /=. -by ring; smt(). -qed. - -lemma to_uint2u64 w0 w1: - to_uint (W2u64.pack2 [w0; w1]) = to_uint w0 + W64.modulus * to_uint w1. -proof. by rewrite to_uint_unpack2u64. qed. - -lemma to_uint_unpack4u32 w: - W128.to_uint w = val_digits W32.modulus (map W32.to_uint (W4u32.to_list w)). -proof. -have [? /= ?]:= W128.to_uint_cmp w. -rewrite /val_digits /=. -do 4! (rewrite bits32_div 1:// /=). -rewrite !of_uintK /=. -have P: forall x, x = x %% 4294967296 + 4294967296 * (x %/ 4294967296). - by move=> x; rewrite {1}(divz_eq x 4294967296) /=; ring. -rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 4294967296)) divz_div 1..2:/# - {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 79228162514264337593543950336)) divz_div 1..2:/# /=. -by ring; smt(). -qed. - -lemma to_uint_unpack16u8 w: - W128.to_uint w = val_digits W8.modulus (map W8.to_uint (W16u8.to_list w)). -proof. -have [? /= ?]:= W128.to_uint_cmp w. -rewrite /val_digits /=. -do 16! (rewrite bits8_div 1:// /=). -have P: forall x, x = x %% 256 + 256 * (x %/ 256). - by move=> x; rewrite {1}(divz_eq x W8.modulus) /=; ring. -rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 256)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 65536)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 16777216)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 4294967296)) divz_div 1..2:/# /=. -rewrite {1}(P (to_uint w %/ 1099511627776)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 281474976710656)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 72057594037927936)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /=. 
-rewrite {1}(P (to_uint w %/ 4722366482869645213696)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 1208925819614629174706176)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 309485009821345068724781056)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 79228162514264337593543950336)) divz_div 1..2:/# /=. -rewrite {1}(P (to_uint w %/ 20282409603651670423947251286016)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 5192296858534827628530496329220096)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 1329227995784915872903807060280344576)) divz_div 1..2:/# /=. -ring; smt(). -qed. - -lemma to_uint_unpack8u8 w: - W64.to_uint w = val_digits W8.modulus (map W8.to_uint (W8u8.to_list w)). -proof. -have [? /= ?]:= W64.to_uint_cmp w. -rewrite /val_digits /=. -do 8! (rewrite bits8_div 1:// /=). -have P: forall x, x = x %% 256 + 256 * (x %/ 256). - by move=> x; rewrite {1}(divz_eq x 256) /=; ring. -rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 256)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 65536)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 16777216)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 4294967296)) divz_div 1..2:/# /=. -rewrite {1}(P (to_uint w %/ 1099511627776)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 281474976710656)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 72057594037927936)) divz_div 1..2:/# /= - {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /=. -ring; smt(). -qed. - -lemma pack8u8_init_mkseq f: - pack8_t (init f)%W8u8.Pack = pack8 (mkseq f 8). -proof. by rewrite W8u8.Pack.init_of_list. qed. - -lemma load8u8' mem p: - loadW64 mem p = pack8 (mkseq (fun i => mem.[p+i]) 8). -proof. -rewrite /mkseq /= /loadW64; congr. -by rewrite W8u8.Pack.init_of_list. -qed. - -(* -require import Array2 WArray16. -lemma WArray16_init64K (f:int -> W8.t): - WArray16.init64 ("_.[_]" (Array2.init (WArray16.get64 (WArray16.init f)))) = WArray16.init f. -proof. -rewrite /WArray16.init64. -apply WArray16.init_ext => * /=. -rewrite Array2.initE /=. -have ->/=: 0 <= x %/ 8 < 2 by smt(). -rewrite WArray16.get64E /= pack8bE /=; first smt(). -rewrite W8u8.Pack.initE. -have ->/=: 0 <= x %% 8 < 8 by smt(). -rewrite WArray16.initiE; smt(). -qed. - -*) diff --git a/proof/crypto_scalarmult/curve25519/Rep3Limb.ec b/proof/crypto_scalarmult/curve25519/Rep3Limb.ec deleted file mode 100644 index 5c8b405..0000000 --- a/proof/crypto_scalarmult/curve25519/Rep3Limb.ec +++ /dev/null @@ -1,738 +0,0 @@ -require import List Int IntDiv IntExtra CoreMap. -require import EClib Array2 Array3 Array4. -require import WArray16. -require import W64limbs. - -require import JModel. (*here*) - -require import Zp. -require ZModP. -import Zp. - - -(* packed lemmas for SMT calls *) -lemma ubW64_lemmas: - (forall x, ubW64 W64.max_uint x) - && (forall b x n, ubW64 (n-1) x => ubW64 n (x+(W64.of_int (b2i b)))) - && (forall n1 n2 x, (n1 <= n2)%Int => ubW64 n1 x => ubW64 n2 x) - && (forall nx ny x y, ubW64 nx x => ubW64 ny y => ubW64 (nx+ny) (x+y)) - && (forall nx ny x y, ubW64 nx x => ubW64 ny y => ubW64 (nx*ny) (x*y)) - && (forall nx ny x y, ubW64 nx x => ubW64 ny y => ubW64 (nx*ny %/ W64.modulus) (mulhi x y)). -proof. -split; move=> *; first by apply ubW64T. -split; move=> *. - by apply (ubW64D (n-1) 1) => //; apply ubW641. -split; move=> *; first by smt(ubW64W). -split; move=> *; first by apply ubW64D. -split; move=> *; first by apply ubW64M. -by apply ubW64Mhi. -qed. - - -type Rep2 = W64.t Array2.t. - -type Rep3 = W64.t Array3.t. - -(* [Rep3r] is a redundant representation for "r". 
- r.[0], r.[1] - 128bit r - r.[2] - 5 * r.[1] %/ 4 -*) -type Rep3r = W64.t Array3.t. - -op Rep3r_ok (r: Rep3r) = - ubW64 1152921504606846975 r.[0] - && ubW64 1152921504606846972 r.[1] - && 4 %| to_uint r.[1] - && to_uint r.[2] = 5 * (to_uint r.[1] %/ 4) - && ubW64 1441151880758558715 r.[2]. - -op valRep2 (x: Rep2) = val_limbs64 (to_list x). - -lemma valRep2E x: - valRep2 x = to_uint x.[0] + 2^64 * to_uint x.[1]. -proof. by rewrite /valRep2 /= /to_list /mkseq /val_digits /=. qed. -hint simplify valRep2E. - -lemma valRep2_W8L y: - valRep2 (Array2.init (WArray16.get64 y)) - = val_digits 256 (map W8.to_uint (WArray16.to_list y)). -proof. -rewrite valRep2E /= !WArray16.get64E !to_uint_unpack8u8 /= /val_digits /=. -by ring. -qed. - -lemma valRep2_to_uint16u8 y: - valRep2 (Array2.init (WArray16.get64 y)) = - to_uint (W16u8.pack16_t (W16u8.Pack.init (WArray16."_.[_]" y))). -proof. -by rewrite valRep2_W8L to_uint_unpack16u8; congr; congr; congr. -qed. - -op repres2(r : Rep2) = inzp (valRep2 r) axiomatized by repres2E. - - -op valRep3 (x: Rep3) = val_limbs64 (to_list x). - -lemma valRep3E x: - valRep3 x = to_uint x.[0] + 2^64 * to_uint x.[1] + 2^128 * to_uint x.[2]. -proof. by rewrite /valRep3 /= /to_list /mkseq /val_digits /=; ring. qed. -hint simplify valRep3E. - -op repres3(r : Rep3) = inzp (valRep3 r) axiomatized by repres3E. - -op valRep3r (x: Rep3r) = val_limbs64 [x.[0]; x.[1]]. - -lemma valRep3rE x: - valRep3r x = to_uint x.[0] + 2^64 * to_uint x.[1]. -proof. by rewrite /valRep3r /= /to_list /mkseq /val_digits //=. qed. -hint simplify valRep3rE. - -op repres3r(r : Rep3r) = inzp (valRep3r r) axiomatized by repres3rE. - -lemma eqRep3 (x y:Rep3): - x=y <=> (x.[0]=y.[0]) && (x.[1]=y.[1]) && (x.[2]=y.[2]). -proof. by move => /> *; apply (Array3.ext_eq_all x y). qed. - -abbrev congpRep3 x xval = zpcgr (valRep3 x) xval. - - -lemma equiv_class3 x r: - congpRep3 r (valRep3 x) <=> repres3 x = repres3 r. -proof. -split. - move=> h; apply/Zp.Sub.val_inj/eq_sym. - by rewrite !repres3E !inzpK. -by rewrite !repres3E /congpRep3 -!inzpK => ->. -qed. - -lemma equiv_class3M r x y: - congpRep3 r (valRep3 x * valRep3 y) <=> - repres3 r = (repres3 x * repres3 y). -proof. -split. - rewrite !repres3E -inzpM => ?. - apply Zp.Sub.val_inj. - by rewrite !inzpK. -by rewrite !repres3E -!inzpK inzpM => ->. -qed. - -lemma equiv_class3D r x y: - congpRep3 r (valRep3 x + valRep3 y) <=> - repres3 r = (repres3 x + repres3 y). -proof. -split. - rewrite !repres3E -inzpD => ?. - apply Zp.Sub.val_inj. - by rewrite !inzpK. -by rewrite !repres3E -!inzpK inzpD => ->. -qed. - -lemma mul54_redp x: - inzp (2^128 * x) = inzp (5 * (x %/ 4) + 2^128 * (x%%4)). -proof. -have := divz_eq x (2^2); rewrite mulzC => {1}->. -rewrite (mulzDr W128.modulus) -mulzA /=. -by rewrite !inzpD inzp_over. -qed. - -lemma mul54_mul1_redp x x54 l: - 4 %| x => - x54 = 5 * (x %/ 4) => - inzp (2^128 * val_digits64 (mul1_digits x l)) = inzp (val_digits64 (mul1_digits x54 l)). -proof. -move => /dvdzP [x' ->]. -rewrite mulzK // => ->. -rewrite !mul1_digitsP (mulzC _ 4) -!mulzA /= mulzA. -by rewrite inzp_over; congr; ring. -qed. - -lemma add_digits64_redp x x54 l la l1 l2: - 4 %| x => - x54 = 5 * (x %/ 4) => - l1 = add_digits la (0::0::List.map (fun h => h * x) l) => - val_digits64 l2 = val_digits64 (add_digits la (map (fun h => h * x54) l)) => - inzp (val_digits64 l1) = inzp (val_digits64 l2). -proof. -move=> ?? -> ->. -rewrite !add_digitsP !inzpD -!mul1_digitsCE; congr. -rewrite !val_digits_cons /= -!mulzA /=. -by apply (mul54_mul1_redp x x54 l). -qed. 
- -(*****************************************) - -op mulmod3_pass0 (h: Rep3) (r:Rep3r) = - [ to_uint (h.[0] * r.[0]) + to_uint (h.[1] * r.[2]); - - to_uint (h.[0] * r.[1]) + to_uint (mulhi h.[0] r.[0]) + - to_uint (mulhi h.[1] r.[2]) + to_uint (h.[1] * r.[0]) + - to_uint (h.[2] * r.[2]); - - to_uint (mulhi h.[0] r.[1]) + to_uint (mulhi h.[1] r.[0]) + - to_uint (h.[2] * r.[0]) - ]. - -lemma mulmod3_pass0_ok (h:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - zpcgr (valRep3 h * valRep3r r) (val_digits64 (mulmod3_pass0 h r)). -proof. -rewrite /valRep3 /valRep3r -mul_limbs64P eq_inzp /to_list /mkseq => /> *. -apply (add_digits64_redp (to_uint r.[1]) (to_uint r.[2]) [ to_uint h.[1]; to_uint h.[2] ] - [ to_uint h.[0] * to_uint r.[0]; - to_uint h.[0] * to_uint r.[1] + to_uint h.[1] * to_uint r.[0]; - to_uint h.[2] * to_uint r.[0] ]) => //. by rewrite /mul1_digits => /#. (*TO*) -rewrite /= /mulmod3_pass0 -!mulhiP !val_digits_cons; ring. -by rewrite (ubW64_mulhi0 6 1152921504606846975) // (ubW64_mulhi0 6 1441151880758558715). -qed. - -op mulmod3_pass1 (h: Rep3) (r:Rep3r) = - add_limbs64nc - ((h.[0]*r.[0]) :: add_limbs64nc [h.[1]*r.[0] ; h.[2]*r.[0] ] - [mulhi h.[0] r.[0]; mulhi h.[1] r.[0]]) - [h.[1]*r.[2]; h.[0]*r.[1]; mulhi h.[0] r.[1] ]. - -lemma mulmod3_pass1_ok (h:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - val_digits64 (mulmod3_pass0 h r) - = val_limbs64 (mulmod3_pass1 h r) + val_limbs64 [W64.zero; mulhi h.[1] r.[2] + h.[2]*r.[2]] - /\ ubW64 9223372036854775797 (nth_limbs64 (mulmod3_pass1 h r) 2). -proof. -rewrite /mulmod3_pass1 => /> *. -have /= ?:= ubW64M _ _ _ _ H H0. -have ?:= ubW64T h.[0]. -have ?:= ubW64T h.[1]. -have /= := ubW64Mhi _ _ _ _ H7 H0. -have -> ?: 21267647932558653946861247386169114625 %/ 18446744073709551616 - = 1152921504606846974 by smt (edivzP divz_small). -have /= := ubW64Mhi _ _ _ _ H6 H1. -have -> ?: 21267647932558653891521015165040459780 %/ 18446744073709551616 - = 1152921504606846971 by smt (edivzP divz_small). -have /= ?:= ubW64M _ _ _ _ H H4. -have /= := ubW64Mhi _ _ _ _ H7 H4. -have -> ?: 26584559915698317364401268956300574725 %/ 18446744073709551616 - = 1441151880758558714 by smt (edivzP divz_small). -have /= [->] := (add_limbs64ncP' (6917529027641081850+1152921504606846974+1) 1152921504606846971 - ((h.[0] * r.[0]) :: add_limbs64nc [h.[1] * r.[0]; h.[2] * r.[0]] - [mulhi h.[0] r.[0]; mulhi h.[1] r.[0]]) - [h.[1] * r.[2]; h.[0] * r.[1]; mulhi h.[0] r.[1]] _ _ _ _) -; rewrite /= ?size_add_limbs64nc /nth_limbs64 //=. - rewrite /add_limbs64nc /= !addcE /=. - smt(ubW64_lemmas). -move=> Hub; split; last by apply Hub. -rewrite /mulmod3_pass0 /= !val_digits_cons val_digits_nil /=. -rewrite (add_limbs64ncP 6917529027641081850 1152921504606846974) => //=. -rewrite !val_digits_cons bW64_to_uintD 1:bW64ub 1://. - smt(ubW64_lemmas). - rewrite bW64ub 1://; smt(ubW64_lemmas). -by ring. -qed. - -lemma mulmod3_pass1_spec (h:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - exists t0 t1 t2, mulmod3_pass1 h r = [t0; t1; t2] && - inzp (valRep3 h * valRep3r r) - = inzp (val_limbs64 [t0; t1; t2] - + val_limbs64 [W64.zero; mulhi h.[1] r.[2] + h.[2]*r.[2]] ) - /\ ubW64 9223372036854775797 (nth_limbs64 (mulmod3_pass1 h r) 2). -proof. -rewrite /= => *. -exists (nth_limbs64 (mulmod3_pass1 h r) 0) - (nth_limbs64 (mulmod3_pass1 h r) 1) - (nth_limbs64 (mulmod3_pass1 h r) 2); split. - by rewrite /mulmod3_pass1 /add_limbs64nc /nth_limbs64. -move => E. -have := mulmod3_pass0_ok _ _ H H0. -rewrite eq_inzp. -have /= [-> ?] := mulmod3_pass1_ok _ _ H H0. -by rewrite {1}E /=. 
-qed. - -op split_h2 (h2: W64.t) : W64.t * W64.t = - (h2 `&` W64.of_int 3, (h2 `&` W64.of_int 18446744073709551612) + (h2 `>>` W8.of_int 2)). - -lemma split_h2_spec h2: - ubW64 9223372036854775797 h2 => - exists x y, split_h2 h2 = (x,y) /\ - to_uint x = to_uint h2 %% 4 /\ - to_uint y = 5 * (to_uint h2 %/ 4) /\ - inzp (2^128 * to_uint h2) = inzp (2^128 * to_uint x) + inzp (to_uint y). -proof. -move=> Hub. -exists (splitAt 2 h2).`1 ((splitAt 2 h2).`2 + (h2 `>>` W8.of_int 2)); split. - rewrite /split_h2 /splitMask /=; congr; congr. - apply W64.word_modeqP; congr. - by rewrite of_uintK modz_small // to_uint_invw. -have := W64.splitAtP 2 h2 _; first by []. -rewrite /splitMask /=; move=> [<- E]; split; first by []. -have ? := ubW64Wand _ _ (invw ((of_int 3))%W64) Hub. -have ? := ubW64shr 2 _ _ _ Hub => //. -rewrite (ubW64D_to_uint _ _ _ _ _ H H0). - smt (edivzP divz_small). -split; first by rewrite E to_uint_shr // pow2_2; ring. -rewrite E to_uint_shr // (W64.to_uint_and_mod 2) // !pow2_2. -rewrite {1}(divz_eq (to_uint h2) 4) mulzDr !inzpD. -rewrite (mulzC _ 4) -mulzA /= inzp_over (mulzDl 4 1) inzpD /=. -by ring. -qed. - -lemma split_h2_repp h0 h1 h2: - ubW64 9223372036854775797 h2 => - inzp (val_limbs64 [h0; h1; h2]) - = inzp (val_limbs64 [h0; h1; (split_h2 h2).`1]) - + inzp (val_limbs64 [(split_h2 h2).`2]). -proof. -move=> *. -have [? ? [-> /= [?[??]]]] := split_h2_spec _ H. -by rewrite /val_digits /= !mulzDr -!mulzA /= !inzpD H2; ring. -qed. - -op mulmod3_pass2 (h: Rep3) (r:Rep3r) = - let t = mulmod3_pass1 h r in - let (h21, h22) = split_h2 (nth W64.zero t 2) in - add_limbs64nc [nth W64.zero t 0; nth W64.zero t 1; h21] - [h22; mulhi h.[1] r.[2] + h.[2]*r.[2]]. - -lemma mulmod3_pass3_ok (h:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - inzp (valRep3 h * valRep3r r) - = inzp (val_limbs64 (mulmod3_pass2 h r)) - /\ ubW64 4 (nth_limbs64 (mulmod3_pass2 h r) 2). -proof. -rewrite /mulmod3_pass2 => *. -have [t0 t1 t2 [-> [->]]] := mulmod3_pass1_spec _ _ H H0. -rewrite /nth_limbs64 /= => Ht2. -have [h21 h22 [-> /= [?[??]]]] /= := split_h2_spec _ Ht2. -have := add_limbs64ncP' 3 0 - [t0; t1; h21] [h22; mulhi h.[1] r.[2] + h.[2] * r.[2]] - _ _ _ _; rewrite /nth_limbs64 //=. - by rewrite /ubW64 H1 /=; smt(modz_cmp). -rewrite inzpD; move => [-> /= Hub]. -split; last by []. -by rewrite /val_digits /= !mulzDr -!mulzA /= !inzpD H3; ring. -qed. - -lemma mulmod3_spec (h hh:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - [hh.[0]; hh.[1]; hh.[2]] = mulmod3_pass2 h r => - (repres3 h * repres3r r) = repres3 hh - /\ ubW64 4 hh.[2]. -proof. -move=> *. -rewrite !repres3E repres3rE -inzpM. -have := (mulmod3_pass3_ok _ _ H H0). -rewrite -!H1 /nth_limbs64 /=; move => [-> ->] /=; congr. -by rewrite /val_digits /=; ring. -qed. - -(****************************************************************************************** - - 3-limb implementations - - ******************************************************************************************) - -(* 3-limb implementations are taken directly from the jasmin-extracted code *) -require Poly1305_savx2. 
- -module Mrep3 = { - proc clamp = Poly1305_savx2.M.clamp - proc load_add = Poly1305_savx2.M.load_add - proc load_last_add = Poly1305_savx2.M.load_last_add - proc mulmod = Poly1305_savx2.M.mulmod - proc freeze = Poly1305_savx2.M.freeze - proc load2 = Poly1305_savx2.M.load2 - proc store2 = Poly1305_savx2.M.store2 - proc add2 = Poly1305_savx2.M.add2 - proc setup = Poly1305_savx2.M.poly1305_ref3_setup - proc update = Poly1305_savx2.M.poly1305_ref3_update - proc finish = Poly1305_savx2.M.poly1305_ref3_last - proc poly1305 = Poly1305_savx2.M.poly1305_ref3_local -}. - -(**************************** Rep3 specs *******************************) - -require import Poly1305_Spec. - -lemma clamp_spec mem kk: - phoare [ Mrep3.clamp: - Glob.mem = mem /\ kk = k /\ good_ptr kk 16 - ==> - Rep3r_ok res /\ repres3r res = load_clamp mem kk ] = 1%r. -proof. -proc; wp; skip => ? /> *. -pose r0 := loadW64 Glob.mem{hr} (to_uint k{hr}) `&` W64.of_int 1152921487695413247. -pose r1 := loadW64 Glob.mem{hr} (to_uint (k{hr} + W64.of_int 8)) `&` W64.of_int 1152921487695413244. -have ?: ubW64 1152921504606846975 r0. - apply (ubW64W 1152921487695413247) => //. - have {1}->: 1152921487695413247 = 1152921487695413247 %% W64.modulus by smt(modz_small). - rewrite /ubW64 /r0 -W64.of_uintK andwC W64.to_uintK. - by apply W64.to_uint_ule_andw. -have ?: ubW64 1152921504606846972 r1. - apply (ubW64W 1152921487695413244) => //. - have {1}->: 1152921487695413244 = 1152921487695413244 %% W64.modulus by smt(modz_small). - rewrite /ubW64 /r1 -W64.of_uintK andwC W64.to_uintK. - by apply W64.to_uint_ule_andw. -split; progress; rewrite /r0 /r1 //=. -+ rewrite dvdzE /r1 -(W64.to_uint_and_mod 2) // -andwA. - have ->: (of_int 1152921487695413244)%W64 `&` (masklsb 2)%W64 = W64.zerow. - apply W64.word_modeqP; congr. - by rewrite to_uint0 W64.to_uint_and_mod //. - by rewrite andw0. -+ rewrite to_uintD. - have /= ?:= ubW64shr 2 1152921504606846972 _ _ H1 => //=. - rewrite to_uint_shr //= modz_small. - apply bound_abs; smt(W64.to_uint_cmp). - smt(). - smt(). -+ rewrite repres3rE /load_clamp /=; congr. - rewrite -load2u64 /= -(of_int2u64 1152921487695413247 1152921487695413244) // andb2u64E /=. - rewrite to_uintD_small of_uintK modz_small //; first smt(). - by rewrite to_uint2u64; ring. -qed. - -lemma load_add_spec mem hh inp: - phoare [ Mrep3.load_add: - Glob.mem = mem /\ hh = h /\ inp = in_0 /\ ubW64 4 hh.[2] /\ good_ptr inp 16 - ==> - ubW64 6 res.[2] /\ - repres3 res = repres3 hh + (load_block mem inp) ] = 1%r. -proof. -proc; wp; skip =>?[<-[<-[<-[? Hptr]]]] *. -have E: [h2.[0]; h2.[1]; h2.[2]] - = add_limbs64nc [hh.[0]; hh.[1]; hh.[2]] - [loadW64 Glob.mem{hr} (to_uint (inp + W64.zero)); - loadW64 Glob.mem{hr} (to_uint (inp + (of_int 8)%W64)); - W64.one]. - by rewrite /add_limbs64nc /h2 /tpl1 /h1 /tpl0 /h0 /tpl //=; clear h2 tpl1 h1 tpl0 h0 tpl. -have := add_limbs64ncP' 4 1 [hh.[0]; hh.[1]; hh.[2]] - [loadW64 Glob.mem{hr} (to_uint (inp + W64.zero)); - loadW64 Glob.mem{hr} (to_uint (inp + (of_int 8)%W64)); W64.one] _ _ _ _; rewrite /nth_limbs64 //=. -rewrite -E; move => {E} [E ?]; split; first by []. -rewrite !repres3E {1}/valRep3 valRep3E /= E /load_block /load_lblock /= inzpD; congr. - by congr; rewrite /val_digits /= !mulzDr; ring. -rewrite -(W16u8.Pack.init_ext (fun i => Glob.mem{hr}.[to_uint inp + i])) 1:/#. -congr. -rewrite to_uintD_small of_uintK modz_small //; first by move: Hptr; smt(). -rewrite !load8u8' /val_digits /mkseq /=. -rewrite !(to_uint_unpack8u8 (W8u8.pack8 _)) /=. -rewrite (to_uint_unpack16u8 (W16u8.pack16_t _)) /=. 
-by rewrite /val_digits /=; ring. -qed. - -op load_lblock' (mem : global_mem_t) (l ptr : W64.t) = - Zp.inzp (W128.to_uint (pack16_t (W16u8.Pack.init - (fun i => if i = W64.to_uint l then W8.one - else if i < W64.to_uint l - then mem.[to_uint ptr + i] - else W8.zero)))). - -lemma load_lblock_alt mem l ptr: - W64.to_uint l < 16 => - load_lblock mem l ptr = load_lblock' mem l ptr. -proof. -rewrite /load_lblock /load_lblock' /= => *; congr. -have ? : 0 <= to_uint l < 16 by smt(W64.to_uint_cmp). -rewrite -pow_mul //=; first smt(). -have : to_uint l \in iota_ 0 16 by rewrite mem_iota; smt(). -move: (to_uint l); apply/List.allP => /=. -rewrite !to_uint_unpack16u8 /val_digits /=; smt(). -qed. - -lemma load_last_add_spec_h mem hh inp inlen: - hoare [ Mrep3.load_last_add: - Glob.mem = mem /\ h = hh /\ in_0 = inp /\ len = inlen /\ ubW64 4 hh.[2] /\ - to_uint inlen < 16 /\ good_ptr in_0 (to_uint inlen) - ==> - ubW64 6 res.[2] /\ - repres3 res = repres3 hh + (load_lblock mem inlen inp) ]. -proof. -proc => /=. -seq 6: (#pre /\ repres2 s = load_lblock mem inlen inp). -wp; while (j \ule len /\ in_0 = inp /\ len = inlen /\ to_uint inlen < 16 /\ - good_ptr in_0 (to_uint len) /\ - s = Array2.init (WArray16.get64 (WArray16.of_list - (mkseq (fun i=> Glob.mem.[(to_uint in_0) + i]) (W64.to_uint j))))). -+ wp; skip; progress. - move: {H} H2; rewrite ultE uleE => *. - by rewrite to_uintD_small /=; move: (W64.to_uint_cmp len{hr}); smt(). - move: {H} H0 H1 H2; rewrite ultE => Hlen_bnd Hptr Hj_bnd. - move: (W64.to_uint_cmp in_0{hr}) (W64.to_uint_cmp j{hr}) => [Hin0 Hin1] [Hj0 X] {X}. - have Hj1: to_uint in_0{hr} + to_uint j{hr} < W64.modulus by smt(StdOrder.IntOrder.ltr_le_trans). - rewrite to_uintD_small //=. - rewrite to_uintD_small; first by rewrite of_uintK modz_small; smt(). - congr; congr; rewrite /WArray16.set8 /= /loadW8 /=. - rewrite WArray16.of_listE /= WArray16.of_listE WArray16.setE. - pose X := WArray16.init64 _. - apply WArray16.init_ext => i [Hi0 Hi1] /=. - have HX: WArray16."_.[_]" X i - = if i < to_uint j{hr} then Glob.mem{hr}.[to_uint (in_0{hr})+i] else W8.zero. - rewrite /X; clear X. - rewrite -(WArray16.init_ext (fun i => if i < to_uint j{hr} - then Glob.mem{hr}.[to_uint in_0{hr} + i] - else W8.zero)). - by move=> k [Hk0 Hk1] /=; rewrite nth_mkseq_if Hk0. - by rewrite WArray16_init64K WArray16.initE Hi0 Hi1. - rewrite /mkseq iota_add //. - rewrite iota1 /= map_cat nth_cat /= size_map size_iota max_ler //. - case: (i < to_uint j{hr}) => ?. - have ->/=: !i = to_uint j{hr}; first smt(). - rewrite nth_mkseq_if H Hi0 /=. - by rewrite HX H. - case: (i = to_uint j{hr}) => //= ?. - have ->/= : !i - to_uint j{hr} = 0 by smt(). - rewrite HX. - by have ->: !i < to_uint j{hr} by smt(). -+ wp; skip; progress. - by rewrite uleE /=; smt(W64.to_uint_cmp). - rewrite mkseq0 WArray16.of_listE /=. - rewrite -Array2.ext_eq_all /all_eq /=. - by split; - rewrite WArray16.get64E /=; - apply W64.word_modeqP; congr; - rewrite pack8u8_init_mkseq /mkseq /= (to_uint_unpack8u8 (W8u8.pack8_t _)) /=. -+ move: {H H4 H5} H0 H1. - have ->: len{hr} = j0 by move: H2 H3; rewrite uleE ultE -lezNgt; smt(W64.word_modeqP). - move: (W64.to_uint_cmp in_0{hr}) (W64.to_uint_cmp j{hr}) => [Hin0 Hin1] [Hj0 X] {X} Hj1 Hptr. - rewrite load_lblock_alt /load_lblock' //=. - rewrite repres2E valRep2_to_uint16u8; congr; congr; congr. - apply W16u8.Pack.init_ext => i [Hi0 Hi1]. - rewrite /WArray16.set8. 
- have ->: WArray16.of_list (mkseq (fun (i : int) => - Glob.mem{hr}.[to_uint in_0{hr} + i]) (to_uint j0)) - = WArray16.init (fun (i : int) => if i < to_uint j0 - then Glob.mem{hr}.[to_uint in_0{hr} + i] - else W8.zero). - rewrite WArray16.of_listE; apply WArray16.init_ext => k [Hk0 Hk1] /=. - by rewrite nth_mkseq_if Hk0. - rewrite WArray16_init64K WArray16.setE. - by do rewrite WArray16.initE /= Hi0 Hi1 /=. -wp; skip => ? [[?[?[?[?[?[??]]]]]]] *. -have E: [h2.[0]; h2.[1]; h2.[2]] - = add_limbs64nc [hh.[0]; hh.[1]; hh.[2]] - [s{hr}.[0]; s{hr}.[1]]. - rewrite /add_limbs64nc -!H0 /h2 /tpl1 /h1 /tpl0 /h0 /tpl //=; progress. - by rewrite addcE /=. -have /= := add_limbs64ncP' 4 0 [hh.[0]; hh.[1]; hh.[2]] - [s{hr}.[0]; s{hr}.[1]]. -rewrite/ nth_limbs64 /= ubW640 H3 /= -E => [[?/=?]]; split. - by apply (ubW64W 5). -rewrite -H6 !repres3E {1}/valRep3 valRep3E repres2E /= H7 /= inzpD /val_digits; congr. -by congr; simplify; ring. -qed. - -lemma load_last_add_spec_ll: islossless Mrep3.load_last_add. -proof. -proc; wp. -while (j \ule len) (to_uint (len-j)). - move=> *; wp; skip; rewrite !ultE !uleE; progress. - by rewrite uleE to_uintD_small /=; move: (W64.to_uint_cmp len{hr}) => /#. - rewrite to_uintB ?uleE //. - by rewrite to_uintD_small /=; move: (W64.to_uint_cmp len{hr}) => /#. - rewrite to_uintB ?uleE //. - by rewrite to_uintD_small /=; move: (W64.to_uint_cmp len{hr}) => /#. -wp; skip; rewrite !uleE; progress; first smt(W64.to_uint_cmp). -move: H; rewrite uleE ultE => *. -by move: H0; rewrite to_uintB ?uleE // /#. -qed. - -lemma load_last_add_spec mem hh inp inlen: - phoare [ Mrep3.load_last_add: - Glob.mem = mem /\ h = hh /\ in_0 = inp /\ len = inlen /\ ubW64 4 hh.[2] /\ - to_uint inlen < 16 /\ good_ptr in_0 (to_uint inlen) - ==> - ubW64 6 res.[2] /\ - repres3 res = repres3 hh + (load_lblock mem inlen inp) ] = 1%r. -proof. by conseq load_last_add_spec_ll (load_last_add_spec_h mem hh inp inlen). qed. - -lemma mulmod_spec_h (hh:Rep3) (rr:Rep3r): - hoare [ Mrep3.mulmod: - ubW64 6 hh.[2] /\ Rep3r_ok rr /\ - hh = h /\ rr = r - ==> - ubW64 4 res.[2] /\ - repres3 res = (repres3 hh * repres3r rr) ]. -proof. -proc. -seq 13: (ubW64 6 hh.[2] /\ Rep3r_ok rr /\ rr = r /\ - t2 = hh.[2]*rr.[2] /\ h.[0] = hh.[0] /\ h.[1] = hh.[1] /\ - t0 = hh.[0]*rr.[0] /\ - [t1; h.[2]] = add_limbs64nc [hh.[1] * rr.[0]; hh.[2] * rr.[0]] - [mulhi hh.[0] rr.[0]; mulhi hh.[1] rr.[0]]) => //. - wp; skip; progress. - by rewrite mulrC. - by rewrite muluE /= mulrC. - rewrite !muluE /= !addcE /=; congr. - by rewrite /mulhi mulzC; ring. - rewrite !muluE /= !addcE /=; congr; congr. - by rewrite /mulhi mulzC. - by congr; rewrite /carry_add mulrC /#. -seq 12: (ubW64 6 hh.[2] /\ Rep3r_ok rr /\ rr = r /\ - h.[1] = mulhi hh.[1] r.[2] + hh.[2]*r.[2] /\ - [t0; t1; h.[2]] = mulmod3_pass1 hh rr) => //. - wp; skip; progress; rewrite !muluE /=. - by congr; rewrite /mulhi mulzC H2. - by rewrite H2 !addcE /=; ring. - rewrite -H3 /= !H1 !H2; progress. - by rewrite mulrC (W64.WRingA.mulrC r{hr}.[2] hh.[1]). - rewrite {1}mulrC -!H1 !addcE //=; congr; congr. - by rewrite /mulhi mulzC. - by rewrite (W64.WRingA.mulrC r{hr}.[2] hh.[1]). -(* split_h2 *) -exists* (h.[2]); elim* => h2. -seq 6: (ubW64 6 hh.[2] /\ Rep3r_ok rr /\ rr = r /\ - h.[1] = mulhi hh.[1] r.[2] + hh.[2]*r.[2] /\ - split_h2(h2) = (h.[2], h.[0]) /\ - [t0; t1; h2] = mulmod3_pass1 hh rr) => //. - by wp; skip; progress; rewrite andwC; congr. -(* final chain *) -wp; skip => ? H *; move: H => |> *. -have E: [h1.[0]; h1.[1]; h1.[2]] = mulmod3_pass2 hh r{hr}. 
- rewrite /mulmod3_pass2 /h1 /tpl1 /h0 /tpl0 /h /tpl; clear h1 tpl1 h0 tpl0 h tpl. - rewrite /= -H3 /= H2 /= -H1 /add_limbs64nc /=; progress. - by rewrite !addcE /=; ring. - by rewrite !addcE /= carry_addC; ring. - by rewrite !addcE /= carry_addC (W64.carry_addC t0{hr}). -by have [-> ->] := mulmod3_spec _ _ _ H H0 E. -qed. - -lemma mulmod_spec_ll : islossless Mrep3.mulmod. -proof. by proc; auto. qed. - -lemma mulmod_spec (hh: W64.t Array3.t) rr : - phoare [ Mrep3.mulmod: - ubW64 6 hh.[2] /\ Rep3r_ok rr /\ - hh = h /\ rr = r - ==> - ubW64 4 res.[2] /\ - repres3 res = (repres3 hh * repres3r rr) ] = 1%r. -proof. by conseq mulmod_spec_ll (mulmod_spec_h hh rr). qed. - -lemma freeze_spec_h hh: - hoare [ Mrep3.freeze : - hh = h /\ ubW64 4 h.[2] - ==> - valRep2 res = (asint (repres3 hh)) %% 2^128 ]. -proof. -have X: ubW64 4 hh.[2] => valRep3 hh < 2*p. - rewrite /ubW64 /= pE. - have /= ??:= W64.to_uint_cmp hh.[0]. - have /= ?:= W64.to_uint_cmp hh.[1]. - smt(). -proc. -seq 11: (#pre /\ val_limbs64 [g.[0]; g.[1]; g2] = valRep3 hh + 5) => //. - wp; skip => ?[<-?]*. - split; first by []. - have ->: 5 = val_limbs64 [W64.of_int 5] by smt(). - have ->: [g3.[0]; g3.[1]; (addc hh.[2] W64.zero tpl0.`1).`2] - = add_limbs64nc [hh.[0]; hh.[1]; hh.[2]] [W64.of_int 5]. - by rewrite /add_limbs64nc /g3 /tpl0 /g1 /tpl /g0 /= !addcE. - have ->:= add_limbs64ncP 4 0 [hh.[0]; hh.[1]; hh.[2]] [W64.of_int 5] _ _ _ _; - by rewrite /nth_limbs64 /val_digits //=; ring. -seq 2: (#[/1:3]pre /\ - (p <= valRep3 hh => val_limbs64 [g.[0]; g.[1]] = (asint (repres3 hh)) %% W128.modulus) /\ - mask = if valRep3 hh < p then W64.zerow else W64.onew) => //. - wp; skip => ?[[<-?]E]*. - split; first by done. - split. - move: (X H) => ??. - rewrite repres3E inzpK zp_over_lt2p_red. - by split => // ? //. - rewrite -E modz_mod_pow2 /min /=. - by rewrite (val_limbs64_mod2128 g{hr}.[0] g{hr}.[1] g2{hr}). - have : to_uint (g2{hr}) %/ 4 = if valRep3 hh < p then 0 else 1. - rewrite -(ltz_add2r 5) pE. - have ->: 2^130 - 5 + 5 = 2^130 by done. - case: (valRep3 hh + 5 < 2 ^ 130). - rewrite -E -(val_limbs64_div2130 g{hr}.[0] g{hr}.[1]) /= => ?. - apply divz_eq0 => //; split => //. - by rewrite /val_digits /=; smt(W64.to_uint_cmp). - move: E; pose x:=val_limbs64 [g{hr}.[0]; g{hr}.[1]; g2{hr}]; move => E. - move: (X H); rewrite -(ltz_add2r 5) pE -E. - by rewrite -(val_limbs64_div2130 g{hr}.[0] g{hr}.[1]) -/x /#. - case: (valRep3 hh < p) => ??. - have ->: g2{hr} `>>` (of_int 2)%W8 = W64.zerow. - apply W64.word_modeqP; rewrite to_uint_shr //. - rewrite (W8.of_uintK 2) !pow2_2. - by rewrite (modz_small 2 W8.modulus) // H1 to_uint0. - by rewrite /W64.zerow; ring. - have ->: g2{hr} `>>` (of_int 2)%W8 = W64.one. - apply W64.word_modeqP; rewrite to_uint_shr //. - rewrite (W8.of_uintK 2) !pow2_2. - by rewrite (modz_small 2 W8.modulus) // H1 to_uint1. - by rewrite minus_one. -(* *) -wp; skip => ?[[<-?]]. -case: (valRep3 hh < p). - move => |> *. - rewrite !andw0 !xor0w repres3E inzpK (modz_small (valRep3 hh)). - apply bound_abs => /=; smt(W64.to_uint_cmp). - by rewrite /valRep3 (val_limbs64_mod2128 hh.[0] hh.[1] hh.[2]) /val_digits. -move => ?; have -> /= [? ->/=]: p <= valRep3 hh. - by rewrite lezNgt. -by rewrite -!xorwA !xorwK !xorw0 H1 !repres3E !inzpK. -qed. - -lemma freeze_spec_ll: islossless Mrep3.freeze. -proof. by proc; auto. qed. - -lemma freeze_spec hh: - phoare [ Mrep3.freeze : - hh = h /\ ubW64 4 h.[2] - ==> - valRep2 res = (asint (repres3 hh)) %% 2^128 ] = 1%r. -proof. by conseq freeze_spec_ll (freeze_spec_h hh). qed. 
- -lemma load2_spec mem kk: - phoare [ Mrep3.load2: - Glob.mem = mem /\ kk = p /\ good_ptr kk 16 - ==> - valRep2 res = W128.to_uint (loadW128 mem (to_uint kk)) ] = 1%r. -proof. -proc; wp; skip; progress. -by rewrite -load2u64 // to_uint2u64 to_uintD_small //; smt(). -qed. - -lemma store2_spec mem pp xx: - phoare [ Mrep3.store2: - Glob.mem = mem /\ pp = p /\ xx = x /\ good_ptr pp 16 - ==> - Glob.mem = storeW128 mem (to_uint pp) (W128.of_int (valRep2 xx))] = 1%r. -proof. -proc; wp; skip; progress. -rewrite to_uintD_small. - by rewrite of_uintK modz_small //; smt(StdOrder.IntOrder.ltr_le_trans). -rewrite -store2u64; congr. -apply W128.word_modeqP; congr. -rewrite to_uint2u64 of_uintK modz_small //. -apply bound_abs; split; first smt(W64.to_uint_cmp). -move=> *. -have /= ? := W64.to_uint_cmp x{hr}.[0]. -have /= ? := W64.to_uint_cmp x{hr}.[1]. -smt(). -qed. - -lemma add2_spec (hh ss: W64.t Array2.t): - phoare [ Mrep3.add2 : - hh = h /\ ss = s - ==> - valRep2 res = (valRep2 hh + valRep2 ss)%%2^128 ] = 1%r. -proof. -proc; wp; skip => ?[<- <-]*. -have E : (tpl0.`1, [tpl.`2; tpl0.`2]) = add_limbs64 [hh.[0]; hh.[1]] [ss.[0]; ss.[1]] false. - by rewrite /= !addcE /= /h0 //=. -have ->: valRep2 hh + valRep2 ss = valRep2 hh + valRep2 ss + b2i false by rewrite b2i0. -have := (add_limbs64P [hh.[0]; hh.[1]] [ss.[0]; ss.[1]] false). -rewrite -E /= => <-. -rewrite /val_digits /=. -rewrite -modzDmr modzMr /= modz_small. - apply bound_abs. - have /= ? := W64.to_uint_cmp tpl.`2. - have /= ? := W64.to_uint_cmp tpl0.`2. - smt(). -by rewrite /h0 /tpl. -qed. - diff --git a/proof/crypto_scalarmult/curve25519/Rep4Limb.ec b/proof/crypto_scalarmult/curve25519/Rep4Limb.ec deleted file mode 100644 index 20be310..0000000 --- a/proof/crypto_scalarmult/curve25519/Rep4Limb.ec +++ /dev/null @@ -1,733 +0,0 @@ -require import List Int IntDiv IntExtra CoreMap. -require import EClib Array2 Array3 Array4. -require import WArray16. -require import W64limbs. - -from Jasmin require import JModel. - -require import Zp. -require ZModP. -import Zp. - - -(* packed lemmas for SMT calls *) -lemma ubW64_lemmas: - (forall x, ubW64 W64.max_uint x) - && (forall b x n, ubW64 (n-1) x => ubW64 n (x+(W64.of_int (b2i b)))) - && (forall n1 n2 x, (n1 <= n2)%Int => ubW64 n1 x => ubW64 n2 x) - && (forall nx ny x y, ubW64 nx x => ubW64 ny y => ubW64 (nx+ny) (x+y)) - && (forall nx ny x y, ubW64 nx x => ubW64 ny y => ubW64 (nx*ny) (x*y)) - && (forall nx ny x y, ubW64 nx x => ubW64 ny y => ubW64 (nx*ny %/ W64.modulus) (mulhi x y)). -proof. -split; move=> *; first by apply ubW64T. -split; move=> *. - by apply (ubW64D (n-1) 1) => //; apply ubW641. -split; move=> *; first by smt(ubW64W). -split; move=> *; first by apply ubW64D. -split; move=> *; first by apply ubW64M. -by apply ubW64Mhi. -qed. - - -type Rep2 = W64.t Array2.t. - -type Rep3 = W64.t Array3.t. - -(* [Rep3r] is a redundant representation for "r". - r.[0], r.[1] - 128bit r - r.[2] - 5 * r.[1] %/ 4 -*) -type Rep3r = W64.t Array3.t. - -op Rep3r_ok (r: Rep3r) = - ubW64 1152921504606846975 r.[0] - && ubW64 1152921504606846972 r.[1] - && 4 %| to_uint r.[1] - && to_uint r.[2] = 5 * (to_uint r.[1] %/ 4) - && ubW64 1441151880758558715 r.[2]. - -op valRep2 (x: Rep2) = val_limbs64 (to_list x). - -lemma valRep2E x: - valRep2 x = to_uint x.[0] + 2^64 * to_uint x.[1]. -proof. by rewrite /valRep2 /= /to_list /mkseq /val_digits /=. qed. -hint simplify valRep2E. - -lemma valRep2_W8L y: - valRep2 (Array2.init (WArray16.get64 y)) - = val_digits 256 (map W8.to_uint (WArray16.to_list y)). -proof. 
-rewrite valRep2E /= !WArray16.get64E !to_uint_unpack8u8 /= /val_digits /=. -by ring. -qed. - -lemma valRep2_to_uint16u8 y: - valRep2 (Array2.init (WArray16.get64 y)) = - to_uint (W16u8.pack16_t (W16u8.Pack.init (WArray16."_.[_]" y))). -proof. -by rewrite valRep2_W8L to_uint_unpack16u8; congr; congr; congr. -qed. - -op repres2(r : Rep2) = inzp (valRep2 r) axiomatized by repres2E. - - -op valRep3 (x: Rep3) = val_limbs64 (to_list x). - -lemma valRep3E x: - valRep3 x = to_uint x.[0] + 2^64 * to_uint x.[1] + 2^128 * to_uint x.[2]. -proof. by rewrite /valRep3 /= /to_list /mkseq /val_digits /=; ring. qed. -hint simplify valRep3E. - -op repres3(r : Rep3) = inzp (valRep3 r) axiomatized by repres3E. - -op valRep3r (x: Rep3r) = val_limbs64 [x.[0]; x.[1]]. - -lemma valRep3rE x: - valRep3r x = to_uint x.[0] + 2^64 * to_uint x.[1]. -proof. by rewrite /valRep3r /= /to_list /mkseq /val_digits //=. qed. -hint simplify valRep3rE. - -op repres3r(r : Rep3r) = inzp (valRep3r r) axiomatized by repres3rE. - -lemma eqRep3 (x y:Rep3): - x=y <=> (x.[0]=y.[0]) && (x.[1]=y.[1]) && (x.[2]=y.[2]). -proof. by move => /> *; apply (Array3.ext_eq_all x y). qed. - -abbrev congpRep3 x xval = zpcgr (valRep3 x) xval. - - -lemma equiv_class3 x r: - congpRep3 r (valRep3 x) <=> repres3 x = repres3 r. -proof. -split. - move=> h; apply/Zp.Sub.val_inj/eq_sym. - by rewrite !repres3E !inzpK. -by rewrite !repres3E /congpRep3 -!inzpK => ->. -qed. - -lemma equiv_class3M r x y: - congpRep3 r (valRep3 x * valRep3 y) <=> - repres3 r = (repres3 x * repres3 y). -proof. -split. - rewrite !repres3E -inzpM => ?. - apply Zp.Sub.val_inj. - by rewrite !inzpK. -by rewrite !repres3E -!inzpK inzpM => ->. -qed. - -lemma equiv_class3D r x y: - congpRep3 r (valRep3 x + valRep3 y) <=> - repres3 r = (repres3 x + repres3 y). -proof. -split. - rewrite !repres3E -inzpD => ?. - apply Zp.Sub.val_inj. - by rewrite !inzpK. -by rewrite !repres3E -!inzpK inzpD => ->. -qed. - -lemma mul54_redp x: - inzp (2^128 * x) = inzp (5 * (x %/ 4) + 2^128 * (x%%4)). -proof. -have := divz_eq x (2^2); rewrite mulzC => {1}->. -rewrite (mulzDr W128.modulus) -mulzA /=. -by rewrite !inzpD inzp_over. -qed. - -lemma mul54_mul1_redp x x54 l: - 4 %| x => - x54 = 5 * (x %/ 4) => - inzp (2^128 * val_digits64 (mul1_digits x l)) = inzp (val_digits64 (mul1_digits x54 l)). -proof. -move => /dvdzP [x' ->]. -rewrite mulzK // => ->. -rewrite !mul1_digitsP (mulzC _ 4) -!mulzA /= mulzA. -by rewrite inzp_over; congr; ring. -qed. - -lemma add_digits64_redp x x54 l la l1 l2: - 4 %| x => - x54 = 5 * (x %/ 4) => - l1 = add_digits la (0::0::List.map (fun h => h * x) l) => - val_digits64 l2 = val_digits64 (add_digits la (map (fun h => h * x54) l)) => - inzp (val_digits64 l1) = inzp (val_digits64 l2). -proof. -move=> ?? -> ->. -rewrite !add_digitsP !inzpD -!mul1_digitsCE; congr. -rewrite !val_digits_cons /= -!mulzA /=. -by apply (mul54_mul1_redp x x54 l). -qed. - -(*****************************************) - -op mulmod3_pass0 (h: Rep3) (r:Rep3r) = - [ to_uint (h.[0] * r.[0]) + to_uint (h.[1] * r.[2]); - - to_uint (h.[0] * r.[1]) + to_uint (mulhi h.[0] r.[0]) + - to_uint (mulhi h.[1] r.[2]) + to_uint (h.[1] * r.[0]) + - to_uint (h.[2] * r.[2]); - - to_uint (mulhi h.[0] r.[1]) + to_uint (mulhi h.[1] r.[0]) + - to_uint (h.[2] * r.[0]) - ]. - -lemma mulmod3_pass0_ok (h:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - zpcgr (valRep3 h * valRep3r r) (val_digits64 (mulmod3_pass0 h r)). -proof. -rewrite /valRep3 /valRep3r -mul_limbs64P eq_inzp /to_list /mkseq => /> *. 
-apply (add_digits64_redp (to_uint r.[1]) (to_uint r.[2]) [ to_uint h.[1]; to_uint h.[2] ] - [ to_uint h.[0] * to_uint r.[0]; - to_uint h.[0] * to_uint r.[1] + to_uint h.[1] * to_uint r.[0]; - to_uint h.[2] * to_uint r.[0] ]) => //. - smt(). -rewrite /= /mulmod3_pass0 -!mulhiP !val_digits_cons; ring. -by rewrite (ubW64_mulhi0 6 1152921504606846975) // (ubW64_mulhi0 6 1441151880758558715). -qed. - -op mulmod3_pass1 (h: Rep3) (r:Rep3r) = - add_limbs64nc - ((h.[0]*r.[0]) :: add_limbs64nc [h.[1]*r.[0] ; h.[2]*r.[0] ] - [mulhi h.[0] r.[0]; mulhi h.[1] r.[0]]) - [h.[1]*r.[2]; h.[0]*r.[1]; mulhi h.[0] r.[1] ]. - -lemma mulmod3_pass1_ok (h:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - val_digits64 (mulmod3_pass0 h r) - = val_limbs64 (mulmod3_pass1 h r) + val_limbs64 [W64.zero; mulhi h.[1] r.[2] + h.[2]*r.[2]] - /\ ubW64 9223372036854775797 (nth_limbs64 (mulmod3_pass1 h r) 2). -proof. -rewrite /mulmod3_pass1 => /> *. -have /= ?:= ubW64M _ _ _ _ H H0. -have ?:= ubW64T h.[0]. -have ?:= ubW64T h.[1]. -have /= := ubW64Mhi _ _ _ _ H7 H0. -have -> ?: 21267647932558653946861247386169114625 %/ 18446744073709551616 - = 1152921504606846974 by smt (edivzP divz_small). -have /= := ubW64Mhi _ _ _ _ H6 H1. -have -> ?: 21267647932558653891521015165040459780 %/ 18446744073709551616 - = 1152921504606846971 by smt (edivzP divz_small). -have /= ?:= ubW64M _ _ _ _ H H4. -have /= := ubW64Mhi _ _ _ _ H7 H4. -have -> ?: 26584559915698317364401268956300574725 %/ 18446744073709551616 - = 1441151880758558714 by smt (edivzP divz_small). -have /= [->] := (add_limbs64ncP' (6917529027641081850+1152921504606846974+1) 1152921504606846971 - ((h.[0] * r.[0]) :: add_limbs64nc [h.[1] * r.[0]; h.[2] * r.[0]] - [mulhi h.[0] r.[0]; mulhi h.[1] r.[0]]) - [h.[1] * r.[2]; h.[0] * r.[1]; mulhi h.[0] r.[1]] _ _ _ _) -; rewrite /= ?size_add_limbs64nc /nth_limbs64 //=. - rewrite /add_limbs64nc /= !addcE /=. - smt(ubW64_lemmas). -move=> Hub; split; last by apply Hub. -rewrite /mulmod3_pass0 /= !val_digits_cons val_digits_nil /=. -rewrite (add_limbs64ncP 6917529027641081850 1152921504606846974) => //=. -rewrite !val_digits_cons bW64_to_uintD 1:bW64ub 1://. - smt(ubW64_lemmas). - rewrite bW64ub 1://; smt(ubW64_lemmas). -by ring. -qed. - -lemma mulmod3_pass1_spec (h:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - exists t0 t1 t2, mulmod3_pass1 h r = [t0; t1; t2] && - inzp (valRep3 h * valRep3r r) - = inzp (val_limbs64 [t0; t1; t2] - + val_limbs64 [W64.zero; mulhi h.[1] r.[2] + h.[2]*r.[2]] ) - /\ ubW64 9223372036854775797 (nth_limbs64 (mulmod3_pass1 h r) 2). -proof. -rewrite /= => *. -exists (nth_limbs64 (mulmod3_pass1 h r) 0) - (nth_limbs64 (mulmod3_pass1 h r) 1) - (nth_limbs64 (mulmod3_pass1 h r) 2); split. - by rewrite /mulmod3_pass1 /add_limbs64nc /nth_limbs64. -move => E. -have := mulmod3_pass0_ok _ _ H H0. -rewrite eq_inzp. -have /= [-> ?] := mulmod3_pass1_ok _ _ H H0. -by rewrite {1}E /=. -qed. - -op split_h2 (h2: W64.t) : W64.t * W64.t = - (h2 `&` W64.of_int 3, (h2 `&` W64.of_int 18446744073709551612) + (h2 `>>` W8.of_int 2)). - -lemma split_h2_spec h2: - ubW64 9223372036854775797 h2 => - exists x y, split_h2 h2 = (x,y) /\ - to_uint x = to_uint h2 %% 4 /\ - to_uint y = 5 * (to_uint h2 %/ 4) /\ - inzp (2^128 * to_uint h2) = inzp (2^128 * to_uint x) + inzp (to_uint y). -proof. -move=> Hub. -exists (splitAt 2 h2).`1 ((splitAt 2 h2).`2 + (h2 `>>` W8.of_int 2)); split. - rewrite /split_h2 /splitMask /=; congr; congr. - apply W64.word_modeqP; congr. - by rewrite of_uintK modz_small // to_uint_invw. 
-have := W64.splitAtP 2 h2 _; first by []. -rewrite /splitMask /=; move=> [<- E]; split; first by []. -have ? := ubW64Wand _ _ (invw ((of_int 3))%W64) Hub. -have ? := ubW64shr 2 _ _ _ Hub => //. -rewrite (ubW64D_to_uint _ _ _ _ _ H H0). - smt (edivzP divz_small). -split; first by rewrite E to_uint_shr // pow2_2; ring. -rewrite E to_uint_shr // (W64.to_uint_and_mod 2) // !pow2_2. -rewrite {1}(divz_eq (to_uint h2) 4) mulzDr !inzpD. -rewrite (mulzC _ 4) -mulzA /= inzp_over (mulzDl 4 1) inzpD /=. -by ring. -qed. - -lemma split_h2_repp h0 h1 h2: - ubW64 9223372036854775797 h2 => - inzp (val_limbs64 [h0; h1; h2]) - = inzp (val_limbs64 [h0; h1; (split_h2 h2).`1]) - + inzp (val_limbs64 [(split_h2 h2).`2]). -proof. -move=> *. -have [? ? [-> /= [?[??]]]] := split_h2_spec _ H. -by rewrite /val_digits /= !mulzDr -!mulzA /= !inzpD H2; ring. -qed. - -op mulmod3_pass2 (h: Rep3) (r:Rep3r) = - let t = mulmod3_pass1 h r in - let (h21, h22) = split_h2 (nth W64.zero t 2) in - add_limbs64nc [nth W64.zero t 0; nth W64.zero t 1; h21] - [h22; mulhi h.[1] r.[2] + h.[2]*r.[2]]. - -lemma mulmod3_pass3_ok (h:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - inzp (valRep3 h * valRep3r r) - = inzp (val_limbs64 (mulmod3_pass2 h r)) - /\ ubW64 4 (nth_limbs64 (mulmod3_pass2 h r) 2). -proof. -rewrite /mulmod3_pass2 => *. -have [t0 t1 t2 [-> [->]]] := mulmod3_pass1_spec _ _ H H0. -rewrite /nth_limbs64 /= => Ht2. -have [h21 h22 [-> /= [?[??]]]] /= := split_h2_spec _ Ht2. -have := add_limbs64ncP' 3 0 - [t0; t1; h21] [h22; mulhi h.[1] r.[2] + h.[2] * r.[2]] - _ _ _ _; rewrite /nth_limbs64 //=. - by rewrite /ubW64 H1 /=; smt(modz_cmp). -rewrite inzpD; move => [-> /= Hub]. -split; last by []. -by rewrite /val_digits /= !mulzDr -!mulzA /= !inzpD H3; ring. -qed. - -lemma mulmod3_spec (h hh:Rep3) (r:Rep3r): - ubW64 6 h.[2] => - Rep3r_ok r => - [hh.[0]; hh.[1]; hh.[2]] = mulmod3_pass2 h r => - (repres3 h * repres3r r) = repres3 hh - /\ ubW64 4 hh.[2]. -proof. -move=> *. -rewrite !repres3E repres3rE -inzpM. -have := (mulmod3_pass3_ok _ _ H H0). -rewrite -!H1 /nth_limbs64 /=; move => [-> ->] /=; congr. -by rewrite /val_digits /=; ring. -qed. - -(****************************************************************************************** - - 3-limb implementations - - ******************************************************************************************) - -(* 3-limb implementations are taken directly from the jasmin-extracted code *) -require Poly1305_savx2. - -module Mrep3 = { - proc clamp = Poly1305_savx2.M.clamp - proc load_add = Poly1305_savx2.M.load_add - proc load_last_add = Poly1305_savx2.M.load_last_add - proc mulmod = Poly1305_savx2.M.mulmod - proc freeze = Poly1305_savx2.M.freeze - proc load2 = Poly1305_savx2.M.load2 - proc store2 = Poly1305_savx2.M.store2 - proc add2 = Poly1305_savx2.M.add2 - proc setup = Poly1305_savx2.M.poly1305_ref3_setup - proc update = Poly1305_savx2.M.poly1305_ref3_update - proc finish = Poly1305_savx2.M.poly1305_ref3_last - proc poly1305 = Poly1305_savx2.M.poly1305_ref3_local -}. - -(**************************** Rep3 specs *******************************) - -require import Poly1305_Spec. - -lemma clamp_spec mem kk: - phoare [ Mrep3.clamp: - Glob.mem = mem /\ kk = k /\ good_ptr kk 16 - ==> - Rep3r_ok res /\ repres3r res = load_clamp mem kk ] = 1%r. -proof. -proc; wp; skip => ? /> *. -pose r0 := loadW64 Glob.mem{hr} (to_uint k{hr}) `&` W64.of_int 1152921487695413247. -pose r1 := loadW64 Glob.mem{hr} (to_uint (k{hr} + W64.of_int 8)) `&` W64.of_int 1152921487695413244. 
-have ?: ubW64 1152921504606846975 r0. - apply (ubW64W 1152921487695413247) => //. - have {1}->: 1152921487695413247 = 1152921487695413247 %% W64.modulus by smt(modz_small). - rewrite /ubW64 /r0 -W64.of_uintK andwC W64.to_uintK. - by apply W64.to_uint_ule_andw. -have ?: ubW64 1152921504606846972 r1. - apply (ubW64W 1152921487695413244) => //. - have {1}->: 1152921487695413244 = 1152921487695413244 %% W64.modulus by smt(modz_small). - rewrite /ubW64 /r1 -W64.of_uintK andwC W64.to_uintK. - by apply W64.to_uint_ule_andw. -split; progress; rewrite /r0 /r1 //=. -+ rewrite dvdzE /r1 -(W64.to_uint_and_mod 2) // -andwA. - have ->: (of_int 1152921487695413244)%W64 `&` (masklsb 2)%W64 = W64.zerow. - apply W64.word_modeqP; congr. - by rewrite to_uint0 W64.to_uint_and_mod //. - by rewrite andw0. -+ rewrite to_uintD. - by have ?:= ubW64shr 2 1152921504606846972 _ _ H1 => //; smt. - smt. -+ rewrite repres3rE /load_clamp /=; congr. - rewrite -load2u64 /= -(of_int2u64 1152921487695413247 1152921487695413244) // andb2u64E /=. - rewrite to_uintD_small of_uintK modz_small //; first smt. - by rewrite to_uint2u64; ring. -qed. - -lemma load_add_spec mem hh inp: - phoare [ Mrep3.load_add: - Glob.mem = mem /\ hh = h /\ inp = in_0 /\ ubW64 4 hh.[2] /\ good_ptr inp 16 - ==> - ubW64 6 res.[2] /\ - repres3 res = repres3 hh + (load_block mem inp) ] = 1%r. -proof. -proc; wp; skip =>?[<-[<-[<-[? Hptr]]]] *. -have E: [h2.[0]; h2.[1]; h2.[2]] - = add_limbs64nc [hh.[0]; hh.[1]; hh.[2]] - [loadW64 Glob.mem{hr} (to_uint (inp + W64.zero)); - loadW64 Glob.mem{hr} (to_uint (inp + (of_int 8)%W64)); - W64.one]. - by rewrite /add_limbs64nc /h2 /tpl1 /h1 /tpl0 /h0 /tpl //=; clear h2 tpl1 h1 tpl0 h0 tpl. -have := add_limbs64ncP' 4 1 [hh.[0]; hh.[1]; hh.[2]] - [loadW64 Glob.mem{hr} (to_uint (inp + W64.zero)); - loadW64 Glob.mem{hr} (to_uint (inp + (of_int 8)%W64)); W64.one] _ _ _ _; rewrite /nth_limbs64 //=. -rewrite -E; move => {E} [E ?]; split; first by []. -rewrite !repres3E {1}/valRep3 valRep3E /= E /load_block /load_lblock /= inzpD; congr. - by congr; rewrite /val_digits /= !mulzDr; ring. -rewrite -(W16u8.Pack.init_ext (fun i => Glob.mem{hr}.[to_uint inp + i])) 1:/#. -congr. -rewrite to_uintD_small of_uintK modz_small //; first smt. -rewrite !load8u8' /val_digits /mkseq /=. -rewrite !(to_uint_unpack8u8 (W8u8.pack8 _)) /=. -rewrite (to_uint_unpack16u8 (W16u8.pack16_t _)) /=. -rewrite /val_digits /=; ring. -qed. - -op load_lblock' (mem : global_mem_t) (l ptr : W64.t) = - Zp.inzp (W128.to_uint (pack16_t (W16u8.Pack.init - (fun i => if i = W64.to_uint l then W8.one - else if i < W64.to_uint l - then mem.[to_uint ptr + i] - else W8.zero)))). - -lemma load_lblock_alt mem l ptr: - W64.to_uint l < 16 => - load_lblock mem l ptr = load_lblock' mem l ptr. -proof. -rewrite /load_lblock /load_lblock' /= => *; congr. -have ? : 0 <= to_uint l < 16 by smt(W64.to_uint_cmp). -rewrite -pow_mul //=; first smt(). -have : to_uint l \in iota_ 0 16 by rewrite mem_iota; smt(). -move: (to_uint l); apply/List.allP => /=. -rewrite !to_uint_unpack16u8 /val_digits /=; smt(). -qed. - -lemma load_last_add_spec_h mem hh inp inlen: - hoare [ Mrep3.load_last_add: - Glob.mem = mem /\ h = hh /\ in_0 = inp /\ len = inlen /\ ubW64 4 hh.[2] /\ - to_uint inlen < 16 /\ good_ptr in_0 (to_uint inlen) - ==> - ubW64 6 res.[2] /\ - repres3 res = repres3 hh + (load_lblock mem inlen inp) ]. -proof. -proc => /=. -seq 6: (#pre /\ repres2 s = load_lblock mem inlen inp). 
-wp; while (j \ule len /\ in_0 = inp /\ len = inlen /\ to_uint inlen < 16 /\ - good_ptr in_0 (to_uint len) /\ - s = Array2.init (WArray16.get64 (WArray16.of_list - (mkseq (fun i=> Glob.mem.[(to_uint in_0) + i]) (W64.to_uint j))))). -+ wp; skip; progress. - move: {H} H2; rewrite ultE uleE => *. - by rewrite to_uintD_small /=; move: (W64.to_uint_cmp len{hr}); smt(). - move: {H} H0 H1 H2; rewrite ultE => Hlen_bnd Hptr Hj_bnd. - move: (W64.to_uint_cmp in_0{hr}) (W64.to_uint_cmp j{hr}) => [Hin0 Hin1] [Hj0 X] {X}. - have Hj1: to_uint in_0{hr} + to_uint j{hr} < W64.modulus by smt(StdOrder.IntOrder.ltr_le_trans). - rewrite to_uintD_small //=. - rewrite to_uintD_small; first by rewrite of_uintK modz_small; smt(). - congr; congr; rewrite /WArray16.set8 /= /loadW8 /=. - rewrite WArray16.of_listE /= WArray16.of_listE WArray16.setE. - pose X := WArray16.init64 _. - apply WArray16.init_ext => i [Hi0 Hi1] /=. - have HX: WArray16."_.[_]" X i - = if i < to_uint j{hr} then Glob.mem{hr}.[to_uint (in_0{hr})+i] else W8.zero. - rewrite /X; clear X. - rewrite -(WArray16.init_ext (fun i => if i < to_uint j{hr} - then Glob.mem{hr}.[to_uint in_0{hr} + i] - else W8.zero)). - by move=> k [Hk0 Hk1] /=; rewrite nth_mkseq_if Hk0. - by rewrite WArray16_init64K WArray16.initE Hi0 Hi1. - rewrite /mkseq iota_add //. - rewrite iota1 /= map_cat nth_cat /= size_map size_iota max_ler //. - case: (i < to_uint j{hr}) => ?. - have ->/=: !i = to_uint j{hr}; first smt(). - rewrite nth_mkseq_if H Hi0 /=. - by rewrite HX H. - case: (i = to_uint j{hr}) => //= ?. - have ->/= : !i - to_uint j{hr} = 0 by smt(). - rewrite HX. - by have ->: !i < to_uint j{hr} by smt(). -+ wp; skip; progress. - by rewrite uleE /=; smt(W64.to_uint_cmp). - rewrite mkseq0 WArray16.of_listE /=. - rewrite -Array2.ext_eq_all /all_eq /=. - by split; - rewrite WArray16.get64E /=; - apply W64.word_modeqP; congr; - rewrite pack8u8_init_mkseq /mkseq /= (to_uint_unpack8u8 (W8u8.pack8_t _)) /=. -+ move: {H H4 H5} H0 H1. - have ->: len{hr} = j0 by move: H2 H3; rewrite uleE ultE -lezNgt; smt(W64.word_modeqP). - move: (W64.to_uint_cmp in_0{hr}) (W64.to_uint_cmp j{hr}) => [Hin0 Hin1] [Hj0 X] {X} Hj1 Hptr. - rewrite load_lblock_alt /load_lblock' //=. - rewrite repres2E valRep2_to_uint16u8; congr; congr; congr. - apply W16u8.Pack.init_ext => i [Hi0 Hi1]. - rewrite /WArray16.set8. - have ->: WArray16.of_list (mkseq (fun (i : int) => - Glob.mem{hr}.[to_uint in_0{hr} + i]) (to_uint j0)) - = WArray16.init (fun (i : int) => if i < to_uint j0 - then Glob.mem{hr}.[to_uint in_0{hr} + i] - else W8.zero). - rewrite WArray16.of_listE; apply WArray16.init_ext => k [Hk0 Hk1] /=. - by rewrite nth_mkseq_if Hk0. - rewrite WArray16_init64K WArray16.setE. - by do rewrite WArray16.initE /= Hi0 Hi1 /=. -wp; skip => ? [[?[?[?[?[?[??]]]]]]] *. -have E: [h2.[0]; h2.[1]; h2.[2]] - = add_limbs64nc [hh.[0]; hh.[1]; hh.[2]] - [s{hr}.[0]; s{hr}.[1]]. - rewrite /add_limbs64nc -!H0 /h2 /tpl1 /h1 /tpl0 /h0 /tpl //=; progress. - by rewrite addcE /=. -have /= := add_limbs64ncP' 4 0 [hh.[0]; hh.[1]; hh.[2]] - [s{hr}.[0]; s{hr}.[1]]. -rewrite/ nth_limbs64 /= ubW640 H3 /= -E => [[?/=?]]; split. - by apply (ubW64W 5). -rewrite -H6 !repres3E {1}/valRep3 valRep3E repres2E /= H7 /= inzpD /val_digits; congr. -by congr; simplify; ring. -qed. - -lemma load_last_add_spec_ll: islossless Mrep3.load_last_add. -proof. -proc; wp. -while (j \ule len) (to_uint (len-j)). - move=> *; wp; skip; progress. - move: H H0; rewrite uleE ultE; smt. - smt. -wp; skip; progress. - rewrite uleE; smt(W64.to_uint_cmp). 
-move: H; rewrite uleE ultE; smt. -qed. - -lemma load_last_add_spec mem hh inp inlen: - phoare [ Mrep3.load_last_add: - Glob.mem = mem /\ h = hh /\ in_0 = inp /\ len = inlen /\ ubW64 4 hh.[2] /\ - to_uint inlen < 16 /\ good_ptr in_0 (to_uint inlen) - ==> - ubW64 6 res.[2] /\ - repres3 res = repres3 hh + (load_lblock mem inlen inp) ] = 1%r. -proof. by conseq load_last_add_spec_ll (load_last_add_spec_h mem hh inp inlen). qed. - -lemma mulmod_spec_h (hh:Rep3) (rr:Rep3r): - hoare [ Mrep3.mulmod: - ubW64 6 hh.[2] /\ Rep3r_ok rr /\ - hh = h /\ rr = r - ==> - ubW64 4 res.[2] /\ - repres3 res = (repres3 hh * repres3r rr) ]. -proof. -proc. -seq 13: (ubW64 6 hh.[2] /\ Rep3r_ok rr /\ rr = r /\ - t2 = hh.[2]*rr.[2] /\ h.[0] = hh.[0] /\ h.[1] = hh.[1] /\ - t0 = hh.[0]*rr.[0] /\ - [t1; h.[2]] = add_limbs64nc [hh.[1] * rr.[0]; hh.[2] * rr.[0]] - [mulhi hh.[0] rr.[0]; mulhi hh.[1] rr.[0]]) => //. - wp; skip; progress. - by rewrite mulrC. - by rewrite muluE /= mulrC. - rewrite !muluE /= !addcE /=; congr. - by rewrite /mulhi mulzC; ring. - rewrite !muluE /= !addcE /=; congr; congr. - by rewrite /mulhi mulzC. - by congr; rewrite /carry_add mulrC /#. -seq 12: (ubW64 6 hh.[2] /\ Rep3r_ok rr /\ rr = r /\ - h.[1] = mulhi hh.[1] r.[2] + hh.[2]*r.[2] /\ - [t0; t1; h.[2]] = mulmod3_pass1 hh rr) => //. - wp; skip; progress; rewrite !muluE /=. - by congr; rewrite /mulhi mulzC H2. - by rewrite H2 !addcE /=; ring. - rewrite -H3 /= !H1 !H2; progress. - by rewrite mulrC (W64.WRingA.mulrC r{hr}.[2] hh.[1]). - rewrite {1}mulrC -!H1 !addcE //=; congr; congr. - by rewrite /mulhi mulzC. - by rewrite (W64.WRingA.mulrC r{hr}.[2] hh.[1]). -(* split_h2 *) -exists* (h.[2]); elim* => h2. -seq 6: (ubW64 6 hh.[2] /\ Rep3r_ok rr /\ rr = r /\ - h.[1] = mulhi hh.[1] r.[2] + hh.[2]*r.[2] /\ - split_h2(h2) = (h.[2], h.[0]) /\ - [t0; t1; h2] = mulmod3_pass1 hh rr) => //. - by wp; skip; progress; rewrite andwC; congr. -(* final chain *) -wp; skip => ? H *; move: H => |> *. -have E: [h1.[0]; h1.[1]; h1.[2]] = mulmod3_pass2 hh r{hr}. - rewrite /mulmod3_pass2 /h1 /tpl1 /h0 /tpl0 /h /tpl; clear h1 tpl1 h0 tpl0 h tpl. - rewrite /= -H3 /= H2 /= -H1 /add_limbs64nc /=; progress. - by rewrite !addcE /=; ring. - by rewrite !addcE /= carry_addC; ring. - by rewrite !addcE /= carry_addC (W64.carry_addC t0{hr}). -by have [-> ->] := mulmod3_spec _ _ _ H H0 E. -qed. - -lemma mulmod_spec_ll : islossless Mrep3.mulmod. -proof. by proc; auto. qed. - -lemma mulmod_spec (hh: W64.t Array3.t) rr : - phoare [ Mrep3.mulmod: - ubW64 6 hh.[2] /\ Rep3r_ok rr /\ - hh = h /\ rr = r - ==> - ubW64 4 res.[2] /\ - repres3 res = (repres3 hh * repres3r rr) ] = 1%r. -proof. by conseq mulmod_spec_ll (mulmod_spec_h hh rr). qed. - -lemma freeze_spec_h hh: - hoare [ Mrep3.freeze : - hh = h /\ ubW64 4 h.[2] - ==> - valRep2 res = (asint (repres3 hh)) %% 2^128 ]. -proof. -have X: ubW64 4 hh.[2] => valRep3 hh < 2*p. - rewrite /ubW64 /= pE. - have ??:= W64.to_uint_cmp hh.[0]. - have ?:= W64.to_uint_cmp hh.[1]. - smt. -proc. -seq 11: (#pre /\ val_limbs64 [g.[0]; g.[1]; g2] = valRep3 hh + 5) => //. - wp; skip => ?[<-?]*. - split; first by []. - have ->: 5 = val_limbs64 [W64.of_int 5] by smt(). - have ->: [g3.[0]; g3.[1]; (addc hh.[2] W64.zero tpl0.`1).`2] - = add_limbs64nc [hh.[0]; hh.[1]; hh.[2]] [W64.of_int 5]. - by rewrite /add_limbs64nc /g3 /tpl0 /g1 /tpl /g0 /= !addcE. - have ->:= add_limbs64ncP 4 0 [hh.[0]; hh.[1]; hh.[2]] [W64.of_int 5] _ _ _ _; - by rewrite /nth_limbs64 /val_digits //=; ring. 
-seq 2: (#[/1:3]pre /\ - (p <= valRep3 hh => val_limbs64 [g.[0]; g.[1]] = (asint (repres3 hh)) %% W128.modulus) /\ - mask = if valRep3 hh < p then W64.zerow else W64.onew) => //. - wp; skip => ?[[<-?]E]*. - split; first by done. - split. - move: (X H) => ??. - rewrite repres3E inzpK zp_over_lt2p_red. - by split => // ? //. - rewrite -E modz_mod_pow2 /min /=. - by rewrite (val_limbs64_mod2128 g{hr}.[0] g{hr}.[1] g2{hr}). - have : to_uint (g2{hr}) %/ 4 = if valRep3 hh < p then 0 else 1. - rewrite -(ltz_add2r 5) pE. - have ->: 2^130 - 5 + 5 = 2^130 by done. - case: (valRep3 hh + 5 < 2 ^ 130). - rewrite -E -(val_limbs64_div2130 g{hr}.[0] g{hr}.[1]) /= => ?. - apply divz_eq0 => //; split => //. - by rewrite /val_digits /=; smt(W64.to_uint_cmp). - move: E; pose x:=val_limbs64 [g{hr}.[0]; g{hr}.[1]; g2{hr}]; move => E. - move: (X H); rewrite -(ltz_add2r 5) pE -E. - rewrite -(val_limbs64_div2130 g{hr}.[0] g{hr}.[1]) -/x. - smt. - case: (valRep3 hh < p) => ??. - have ->: g2{hr} `>>` (of_int 2)%W8 = W64.zerow. - apply W64.word_modeqP; rewrite to_uint_shr //. - rewrite (W8.of_uintK 2) !pow2_2. - by rewrite (modz_small 2 W8.modulus) // H1 to_uint0. - by rewrite /W64.zerow; ring. - have ->: g2{hr} `>>` (of_int 2)%W8 = W64.one. - apply W64.word_modeqP; rewrite to_uint_shr //. - rewrite (W8.of_uintK 2) !pow2_2. - by rewrite (modz_small 2 W8.modulus) // H1 to_uint1. - by rewrite minus_one. -(* *) -wp; skip => ?[[<-?]]. -case: (valRep3 hh < p). - move => |> *. - rewrite !andw0 !xor0w repres3E inzpK (modz_small (valRep3 hh)). - apply bound_abs => /=; smt. - by rewrite /valRep3 (val_limbs64_mod2128 hh.[0] hh.[1] hh.[2]) /val_digits. -move => ?; have -> /= [? ->/=]: p <= valRep3 hh by smt. -by rewrite -!xorwA !xorwK !xorw0 H1 !repres3E !inzpK. -qed. - -lemma freeze_spec_ll: islossless Mrep3.freeze. -proof. by proc; auto. qed. - -lemma freeze_spec hh: - phoare [ Mrep3.freeze : - hh = h /\ ubW64 4 h.[2] - ==> - valRep2 res = (asint (repres3 hh)) %% 2^128 ] = 1%r. -proof. by conseq freeze_spec_ll (freeze_spec_h hh). qed. - -lemma load2_spec mem kk: - phoare [ Mrep3.load2: - Glob.mem = mem /\ kk = p /\ good_ptr kk 16 - ==> - valRep2 res = W128.to_uint (loadW128 mem (to_uint kk)) ] = 1%r. -proof. -proc; wp; skip; progress. -by rewrite -load2u64 // to_uint2u64 to_uintD_small //; smt(). -qed. - -lemma store2_spec mem pp xx: - phoare [ Mrep3.store2: - Glob.mem = mem /\ pp = p /\ xx = x /\ good_ptr pp 16 - ==> - Glob.mem = storeW128 mem (to_uint pp) (W128.of_int (valRep2 xx))] = 1%r. -proof. -proc; wp; skip; progress. -rewrite to_uintD_small. - by rewrite of_uintK modz_small //; smt(StdOrder.IntOrder.ltr_le_trans). -rewrite -store2u64; congr. -apply W128.word_modeqP; congr. -rewrite to_uint2u64 of_uintK modz_small //. -apply bound_abs; split; first smt(W64.to_uint_cmp). -move=> *. -have ?:= W64.to_uint_cmp x{hr}.[0]. -have ?:= W64.to_uint_cmp x{hr}.[1]. -smt. -qed. - -lemma add2_spec (hh ss: W64.t Array2.t): - phoare [ Mrep3.add2 : - hh = h /\ ss = s - ==> - valRep2 res = (valRep2 hh + valRep2 ss)%%2^128 ] = 1%r. -proof. -proc; wp; skip => ?[<- <-]*. -have E : (tpl0.`1, [tpl.`2; tpl0.`2]) = add_limbs64 [hh.[0]; hh.[1]] [ss.[0]; ss.[1]] false. - by rewrite /= !addcE /= /h0 //=. -have ->: valRep2 hh + valRep2 ss = valRep2 hh + valRep2 ss + b2i false by rewrite b2i0. -have := (add_limbs64P [hh.[0]; hh.[1]] [ss.[0]; ss.[1]] false). -rewrite -E /= => <-. -rewrite /val_digits /=. -rewrite -modzDmr modzMr /= modz_small. - apply bound_abs. - have ? := W64.to_uint_cmp tpl.`2. - have ? := W64.to_uint_cmp tpl0.`2. - smt. 
-by rewrite /h0 /tpl. -qed. - diff --git a/proof/crypto_scalarmult/curve25519/Zp.ec b/proof/crypto_scalarmult/curve25519/Zp.ec deleted file mode 100644 index 2800943..0000000 --- a/proof/crypto_scalarmult/curve25519/Zp.ec +++ /dev/null @@ -1,71 +0,0 @@ -require import List Int IntDiv IntExtra CoreMap. -require import EClib. - -from Jasmin require import JModel. - -(* modular operations modulo P *) -op p = 2^255 - 19 axiomatized by pE. - -lemma two_pow255E: 2^255 = 57896044618658097711785492504343953926634992332820282019728792003956564819968 by done. - -(* Embedding into ring theory *) - -require ZModP. - -clone import ZModP as Zp with - op p <- p - rename "zmod" as "zp" - proof le2_p by rewrite pE. - - -(* congruence "mod p" *) - -lemma zpcgr_over a b: - zpcgr (a + 57896044618658097711785492504343953926634992332820282019728792003956564819968 * b) (a + 19 * b). -proof. -have /= ->: (2^ 255) = 19 + p by rewrite pE. -by rewrite (mulzC _ b) mulzDr addzA modzMDr mulzC. -qed. - -lemma inzp_over x: - inzp (57896044618658097711785492504343953926634992332820282019728792003956564819968 * x) = inzp (19*x). -proof. by have /= := zpcgr_over 0 x; rewrite -eq_inzp. qed. - -lemma zp_over_lt2p_red x: - p <= x < 2*p => - x %% p = (x + 19) %% 2^255. -proof. -move=> *. -rewrite modz_minus //. -have ->: x-p = x+19-2^255. - by rewrite pE; ring. -rewrite modz_minus. - by move: H; rewrite !pE /= /#. -done. -qed. - -require import W64limbs. - -op inzp_limbs base l = inzp (val_limbs base l). - -lemma val_limbs64_div2255 x0 x1 x2 x3: - val_limbs64 [x0; x1; x2; x3] %/ 2^255 = to_uint x3 %/ 9223372036854775808. -proof. -rewrite /val_digits /=. -have := (divz_eq (to_uint x3) 9223372036854775808). -rewrite addzC mulzC => {1}->. -rewrite !mulzDr -!mulzA /=. -have /= ? := W64.to_uint_cmp x0. -have /= ? := W64.to_uint_cmp x1. -have /= ? := W64.to_uint_cmp x2. -have /= ? := W64.to_uint_cmp x3. -have ? : 0 <= to_uint x3 %% 9223372036854775808 < 9223372036854775808 by smt(W64.to_uint_cmp modz_cmp). -rewrite !addzA (mulzC 57896044618658097711785492504343953926634992332820282019728792003956564819968) divzMDr //. -have ->: (to_uint x0 + 18446744073709551616 * to_uint x1 + - 340282366920938463463374607431768211456 * to_uint x2 + - 6277101735386680763835789423207666416102355444464034512896 * (to_uint x3 %% 9223372036854775808)) %/ - 57896044618658097711785492504343953926634992332820282019728792003956564819968 = 0. - by rewrite -divz_eq0 /#. -by ring. -qed. - diff --git a/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Operations.ec b/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Operations.ec new file mode 100644 index 0000000..1c64f76 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Operations.ec @@ -0,0 +1,558 @@ +require import Bool List Int IntDiv CoreMap Real Ring StdOrder Zp_25519 Zp_limbs EClib. +from Jasmin require import JModel. +require import Curve25519_Spec. +import Zp StdOrder.IntOrder Ring.IntID Array4 EClib. + +(* sets last bit to 0 *) +op last_bit_to_zero64 (x: Rep4) : Rep4 = let x = x.[3 <- x.[3].[63 <- false]] in x. + +(* returns the first 2 elements of the input triple *) +op select_double_from_triple (t : ('a * 'a) * ('a * 'a) * 'c) : ('a * 'a) * ('a * 'a) = (t.`1, t.`2). 
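For intuition: with the little-endian 4x64-bit limb layout used for `Rep4` (value = sum over i of limb[i] * 2^(64*i)), clearing bit 63 of limb 3 clears bit 255 of the encoded value, which matches the top-bit masking applied to the u-coordinate in X25519. A minimal Python sketch, with illustrative helper names that are not part of the extracted code:

```
# Illustrative sketch: clear bit 255 of a 4x64-bit little-endian limb vector,
# mirroring what last_bit_to_zero64 does on Rep4.
def last_bit_to_zero64(limbs):
    assert len(limbs) == 4
    out = list(limbs)
    out[3] &= (1 << 63) - 1          # drop bit 63 of limb 3, i.e. bit 255 overall
    return out

def val_rep4(limbs):
    # valRep4-style evaluation: sum of limbs[i] * 2^(64*i)
    return sum(l << (64 * i) for i, l in enumerate(limbs))

x = [0xffffffffffffffff] * 4
assert val_rep4(last_bit_to_zero64(x)) == val_rep4(x) - 2**255
```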
+ +(* if the third element is true then the first 2 elements are swapped *) +(* - this op returns the first 2 elements in the correct order *) +op reconstruct_triple (t : ('a * 'a) * ('a * 'a) * bool) : ('a * 'a) * ('a * 'a) = + if t.`3 + then spec_swap_tuple (select_double_from_triple t) + else select_double_from_triple t. + +(* given that t.`3 is false, we can convert from a "list" with two elems + to a ""list"" with three elements, whereby the last element is false *) +lemma eq_reconstruct_select_triple (t : (('a * 'a) * ('a * 'a) * bool)) : + t.`3 = false => + select_double_from_triple t = reconstruct_triple t. +proof. + rewrite /reconstruct_triple /select_double_from_triple. + by move => ? /#. +qed. + + +(** step1: add_and_double = add_and_double1 : reordered to match implementation **) +op op_add_and_double (qx : zp, nqs : (zp * zp) * (zp * zp)) : (zp * zp) * (zp * zp) = + let x1 = qx in + let (x2, z2) = nqs.`1 in + let (x3, z3) = nqs.`2 in + let t0 = x2 + (- z2) in + let x2 = x2 + z2 in + let t1 = x3 + (- z3) in + let z2 = x3 + z3 in + let z3 = x2 * t1 in + let z2 = z2 * t0 in + let t2 = x2 * x2 in + let t1 = t0 * t0 in + let x3 = z3 + z2 in + let z2 = z3 + (- z2) in + let x2 = t2 * t1 in + let t0 = t2 + (- t1) in + let z2 = z2 * z2 in + let z3 = t0 * (inzp 121665) in + let x3 = x3 * x3 in + let t2 = t2 + z3 in + let z3 = x1 * z2 in + let z2 = t0 * t2 + in ((x2,z2), (x3,z3)). + +(* lemma: spec_add_and_double = op_add_and_double *) +lemma eq_op_add_and_double (qx : zp, nqs : (zp * zp) * (zp * zp)) : + spec_add_and_double qx nqs = op_add_and_double qx nqs. +proof. + rewrite /spec_add_and_double /op_add_and_double. + auto => />. smt(). +qed. + +(** step 2: montgomery ladder and isolate foldl function and introduce reconstruct tuple **) +op op_montgomery_ladder1 (init : zp, k : W256.t) : (zp * zp) * (zp * zp) = + let nqs0 = ((Zp.one,Zp.zero),(init,Zp.one)) in + foldl (fun (nqs : (zp * zp) * (zp * zp)) ctr => + if spec_ith_bit k ctr + then spec_swap_tuple (op_add_and_double init (spec_swap_tuple(nqs))) + else op_add_and_double init nqs) nqs0 (rev (iota_ 0 255)). + + +op op_montgomery_ladder2_step(k : W256.t, init : zp, nqs : (zp * zp) * (zp * zp), ctr : int) : (zp * zp) * (zp * zp)= + if spec_ith_bit k ctr + then spec_swap_tuple(op_add_and_double init (spec_swap_tuple(nqs))) + else op_add_and_double init nqs. + +op op_montgomery_ladder2(init : zp, k : W256.t) : (zp * zp) * (zp * zp) = + let nqs0 = reconstruct_triple ((Zp.one,Zp.zero),(init,Zp.one),false) in + foldl (op_montgomery_ladder2_step k init) nqs0 (rev (iota_ 0 255)). + +(* lemma: spec_montgomery_ladder = op_montgomery_ladder1 *) +lemma eq_op_montgomery_ladder1 (init : zp) (k : W256.t) : + spec_montgomery_ladder init k = op_montgomery_ladder1 init k. + proof. + rewrite /spec_montgomery_ladder /op_montgomery_ladder1 /=. + apply foldl_in_eq. + move => nqs ctr inlist => /=. + case (spec_ith_bit k ctr). + by move => ?; rewrite /spec_swap_tuple /#. + by move => ?; rewrite /spec_swap_tuple /#. + qed. + +(* lemma: op_montgomery_ladder1 = op_montgomery_ladder2 *) +lemma eq_op_montgomery_ladder2 (init : zp) (k : W256.t) : + op_montgomery_ladder1 init k = op_montgomery_ladder2 init k. +proof. + rewrite /op_montgomery_ladder1 /op_montgomery_ladder2 /reconstruct_triple /select_double_from_triple. + rewrite /op_montgomery_ladder2_step. + by simplify. +qed. 
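As a sanity check on the reordered formulas, `op_add_and_double` can be replayed with plain modular arithmetic; the Python sketch below mirrors its assignments line by line over GF(2^255 - 19) with a24 = 121665 (illustrative only, not derived from the proof):

```
# Illustrative replay of op_add_and_double; variable names follow the EasyCrypt op.
P = 2**255 - 19
A24 = 121665

def add_and_double(x1, nqs):
    (x2, z2), (x3, z3) = nqs
    t0 = (x2 - z2) % P
    x2 = (x2 + z2) % P
    t1 = (x3 - z3) % P
    z2 = (x3 + z3) % P
    z3 = (x2 * t1) % P
    z2 = (z2 * t0) % P
    t2 = (x2 * x2) % P
    t1 = (t0 * t0) % P
    x3 = (z3 + z2) % P
    z2 = (z3 - z2) % P
    x2 = (t2 * t1) % P
    t0 = (t2 - t1) % P
    z2 = (z2 * z2) % P
    z3 = (t0 * A24) % P
    x3 = (x3 * x3) % P
    t2 = (t2 + z3) % P
    z3 = (x1 * z2) % P
    z2 = (t0 * t2) % P
    return (x2, z2), (x3, z3)

# one ladder step from the initial state ((1, 0), (u, 1)), here with base u = 9
print(add_and_double(9, ((1, 0), (9, 1))))
```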
+ + +(** step 3: extend the state to contain an additional bit stating if the state is swapped * *) + +op cswap (t : ('a * 'a) * ('a * 'a), b : bool) : ('a * 'a) * ('a * 'a) = + if b + then spec_swap_tuple t + else t. + +op op_montgomery_ladder3_step(k : W256.t, init : zp, nqs : (zp * zp) * (zp * zp) * bool, ctr : int) : (zp * zp) * (zp * zp) * bool = + let nqs = cswap (select_double_from_triple nqs) (nqs.`3 ^^ (spec_ith_bit k ctr)) in + let nqs = op_add_and_double init nqs in + (nqs.`1, nqs.`2, (spec_ith_bit k ctr)). + +op op_montgomery_ladder3(init : zp, k : W256.t) : (zp * zp) * (zp * zp) * bool = + let nqs0 = ((Zp.one, Zp.zero),(init, Zp.one),false) in + foldl (op_montgomery_ladder3_step k init) nqs0 (rev (iota_ 0 255)). + +(** lemma: op_montgomery_ladder2 = op_reconstruct_triple(op_montgomery_ladder3 ) **) +lemma eq_op_montgomery_ladder3_reconstruct (init : zp) (k: W256.t) : + op_montgomery_ladder2 init k = reconstruct_triple (op_montgomery_ladder3 init k). +proof. + rewrite /op_montgomery_ladder2 /op_montgomery_ladder3 //=. + apply foldl_in_eq_r. + move => ? ? ?. + rewrite /reconstruct_triple /op_montgomery_ladder2_step /op_montgomery_ladder3_step. + rewrite /spec_swap_tuple /select_double_from_triple /cswap. + simplify => /#. +qed. + +(* lemma: if the first bit of k is 0, which will be because of spec_decode_scalar_25519, *) +(* then op_montgomery_ladder2 = select_double_from_triple op_montgomery_ladder3 *) +lemma eq_op_montgomery_ladder3 (init : zp, k: W256.t) : + k.[0] = false => + op_montgomery_ladder2 init k = select_double_from_triple (op_montgomery_ladder3 init k). +proof. + move => hkf. + have tbf : (op_montgomery_ladder3 init k).`3 = false. (*third bit false *) + rewrite /op_montgomery_ladder3 /op_montgomery_ladder3_step /select_double_from_triple /cswap /spec_ith_bit /spec_swap_tuple => />. + rewrite foldl_rev; auto => />; rewrite -iotaredE => />. + have seqr : select_double_from_triple (op_montgomery_ladder3 init k) = reconstruct_triple (op_montgomery_ladder3 init k). + by apply /eq_reconstruct_select_triple /tbf. + rewrite seqr. + by apply eq_op_montgomery_ladder3_reconstruct. +qed. + +(** step 4: spec_montgomery_ladder = select_double_from_triple op_montgomery_ladder3 * *) +lemma eq_op_montgomery_ladder123 (init : zp, k: W256.t) : + k.[0] = false => spec_montgomery_ladder init k = select_double_from_triple (op_montgomery_ladder3 init k). +proof. + move => H. + rewrite eq_op_montgomery_ladder1 eq_op_montgomery_ladder2 eq_op_montgomery_ladder3. apply H. trivial. +qed. + +(** step 5: introduce reordering in encode point **) +(* - we split invert in 3 parts to make the proof faster *) +op op_invert_p_p1 (z1 : zp) : (zp * zp) = + let z2 = ZModpRing.exp z1 2 in + let z8 = ZModpRing.exp z2 (2*2) in + let z9 = z1 * z8 in + let z11 = z2 * z9 in + let z22 = ZModpRing.exp z11 2 in + let z_5_0 = z9 * z22 in + (z_5_0, z11). + +op op_invert_p_p2(z_5_0 : zp) : zp = + let z_10_5 = ZModpRing.exp z_5_0 (2^5) in + let z_10_0 = z_10_5 * z_5_0 in + let z_20_10 = ZModpRing.exp z_10_0 (2^10) in + let z_20_0 = z_20_10 * z_10_0 in + let z_40_20 = ZModpRing.exp z_20_0 (2^20) in + let z_40_0 = z_40_20 * z_20_0 in + let z_50_10 = ZModpRing.exp z_40_0 (2^10) in + let z_50_0 = z_50_10 * z_10_0 in + z_50_0. 
+ +op op_invert_p_p3(z_50_0 z11 : zp) : zp = + let z_100_50 = ZModpRing.exp z_50_0 (2^50) in + let z_100_0 = z_100_50 * z_50_0 in + let z_200_100 = ZModpRing.exp z_100_0 (2^100) in + let z_200_0 = z_200_100 * z_100_0 in + let z_250_50 = ZModpRing.exp z_200_0 (2^50) in + let z_250_0 = z_250_50 * z_50_0 in + let z_255_5 = ZModpRing.exp z_250_0 (2^5) in + let z_255_21 = z_255_5 * z11 in + z_255_21. + +op op_invert_p(z1 : zp) : zp = + let (z_5_0, z11) = op_invert_p_p1 z1 in + let z_50_0 = op_invert_p_p2 z_5_0 in + let z_255_21 = op_invert_p_p3 z_50_0 z11 in + z_255_21 axiomatized by op_invert_pE. + +(* lemma: invert is the same as z1^(p-2) from fermat's little theorem *) + +lemma eq_op_invert_p (z1: zp) : + op_invert_p z1 = ZModpRing.exp z1 (p-2). +proof. + rewrite op_invert_pE. + rewrite /op_invert_p_p1 /= expE //= /op_invert_p_p3 /op_invert_p_p2 => />. + rewrite -!ZModpRing.exprS => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite -!ZModpRing.exprM => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite -!ZModpRing.exprM => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite -!ZModpRing.exprM => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite -!ZModpRing.exprM => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite -!ZModpRing.exprM => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite -!ZModpRing.exprM => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite -!ZModpRing.exprM => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite -!ZModpRing.exprM => />. + rewrite -ZModpRing.exprD_nneg => />. + rewrite -ZModpRing.exprM => />. + rewrite -!ZModpRing.exprD_nneg => />. + rewrite pE => />. +qed. + +(* now we define invert as one op and prove it equiv to exp z1 (p-2) *) +op op_invert0(z1 : zp) : zp = + let z2 = ZModpRing.exp z1 2 in + let z8 = ZModpRing.exp z2 (2*2) in + let z9 = z1 * z8 in + let z11 = z2 * z9 in + let z22 = ZModpRing.exp z11 2 in + let z_5_0 = z9 * z22 in + let z_10_5 = ZModpRing.exp z_5_0 (2^5) in + let z_10_0 = z_10_5 * z_5_0 in + let z_20_10 = ZModpRing.exp z_10_0 (2^10) in + let z_20_0 = z_20_10 * z_10_0 in + let z_40_20 = ZModpRing.exp z_20_0 (2^20) in + let z_40_0 = z_40_20 * z_20_0 in + let z_50_10 = ZModpRing.exp z_40_0 (2^10) in + let z_50_0 = z_50_10 * z_10_0 in + let z_100_50 = ZModpRing.exp z_50_0 (2^50) in + let z_100_0 = z_100_50 * z_50_0 in + let z_200_100 = ZModpRing.exp z_100_0 (2^100) in + let z_200_0 = z_200_100 * z_100_0 in + let z_250_50 = ZModpRing.exp z_200_0 (2^50) in + let z_250_0 = z_250_50 * z_50_0 in + let z_255_5 = ZModpRing.exp z_250_0 (2^5) in + let z_255_21 = z_255_5 * z11 in + z_255_21 axiomatized by op_invert0E. + +(* lemma: op_invert0 = op_invert_p *) +lemma eq_op_invert0 (z1 : zp) : + op_invert0 z1 = op_invert_p z1. +proof. + rewrite op_invert0E op_invert_pE /op_invert_p_p1 /op_invert_p_p2 /op_invert_p_p3 //. +qed. + +(* lemma: hence, op_invert0 is equal to z1^(p-1) *) +lemma eq_op_invert0p (z1 : zp) : + op_invert0 z1 = ZModpRing.exp z1 (p-2). +proof. + rewrite eq_op_invert0 eq_op_invert_p //. +qed. + +(* we now need to define various iterated sqaure operations and lemmas *) +op op_sqr(z : zp) : zp = + ZModpRing.exp z 2. + +op op_it_sqr(e : int, z : zp) : zp = + ZModpRing.exp z (2^e). + +op op_it_sqr1(e : int, z : zp) : zp = + foldl (fun (z' : zp) _ => ZModpRing.exp z' 2) z (iota_ 0 e). + +op op_it_sqr_x2(e : int, z : zp) : zp = + ZModpRing.exp z (4^e). + +op op_it_sqr1_x2(e : int, z : zp) : zp = + foldl (fun (z' : zp) _ => ZModpRing.exp z' 4) z (iota_ 0 e). 
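A convenient way to see what the three-part chain computes is to track exponents instead of field elements: multiplying elements adds exponents and `ZModpRing.exp _ (2^k)` multiplies them by 2^k, so `op_invert_p_p1/2/3` build the exponent 2^255 - 21 = p - 2, and by Fermat's little theorem the result is the inverse of `z1`. The Python script below is purely a sanity check of ours that replays the chain on exponents.

```
# Replay the inversion chain on exponents.
p = 2**255 - 19

# op_invert_p_p1
z2 = 2
z8 = z2 * 4                 # exp z2 (2*2)
z9 = 1 + z8
z11 = z2 + z9
z22 = z11 * 2
z_5_0 = z9 + z22            # = 2^5 - 1

# op_invert_p_p2
z_10_0 = z_5_0 * 2**5 + z_5_0        # = 2^10 - 1
z_20_0 = z_10_0 * 2**10 + z_10_0     # = 2^20 - 1
z_40_0 = z_20_0 * 2**20 + z_20_0     # = 2^40 - 1
z_50_0 = z_40_0 * 2**10 + z_10_0     # = 2^50 - 1

# op_invert_p_p3
z_100_0 = z_50_0 * 2**50 + z_50_0     # = 2^100 - 1
z_200_0 = z_100_0 * 2**100 + z_100_0  # = 2^200 - 1
z_250_0 = z_200_0 * 2**50 + z_50_0    # = 2^250 - 1
z_255_21 = z_250_0 * 2**5 + z11       # = 2^255 - 21

assert z_5_0 == 2**5 - 1 and z_250_0 == 2**250 - 1
assert z_255_21 == p - 2

z = 123456789                          # any non-zero element works
assert pow(z, z_255_21, p) == pow(z, p - 2, p)
assert z * pow(z, p - 2, p) % p == 1   # Fermat: z^(p-2) is the inverse of z
```

This is the classic 254-squaring, 11-multiplication inversion ladder for 2^255 - 19; the split into three parts only serves to keep the EasyCrypt proof terms small.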
+ +(* lemma: op_it_sqr1 = op_itr_sqr *) +lemma eq_op_it_sqr1 (e : int, z : zp) : + 0 <= e => + op_it_sqr1 e z = op_it_sqr e z. +proof. + move : e. + rewrite /op_it_sqr1 /op_it_sqr. elim. simplify. rewrite -iotaredE ZModpRing.expr1 //. + move => i ige0 hin. + rewrite iotaSr // -cats1 foldl_cat hin /= expE /=. smt(gt0_pow2). + congr. clear hin. + rewrite exprS // mulzC //. +qed. + +(* lemma: op_it_sqr1_x2 = op_itr_sqr_x2 *) +lemma eq_op_it_sqr1_x2 (e : int, z : zp) : + 0 <= e => + op_it_sqr1_x2 e z = op_it_sqr_x2 e z. +proof. + move : e. + rewrite /op_it_sqr1_x2 /op_it_sqr_x2. elim. simplify. rewrite -iotaredE ZModpRing.expr1 //. + move => i ige0 hin. + rewrite iotaSr // -cats1 foldl_cat hin /= expE /=. + have ->: 4^i = 2^2^i. rewrite expr2 //. + rewrite -exprM. smt(gt0_pow2). + congr. clear hin. + rewrite exprS // mulzC //. +qed. + +(* lemma: op_it_sqr = op_itr_sqr_x2 *) +lemma eq_op_it_sqr_x2_4 (e : int, z : zp) : + 0 <= e => e %% 2 = 0 => + op_it_sqr e z = op_it_sqr_x2 (e%/2) z. +proof. + move => e1 e2. + rewrite /op_it_sqr /op_it_sqr_x2. + congr. + have ->: 4 ^ (e %/ 2) = 2 ^ 2 ^ (e %/ 2). rewrite expr2 => />. + rewrite -exprM => />. smt(). +qed. + +(* lemma: op_it_sqr1 = op_itr1_sqr_x2 *) +lemma eq_op_it_sqr1_x2_4 (e : int, z : zp) : + 0 <= e => e %% 2 = 0 => + op_it_sqr1 e z = op_it_sqr1_x2 (e%/2) z. +proof. + move => H H0. + rewrite eq_op_it_sqr1 // eq_op_it_sqr1_x2. smt(). + apply eq_op_it_sqr_x2_4; trivial. +qed. + +(* lemma: op_it_sqr1 = op_itr_sqr_x2, other perspective *) +lemma eq_op_it_sqr1_x2_4_twice (e : int, z : zp) : + 0 <= e => e %% 2 = 0 => + op_it_sqr1 (2*e) z = op_it_sqr1_x2 e z. +proof. + move => H H0. + rewrite eq_op_it_sqr1 //. smt(). rewrite eq_op_it_sqr1_x2. smt(). + rewrite /op_it_sqr /op_it_sqr_x2 => />. + have ->: 4 ^ e = 2 ^ 2 ^ e. rewrite expr2 => />. + rewrite -exprM => />. +qed. + +(* lemma: op_it_sqr1 with arg 0 *) +lemma op_it_sqr1_0 (e : int) (z : zp) : + 0 = e => op_it_sqr1 e z = z. +proof. + move => ?. + rewrite eq_op_it_sqr1. smt(). + rewrite /op_it_sqr. subst. simplify. + rewrite ZModpRing.expr1 //. +qed. + +(* lemma: op_it_sqr1_x2 with arg 0 *) +lemma op_it_sqr1_x2_0 (e : int) (z : zp) : + 0 = e => op_it_sqr1_x2 e z = z. +proof. + move => ?. + rewrite eq_op_it_sqr1_x2. smt(). + rewrite /op_it_sqr_x2. subst. simplify. + rewrite ZModpRing.expr1 //. +qed. + +(* lemma: "decomposing" op_it_sqr1 with e-2 *) +lemma op_it_sqr1_m2_exp4 (e : int) (z : zp) : + 0 <= e - 2 => op_it_sqr1 e z = op_it_sqr1 (e-2) (ZModpRing.exp (ZModpRing.exp z 2) 2). +proof. + rewrite expE // /= => ?. + rewrite !eq_op_it_sqr1. smt(). trivial. + rewrite /op_it_sqr (*expE *). + (* directly rewriting expE takes too long *) + have ee : ZModpRing.exp (ZModpRing.exp z 4) (2 ^ (e - 2)) = ZModpRing.exp z (2^2 * 2 ^ (e - 2)). smt(expE). + rewrite ee. congr. + rewrite -exprD_nneg //. +qed. + +(* lemma: "decomposing" op_it_sqr1 witg arg e-1 *) +lemma op_it_sqr1_m2_exp1 (e : int) (z : zp) : + 0 <= e - 1 => op_it_sqr1 e z = op_it_sqr1 (e-1) (ZModpRing.exp z 2). +proof. + have ->: ZModpRing.exp z 2 = ZModpRing.exp (ZModpRing.exp z 1) 2. rewrite expE. smt(). trivial. + rewrite expE // /= => ?. + rewrite !eq_op_it_sqr1. smt(). smt(). + rewrite /op_it_sqr (*expE *). rewrite expE. split. smt(). + rewrite expr_ge0 //. congr. have ->: 2 * 2^(e-1) = 2^1 * 2^(e-1). rewrite expr1 //. + rewrite -exprD_nneg //. +qed. 
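The equalities above all reduce to simple exponent arithmetic: squaring `e` times raises to 2^e, squaring twice per iteration raises to 4^e = 2^(2e), and peeling off one or two initial squarings is what the two "decomposing" lemmas state. A short Python check, illustration only, with our own helper names:

```
# e-fold squaring is exponentiation by 2^e; the _x2 variant uses 4^e = 2^(2e).
p = 2**255 - 19

def it_sqr(e, z):        # mirrors op_it_sqr1 (fold of squarings)
    for _ in range(e):
        z = z * z % p
    return z

def it_sqr_x2(e, z):     # mirrors op_it_sqr1_x2 (fold of fourth powers)
    for _ in range(e):
        z = pow(z, 4, p)
    return z

z = 31415926535
for e in range(12):
    assert it_sqr(e, z) == pow(z, 2**e, p)
    assert it_sqr_x2(e, z) == pow(z, 4**e, p) == it_sqr(2 * e, z)
    if 1 <= e:
        assert it_sqr(e, z) == it_sqr(e - 1, pow(z, 2, p))   # cf. op_it_sqr1_m2_exp1
    if 2 <= e:
        assert it_sqr(e, z) == it_sqr(e - 2, pow(z, 4, p))   # cf. op_it_sqr1_m2_exp4
```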
+ +(* lemma: "decomposing" op_it_sqr1_x2 with arg e-2 *) +lemma op_it_sqr1_m2_exp4_x2 (e : int) (z : zp) : + 0 <= e - 2 => op_it_sqr1_x2 e z = op_it_sqr1_x2 (e-2) (ZModpRing.exp (ZModpRing.exp z 4) 4). +proof. + have E: 4^(e-2) = 2^(2*(e-2)) by rewrite exprM => />. + rewrite expE // /= => H. + rewrite !eq_op_it_sqr1_x2. smt(). trivial. + rewrite /op_it_sqr_x2. + rewrite expE. rewrite E. smt(gt0_pow2). + congr => />. have ->: 16 = 4^2 by rewrite expr2. + rewrite -exprD_nneg //. +qed. + +(* lemma: "decomposing" op_it_sqr1_x2 with arg e-1 *) +lemma op_it_sqr1_m2_exp1_x2 (e : int) (z : zp) : + 0 <= e - 1 => op_it_sqr1_x2 e z = op_it_sqr1_x2 (e-1) (ZModpRing.exp z 4). +proof. + have ->: ZModpRing.exp z 4 = ZModpRing.exp (ZModpRing.exp z 1) 4. rewrite expE. smt(). trivial. + rewrite expE // /= => ?. + rewrite !eq_op_it_sqr1_x2. smt(). smt(). + rewrite /op_it_sqr_x2 (*expE *). rewrite expE. split. smt(). + rewrite expr_ge0 //. congr. have ->: 4 * 4^(e-1) = 4^1 * 4^(e-1). rewrite expr1 //. + rewrite -exprD_nneg //. +qed. + +op op_invert1(z1 : zp) : zp = + let t0 = op_sqr z1 in (* z1^2 *) + let t1 = op_sqr t0 in (* z1^4 *) + let t1 = op_sqr t1 in (* z1^8 *) + let t1 = z1 * t1 in (* z1^9 *) + let t0 = t0 * t1 in (* z1^11 *) + let t2 = op_sqr t0 in (* z1^22 *) + let t1 = t1 * t2 in (* z_5_0 *) + let t2 = op_sqr t1 in (* z_10_5 *) + let t2 = op_it_sqr 4 t2 in + let t1 = t1 * t2 in (* z_10_0 *) + let t2 = op_it_sqr 10 t1 in (* z_20_10 *) + let t2 = t1 * t2 in (* z_20_0 *) + let t3 = op_it_sqr 20 t2 in (* z_40_20 *) + let t2 = t2 * t3 in (* z_40_0 *) + let t2 = op_it_sqr 10 t2 in (* z_50_10 *) + let t1 = t1 * t2 in (* z_50_0 *) + let t2 = op_it_sqr 50 t1 in (* z_100_50 *) + let t2 = t1 * t2 in (* z_100_0 *) + let t3 = op_it_sqr 100 t2 in (* z_200_100 *) + let t2 = t2 * t3 in (* z_200_0 *) + let t2 = op_it_sqr 50 t2 in (* z_250_50 *) + let t1 = t1 * t2 in (* z_250_0 *) + let t1 = op_it_sqr 4 t1 in (* z_255_5 *) + let t1 = op_sqr t1 in + let t1 = t0 * t1 in + t1 axiomatized by op_invert1E. + +(* lemma: op_invert1 = op_invert0 *) +lemma eq_op_invert1 (z1: zp) : + op_invert1 z1 = op_invert0 z1. +proof. + rewrite op_invert1E op_invert0E /= /op_it_sqr /op_sqr /=. + smt(exprS exprD expE). +qed. + +(** split invert2 in 3 parts : jump from it_sqr to it_sqr1 **) + +op op_invert2(z1 : zp) : zp = + let t0 = op_sqr z1 in (* z1^2 *) + let t1 = op_sqr t0 in (* z1^4 *) + let t1 = op_sqr t1 in (* z1^8 *) + let t1 = z1 * t1 in (* z1^9 *) + let t0 = t0 * t1 in (* z1^11 *) + let t2 = op_sqr t0 in (* z1^22 *) + let t1 = t1 * t2 in (* z_5_0 *) + let t2 = op_sqr t1 in (* z_10_5 *) + let t2 = op_it_sqr1 4 t2 in + let t1 = t1 * t2 in (* z_10_0 *) + let t2 = op_it_sqr1 10 t1 in (* z_20_10 *) + let t2 = t1 * t2 in (* z_20_0 *) + let t3 = op_it_sqr1 20 t2 in (* z_40_20 *) + let t2 = t2 * t3 in (* z_40_0 *) + let t2 = op_it_sqr1 10 t2 in (* z_50_10 *) + let t1 = t1 * t2 in (* z_50_0 *) + let t2 = op_it_sqr1 50 t1 in (* z_100_50 *) + let t2 = t1 * t2 in (* z_100_0 *) + let t3 = op_it_sqr1 100 t2 in (* z_200_100 *) + let t2 = t2 * t3 in (* z_200_0 *) + let t2 = op_it_sqr1 50 t2 in (* z_250_50 *) + let t1 = t1 * t2 in (* z_250_0 *) + let t1 = op_it_sqr1 4 t1 in (* z_255_5 *) + let t1 = op_sqr t1 in + let t1 = t0 * t1 in + t1 axiomatized by op_invert2E. + + +(* lemma: op_invert2 = op_invert 1 *) +lemma eq_op_invert2 (z1: zp) : + op_invert2 z1 = op_invert1 z1. +proof. + rewrite op_invert2E op_invert1E. smt(eq_op_it_sqr1). +qed. 
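`op_invert2` is the version that the imperative `invert` procedure mirrors: the same addition chain as before, but scheduled through the temporaries `t0..t3` with iterated-squaring counts 4, 10, 20, 10, 50, 100, 50, 4. The Python transcription below is a sketch of ours (helper names are not from the development) that reproduces z^(p-2):

```
# Direct transcription of op_invert2's register schedule, checked against z^(p-2).
p = 2**255 - 19

def sqr(x):     return x * x % p
def mul(x, y):  return x * y % p
def it_sqr(e, x):
    for _ in range(e):
        x = sqr(x)
    return x

def invert2(z1):
    t0 = sqr(z1)                # z1^2
    t1 = sqr(t0)                # z1^4
    t1 = sqr(t1)                # z1^8
    t1 = mul(z1, t1)            # z1^9
    t0 = mul(t0, t1)            # z1^11
    t2 = sqr(t0)                # z1^22
    t1 = mul(t1, t2)            # z_5_0
    t2 = it_sqr(4, sqr(t1))     # z_10_5
    t1 = mul(t1, t2)            # z_10_0
    t2 = it_sqr(10, t1)         # z_20_10
    t2 = mul(t1, t2)            # z_20_0
    t3 = it_sqr(20, t2)         # z_40_20
    t2 = mul(t2, t3)            # z_40_0
    t2 = it_sqr(10, t2)         # z_50_10
    t1 = mul(t1, t2)            # z_50_0
    t2 = it_sqr(50, t1)         # z_100_50
    t2 = mul(t1, t2)            # z_100_0
    t3 = it_sqr(100, t2)        # z_200_100
    t2 = mul(t2, t3)            # z_200_0
    t2 = it_sqr(50, t2)         # z_250_50
    t1 = mul(t1, t2)            # z_250_0
    t1 = sqr(it_sqr(4, t1))     # z_255_5
    return mul(t0, t1)          # z1^(p-2)

z = 0xdeadbeef
assert invert2(z) == pow(z, p - 2, p)
assert z * invert2(z) % p == 1
```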
+ +(* lemma: op_invert2 returns the inverse of its arg *) +lemma eq_op_invert210p (z1: zp) : + op_invert2 z1 = ZModpRing.exp z1 (p-2). +proof. + rewrite eq_op_invert2 eq_op_invert1 eq_op_invert0p //. +qed. + +(* now we define an alternative version of spec_encode_point *) +op op_encode_point (q: zp * zp) : W256.t = + let qi = op_invert2 q.`2 in + let q = q.`1 * qi in + W256.of_int (asint q) axiomatized by op_encode_pointE. + +(* lemma: op_encode_point = spec_encode_point *) +lemma eq_op_encode_point (q: zp * zp) : + op_encode_point q = spec_encode_point q. +proof. + rewrite op_encode_pointE spec_encode_pointE. simplify. congr. + rewrite eq_op_invert210p //. +qed. + +(* step 6: spec_scalarmult with updated op_montgomery_ladder3 *) + +op op_scalarmult_internal(u: zp, k:W256.t) : W256.t = + let r = op_montgomery_ladder3 u k in + op_encode_point (r.`1) axiomatized by op_scalarmult_internalE. + +(* lemma: spec_scalarmult = op_scalarmult *) +lemma eq_op_scalarmult_internal (u:zp) (k:W256.t) : +k.[0] = false => op_scalarmult_internal u k = spec_scalarmult_internal u k. +proof. + rewrite /op_scalarmult_internal. simplify. + rewrite eq_op_encode_point /spec_scalarmult_internal. simplify. move => H. + congr. + have ml123 : spec_montgomery_ladder u k = select_double_from_triple (op_montgomery_ladder3 u k). + apply eq_op_montgomery_ladder123. apply H. + rewrite ml123 /select_double_from_triple //. +qed. + +hint simplify op_scalarmult_internalE. + +op op_scalarmult (k:W256.t) (u:W256.t) : W256.t = + let k = spec_decode_scalar_25519 k in + let u = spec_decode_u_coordinate u in + op_scalarmult_internal (inzp (to_uint u)) k axiomatized by op_scalarmultE. + +hint simplify op_scalarmultE. + +op op_scalarmult_base(k:W256.t) : W256.t = + op_scalarmult (k) (W256.of_int(9%Int)). + +(* lemma: spec_scalarmult = op_scalarmult *) +lemma eq_op_scalarmult (k:W256.t) (u:W256.t) : + op_scalarmult k u = spec_scalarmult k u. +proof. + simplify. + pose du := spec_decode_u_coordinate u. + pose dk := spec_decode_scalar_25519 k. + rewrite eq_op_encode_point /spec_scalarmult_internal. simplify. + congr. + have kb0f : (dk).[0] = false. (* k bit 0 false *) + rewrite /dk /spec_decode_scalar_25519 //. + have ml123 : spec_montgomery_ladder (inzp (to_uint du)) dk = select_double_from_triple (op_montgomery_ladder3 (inzp (to_uint du)) dk). + move : kb0f. apply eq_op_montgomery_ladder123. + rewrite ml123 /select_double_from_triple //. +qed. + +(* lemma: spec_ op_scalarmult_base = spec_scalarmult_base *) +lemma eq_op_scalarmult_base (k:W256.t) : + op_scalarmult_base k = spec_scalarmult_base k. +proof. + apply eq_op_scalarmult. +qed. diff --git a/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_PHoare.ec b/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_PHoare.ec new file mode 100644 index 0000000..9b1bebf --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_PHoare.ec @@ -0,0 +1,206 @@ +require import Bool List Int IntDiv CoreMap Real Ring Zp_25519. +from Jasmin require import JModel. +require import Curve25519_Spec. +require import Curve25519_Operations. +require import Curve25519_Procedures. + +import Zp Ring.IntID Curve25519_Spec Curve25519_Procedures Curve25519_Operations. + +(** step 1 : decode_scalar_25519 **) +lemma ill_decode_scalar_25519 : islossless CurveProcedures.decode_scalar. +proof. islossless. qed. + +lemma eq_ph_decode_scalar_25519 k: + phoare [CurveProcedures.decode_scalar : + k' = k + ==> + res = spec_decode_scalar_25519 k] = 1%r. +proof. 
+ by conseq ill_decode_scalar_25519 (eq_proc_op_decode_scalar k). +qed. + + +(** step 2 : decode_u_coordinate **) +lemma ill_decode_u_coordinate : islossless CurveProcedures.decode_u_coordinate by islossless. + +lemma ill_decode_u_coordinate_base : islossless CurveProcedures.decode_u_coordinate_base by islossless. + +lemma eq_ph_decode_u_coordinate u: + phoare [CurveProcedures.decode_u_coordinate : + u' = u + ==> + res = inzp (to_uint (spec_decode_u_coordinate u))] = 1%r. +proof. + by conseq ill_decode_u_coordinate (eq_proc_op_decode_u_coordinate u). +qed. + +lemma eq_ph_decode_u_coordinate_base: + phoare [CurveProcedures.decode_u_coordinate_base : + true + ==> + res = inzp (to_uint (spec_decode_u_coordinate (W256.of_int 9)))] = 1%r. +proof. + by conseq ill_decode_u_coordinate_base (eq_proc_op_decode_u_coordinate_base). +qed. + + +(** step 3 : spec_ith_bit **) +lemma ill_ith_bit : islossless CurveProcedures.ith_bit by islossless. + +lemma eq_ph_ith_bit (k : W256.t) i: + phoare [CurveProcedures.ith_bit : + k' = k /\ + ctr = i + ==> + res = spec_ith_bit k i] = 1%r. +proof. + by conseq ill_ith_bit (eq_proc_op_ith_bit k i). +qed. + +(** step 4 : cswap **) +lemma ill_cswap : islossless CurveProcedures.cswap by islossless. + +lemma eq_ph_cswap (t : (zp * zp) * (zp * zp) ) b: + phoare [CurveProcedures.cswap : x2 = (t.`1).`1 /\ + z2 = (t.`1).`2 /\ + x3 = (t.`2).`1 /\ + z3 = (t.`2).`2 /\ + toswap = b + ==> ((res.`1, res.`2),(res.`3, res.`4)) = cswap t b] = 1%r. +proof. + by conseq ill_cswap (eq_proc_op_cswap t b). +qed. + +(** step 5 : add_and_double **) +lemma ill_add_and_double : islossless CurveProcedures.add_and_double by islossless. + +lemma eq_ph_add_and_double (qx : zp) (nqs : (zp * zp) * (zp * zp)): + phoare [CurveProcedures.add_and_double : init = qx /\ + x2 = nqs.`1.`1 /\ + z2 = nqs.`1.`2 /\ + x3 = nqs.`2.`1 /\ + z3 = nqs.`2.`2 + ==> ((res.`1, res.`2),(res.`3, res.`4)) = op_add_and_double qx nqs] = 1%r. +proof. + by conseq ill_add_and_double (eq_proc_op_add_and_double qx nqs). +qed. + +(** step 6 : montgomery_ladder_step **) +lemma ill_montgomery_ladder_step : islossless CurveProcedures.montgomery_ladder_step by islossless. + +lemma eq_ph_montgomery_ladder_step (k : W256.t) + (init : zp) + (nqs : (zp * zp) * (zp * zp) * bool) + (ctr : int) : + phoare [CurveProcedures.montgomery_ladder_step : k' = k /\ + init' = init /\ + x2 = nqs.`1.`1 /\ + z2 = nqs.`1.`2 /\ + x3 = nqs.`2.`1 /\ + z3 = nqs.`2.`2 /\ + swapped = nqs.`3 /\ + ctr' = ctr + ==> ((res.`1, res.`2),(res.`3, res.`4),res.`5) = + op_montgomery_ladder3_step k init nqs ctr] = 1%r. +proof. + by conseq ill_montgomery_ladder_step (eq_proc_op_montgomery_ladder_step k init nqs ctr). +qed. + +(** step 7 : montgomery_ladder **) +lemma ill_montgomery_ladder : islossless CurveProcedures.montgomery_ladder. +proof. + islossless. while true (ctr+1). move => ?. wp. simplify. + call(_:true ==> true). islossless. sp. skip; smt(). + skip; smt(). +qed. + +lemma eq_ph_montgomery_ladder (init : zp) + (k : W256.t) : + phoare [CurveProcedures.montgomery_ladder : init' = init /\ + k.[0] = false /\ + k' = k + ==> ((res.`1, res.`2),(res.`3,res.`4)) = + select_double_from_triple (op_montgomery_ladder3 init k)] = 1%r. +proof. + by conseq ill_montgomery_ladder (eq_proc_op_montgomery_ladder init k). +qed. + +(** step 8 iterated square **) +lemma ill_it_sqr : islossless CurveProcedures.it_sqr. +proof. + islossless. + while true ii. move => ?. wp. + inline CurveProcedures.sqr. wp. skip. move => &hr [H] H1. smt(). + skip. smt(). +qed. 
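For the losslessness and phoare statements about `it_sqr` it helps to keep the loop shape in mind: the procedure squares once before entering the loop and then `i - 1` more times inside it, so for `1 <= i` it computes the 2^i-th power, matching `op_it_sqr1 i`. A toy Python model of that control flow (illustration only):

```
# it_sqr-style loop: one squaring up front, i - 1 inside the loop.
p = 2**255 - 19

def it_sqr_proc(f, i):          # assumes 1 <= i, as in the phoare precondition
    h = f * f % p               # squaring before the loop
    ii = i - 1
    while 0 < ii:               # i - 1 further squarings
        h = h * h % p
        ii -= 1
    return h

for i in range(1, 20):
    f = 987654321
    assert it_sqr_proc(f, i) == pow(f, 2**i, p)
```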
+ +lemma eq_ph_it_sqr (e : int) (z : zp) : + phoare[CurveProcedures.it_sqr : + i = e && 1 <= i && f = z + ==> + res = op_it_sqr1 e z] = 1%r. +proof. + by conseq ill_it_sqr (eq_proc_op_it_sqr e z). +qed. + +(** step 9 : invert **) +lemma ill_invert : islossless CurveProcedures.invert. +proof. + proc. + inline CurveProcedures.sqr CurveProcedures.mul. + wp; sp. call(_: true ==> true). apply ill_it_sqr. + wp; sp. call(_: true ==> true). apply ill_it_sqr. + wp; sp. call(_: true ==> true). apply ill_it_sqr. + wp; sp. call(_: true ==> true). apply ill_it_sqr. + wp; sp. call(_: true ==> true). apply ill_it_sqr. + wp; sp. call(_: true ==> true). apply ill_it_sqr. + wp; sp. call(_: true ==> true). apply ill_it_sqr. + wp; sp. call(_: true ==> true). apply ill_it_sqr. + skip. trivial. +qed. + +lemma eq_ph_invert (z : zp) : + phoare[CurveProcedures.invert : fs = z ==> res = op_invert2 z] = 1%r. +proof. + conseq ill_invert (eq_proc_op_invert z). auto => />. +qed. + +(** step 10 : encode point **) +lemma ill_encode_point : islossless CurveProcedures.encode_point. +proof. + proc. inline CurveProcedures.mul. wp; sp. call(_: true ==> true). apply ill_invert. trivial. +qed. + +lemma eq_ph_encode_point (q : zp * zp) : + phoare[CurveProcedures.encode_point : x2 = q.`1 /\ z2 = q.`2 ==> res = op_encode_point q] = 1%r. +proof. + conseq ill_encode_point (eq_proc_op_encode_point q). auto => />. +qed. + +(** step 11 : scalarmult **) +lemma ill_scalarmult_internal : islossless CurveProcedures.scalarmult_internal. +proof. + proc. sp. + call(_: true ==> true). apply ill_encode_point. + call(_: true ==> true). apply ill_montgomery_ladder. + skip. trivial. +qed. + +(** step 11 : spec_scalarmult **) +lemma ill_scalarmult : islossless CurveProcedures.scalarmult. +proof. + proc. sp. + call(_: true ==> true). apply ill_scalarmult_internal. + call(_: true ==> true). apply ill_decode_u_coordinate. + call(_: true ==> true). apply ill_decode_scalar_25519. + skip. trivial. +qed. + +lemma ill_scalarmult_base : islossless CurveProcedures.scalarmult_base. +proof. + proc. sp. + call(_: true ==> true). apply ill_scalarmult_internal. + call(_: true ==> true). apply ill_decode_u_coordinate_base. + call(_: true ==> true). apply ill_decode_scalar_25519. + skip. trivial. +qed. diff --git a/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Procedures.ec b/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Procedures.ec new file mode 100644 index 0000000..1d7d903 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Procedures.ec @@ -0,0 +1,631 @@ +require import Bool List Int IntDiv CoreMap Real Zp_25519 Ring Distr StdOrder BitEncoding Zp_25519 W64limbs StdBigop. +from Jasmin require import JModel JWord JModel_x86. +require import Curve25519_Spec. +require import Curve25519_Operations. +import Zp Zp_25519 Ring.IntID StdOrder.IntOrder BitEncoding.BS2Int StdBigop.Bigint. 
+ +module CurveProcedures = { + + (* h = f + g *) + proc add(f g : zp) : zp = + { + var h: zp; + h <- f + g; + return h; + } + + (* h = f - g *) + proc sub(f g : zp) : zp = + { + var h: zp; + h <- f - g; + return h; + } + + (* h = f * a24 *) + proc mul_a24 (f : zp, a24 : int) : zp = + { + var h: zp; + h <- f * (inzp a24); + return h; + } + + (* h = f * g *) + proc mul (f g : zp) : zp = + { + var h : zp; + h <- f * g; + return h; + } + + (* h = f * f *) + proc sqr (f : zp) : zp = + { + var h : zp; + (*h <- f * f;*) + h <- ZModpRing.exp f 2; + return h; + } + + (* iterated sqr *) + proc it_sqr (f : zp, i : int) : zp = + { + var h, g: zp; + var ii, counter: int; + + counter <- 1; + g <- f; + ii <- i; + h <@ sqr(f); + + ii <- ii - 1; + while (0 < ii) { + h <@ sqr(h); + counter <- counter + 1; + ii <- ii - 1; + } + return h; +} + +proc it_sqr_aux (a : zp, l : int) : zp = { + var f, h: zp; + var ii: int; + ii <- l; + h <@ sqr(a); + f <@ sqr(h); + ii <- ii - 1; + while (0 < ii) { + h <@ sqr(f); + f <@ sqr(h); + ii <- ii - 1; +} + return f; + } + + (* f ** 2**255-19-2 *) + proc invert (fs : zp) : zp = + { + var t1s : zp; + var t0s : zp; + var t2s : zp; + var t3s : zp; + + t0s <- witness; + t1s <- witness; + t2s <- witness; + t3s <- witness; + + t0s <@ sqr (fs); + t1s <@ sqr (t0s); + t1s <@ sqr (t1s); + t1s <@ mul (fs, t1s); + t0s <@ mul (t0s, t1s); + t2s <@ sqr (t0s); + t1s <@ mul (t1s, t2s); + t2s <@ sqr (t1s); + t2s <@ it_sqr (t2s,4); + t1s <@ mul (t1s, t2s); + t2s <@ it_sqr (t1s,10); + t2s <@ mul (t1s, t2s); + t3s <@ it_sqr (t2s,20); + t2s <@ mul(t2s, t3s); + t2s <@ it_sqr (t2s,10); + t1s <@ mul (t1s, t2s); + t2s <@ it_sqr (t1s,50); + t2s <@ mul (t1s, t2s); + t3s <@ it_sqr (t2s,100); + t2s <@ mul (t2s, t3s); + t2s <@ it_sqr (t2s,50); + t1s <@ mul (t1s, t2s); + t1s <@ it_sqr (t1s,4); + t1s <@ sqr (t1s); + t1s <@ mul (t0s, t1s); + return t1s; + } + +proc invert_helper (fs : zp) : zp = +{ + var t1s : zp; + var t0s : zp; + var t2s : zp; + var t3s : zp; + t0s <- witness; + t1s <- witness; + t2s <- witness; + t3s <- witness; + t0s <@ sqr (fs); + t1s <@ sqr (t0s); + t1s <@ sqr (t1s); + t1s <@ mul (t1s, fs); + t0s <@ mul (t0s, t1s); + t2s <@ sqr (t0s); + t1s <@ mul (t1s, t2s); + t2s <@ sqr (t1s); + t2s <@ it_sqr (t2s, 4); + t1s <@ mul (t1s, t2s); + t2s <@ it_sqr (t1s, 10); + t2s <@ mul (t2s, t1s); + t3s <@ it_sqr (t2s, 20); + t2s <@ mul(t2s, t3s); + t2s <@ it_sqr (t2s, 10); + t1s <@ mul (t1s, t2s); + t2s <@ it_sqr (t1s, 50); + t2s <@ mul (t2s, t1s); + t3s <@ it_sqr (t2s, 100); + t2s <@ mul (t2s, t3s); + t2s <@ it_sqr (t2s, 50); + t1s <@ mul (t1s, t2s); + t1s <@ it_sqr (t1s, 4); + t1s <@ sqr (t1s); + t1s <@ mul (t1s, t0s); + return t1s; +} + + proc cswap (x2 z2 x3 z3 : zp, toswap : bool) : zp * zp * zp * zp = + { + if(toswap){ + (x2,z2,x3,z3) <- (x3,z3,x2,z2); + }else{ + (x2,z2,x3,z3) <- (x2,z2,x3,z3);} + return (x2,z2,x3,z3); + } + + proc ith_bit (k' : W256.t, ctr : int) : bool = + { + return k'.[ctr]; + } + + proc decode_scalar (k' : W256.t) : W256.t = + { + k'.[0] <- false; + k'.[1] <- false; + k'.[2] <- false; + k'.[255] <- false; + k'.[254] <- true; + return k'; + } + + proc decode_u_coordinate (u' : W256.t) : zp = + { + (* last bit of u is cleared but that can be introduced at the same time as arrays *) + u'.[255] <- false; + return inzp ( to_uint u' ); + } + + proc init_points (init : zp) : zp * zp * zp * zp = + { + var x2 : zp; + var z2 : zp; + var x3 : zp; + var z3 : zp; + + x2 <- witness; + x3 <- witness; + z2 <- witness; + z3 <- witness; + + x2 <- Zp.one; + z2 <- Zp.zero; + x3 <- init; + z3 
<- Zp.one; + + return (x2, z2, x3, z3); + } + +proc decode_u_coordinate_base () : zp = + { + var u' : zp; + u' <@ decode_u_coordinate (W256.of_int 9); + return u'; + } + + proc add_and_double (init x2 z2 x3 z3 : zp) : zp * zp * zp * zp = + { + var t0 : zp; + var t1 : zp; + var t2 : zp; + t0 <- witness; + t1 <- witness; + t2 <- witness; + t0 <@ sub (x2, z2); + x2 <@ add (z2, x2); + t1 <@ sub (x3, z3); + z2 <@ add (x3, z3); + z3 <@ mul (x2, t1); + z2 <@ mul (z2, t0); + t2 <@ sqr (x2); + t1 <@ sqr (t0); + x3 <@ add (z3, z2); + z2 <@ sub (z3, z2); + x2 <@ mul (t2, t1); + t0 <@ sub (t2, t1); + z2 <@ sqr (z2); + z3 <@ mul_a24 (t0, 121665); + x3 <@ sqr (x3); + t2 <@ add (t2, z3); + z3 <@ mul (init, z2); + z2 <@ mul (t0, t2); + return (x2, z2, x3, z3); + } + + proc montgomery_ladder_step (k' : W256.t, init' x2 z2 x3 z3 : zp, swapped : bool, ctr' : int) : zp * zp * zp * zp * bool = + { + var bit : bool; + var toswap : bool; + bit <@ ith_bit (k', ctr'); + toswap <- swapped; + toswap <- (toswap ^^ bit); + (x2, z2, x3, z3) <@ cswap (x2, z2, x3, z3, toswap); + swapped <- bit; + (x2, z2, x3, z3) <@ add_and_double (init', x2, z2, x3, z3); + return (x2, z2, x3, z3, swapped); + } + + proc montgomery_ladder (init' : zp, k' : W256.t) : zp * zp * zp * zp = + { + var x2 : zp; + var z2 : zp; + var x3 : zp; + var z3 : zp; + var ctr : int; + var c : int; + var swapped : bool; + + + x2 <- witness; + x3 <- witness; + z2 <- witness; + z3 <- witness; + (x2, z2, x3, z3) <@ init_points (init'); + ctr <- 255; + swapped <- false; + + while (0 < ctr) + { + ctr <- ctr - 1; + (x2, z2, x3, z3, swapped) <@ montgomery_ladder_step (k', init', x2, z2, x3, z3, swapped, ctr); + + } + return (x2, z2, x3, z3); + } + + proc encode_point (x2 z2 : zp) : W256.t = + { + var r : zp; + r <- witness; + z2 <@ invert (z2); + r <@ mul (x2, z2); + (* no need to 'freeze' or 'tobytes' r in this spec *) + return (W256.of_int (asint r)); + } +proc scalarmult_internal(u'': zp, k': W256.t ) : W256.t = { + var x2 : zp; + var z2 : zp; + var x3 : zp; + var z3 : zp; + var r : W256.t; + r <- witness; + x2 <- witness; + x3 <- witness; + z2 <- witness; + z3 <- witness; + (x2, z2, x3, z3) <@ montgomery_ladder(u'', k'); + r <@ encode_point(x2, z2); + return r; +} + + proc scalarmult (k' u' : W256.t) : W256.t = + { + var u'' : zp; + var r : W256.t; + r <- witness; + + k' <@ decode_scalar (k'); + u'' <@ decode_u_coordinate (u'); + r <@ scalarmult_internal(u'', k'); + return r; + } + +proc scalarmult_base(k': W256.t) : W256.t = { + var u'' : zp; + var x2 : zp; + var z2 : zp; + var x3 : zp; + var z3 : zp; + var r : W256.t; + r <- witness; + x2 <- witness; + x3 <- witness; + z2 <- witness; + z3 <- witness; + k' <@ decode_scalar (k'); +u'' <@ decode_u_coordinate_base (); + r <@ scalarmult_internal(u'', k'); + + return r; } +}. + +(** step 1 : decode_scalar_25519 **) +lemma eq_proc_op_decode_scalar k: + hoare [ CurveProcedures.decode_scalar: k' = k + ==> res = spec_decode_scalar_25519 k]. +proof. + proc; wp; rewrite /spec_decode_scalar_25519 /=; skip. + move => &hr hk; rewrite hk //. +qed. + +(** step 2 : decode_u_coordinate **) +lemma eq_proc_op_decode_u_coordinate u: + hoare [ CurveProcedures.decode_u_coordinate : u' = u + ==> res = inzp (to_uint (spec_decode_u_coordinate u))]. +proof. + proc; wp. rewrite /spec_decode_u_coordinate /=; skip. + move => &mu hu; rewrite hu //. +qed. + +lemma eq_proc_op_decode_u_coordinate_base: + hoare[ CurveProcedures.decode_u_coordinate_base: + true + ==> + res = inzp (to_uint (spec_decode_u_coordinate( W256.of_int 9))) + ]. +proof. 
+ proc *; inline 1; wp. + ecall (eq_proc_op_decode_u_coordinate (W256.of_int 9)). + auto => />. + qed. + +(** step 3 : ith_bit **) +lemma eq_proc_op_ith_bit (k : W256.t) i: + hoare [CurveProcedures.ith_bit : k' = k /\ ctr = i ==> res = spec_ith_bit k i]. +proof. + proc. rewrite /spec_ith_bit. skip => />. +qed. + +(** step 4 : cswap **) +lemma eq_proc_op_cswap (t : (zp * zp) * (zp * zp) ) b: + hoare [CurveProcedures.cswap : x2 = (t.`1).`1 /\ + z2 = (t.`1).`2 /\ + x3 = (t.`2).`1 /\ + z3 = (t.`2).`2 /\ + toswap = b + ==> ((res.`1, res.`2),(res.`3, res.`4)) = cswap t b]. +proof. + by proc; wp; skip; simplify => /#. +qed. + +(** step 5 : add_and_double **) +lemma eq_proc_op_add_and_double (qx : zp) (nqs : (zp * zp) * (zp * zp)): + hoare [CurveProcedures.add_and_double : init = qx /\ + x2 = nqs.`1.`1 /\ + z2 = nqs.`1.`2 /\ + x3 = nqs.`2.`1 /\ + z3 = nqs.`2.`2 + ==> ((res.`1, res.`2),(res.`3, res.`4)) = op_add_and_double qx nqs]. +proof. + proc; inline *; wp; skip. + rewrite /op_add_and_double /=. rewrite !ZModpRing.expr2. smt(). +qed. + +(** step 6 : montgomery_ladder_step **) +lemma eq_proc_op_montgomery_ladder_step (k : W256.t) + (init : zp) + (nqs : (zp * zp) * (zp * zp) * bool) + (ctr : int) : + hoare [CurveProcedures.montgomery_ladder_step : k' = k /\ + init' = init /\ + x2 = nqs.`1.`1 /\ + z2 = nqs.`1.`2 /\ + x3 = nqs.`2.`1 /\ + z3 = nqs.`2.`2 /\ + swapped = nqs.`3 /\ + ctr' = ctr + ==> ((res.`1, res.`2),(res.`3, res.`4),res.`5) = + op_montgomery_ladder3_step k init nqs ctr]. +proof. + proc => /=. + ecall (eq_proc_op_add_and_double init (cswap (select_double_from_triple nqs) (nqs.`3 ^^ (spec_ith_bit k ctr)))). + wp. + ecall (eq_proc_op_cswap (select_double_from_triple nqs) (nqs.`3 ^^ (spec_ith_bit k ctr))). + wp. + ecall (eq_proc_op_ith_bit k ctr). auto. + rewrite /op_montgomery_ladder3_step => /#. +qed. + +(** step 7 : montgomery_ladder **) +lemma unroll_ml3s k init nqs (ctr : int) : (** unroll montgomery ladder 3 step **) + 0 <= ctr => + foldl (op_montgomery_ladder3_step k init) + nqs + (rev (iota_ 0 (ctr+1))) + = + foldl (op_montgomery_ladder3_step k init) + (op_montgomery_ladder3_step k init nqs ctr) + (rev (iota_ 0 (ctr))). +proof. +move => ctrge0. +rewrite 2!foldl_rev iotaSr //= -cats1 foldr_cat => /#. +qed. + +lemma eq_proc_op_montgomery_ladder (init : zp) + (k : W256.t) : + hoare [CurveProcedures.montgomery_ladder : init' = init /\ + k.[0] = false /\ + k' = k + ==> ((res.`1, res.`2),(res.`3,res.`4)) = + select_double_from_triple (op_montgomery_ladder3 init k)]. +proof. +proc. + inline CurveProcedures.init_points. sp. simplify. + rewrite /op_montgomery_ladder3. + while (foldl (op_montgomery_ladder3_step k' init') + ((Zp.one, Zp.zero), (init, Zp.one), false) + (rev (iota_ 0 255)) + = + foldl (op_montgomery_ladder3_step k' init') + ((x2,z2), (x3,z3), swapped) + (rev (iota_ 0 (ctr))) + ). + wp. sp. + ecall (eq_proc_op_montgomery_ladder_step k' init' ((x2,z2),(x3,z3),swapped) ctr). + skip. simplify. + move => &hr [?] ? ? ?. smt(unroll_ml3s). + skip. move => &hr [?] [?] [?] [?] [?] [?] [?] [?] [?] [?] [?] [?] [?] ?. subst. + split; first by done. + move => H H0 H1 H2 H3 H4 H5 H6. + have _ : rev (iota_ 0 (H)) = []; smt(iota0). +qed. + +(** step 8 : iterated square **) + + +lemma eq_proc_op_it_sqr (e : int) (z : zp) : + hoare[CurveProcedures.it_sqr : + i = e && 1 <= i && f = z + ==> + res = op_it_sqr1 e z]. +proof. + proc. inline CurveProcedures.sqr. sp. wp. simplify. + while (0 <= i && 0 <= ii && op_it_sqr1 e z = op_it_sqr1 ii h). + wp; skip. + move => &hr [[H [H0 H1 H2 H3]]]. split. 
+ apply H. move => H4. split. rewrite /H3. + smt(). move => H5. rewrite /H3. + smt(op_it_sqr1_m2_exp1). skip. + move => &hr [H [H0 [H1 [H2 [H3]]]]] H4. split. split. smt(). move => H5. split. smt(). move => H6. + smt(op_it_sqr1_m2_exp1). move => H5 H6 H7 [H8 [H9 H10]]. smt(op_it_sqr1_0). +qed. + +lemma eq_proc_op_it_sqr_x2 (e : int) (z : zp) : + hoare[CurveProcedures.it_sqr : + i%/2 = e && 2 <= i && i %% 2 = 0 && f = z + ==> + res = op_it_sqr1_x2 e z]. +proof. + proc. inline CurveProcedures.sqr. sp. wp. simplify. + while (0 <= i && 0 <= ii && 2*e = i && op_it_sqr1_x2 e z = op_it_sqr1 i f && op_it_sqr1 (2*e) z = op_it_sqr1 ii h). + wp; skip. + move => &hr [[H]] [H0] [H1] [H2] H3 H4 H5. + split. assumption. move => H6. + split. smt(). move => H7. + split. assumption. move => H8. + split. assumption. move => H9. + rewrite H3 /H5 => />. smt(op_it_sqr1_m2_exp1). skip. +move => &hr [H] [H0] [H1] [H2] [H3] [H4] [H5] [H6] [H7] H8. + do! split. + + smt(). move => H9. + split. smt(). move => H10. + split. smt(). move => H11. + split. rewrite eq_op_it_sqr1_x2. smt(). + rewrite eq_op_it_sqr1. smt(). + rewrite /op_it_sqr_x2 /op_it_sqr H8 -H11. congr. + rewrite exprM => />. move => H12. + rewrite !eq_op_it_sqr1; first smt(). smt(). + rewrite !/op_it_sqr. rewrite H3 H2 H1 -H8 H11. rewrite -ZModpRing.exprM => />. + congr. rewrite H4. + have ->: 2 * 2 ^ (i{hr} - 1) = 2^1 * 2 ^ (i{hr} - 1). rewrite expr1 //. + rewrite -exprD_nneg //. smt(). + move => h II H9 [H10] [H11] [H12] [H13] H14. rewrite H13 -H12 H8 H14. + smt(op_it_sqr1_0). + qed. + +(** step 9 : invert **) +lemma eq_proc_op_invert (z : zp) : + hoare[CurveProcedures.invert : fs = z ==> res = op_invert2 z]. +proof. + proc. + inline CurveProcedures.sqr CurveProcedures.mul. wp. + ecall (eq_proc_op_it_sqr 4 t1s). wp. + ecall (eq_proc_op_it_sqr 50 t2s). wp. + ecall (eq_proc_op_it_sqr 100 t2s). wp. + ecall (eq_proc_op_it_sqr 50 t1s). wp. + ecall (eq_proc_op_it_sqr 10 t2s). wp. + ecall (eq_proc_op_it_sqr 20 t2s). wp. + ecall (eq_proc_op_it_sqr 10 t1s). wp. + ecall (eq_proc_op_it_sqr 4 t2s). wp. + skip. simplify. + move => &hr H. + move=> ? ->. move=> ? ->. + move=> ? ->. move=> ? ->. + move=> ? ->. move=> ? ->. + move=> ? ->. move=> ? ->. + rewrite op_invert2E /sqr /= H /#. +qed. + +equiv eq_sqr: + CurveProcedures.it_sqr ~ CurveProcedures.it_sqr: + f{1} = f{2} /\ i{1} = i{2} ==> ={res}. +proof. + sim. +qed. + +equiv eq_proc_proc_invert: + CurveProcedures.invert_helper ~ CurveProcedures.invert: + fs{1} = fs{2} ==> res{1} = res{2}. +proof. + proc => //=; inline CurveProcedures.sqr CurveProcedures.mul; wp. + call eq_sqr; wp. call eq_sqr; wp. call eq_sqr; wp. + call eq_sqr; wp. call eq_sqr; wp. call eq_sqr; wp. + call eq_sqr; wp. call eq_sqr; wp. + skip => />. smt(). +qed. + + +(** step 10 : encode point **) +lemma eq_proc_op_encode_point (q : zp * zp) : + hoare[CurveProcedures.encode_point : x2 = q.`1 /\ z2 = q.`2 ==> res = op_encode_point q]. +proof. + proc. inline CurveProcedures.mul. wp. sp. + ecall (eq_proc_op_invert z2). + skip. simplify. + move => &hr [H] [H0] H1 H2 H3. + rewrite op_encode_pointE. + auto => />. congr; congr; congr. rewrite -H1. apply H3. +qed. + +(** step 11 : scalarmult **) +lemma eq_proc_op_scalarmult_internal (u: zp, k: W256.t) : + hoare[CurveProcedures.scalarmult_internal : k.[0] = false /\ k' = k /\ u'' = u ==> res = op_scalarmult_internal u k]. +proof. + proc; sp. + ecall (eq_proc_op_encode_point (x2, z2)). simplify. + ecall (eq_proc_op_montgomery_ladder u'' k'). simplify. skip. + move => &1 [H0] [H1] [H2] [H3] [H4] [H5] [H6] H7. 
split. + rewrite H6 => />. + move => H8 H9 H10 H11 H12. smt(). +qed. + +lemma eq_proc_op_scalarmult (k u : W256.t) : + hoare[CurveProcedures.scalarmult : k' = k /\ u' = u ==> res = spec_scalarmult k u]. +proof. + rewrite -eq_op_scalarmult. + proc. + pose dk := spec_decode_scalar_25519 k. + have kb0f : (dk).[0] = false. (* k bit 0 false *) + rewrite /dk /spec_decode_scalar_25519 //. + ecall (eq_proc_op_scalarmult_internal u'' k'). + ecall (eq_proc_op_decode_u_coordinate u'). + ecall (eq_proc_op_decode_scalar k'). + simplify. sp. skip. + move => &hr [H] [H0] H1 H2 H3 H4 H5. split. rewrite H3 H0. apply kb0f. + move=> H6 H7 ->. rewrite !op_encode_pointE. auto => />. congr. congr. congr. congr. congr. + rewrite H5 H3 H1 H0 => />. +rewrite H5 H1 H3 H0 => />. + qed. + +lemma eq_proc_op_scalarmult_base (k : W256.t) : + hoare[CurveProcedures.scalarmult_base : k' = k ==> res = spec_scalarmult_base k]. +proof. + rewrite -eq_op_scalarmult_base. + proc. + pose dk := spec_decode_scalar_25519 k. + have kb0f : (dk).[0] = false. (* k bit 0 false *) + rewrite /dk /spec_decode_scalar_25519 //. + ecall (eq_proc_op_scalarmult_internal u'' k'). + ecall (eq_proc_op_decode_u_coordinate_base). + ecall (eq_proc_op_decode_scalar k'). simplify. sp. skip. + move => &hr [H] [H0] [H1] [H2] [H3] H4 H5 H6 H7 H8. split. rewrite H6 H4. apply kb0f. + move=> H9 H10 ->. rewrite /op_scalarmult_base /op_scalarmult. auto => />. + rewrite !op_encode_pointE. progress. congr; congr; congr; congr. congr. congr. + rewrite H6 H4 => />. congr. congr. congr. rewrite H6 H4 => />. + qed. diff --git a/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Spec.ec b/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Spec.ec new file mode 100644 index 0000000..81a740f --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/common/Curve25519_Spec.ec @@ -0,0 +1,63 @@ +require import List Int Int IntDiv CoreMap Real. +require import Zp_25519. +import Zp. + +from Jasmin require import JModel. + +op spec_decode_scalar_25519 (k:W256.t) = + let k = k.[0 <- false] in + let k = k.[1 <- false] in + let k = k.[2 <- false] in + let k = k.[255 <- false] in + let k = k.[254 <- true ] in + k. + +op spec_decode_u_coordinate (u:W256.t) = let u = u.[255 <- false] in u. + +op spec_add_and_double (qx : zp) (nqs : (zp * zp) * (zp * zp)) = + let x_1 = qx in + let (x_2, z_2) = nqs.`1 in + let (x_3, z_3) = nqs.`2 in + let a = x_2 + z_2 in + let aa = a * a in + let b = x_2 + (- z_2) in + let bb = b*b in + let e = aa + (- bb) in + let c = x_3 + z_3 in + let d = x_3 + (- z_3) in + let da = d * a in + let cb = c * b in + let x_3 = (da + cb)*(da + cb) in + let z_3 = x_1 * ((da + (- cb))*(da + (- cb))) in + let x_2 = aa * bb in + let z_2 = e * (aa + (inzp 121665 * e)) in + ((x_2,z_2), (x_3,z_3)). + +op spec_swap_tuple (t : ('a * 'a) * ('a * 'a)) = (t.`2, t.`1). + +op spec_ith_bit(k : W256.t, i : int) = k.[i]. + +op spec_montgomery_ladder(init : zp, k : W256.t) = + let nqs0 = ((Zp.one,Zp.zero),(init,Zp.one)) in + foldl (fun (nqs : (zp * zp) * (zp * zp)) ctr => + if spec_ith_bit k ctr + then spec_swap_tuple (spec_add_and_double init (spec_swap_tuple(nqs))) + else spec_add_and_double init nqs) nqs0 (rev (iota_ 0 255)). + +op spec_encode_point (q: zp * zp) : W256.t = + let q = q.`1 * (ZModpRing.exp q.`2 (p - 2)) in + W256.of_int (asint q) axiomatized by spec_encode_pointE. + +op spec_scalarmult_internal (k: zp) (u: W256.t) : W256.t = + let r = spec_montgomery_ladder k u in + spec_encode_point (r.`1) axiomatized by scalarmult_internalE. 
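Putting the specification ops together (clamping, the 255-step ladder over bits 254 down to 0, and encoding by a final inversion) gives the familiar X25519 function; the wrapper `spec_scalarmult` defined next is exactly this composition. The Python rendering below is our own sketch of the same pipeline, sanity-checked with the Diffie-Hellman commutativity property that a correct X25519 must satisfy, rather than against the EasyCrypt code.

```
# Our Python sketch of the specification: clamp the scalar, run 255 ladder
# steps over bits 254..0 with the conditional swap, then encode x2/z2.
import random

p = 2**255 - 19
A24 = 121665

def decode_scalar(k):                  # spec_decode_scalar_25519
    k &= ~7                            # clear bits 0, 1, 2
    k &= ~(1 << 255)                   # clear bit 255
    k |= 1 << 254                      # set bit 254
    return k

def decode_u(u):                       # spec_decode_u_coordinate
    return u & ~(1 << 255)             # clear bit 255

def add_and_double(x1, s):             # spec_add_and_double
    (x2, z2), (x3, z3) = s
    a, b = (x2 + z2) % p, (x2 - z2) % p
    aa, bb = a * a % p, b * b % p
    e = (aa - bb) % p
    c, d = (x3 + z3) % p, (x3 - z3) % p
    da, cb = d * a % p, c * b % p
    return ((aa * bb % p, e * (aa + A24 * e) % p),
            ((da + cb) ** 2 % p, x1 * (da - cb) ** 2 % p))

def montgomery_ladder(init, k):        # spec_montgomery_ladder
    s = ((1, 0), (init, 1))
    for ctr in reversed(range(255)):   # rev (iota_ 0 255)
        if (k >> ctr) & 1:
            s = add_and_double(init, (s[1], s[0]))
            s = (s[1], s[0])
        else:
            s = add_and_double(init, s)
    return s

def scalarmult(k, u):                  # spec_scalarmult
    k, u = decode_scalar(k), decode_u(u) % p
    (x2, z2), _ = montgomery_ladder(u, k)
    return x2 * pow(z2, p - 2, p) % p  # spec_encode_point

# A correct X25519 makes Diffie-Hellman commute on the base point u = 9.
a, b = random.getrandbits(256), random.getrandbits(256)
assert scalarmult(a, scalarmult(b, 9)) == scalarmult(b, scalarmult(a, 9))
```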
+ +op spec_scalarmult (k: W256.t) (u: W256.t) : W256.t = + let k = spec_decode_scalar_25519 k in + let u = spec_decode_u_coordinate u in + spec_scalarmult_internal (inzp (to_uint u)) k axiomatized by spec_scalarmultE. + +hint simplify spec_scalarmultE. + +op spec_scalarmult_base (k:W256.t) : W256.t = + spec_scalarmult (k) (W256.of_int(9%Int)). diff --git a/proof/crypto_scalarmult/curve25519/amd64/common/EClib.ec b/proof/crypto_scalarmult/curve25519/amd64/common/EClib.ec new file mode 100644 index 0000000..b1e65e6 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/common/EClib.ec @@ -0,0 +1,201 @@ +require import List Int IntDiv CoreMap Ring StdOrder W64limbs StdBigop BitEncoding. +import Ring.IntID IntOrder StdBigop.Bigint.BIA BitEncoding.BS2Int. + +from Jasmin require import JModel JWord JWord_array. + +lemma foldl_in_eq_r (f1 : 'a1 -> 'b -> 'a1) + (f2 : 'a2 -> 'b -> 'a2) + (s : 'b list) + (a2 : 'a2) + (r : 'a2 -> 'a1) : + (forall a2 b, b \in s => f1 (r a2) b = r (f2 a2 b)) => + foldl f1 (r a2) s = r (foldl f2 a2 s). + proof. + move: s a2. elim. by move => a2. + move => x l hrec a2 /= hin. rewrite hin. + by left. + rewrite hrec //; move => ? ? h; rewrite hin. + by right. + by trivial. + qed. + +lemma ltr_pmul2 x1 x2 y1 y2: + 0 <= x1 => 0 <= x2 => x1 < y1 => x2 < y2 => x1 * x2 < y1 * y2 by smt(). + +lemma divzU a b q r: + 0 <= r < `|b|%Int => a = b * q + r => q = a%/b by smt(). + +lemma divz_div a b c: + 0 < b => 0 < c => a %/ b %/ c = a %/ (b * c). +proof. +move=> *. +apply (divzU _ _ _ (b * ((a %/ b) %%c) + a %% b)). + split; smt(). smt(). +qed. + +lemma modz_minus x d: + (d <= x < 2 * d)%Int => x %% d = x - d by smt(). + +lemma iota_split len2 n len: + 0 <= len2 <= len => iota_ n len = iota_ n len2 ++ iota_ (n+len2) (len-len2). +proof. +move=> H. have E: len = len2 + (len - len2) by smt(). +by rewrite {1} E iota_add // /#. +qed. + +(* different views on datatypes *) +lemma of_int2u64 i0 i1: + pack2 [ W64.of_int i0; W64.of_int i1] = W128.of_int (i0 %% W64.modulus + W64.modulus * i1). +proof. +rewrite W2u64.of_uint_pack2 -iotaredE /=; congr; congr => />. split. + apply W64.word_modeqP; congr. + by rewrite !of_uintK mulzC modzMDr !modz_mod. +rewrite mulzC divzMDr //. +have ->:i0 %% 18446744073709551616 %/ 18446744073709551616 = 0 by smt(modz_cmp divz_eq0). +by rewrite !of_intE modz_mod. +qed. + +lemma to_uint_unpack2u64 w: + W128.to_uint w = val_digits W64.modulus (map W64.to_uint (W2u64.to_list w)). +proof. +have [? /= ?]:= W128.to_uint_cmp w. +rewrite /val_digits /=. +do 2! (rewrite bits64_div 1:// /=). +rewrite !of_uintK /=. +have P: forall x, x = x %% 18446744073709551616 + 18446744073709551616 * (x %/ 18446744073709551616). + by move=> x; rewrite {1}(divz_eq x 18446744073709551616) /=; ring. +rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /=. +by ring; smt(). +qed. + +lemma to_uint2u64 w0 w1: + to_uint (W2u64.pack2 [w0; w1]) = to_uint w0 + W64.modulus * to_uint w1. +proof. by rewrite to_uint_unpack2u64. qed. + +lemma to_uint_unpack4u32 w: + W128.to_uint w = val_digits W32.modulus (map W32.to_uint (W4u32.to_list w)). +proof. +have [? /= ?]:= W128.to_uint_cmp w. +rewrite /val_digits /=. +do 4! (rewrite bits32_div 1:// /=). +rewrite !of_uintK /=. +have P: forall x, x = x %% 4294967296 + 4294967296 * (x %/ 4294967296). + by move=> x; rewrite {1}(divz_eq x 4294967296) /=; ring. 
+rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 4294967296)) divz_div 1..2:/# + {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 79228162514264337593543950336)) divz_div 1..2:/# /=. +by ring; smt(). +qed. + +lemma to_uint_unpack16u8 w: + W128.to_uint w = val_digits W8.modulus (map W8.to_uint (W16u8.to_list w)). +proof. +have [? /= ?]:= W128.to_uint_cmp w. +rewrite /val_digits /=. +do 16! (rewrite bits8_div 1:// /=). +have P: forall x, x = x %% 256 + 256 * (x %/ 256). + by move=> x; rewrite {1}(divz_eq x W8.modulus) /=; ring. +rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 256)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 65536)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 16777216)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 4294967296)) divz_div 1..2:/# /=. +rewrite {1}(P (to_uint w %/ 1099511627776)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 281474976710656)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 72057594037927936)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /=. +rewrite {1}(P (to_uint w %/ 4722366482869645213696)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 1208925819614629174706176)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 309485009821345068724781056)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 79228162514264337593543950336)) divz_div 1..2:/# /=. +rewrite {1}(P (to_uint w %/ 20282409603651670423947251286016)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 5192296858534827628530496329220096)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 1329227995784915872903807060280344576)) divz_div 1..2:/# /=. +ring; smt(). +qed. + +lemma to_uint_unpack8u8 w: + W64.to_uint w = val_digits W8.modulus (map W8.to_uint (W8u8.to_list w)). +proof. +have [? /= ?]:= W64.to_uint_cmp w. +rewrite /val_digits /=. +do 8! (rewrite bits8_div 1:// /=). +have P: forall x, x = x %% 256 + 256 * (x %/ 256). + by move=> x; rewrite {1}(divz_eq x 256) /=; ring. +rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 256)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 65536)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 16777216)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 4294967296)) divz_div 1..2:/# /=. +rewrite {1}(P (to_uint w %/ 1099511627776)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 281474976710656)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 72057594037927936)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /=. +ring; smt(). +qed. + +lemma pack8u8_init_mkseq f: + pack8_t (init f)%W8u8.Pack = pack8 (mkseq f 8). +proof. by rewrite W8u8.Pack.init_of_list. qed. + +lemma load8u8' mem p: + loadW64 mem p = pack8 (mkseq (fun i => mem.[p+i]) 8). +proof. +rewrite /mkseq /= /loadW64 -iotaredE; congr => />. + rewrite W8u8.Pack.init_of_list -iotaredE. by congr => />. + qed. + +lemma to_uint_unpack4u64 w: + W256.to_uint w = val_digits W64.modulus (map W64.to_uint (W4u64.to_list w)). +proof. + have [? /= ?]:= W256.to_uint_cmp w. + rewrite /val_digits /=. + do 4! (rewrite bits64_div 1:// /=). + rewrite !of_uintK /=. + have P: forall x, x = x %% 18446744073709551616 + 18446744073709551616 * (x %/ 18446744073709551616). + by move=> x; rewrite {1}(divz_eq x 18446744073709551616) /=; ring. + rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /= + {1}(P (to_uint w)) {1}(P (to_uint w %/ 340282366920938463463374607431768211456)) divz_div 1..2:/# /= + {1}(P (to_uint w)) {1}(P (to_uint w %/ 6277101735386680763835789423207666416102355444464034512896)) divz_div 1..2:/# /=. + by ring; smt(). +qed. 
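The `to_uint_unpack*` lemmas above (and the 32-byte variant that follows) all state the same numeric fact: a word's unsigned value is the base-2^k evaluation of its limb list, in the sense of `val_digits`. A quick Python check of the 4x64 and 32x8 splits of a 256-bit value (helper names are ours):

```
# A word's unsigned value equals the base-2^k evaluation of its limbs.
import random

def val_digits(base, digits):          # d0 + base*(d1 + base*(...))
    v = 0
    for d in reversed(digits):
        v = v * base + d
    return v

w = random.getrandbits(256)
limbs64 = [(w >> (64 * i)) & (2**64 - 1) for i in range(4)]
bytes8  = [(w >> (8 * i)) & 0xff for i in range(32)]
assert val_digits(2**64, limbs64) == w
assert val_digits(2**8, bytes8) == w
```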
+ +lemma to_uint_unpack32u8 w: + W256.to_uint w = val_digits W8.modulus (map W8.to_uint (W32u8.to_list w)). + proof. + have [? /= ?]:= W256.to_uint_cmp w. + rewrite /val_digits /=. + do 32! (rewrite bits8_div 1:// /=). + have P: forall x, x = x %% 256 + 256 * (x %/ 256). + by move=> x; rewrite {1}(divz_eq x W8.modulus) /=; ring. + rewrite {1}(P (to_uint w)) {1}(P (to_uint w %/ 256)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 65536)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 16777216)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 4294967296)) divz_div 1..2:/# /=. + rewrite {1}(P (to_uint w %/ 1099511627776)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 281474976710656)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 72057594037927936)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 18446744073709551616)) divz_div 1..2:/# /=. + rewrite {1}(P (to_uint w %/ 4722366482869645213696)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 1208925819614629174706176)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 309485009821345068724781056)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 79228162514264337593543950336)) divz_div 1..2:/# /=. + rewrite {1}(P (to_uint w %/ 20282409603651670423947251286016)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 5192296858534827628530496329220096)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 1329227995784915872903807060280344576)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 340282366920938463463374607431768211456)) divz_div 1..2:/# /=. + rewrite {1}(P (to_uint w %/ 87112285931760246646623899502532662132736)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 22300745198530623141535718272648361505980416)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 5708990770823839524233143877797980545530986496)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 1461501637330902918203684832716283019655932542976)) divz_div 1..2:/# /=. + rewrite {1}(P (to_uint w %/ 374144419156711147060143317175368453031918731001856)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 95780971304118053647396689196894323976171195136475136)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 24519928653854221733733552434404946937899825954937634816)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 6277101735386680763835789423207666416102355444464034512896)) divz_div 1..2:/# /=. + rewrite {1}(P (to_uint w %/ 1606938044258990275541962092341162602522202993782792835301376)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 411376139330301510538742295639337626245683966408394965837152256)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 105312291668557186697918027683670432318895095400549111254310977536)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 26959946667150639794667015087019630673637144422540572481103610249216)) divz_div 1..2:/# /=. + rewrite {1}(P (to_uint w %/ 6901746346790563787434755862277025452451108972170386555162524223799296)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 1766847064778384329583297500742918515827483896875618958121606201292619776)) divz_div 1..2:/# /= + {1}(P (to_uint w %/ 452312848583266388373324160190187140051835877600158453279131187530910662656)) divz_div 1..2:/# /=. + ring; smt(). + qed. diff --git a/proof/crypto_scalarmult/curve25519/W64limbs.ec b/proof/crypto_scalarmult/curve25519/amd64/common/W64limbs.ec similarity index 91% rename from proof/crypto_scalarmult/curve25519/W64limbs.ec rename to proof/crypto_scalarmult/curve25519/amd64/common/W64limbs.ec index 64ea80f..43abe7d 100644 --- a/proof/crypto_scalarmult/curve25519/W64limbs.ec +++ b/proof/crypto_scalarmult/curve25519/amd64/common/W64limbs.ec @@ -1,9 +1,9 @@ -require import Int IntDiv IntExtra StdOrder. 
+require import AllCore StdRing IntDiv StdOrder. require import List. -from Jasmin require import JUtils JWord. +from Jasmin require import JModel. -import IntOrder. +import IntOrder Ring.IntID. abbrev MAX x y = if (x < y)%Int then y else x. @@ -25,9 +25,9 @@ lemma val_digits_cat base x y: val_digits base (x++y) = val_digits base x + base^(size x) * val_digits base y. proof. elim: x => //= x xs IH /=. -rewrite !val_digits_cons IH -pow_add //. +rewrite !val_digits_cons IH (addrC (1)) exprSr. by apply size_ge0. -by rewrite pow1; ring. +by ring. qed. op add_digits (x y: int list) : int list = @@ -53,13 +53,10 @@ by rewrite !val_digits_cons IH; ring. qed. op mul1_digits (x:int) (y: int list): int list = map (fun h => x*h) y. -(* axiomatized by mul1_digitsE.*) - -(*hint simplify mul1_digitsE.*) lemma mul1_digitsCE x y: mul1_digits x y = List.map (transpose Int.( * ) x) y. -proof. by elim: y => //= y ys IH; split => //=; ring. qed. +proof. by elim: y => //= y ys IH @/mul1_digits /=; split; 1: ring. qed. lemma mul1_digitsP x y base: val_digits base (mul1_digits x y) = x * val_digits base y. @@ -107,7 +104,7 @@ proof. rewrite /ubW64; smt(W64.to_uint_cmp). qed. lemma ubW640 (x: W64.t): ubW64 0 x <=> x=W64.zero. proof. -rewrite /ubW64; split => *. +rewrite /ubW64; split => H. have <-: W64.of_int (to_uint x) = W64.of_int 0. congr; by smt(W64.to_uint_cmp). by rewrite to_uintK. @@ -123,7 +120,7 @@ lemma ubW64W n1 n2 (x: W64.t): ubW64 n1 x => ubW64 n2 x. proof. -rewrite /ubW64 => *. +rewrite /ubW64 => H H0. by apply (lez_trans _ _ _ H0 H). qed. @@ -137,7 +134,7 @@ lemma ubW64M nx ny (x y: W64.t): ubW64 nx x => ubW64 ny y => ubW64 (nx*ny) (x*y). -proof. rewrite /ubW64 to_uintM; smt (le_modz W64.to_uint_cmp). qed. +proof. rewrite /ubW64 to_uintM; smt(ler_pmul le_modz W64.to_uint_cmp). qed. lemma ubW64Mhi nx ny (x y: W64.t): ubW64 nx x => @@ -148,7 +145,7 @@ rewrite /ubW64 /mulhi => *. rewrite of_uintK modz_small. have ?: to_uint x * to_uint y %/ W64.modulus < W64.modulus. have := (divz_cmp W64.modulus (to_uint x * to_uint y) W64.modulus _ _); first smt(). - split; move=> *. + split; move=> *. smt (mulr_ge0 W64.to_uint_cmp). by apply ltr_pmul; have := W64.to_uint_cmp; smt(). by move=> [? ?]. @@ -166,7 +163,7 @@ lemma ubW64_mulhi0 nx ny x y: nx * ny < W64.modulus => to_uint (mulhi x y) = 0. proof. -move=> ???. +move=> H H0 ?. move: (ubW64Mhi _ _ _ _ H H0). rewrite /mulhi divz_small. apply bound_abs; split => //. @@ -179,7 +176,7 @@ lemma ubW64shr k n (x: W64.t): ubW64 n x => ubW64 (n %/ 2^k) (x `>>>` k). proof. -move=> *; +move=> ? H0; rewrite /ubW64 to_uint_shr //. apply leq_div2r. by apply H0. @@ -203,7 +200,7 @@ lemma ubW64andlsb k n (x: W64.t): ubW64 n x => ubW64 (2^k -1) (x `&` W64.masklsb k). proof. -move=> ?; rewrite /ubW64 to_uint_and_mod // => ?. +move=> ?; rewrite /ubW64 to_uint_and_mod 1:/# => ?. have := modz_cmp (to_uint x) (2^k) _. by apply gt0_pow2. smt(). @@ -278,7 +275,7 @@ proof. move=> ? /bW64P [? ?]; rewrite bW64ub 1:/#. apply (ubW64W (2 ^ n1 - 1)) => //. apply ler_sub => //. -by apply pow_Mle. +apply ler_weexpn2l => //. qed. lemma bW64D nx ny (x y: W64.t): @@ -286,16 +283,16 @@ lemma bW64D nx ny (x y: W64.t): bW64 ny y => bW64 (max nx ny + 1) (x+y). proof. -move=> /bW64P [? ?] /bW64P [? ?]; rewrite bW64ub 1:/#. +move=> /bW64P [H H0] /bW64P [H1 H2]; rewrite bW64ub 1:/#. have T := (ubW64D _ _ _ _ H0 H2). apply (ubW64W _ _ _ _ T). apply (lez_trans (2^(max nx ny + 1)-1)). - rewrite powS 1:/#. + rewrite exprSr 1:/#. case: (nx <= ny) => ?. 
- apply (ler_trans (2 ^ ny - 1 + (2 ^ ny - 1))); first smt(pow_Mle). - smt (max_is_ub). - smt(pow_Mle). -smt(pow_Mle). + apply (ler_trans (2 ^ ny - 1 + (2 ^ ny - 1))); 1: smt(ler_weexpn2l). + smt(ler_weexpn2l). + smt(ler_weexpn2l). +smt(ler_weexpn2l). qed. lemma bW64DW nx ny (x y: W64.t) n: @@ -313,12 +310,12 @@ lemma bW64M nx ny (x y: W64.t): bW64 ny y => bW64 (nx+ny) (x*y). proof. -move=> /bW64P [? ?] /bW64P [? ?]. +move=> /bW64P [H H0] /bW64P [H1 H2]. rewrite bW64ub 1:/#. have T := (ubW64M _ _ _ _ H0 H2). apply (ubW64W _ _ _ _ T); clear T. have ->: (2 ^ nx - 1) * (2 ^ ny - 1) = 2 ^ (nx+ny) - 2 ^ nx - 2 ^ ny + 1. - by rewrite -pow_add //; ring. ++ by rewrite exprD_nneg //;ring. smt(gt0_pow2). qed. @@ -327,19 +324,19 @@ lemma bW64mask n x: bW64 n (andw x (W64.masklsb n)). proof. rewrite bW64E /mask => /> *. -rewrite to_uint_and_mod //. -have HH: 0 < 2^n by apply powPos. -by move: (modz_cmp (to_uint x) (2^n) HH); progress. +rewrite to_uint_and_mod 1:/#. +have HH: 0 < 2^n by apply gt0_pow2. +by move: (modz_cmp (to_uint x) (2^n) HH); smt(). qed. -lemma nosmt bW64andmaskE n w: +lemma bW64andmaskE n w: 0 <= n => bW64 n w <=> w = w `&` W64.masklsb n. proof. move=> *; split; last by move=> ->; apply bW64mask. rewrite bW64ub // /ubW64 => ?. apply W64.word_modeqP; rewrite !modz_small; first 2 by apply bound_abs; apply W64.to_uint_cmp. -rewrite to_uint_and_mod // modz_small //. +rewrite to_uint_and_mod 1:/# modz_small //. by apply bound_abs; smt(W64.to_uint_cmp). qed. @@ -351,13 +348,13 @@ proof. by move => ?; apply (bW64W 64) => //; apply bW64T. qed. lemma bW64const n (c: int) : 0 <= n => 0 <= c < 2^n => bW64 n (W64.of_int c). proof. -move=> ? [??]; rewrite bW64E. +move=> H [??]; rewrite bW64E. rewrite of_uintK. case: (c < W64.modulus) => *. rewrite modz_small; first by apply bound_abs; split => // /#. smt(). rewrite H /=. -have [? ?]:= modz_cmp c W64.modulus _; first by apply W64.gt0_modulus. +have [? H4]:= modz_cmp c W64.modulus _; first by apply W64.gt0_modulus. have ?: W64.modulus < 2^n. apply (ler_lt_trans c); by rewrite // lezNgt. by apply (ltz_trans _ _ _ H4). @@ -389,12 +386,12 @@ move=> /> ???. case: (n < 64); last first. move=> ??; apply (bW64W 64); first smt (). by apply bW64T. -rewrite !bW64E => /> *. +rewrite !bW64E => /> ??H4 ?. rewrite to_uint_shr. by rewrite of_uintK; smt(modz_small). have ->: (to_uint ((of_int k))%W8 %% 64) = k by rewrite of_uintK; smt(modz_small). -move: H4;rewrite -pow_add //= => *. +move: H4;rewrite exprD_nneg //= => *. have := (divz_cmp (2^k) (to_uint x) (2^n) _ _); first by apply gt0_pow2. smt(W64.to_uint_cmp). smt(). @@ -411,17 +408,20 @@ move=> /> ???. case: (n < 64); last first. move=> ??; apply (bW64W 64); first smt (). by apply bW64T. -rewrite !bW64E => /> *. +rewrite !bW64E => /> H4. rewrite to_uint_shl. by rewrite of_uintK; smt(modz_small). have ->: (to_uint ((of_int k))%W8 %% 64) = k by rewrite of_uintK; smt(modz_small). move: H4; rewrite -(ltr_pmul2r (2^k)); first by apply gt0_pow2. -rewrite pow_add // Ring.IntID.subrK => *. -have ? := pow_Mle n 64 _; first by smt(). +rewrite ltr_pmul2r 1:gt0_pow2 => H3 ?. +rewrite exprD_subz 1,2: /# ltz_divRL; first by apply gt0_pow2. ++ apply dvdz_exp2l; 1: by smt(). +move => H4. rewrite modz_small //. apply bound_abs; split => *; first smt(divr_ge0 W64.to_uint_cmp gt0_pow2). -by apply (ltr_le_trans _ _ _ H4); apply pow_Mle; smt(). +apply (ltr_le_trans (2 ^ n) _ _ H4). +apply ler_weexpn2l; smt(). qed. lemma bW64_shlw n k x: @@ -448,8 +448,8 @@ proof. rewrite !bW64E => /> *; apply W64.to_uintM_small. 
move: (W64.to_uint_cmp x) (W64.to_uint_cmp y) => *. apply (StdOrder.IntOrder.ltr_le_trans (2^nx * 2^ny)). - smt(StdOrder.IntOrder.ltr_pmul2l StdOrder.IntOrder.ltr_pmul2r). -rewrite pow_add //; apply pow_Mle. +apply ltr_pmul; smt(W64.to_uint_cmp). +rewrite -exprD_nneg //; apply ler_weexpn2l => //. smt(). qed. @@ -461,6 +461,7 @@ qed. **********************************************************************************************) abbrev digits64 = List.map W64.to_uint. +abbrev digits8 = List.map W8.to_uint. abbrev val_limbs base l = val_digits base (digits64 l). abbrev bW64_limbs w = List.all (bW64 w). @@ -496,9 +497,9 @@ lemma size_add_limbs x y: size (add_limbs x y) = max (size x) (size y). proof. elim: x y => //=. - by move=> y; rewrite add_limbs_nill max_ler //; apply size_ge0. + move=> y; rewrite add_limbs_nill ler_maxr //; apply size_ge0. move=> x xs IH; elim => //=. - by rewrite max_lel; smt(size_ge0). + by rewrite ler_maxl; smt(size_ge0). move=> y IH2; rewrite IH /#. qed. @@ -573,7 +574,7 @@ move=> H; elim: x y => //= x xs IH; elim => /=. apply (bW64W_limbs 0); first smt(size_ge0 bW64_pos). by apply bW64M_limbs_nilr. move=> y ys IH2 [??] [??]; split. - smt(@BW64 size_ge0). + smt(BW64.bW64W' BW64.bW64MW size_ge0). apply (bW64W_limbs (max (nx+ny) (nx+ny+size xs) + 1)); first smt(size_ge0). apply bW64D_limbs. by rewrite -(mul1_limbsE x); apply bW64M1_limbs. @@ -611,6 +612,9 @@ op nth_digits (x: int list) (n: int) : int = nth 0 x n. abbrev val_digits64 = val_digits (2^64). abbrev val_limbs64 x = val_digits64 (digits64 x). +abbrev val_digits8 = val_digits (2^8). +abbrev val_limbs8 x = val_digits8 (digits8 x). + lemma val_limbs64_cons x xs: val_limbs64 (x::xs) = to_uint x + 2^64 * val_limbs64 xs. proof. by rewrite /= val_digits_cons. qed. @@ -647,12 +651,12 @@ lemma carryprop_limbs64P x c: proof. elim: x c => //= x xs IH c. have := (IH (carry_add W64.zero x c)). -case: (carryprop_limbs64 xs (carry_add W64.zero x c)) => ?? /=. +case: (carryprop_limbs64 xs (carry_add W64.zero x c)) => x1 x2 /=. rewrite !val_digits_cons. have := W64.addcP W64.zero x c. rewrite addcE /= => E1 E2. have ->: 2 ^ (64 * (1 + size xs)) * b2i x1 = 2^64*2^(64*size xs)*b2i x1. - by rewrite mulzDr pow_add //; smt(size_ge0). + rewrite mulzDr exprD_nneg //; smt(size_ge0). smt(). qed. @@ -674,7 +678,7 @@ by move: (W64.addcP W64.zero x c); rewrite addcE /#. qed. *) -lemma nosmt carryprop_limbs64_ncP bx x c: +lemma carryprop_limbs64_ncP bx x c: 0 < size x => 1 + bx < W64.modulus => ubW64 bx (nth_limbs64 x (size x - 1)) => @@ -682,14 +686,14 @@ lemma nosmt carryprop_limbs64_ncP bx x c: /\ ubW64 (bx+1) (nth_limbs64 (carryprop_limbs64 x c).`2 (size x - 1)). proof. elim: x c => //= x xs IH c H0 /=. -rewrite /nth_limbs64 /nth_digits /=; case: (size xs = 0) => /= E ?. +rewrite /nth_limbs64 /nth_digits /=; case: (size xs = 0) => /= E H. rewrite size_eq0 in E; rewrite E carryprop_limbs64_nil /val_digits /=. - move=> ?; split; first by rewrite to_uintD_small of_uintK; smt(modz_small). + move=> H1; split; first by rewrite to_uintD_small of_uintK; smt(modz_small). apply (ubW64D _ _ _ (W64.of_int (b2i c)) H1). by apply ubW641. -have E2 ? : 0 < size xs by smt(size_ge0). +have E2 H1 : 0 < size xs by smt(size_ge0). have := IH (carry_add W64.zero x c) E2 H H1. -case: (carryprop_limbs64 _ _) => /= ?[??]. +case: (carryprop_limbs64 _ _) => /= ? [H2?]. rewrite !val_digits_cons H2 E /=. split; last by []. by move: (W64.addcP W64.zero x c); rewrite addcE /#. @@ -724,7 +728,7 @@ move=> x xs IH; elim => //= [|y ys IH2] c. 
rewrite (IH ys (addc x y c).`1); smt(). qed. -lemma nosmt add_limbs64P x y c: +lemma add_limbs64P x y c: let (c',z) = add_limbs64 x y c in val_limbs64 z + 2^(64*MAX (size x) (size y)) * b2i c' = val_limbs64 x + val_limbs64 y + b2i c. proof. @@ -739,16 +743,16 @@ elim => /=. move: (carryprop_limbs64P xs (carry_add W64.zero x c)). rewrite (Core.pairS (carryprop_limbs64 _ _)) //=. move: (W64.addcP W64.zero x c); rewrite addcE /= => E1 E2. - by rewrite !val_digits_cons mulzDr -pow_add; smt(size_ge0). + by rewrite !val_digits_cons mulzDr exprD_nneg; smt(size_ge0). move=> y ys IH2 c; rewrite !val_digits_cons !addcE /=. have ->: MAX (1 + size xs) (1 + size ys) = 1 + MAX (size xs) (size ys) by smt(size_ge0). -rewrite mulzDr -pow_add /=; first 2 smt(size_ge0). -move: (IH ys (carry_add x y c)); rewrite (Core.pairS (add_limbs64 _ _ _ )) //=. +rewrite mulzDr exprD_nneg /=; first 2 smt(size_ge0). +move: (IH ys (carry_add x y c)); rewrite (Core.pairS (add_limbs64 _ _ _ )) //=. case: (add_limbs64 xs ys (carry_add x y c)) => /= ?? E. by move: (W64.addcP x y c); rewrite addcE /= => ?; smt(). qed. -lemma nosmt add_limbs64nc_aux nx ny x y c: +lemma add_limbs64nc_aux nx ny x y c: 0 < MAX (size x) (size y) => 1 + nx + ny < W64.modulus => ubW64 nx (nth_limbs64 x (MAX (size x) (size y) - 1)) => @@ -786,7 +790,7 @@ lemma size_add_limbs64nc x y: size (add_limbs64nc x y) = MAX (size x) (size y). proof. by rewrite /add_limbs64nc; apply (size_add_limbs64 x y false). qed. -lemma nosmt add_limbs64ncP nx ny x y: +lemma add_limbs64ncP nx ny x y: 0 < MAX (size x) (size y) => 1 + nx + ny < W64.modulus => ubW64 nx (nth_limbs64 x (MAX (size x) (size y) - 1)) => @@ -794,7 +798,7 @@ lemma nosmt add_limbs64ncP nx ny x y: val_limbs64 (add_limbs64nc x y) = val_limbs64 x + val_limbs64 y. proof. by move=> *; have [? _] := add_limbs64nc_aux nx ny x y false _ _ _ _. qed. -lemma nosmt add_limbs64ncP' nx ny x y: +lemma add_limbs64ncP' nx ny x y: 0 < MAX (size x) (size y) => 1 + nx + ny < W64.modulus => ubW64 nx (nth_limbs64 x (MAX (size x) (size y) - 1)) => @@ -818,8 +822,8 @@ elim: x y => //= x xs IH ys. rewrite !val_digits_cons !add_digitsP !val_digits_cons /=. have ->: (to_uint x + 2^64 * val_limbs64 xs) * val_limbs64 ys = to_uint x * val_limbs64 ys + 2^64 * (val_limbs64 xs * val_limbs64 ys) by ring. -rewrite -IH; congr. -by rewrite mul1_digitsP. +rewrite -IH; congr => //. +rewrite mul1_digitsP /#. qed. diff --git a/proof/crypto_scalarmult/curve25519/amd64/common/Zp_25519.ec b/proof/crypto_scalarmult/curve25519/amd64/common/Zp_25519.ec new file mode 100644 index 0000000..31eb38a --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/common/Zp_25519.ec @@ -0,0 +1,209 @@ +require import List Int IntDiv Ring CoreMap StdOrder. +require import EClib W64limbs Array4 Array5 Array32. + +from Jasmin require import JModel. +import Ring.IntID IntOrder. + +(* modular operations modulo P *) +op p = 2^255 - 19 axiomatized by pE. + +lemma two_pow255E: 2^255 = 57896044618658097711785492504343953926634992332820282019728792003956564819968 by done. + +(* Embedding into ring theory *) +lemma pVal: p = 57896044618658097711785492504343953926634992332820282019728792003956564819949 by smt(pE two_pow255E). + +require ZModP. + +clone import ZModP.ZModRing as Zp with + op p <- p + rename "zmod" as "zp" + proof ge2_p by rewrite pE. + + +(* congruence "mod p" *) + +lemma zpcgr_over a b: + Zp.zpcgr (a + 57896044618658097711785492504343953926634992332820282019728792003956564819968 * b) (a + 19 * b). +proof. +have /= ->: (2^ 255) = 19 + p by rewrite pE. 
+by rewrite (mulzC _ b) mulzDr addzA modzMDr mulzC. +qed. + +lemma inzp_over x: + Zp.inzp (57896044618658097711785492504343953926634992332820282019728792003956564819968 * x) = Zp.inzp (19*x). +proof. by have /= := zpcgr_over 0 x; rewrite -eq_inzp. qed. + +lemma zp_over_lt2p_red x: + p <= x < 2*p => + x %% p = (x + 19) %% 2^255. +proof. +move=> *. +rewrite modz_minus. split; smt(). +have ->: x-p = x+19-2^255. + by rewrite pE. +rewrite modz_minus. split. +apply (lez_trans (p+19) (2^255) (x+19)). +rewrite pE. trivial. smt(). + move => *. apply (ltz_trans (2*p+19) (x+19) (2*2^255)). smt(). +simplify. rewrite pE; trivial. +smt(). +qed. + + +lemma twop255_cgr : 2^255 %% p = 19 by smt(powS_minus pow2_256). +lemma twop256_cgr : 2^256 %% p = 38 by smt(powS_minus pow2_256). +lemma twop256_cgr2 : 2^256 * 2 %% p = 76 by smt(powS_minus pow2_256). + +lemma ltP_overflow x: + (x + 2^255 + 19 < 2^256) = (x < p). +proof. +have ->: 2^255 = p + 19 by rewrite pE /#. +have ->: 2^256 = p + p + 19 + 19 by rewrite !pE /#. +smt(). +qed. + +op red x = if x + 2^255 + 19 < 2^256 then x else (x + 2^255 + 19) %% 2^256. + +lemma redE x: + p <= x < 2^256 => + (x + 2^255 + 19) %% 2^256 = x - p. +proof. + move=> [H1 H2]. + pose y := x-p. + rewrite (_: x= y+p) 1:/# (_:2^255 = p+19) 1:pE 1:/#. + rewrite -addrA -addrA (_:p + (p + 19 + 19) = 2^256) 1:pE 1:/#. + rewrite modzDr modz_small; last reflexivity. + apply bound_abs. + move: H2; have ->: 2^256 = p + p + 19 + 19 by rewrite !pE /#. + smt(pVal). +qed. + +lemma redP x: + 0 <= x < 2^256 => + x %% p = red (red x). +proof. + move=> [H1 H2]. + rewrite /red !ltP_overflow. + case: (x < p) => Hx1. + rewrite Hx1 /= modz_small; last done. + by apply bound_abs => /#. + rewrite redE. + split => *; [smt() | assumption]. + case: (x - p < p) => Hx2. + rewrite {1}(_: x = x - p + p) 1:/# modzDr modz_small; last reflexivity. + by apply bound_abs => /#. + rewrite redE. + split => *; first smt(). + rewrite (_:W256.modulus = W256.modulus-0) 1:/#. + apply (ltr_le_sub); first assumption. + smt(pVal). + rewrite (_: x = x - p - p + p + p) 1:/#. + rewrite modzDr modzDr modz_small. + apply bound_abs; split => *; first smt(). + move: H2; have ->: 2^256 = p + p + 19 + 19 by rewrite !pE /#. + smt(pVal). + smt(). +qed. + +op bezout_coef256 (x : int) : int * int = (x %/ W256.modulus, x %% W256.modulus). + +op red256 (x: int) : int = + (bezout_coef256 x).`2 + 38 * (bezout_coef256 x).`1. + +lemma red256P x: Zp.zpcgr x (red256 x). +proof. + by rewrite {1}(divz_eq x (2^256)) -modzDml -modzMmr twop256_cgr + modzDml /red256 /split256 /= addrC mulrC. +qed. + + +lemma red256_bnd B x: + 0 <= x < W256.modulus * B => + 0 <= red256 x < W256.modulus + 38*B. +proof. + move=> [Hx1 Hx2]; rewrite /red256 /bezout_coef256 /=; split => *. + apply addz_ge0; first smt(modz_cmp). + apply mulr_ge0; first done. + apply divz_ge0; smt(). + have H1: x %/ W256.modulus < B by smt(pow2_256). + have H2: x %% W256.modulus < W256.modulus by smt(modz_cmp). + smt(pow2_256). +qed. + +lemma red256_once x: + 0 <= x < W256.modulus * W256.modulus => + 0 <= red256 x < W256.modulus*39. +proof. + have ->: W256.modulus*39 = W256.modulus + 38*W256.modulus by ring. + exact red256_bnd. +qed. + +lemma red256_twice x: + 0 <= x < W256.modulus*W256.modulus => + 0 <= red256 (red256 x) < W256.modulus*2. +proof. + move=> Hx; split => *. + smt(red256_once). + move: (red256_once x Hx). + move => Hy. + move: (red256_bnd 39 _ Hy); smt(). +qed. 
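+
+(* Informal sketch of the reduction argument developed in this file: since
+   2^255 %% p = 19, we have 2^256 %% p = 38, so writing x = q * 2^256 + r
+   (with r = x %% 2^256) gives red256 x = r + 38 * q, which is congruent to x
+   modulo p (red256P).  Starting from x < 2^512 the quotient shrinks quickly:
+   one step yields a value below 39 * 2^256 (red256_once), a second step a
+   value of at most 2^256 + 38 * 38 (red256_twice and red256_twiceP below),
+   and a third step a value below 2^256 (red256_thrice), which is what the
+   `reduce` operator below packages up. *)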
+ +lemma red256_twiceP x a b: + 0 <= x < W256.modulus*W256.modulus => + (a,b) = bezout_coef256 (red256 (red256 x)) => + (* 0 <= a < 2 /\ (a=0 \/ b <= 38*38).*) + a=0 \/ a=1 /\ b <= 38*38. +proof. + move=> Hx Hab. + have Ha: 0 <= a < 2. + have H := (red256_twice x Hx). + move: Hab; rewrite /split256. + move => [-> _]. smt(pow2_256). + case: (a=0) => Ea /=; first done. + have {Ea} Ea: a=1 by smt(). + rewrite Ea /=. + move: Hab; pose y := red256 x. + rewrite /red256 /bezout_coef256 /=. + pose yL := y%%W256.modulus. + pose yH := y%/W256.modulus. + have Hy := red256_once x Hx. + have HyH : 0 <= yH <= 38 by smt(). + move => [Hab1 Hab2]. + have E: W256.modulus + b = yL + 38 * yH. + by move: (divz_eq (yL + 38 * yH) W256.modulus); smt(pow2_256). + smt(modz_cmp). +qed. + +lemma red256_thrice x: + 0 <= x < W256.modulus*W256.modulus => + 0 <= red256 (red256 (red256 x)) < W256.modulus. +proof. + move=> Hx; pose y:= red256 (red256 x). + rewrite /red256. + have := (red256_twiceP x (bezout_coef256 y).`1 (bezout_coef256 y).`2 _ _). + smt(pow2_256). + smt(red256_twice). + move=> [->|[-> H2]] /=. + rewrite /bezout_coef256; smt(modz_cmp). + split. + rewrite /bezout_coef256; smt(modz_cmp). + smt(). +qed. + +op reduce x = red256 (red256 (red256 x)). + +lemma reduceP x: + 0 <= x < W256.modulus * W256.modulus => + Zp.zpcgr x (reduce x) /\ 0 <= reduce x < W256.modulus. +proof. + rewrite /reduce => H; split; first smt(red256P). + smt(pow2_256 red256_thrice). +qed. + +lemma expE (z : zp) (e1 e2 : int) : 0 <= e1 /\ 0 <= e2 => + ZModpRing.exp (ZModpRing.exp z e1) e2 = + ZModpRing.exp z (e1*e2). +proof. + rewrite -ZModpRing.exprM => />. +qed. diff --git a/proof/crypto_scalarmult/curve25519/amd64/common/Zp_limbs.ec b/proof/crypto_scalarmult/curve25519/amd64/common/Zp_limbs.ec new file mode 100644 index 0000000..3115c41 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/common/Zp_limbs.ec @@ -0,0 +1,191 @@ +require import List Int IntDiv Ring CoreMap StdOrder. +require import Zp_25519 EClib W64limbs Array4 Array32. + +from Jasmin require import JModel JWord. + +import Zp EClib Ring.IntID Array4 Array32 IntOrder JWord.W8 JWord.W64. + + +op inzp_limbs base l = inzp (val_limbs base l). + +type Rep4 = W64.t Array4.t. +type Rep32 = W8.t Array32.t. + +op valRep4 (x : Rep4) : int = val_limbs64 (Array4.to_list x) axiomatized by valRep4E. +op valRep4List (x : W64.t list) : int = val_limbs64 x axiomatized by valRep4ListE. +op inzpRep4 (x : Rep4) : zp = inzp (valRep4 x) axiomatized by inzpRep4E. +op inzpRep4List (x: W64.t list) : zp = inzp (valRep4List x) axiomatized by inzpRep4ListE. + +op valRep32List (x : W8.t list) : int = val_limbs8 x axiomatized by valRep32ListE. +op valRep32 (x : Rep32) : int = val_limbs8 (Array32.to_list x) axiomatized by valRep32E. +op inzpRep32 (x : Rep32) : zp = inzp (valRep32 x) axiomatized by inzpRep32E. +op inzpRep32List (x : W8.t list) : zp = inzp (valRep32List x) axiomatized by inzpRep32ListE. + +lemma val_limbs64_div2255 x0 x1 x2 x3: + val_limbs64 [x0; x1; x2; x3] %/ 2^255 = to_uint x3 %/ 9223372036854775808. +proof. + rewrite /val_digits /=. + have := (divz_eq (to_uint x3) 9223372036854775808). + rewrite addzC mulzC => {1}->. + rewrite !mulzDr -!mulzA /=. + have /= ? := W64.to_uint_cmp x0. + have /= ? := W64.to_uint_cmp x1. + have /= ? := W64.to_uint_cmp x2. + have /= ? := W64.to_uint_cmp x3. + have ? : 0 <= to_uint x3 %% 9223372036854775808 < 9223372036854775808 by smt(). + rewrite !addzA (mulzC 57896044618658097711785492504343953926634992332820282019728792003956564819968) divzMDr //. 
+ have ->: (to_uint x0 + 18446744073709551616 * to_uint x1 + + 340282366920938463463374607431768211456 * to_uint x2 + + 6277101735386680763835789423207666416102355444464034512896 * (to_uint x3 %% 9223372036854775808)) %/ + 57896044618658097711785492504343953926634992332820282019728792003956564819968 = 0. + by rewrite -divz_eq0 /#. + by ring. +qed. + + +lemma val_limbs64_div2256 x0 x1 x2 x3: + val_limbs64 [x0; x1; x2; x3] %/ 2^256 = to_uint x3 %/ 2^64. +proof. + rewrite /val_digits /=. + have := (divz_eq (to_uint x3) 18446744073709551616). + rewrite addzC mulzC => {1}->. + rewrite !mulzDr -!mulzA /=. + have /= ? := W64.to_uint_cmp x0. + have /= ? := W64.to_uint_cmp x1. + have /= ? := W64.to_uint_cmp x2. + have /= ? := W64.to_uint_cmp x3. + have ? : 0 <= to_uint x3 %% 18446744073709551616 < 18446744073709551616 by smt(). + rewrite !addzA (mulzC 115792089237316195423570985008687907853269984665640564039457584007913129639936) divzMDr //. + have ->: (to_uint x0 + 18446744073709551616 * to_uint x1 + + 340282366920938463463374607431768211456 * to_uint x2 + + 6277101735386680763835789423207666416102355444464034512896 * (to_uint x3 %% W64.modulus)) %/ + 115792089237316195423570985008687907853269984665640564039457584007913129639936 = 0. + by rewrite -divz_eq0 /#. + by ring. +qed. + + +op valid_ptr(p : int, o : int) = 0 <= o => 0 <= p /\ p + o < W64.modulus. + +op load_array4 (m : global_mem_t, p : address) : W64.t list = + [loadW64 m p; loadW64 m (p+8); loadW64 m (p+16); loadW64 m (p+24)]. + +op load_array32(m : global_mem_t, p : address) : W8.t Array32.t = + Array32.init (fun i => m.[p + i]). + +lemma valRep4ToPack x: valRep4 x = W256.to_uint (W4u64.pack4 (Array4.to_list x)). +proof. + rewrite valRep4E. rewrite to_uint_unpack4u64. + auto => />. + have E: forall k, 0 <= k < 4 => nth W64.zero (to_list x) k = x.[k]. + + move => H H0. rewrite /to_list /mkseq -iotaredE => />. smt(). + rewrite !E; trivial. rewrite /to_list /mkseq -iotaredE => />. +qed. + +lemma inzpRep4ToPack x: inzpRep4 x = inzp (W256.to_uint (W4u64.pack4 (Array4.to_list x))). +proof. + rewrite inzpRep4E. congr. apply valRep4ToPack. +qed. + +lemma valRep4ToPack_xy (x: W256.t, y): + W256.to_uint x = valRep4 y => x = W4u64.pack4 (Array4.to_list y). + rewrite valRep4ToPack. move => H. + smt(W256.to_uintK). +qed. + + +lemma load_store_pos (mem: global_mem_t, p: W64.t, w: Rep4, i: int) : + valid_ptr (to_uint p) 32 => (i = 0 \/ i = 8 \/ i = 16 \/ i = 24) => + w.[i %/ 8] = + loadW64 + (storeW64 + (storeW64 + (storeW64 + (storeW64 mem (W64.to_uint p) w.[0]) + (W64.to_uint (p + (W64.of_int 8)%W64)) w.[1]) + (W64.to_uint (p + (W64.of_int 16)%W64)) w.[2]) + (W64.to_uint (p + (W64.of_int 24)%W64)) w.[3]) + (W64.to_uint p + i). +proof. + move => V0 I. + rewrite /load_array4 !/storeW64 !/stores /= load8u8' /mkseq -iotaredE => />. + rewrite wordP => V1 V2. rewrite !to_uintD_small !to_uint_small => />. + move: V0. rewrite /valid_ptr. smt(). + move: V0. rewrite /valid_ptr. smt(). + move: V0. rewrite /valid_ptr. smt(). + rewrite pack8wE => />. rewrite get_of_list. smt(). + rewrite !bits8E !get_setE. auto => />. + case: (i = 0). auto => />. + case: (V1 %/ 8 = 0). move => V3. + do 31! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 1 = 0). move => *. + do 30! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 2 = 0). move => *. + do 29! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 3 = 0). move => *. + do 28! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 4 = 0). move => *. + do 27! (rewrite ifF 1:/#). smt(W8.initE). 
+ case: (V1 %/ 8 - 5 = 0). move => *. + do 26! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 6 = 0). move => *. + do 25! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 7 = 0). move => *. + do 24! (rewrite ifF 1:/#). smt(W8.initE). + move => *. smt(W8.initE). + case: (i = 8). auto => />. + case: (V1 %/ 8 = 0). move => V3. + do 23! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 1 = 0). move => *. + do 22! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 2 = 0). move => *. + do 21! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 3 = 0). move => *. + do 20! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 4 = 0). move => *. + do 19! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 5 = 0). move => *. + do 18! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 6 = 0). move => *. + do 17! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 7 = 0). move => *. + do 16! (rewrite ifF 1:/#). smt(W8.initE). + move => *. smt(W8.initE). + case: (i = 16). auto => />. + case: (V1 %/ 8 = 0). move => V3. + do 15! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 1 = 0). move => *. + do 14! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 2 = 0). move => *. + do 13! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 3 = 0). move => *. + do 12! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 4 = 0). move => *. + do 11! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 5 = 0). move => *. + do 10! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 6 = 0). move => *. + do 9! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 7 = 0). move => *. + do 8! (rewrite ifF 1:/#). smt(W8.initE). + move => *. smt(W8.initE). + case: (i = 24). auto => />. + case: (V1 %/ 8 = 0). move => V3. + do 7! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 1 = 0). move => *. + do 6! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 2 = 0). move => *. + do 5! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 3 = 0). move => *. + do 4! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 4 = 0). move => *. + do 3! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 5 = 0). move => *. + do 2! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 6 = 0). move => *. + do 1! (rewrite ifF 1:/#). smt(W8.initE). + case: (V1 %/ 8 - 7 = 0). move => *. + do 0! (rewrite ifF 1:/#). smt(W8.initE). + move => *. smt(W8.initE). move => *. + smt(W8.initE). +qed. diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/CorrectnessProof.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/CorrectnessProof.ec new file mode 100644 index 0000000..6b07e38 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/CorrectnessProof.ec @@ -0,0 +1,1311 @@ +require import AllCore Bool List Int IntDiv StdOrder CoreMap Real Ring Distr. +from Jasmin require import JModel JMemory JWord JWord_array JUtils. +require import Curve25519_Procedures. +require import Curve25519_Operations. +require import Scalarmult_s. +import Zp_25519 Zp_limbs Zp. +import Curve25519_Procedures Curve25519_Operations StdOrder.IntOrder EClib. +import Scalarmult_s. + +require import Array4 Array8 Array32. +require import W64limbs. + +abbrev zexp = ZModpRing.exp. + +(** hoares, lossless and phoares **) +lemma h_add_rrs_mulx (_f _g: zp): + hoare [M.__add4_rrs : + inzpRep4 f = _f /\ inzpRep4 g = _g + ==> + inzpRep4 res = _f + _g + ]. +proof. + proc. + admit. +qed. + +lemma h_sub_rrs_mulx (_f _g: zp): + hoare [M.__sub4_rrs : + inzpRep4 f = _f /\ inzpRep4 gs = _g + ==> + inzpRep4 res = _f - _g + ]. 
+proof. + proc. + admit. +qed. + +(* inline mul4_c0 mul4_c1 mul4_c2 mul4_c3 *) + +lemma h_mul_a24_mulx (_f : zp, _a24: int): + hoare [M.__mul4_a24_rs : + inzpRep4 fs = _f /\ _a24 = to_uint a24 + ==> + inzpRep4 res = _f * inzp _a24 + ]. +proof. + proc. + admit. +qed. + + +lemma h_mul_rsr_mulx (_f _g: zp): + hoare [M.__mul4_rsr : + inzpRep4 fs = _f /\ inzpRep4 g = _g + ==> + inzpRep4 res = _f * _g + ]. +proof. + proc. + admit. +qed. + +lemma h_sqr_rr_mulx (_f: zp): + hoare [M.__sqr4_rr : + inzpRep4 f = _f + ==> + inzpRep4 res = ZModpRing.exp _f 2 + ]. +proof. + proc. + admit. +qed. + +lemma ill_add_rrs_mulx : islossless M.__add4_rrs. + by proc; do 2! unroll for ^while; islossless. +qed. + +lemma ph_add_rrs_mulx (_f _g: zp): + phoare [M.__add4_rrs : + inzpRep4 f = _f /\ inzpRep4 g = _g + ==> + inzpRep4 res = _f + _g + ] = 1%r. +proof. + by conseq ill_add_rrs_mulx (h_add_rrs_mulx _f _g). +qed. + +lemma ill_sub_rrs_mulx : islossless M.__sub4_rrs. + by proc; do 2! unroll for ^while; islossless. +qed. + +lemma ph_sub_rrs_mulx (_f _g: zp): + phoare [M.__sub4_rrs : + inzpRep4 f = _f /\ inzpRep4 gs = _g + ==> + inzpRep4 res = _f - _g + ] = 1%r. +proof. + by conseq ill_sub_rrs_mulx (h_sub_rrs_mulx _f _g). +qed. + +lemma ill_mul_a24_mulx : islossless M.__mul4_a24_rs by islossless. + +lemma ph_mul_a24_mulx (_f: zp, _a24: int): + phoare [M.__mul4_a24_rs : + inzpRep4 fs = _f /\ _a24 = to_uint a24 + ==> + inzpRep4 res = _f * inzp _a24 + ] = 1%r. +proof. + by conseq ill_mul_a24_mulx (h_mul_a24_mulx _f _a24). +qed. + +lemma ill_mul_rsr_mulx : islossless M.__mul4_rsr by islossless. + +lemma ph_mul_rsr_mulx (_f _g : zp): + phoare [M.__mul4_rsr : + inzpRep4 fs = _f /\ inzpRep4 g = _g + ==> + inzpRep4 res = _f * _g] = 1%r. +proof. + by conseq ill_mul_rsr_mulx (h_mul_rsr_mulx _f _g). +qed. + +lemma ill_sqr_rr_mulx : islossless M.__sqr4_rr + by islossless. + +lemma ph_sqr_rr_mulx (_f: zp): + phoare [M.__sqr4_rr : + inzpRep4 f = _f + ==> + inzpRep4 res = ZModpRing.exp _f 2] = 1%r. +proof. + by conseq ill_sqr_rr_mulx (h_sqr_rr_mulx _f). +qed. + +(** step 0 : add sub mul sqr **) +equiv eq_spec_impl_add_rrs_mulx : CurveProcedures.add ~ M.__add4_rrs: + f{1} = inzpRep4 f{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_add_rrs_mulx (inzpRep4 f{2}) (inzpRep4 g{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_sub_rrs_mulx : CurveProcedures.sub ~ M.__sub4_rrs: + f{1} = inzpRep4 f{2} /\ + g{1} = inzpRep4 gs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_sub_rrs_mulx (inzpRep4 f{2}) (inzpRep4 gs{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_mul_a24_mulx : CurveProcedures.mul_a24 ~ M.__mul4_a24_rs: + f{1} = inzpRep4 fs{2} /\ + a24{1} = to_uint a24{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_mul_a24_mulx (inzpRep4 fs{2}) (to_uint a24{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_mul_rsr_mulx : CurveProcedures.mul ~ M.__mul4_rsr: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_mul_rsr_mulx (inzpRep4 fs{2}) (inzpRep4 g{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl__sqr_rr_mulx : CurveProcedures.sqr ~ M.__sqr4_rr: + f{1} = inzpRep4 f{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_sqr_rr_mulx (inzpRep4 f{2})). 
+ inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +(** step 0.5 : transitivity stuff **) +equiv eq_spec_impl_add_ssr_mulx : CurveProcedures.add ~ M.__add4_ssr: + g{1} = inzpRep4 fs{2} /\ + f{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__add4_ssr. wp. sp. + call eq_spec_impl_add_rrs_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_add_sss_mulx : CurveProcedures.add ~ M.__add4_sss: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 gs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__add4_sss. wp. sp. + call eq_spec_impl_add_rrs_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_sub_sss_mulx : CurveProcedures.sub ~ M.__sub4_sss: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 gs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__sub4_sss. wp. sp. + call eq_spec_impl_sub_rrs_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_a24_ss_mulx : CurveProcedures.mul_a24 ~ M.__mul4_a24_ss: + f{1} = inzpRep4 fs{2} /\ + a24{1} = to_uint a24{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__mul4_a24_ss. wp. sp. + call eq_spec_impl_mul_a24_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_rss_mulx : CurveProcedures.mul ~ M.__mul4_rss: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 gs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__mul4_rss. wp. sp. + call eq_spec_impl_mul_rsr_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_ssr_mulx : CurveProcedures.mul ~ M.__mul4_ssr: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__mul4_ssr. wp. sp. + call eq_spec_impl_mul_rsr_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_sss_mulx : CurveProcedures.mul ~ M.__mul4_sss: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 gs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__mul4_sss. wp. sp. + call eq_spec_impl_mul_rsr_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_sqr_rs_mulx : CurveProcedures.sqr ~ M.__sqr4_rs: + f{1} = inzpRep4 fs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__sqr4_rs. wp. sp. + call eq_spec_impl__sqr_rr_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_sqr_ss_mulx : CurveProcedures.sqr ~ M.__sqr4_ss: + f{1} = inzpRep4 fs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__sqr4_ss. wp. sp. + call eq_spec_impl__sqr_rr_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_rsr__rpr_mulx : M.__mul4_rsr ~ M.__mul4_rpr: + fs{1} = fp{2} /\ + g{1} = g{2} + ==> + res{1} = res{2}. +proof. + by sim. +qed. + +equiv eq_spec_impl_mul__rpr_mulx : CurveProcedures.mul ~ M.__mul4_rpr: + f{1} = inzpRep4 fp{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + transitivity + M.__mul4_rsr + ( f{1} = inzpRep4 fs{2} /\ g{1} = inzpRep4 g{2} ==> res{1} = inzpRep4 res{2}) + ( fs{1} = fp{2} /\ g{1} = g{2} ==> res{1} = res{2}). + move => &1 &2 [H] H0. + exists(fp{2}, g{2}) => />. + move => &1 &m &2 => H H0. by rewrite -H0 H. + proc *; call eq_spec_impl_mul_rsr_mulx. + by skip => />. + proc *; call eq_spec_impl_mul_rsr__rpr_mulx. + by done. +qed. + +equiv eq_spec_impl_sub_rrs_rsr_mulx : M.__sub4_rrs ~ M.__sub4_rsr: + f{1} = fs{2} /\ gs{1} = g{2} ==> ={res}. +proof. + proc. + do 2! unroll for{1} ^while. + do 2! unroll for{2} ^while. + wp; skip => />. +qed. + +equiv eq_spec_impl_sub_rsr_mulx : CurveProcedures.sub ~ M.__sub4_rsr: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. 
+ transitivity + M.__sub4_rrs + ( f{1} = inzpRep4 f{2} /\ g{1} = inzpRep4 gs{2} ==> res{1} = inzpRep4 res{2}) + ( f{1} = fs{2} /\ gs{1} = g{2} ==> res{1} = res{2}). + move => &1 &2 [H] H0. + exists(fs{2}, g{2}) => />. + move => &1 &m &2 H H0. by rewrite -H0 H. + proc *; call eq_spec_impl_sub_rrs_mulx. + by skip => />. + proc *; call eq_spec_impl_sub_rrs_rsr_mulx. + by done. +qed. + + +equiv eq_spec_impl_sub_ssr_mulx : CurveProcedures.sub ~ M.__sub4_ssr: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M.__sub4_ssr. wp. sp. + call eq_spec_impl_sub_rsr_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_rpr_mulx : CurveProcedures.mul ~ M._mul4_rpr: + f{1} = inzpRep4 fp{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M._mul4_rpr. wp. sp. + call eq_spec_impl_mul__rpr_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_rsr__mulx : CurveProcedures.mul ~ M._mul4_rsr_: + f{1} = inzpRep4 _fs{2} /\ + g{1} = inzpRep4 _g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M._mul4_rsr_. wp. sp. + call eq_spec_impl_mul_rpr_mulx. skip. auto => />. +qed. + +equiv eq_spec_impl_sqr_rr_mulx : CurveProcedures.sqr ~ M._sqr4_rr: + f{1} = inzpRep4 f{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M._sqr4_rr. wp. sp. + call (eq_spec_impl__sqr_rr_mulx) . skip. auto => />. +qed. + + +equiv eq_spec_impl_sqr_rr__mulx : CurveProcedures.sqr ~ M._sqr4_rr_: + f{1} = inzpRep4 _f{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M._sqr4_rr_. wp. sp. rewrite /copy_64 => />. + call eq_spec_impl_sqr_rr_mulx. skip. auto => />. +qed. + +(** setting last bit to 0 **) +lemma eq_set_last_bit_to_zero64_mulx x : + hoare [ + M.__decode_u_coordinate4 : + u = x + ==> + res = Curve25519_Operations.last_bit_to_zero64 x + ]. +proof. + proc; wp; skip => />. + rewrite /last_bit_to_zero64 => />; congr. + pose X := x.[3]. + rewrite /of_int /int2bs /mkseq /to_list -iotaredE => />. + rewrite andE wordP => /> k K0 K1. + rewrite map2iE // get_bits2w //. + smt(W64.initE). +qed. + +lemma ill_set_last_bit_to_zero64_mulx: islossless M.__decode_u_coordinate4 by islossless. + +lemma eq_ph_set_last_bit_to_zero64_mulx x: + phoare [ + M.__decode_u_coordinate4 : + u = x + ==> + res = Curve25519_Operations.last_bit_to_zero64 x + ] = 1%r. +proof. + by conseq ill_set_last_bit_to_zero64_mulx (eq_set_last_bit_to_zero64_mulx x). +qed. + +(** to bytes **) +lemma eq_to_bytes_mulx r: + hoare [M.__tobytes4 : + r = f + ==> + pack4 (to_list res) = (W256.of_int (asint (inzpRep4 r))) + ]. +proof. + proc. + admit. +qed. + +lemma ill_to_bytes_mulx : islossless M.__tobytes4 by islossless. + +lemma ph_to_bytes_mulx r: + phoare [M.__tobytes4 : + r = f + ==> + pack4 (to_list res) = (W256.of_int (asint (inzpRep4 r))) + ] = 1%r. +proof. + by conseq ill_to_bytes_mulx (eq_to_bytes_mulx r). +qed. + + +(** step 1 : decode_scalar_25519 **) +equiv eq_spec_impl_decode_scalar_25519_mulx : CurveProcedures.decode_scalar ~ M.__decode_scalar: + k'{1} = pack4 (to_list k{2}) + ==> + res{1} = pack32 (to_list res{2}). +proof. + proc; wp; auto => />. + unroll for{2} ^while => />; wp; skip => /> &2. + rewrite !/set64_direct !/get8 !/init8 => />. + rewrite pack4E pack32E. + rewrite !/to_list /mkseq -!iotaredE => /> . + rewrite !of_intE modz_small. by apply bound_abs. rewrite !bits2wE /int2bs /mkseq -!iotaredE => />. + rewrite wordP => i rgi />. + rewrite !of_listE !bits8E //= => />. + rewrite !get_setE //= !orE !andE !map2E //=. + rewrite !initiE => />. 
+ rewrite !initiE => />. smt(). smt(). + + case(i = 0) => /> *; case(i = 1) => /> *; case(i = 2) => /> *; case(i = 254) => /> *; case(i = 255) => /> *. + + case(i %/ 8 = 0) => /> *. + + rewrite initiE => /> . smt(). rewrite initiE => />. smt(). rewrite initiE => />. smt(). smt(). + + case(i %/ 8 - 1 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 2 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 3 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 4 = 0) => /> *. + rewrite initiE => /> /#. + + case(i %/ 8 - 5 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 6 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 7 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 8 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 9 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 10 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 11 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 12 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 13 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 14 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 15 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 16 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 17 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 18 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 19 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 20 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 21 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 22 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 23 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 24 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 25 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 26 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 27 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 28 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 29 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 30 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 31 = 0) => /> *. + + rewrite !initiE => />. smt(). + + rewrite !initiE => />. smt(). + case(i %/ 64 = 0) => /> *. smt(). smt(). + + rewrite !initiE => /> /#. smt(). +qed. + +(** step 2 : decode_u_coordinate **) +equiv eq_spec_impl_decode_u_coordinate_mulx : CurveProcedures.decode_u_coordinate ~ M.__decode_u_coordinate4: + u'{1} = pack4 (to_list u{2}) + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (eq_ph_set_last_bit_to_zero64_mulx u{2}). + inline *; wp; skip => /> &2. + rewrite inzpRep4E. congr. + rewrite to_uint_unpack4u64 valRep4E; congr; congr. + rewrite /last_bit_to_zero64 => />. + rewrite /to_list /mkseq /to_list -iotaredE => />. + do split. + + rewrite !wordP => /> i I I0. rewrite !bits64iE => />. + + rewrite set_neqiE. smt(). + + rewrite pack4E => />. rewrite of_listE => />. + + rewrite initE => />. + + have ->: (0 <= i && i < 256) by smt(). auto => />. + + rewrite initE => />. have ->: 0 <= i %/ 64 by smt(). auto => />. + + case(i %/ 64 < 4) => /> *. smt(). smt(). + + rewrite !wordP => /> i I I0. rewrite !bits64iE => />. + + rewrite set_neqiE. smt(). + + rewrite pack4E => />. rewrite of_listE => />. + + rewrite initE => />. + + have ->: (0 <= 64 + i && 64 + i < 256) by smt(). auto => />. + + rewrite initE => />. have ->: 0 <= (64 + i) %/ 64 by smt(). auto => />. + + case((64 + i) %/ 64 < 4) => /> *. 
smt(). smt(). + + rewrite !wordP => /> i I I0. rewrite !bits64iE => />. + + rewrite set_neqiE. smt(). + + rewrite pack4E => />. rewrite of_listE => />. + + rewrite initE => />. + + have ->: (0 <= 128 + i && 128 + i < 256) by smt(). auto => />. + + rewrite initE => />. have ->: 0 <= (128 + i) %/ 64 by smt(). auto => />. + + case((128 + i) %/ 64 < 4) => /> *. smt(). smt(). + + rewrite !wordP => /> i I I0. rewrite !bits64iE => />. + rewrite pack4E => />. rewrite of_listE => />. + rewrite !setE => />. rewrite initE => />. + have ->: (0 <= 192 + i && 192 + i < 256) by smt(). auto => />. + rewrite !initE => />. + have ->: (0 <= i && i < 64) by smt(). + have ->: (0 <= 192 + i && 192 + i < 256) by smt(). + auto => />. + case (i <> 63) => /> C. + have ->: 192 + i <> 255 by smt(). + auto => />. rewrite !initE. smt(). +qed. + +equiv eq_spec_impl_decode_u_coordinate_base_mulx : + CurveProcedures.decode_u_coordinate_base ~ M.__decode_u_coordinate_base4: + true + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + inline *; wp; skip => />. + rewrite inzpRep4E. congr. + rewrite to_uint_unpack4u64 valRep4E; congr; congr. + rewrite /last_bit_to_zero64 => />. + have !->: ((of_int 9))%W256.[255 <- false] = ((of_int 9))%W256. + rewrite !of_intE !bits2wE !/int2bs !/mkseq -iotaredE => />. + apply W256.ext_eq => />. move => X X0 X1. + rewrite get_setE //. case (X = 255) => /> C. + rewrite /to_list /mkseq /to_list -iotaredE => />. +qed. + + +(** step 3 : ith_bit **) +equiv eq_spec_impl_ith_bit_mulx : CurveProcedures.ith_bit ~ M.__ith_bit : + k'{1} = pack32 (to_list k{2}) /\ + ctr{1} = to_uint ctr{2} /\ + 0 <= ctr{1} < 256 + ==> + b2i res{1} = to_uint res{2}. +proof. + proc; wp; skip => /> &2 H H0. + rewrite (W64.and_mod 3 ctr{2}) //= (W64.and_mod 6 (of_int (to_uint ctr{2} %% 8))%W64) //= !to_uint_shr //= !shr_shrw. + smt(W64.to_uint_cmp W64.of_uintK W64.to_uintK). + rewrite /zeroextu64 /truncateu8 //= !of_uintK => />. + + rewrite of_intE modz_small. apply bound_abs. smt(W8.to_uint_cmp @JUtils). + rewrite bits2wE /int2bs /mkseq -iotaredE => />. + auto => />. + rewrite (modz_small (to_uint ctr{2} %% 8) W64.modulus). apply bound_abs. smt(W64.to_uint_cmp). + rewrite (modz_small (to_uint ctr{2} %% 8) 64). apply bound_abs. smt(W64.to_uint_cmp). + rewrite (modz_small (to_uint ctr{2} %% 8) W64.modulus). apply bound_abs. smt(W64.to_uint_cmp). + pose ctr := to_uint ctr{2}. + rewrite pack32E of_listE /to_list !/mkseq !initiE // -!iotaredE => />. + rewrite !initiE //=. auto => />. smt(). + rewrite !/b2i !of_intE !bits2wE !/int2bs !/mkseq //=. + rewrite -!iotaredE => />. + rewrite !to_uintE !/bs2int !/w2bits !/mkseq /big /range !/predT -!iotaredE => />. + rewrite !b2i0 => />. + rewrite !initiE => />. smt(). auto => />. + + case(ctr %/ 8 = 0) => /> *. smt(). + + case(ctr %/ 8 - 1 = 0) => /> *. smt(). + + case(ctr %/ 8 - 2 = 0) => /> *. smt(). + + case(ctr %/ 8 - 3 = 0) => /> *. smt(). + + case(ctr %/ 8 - 4 = 0) => /> *. smt(). + + case(ctr %/ 8 - 5 = 0) => /> *. smt(). + + case(ctr %/ 8 - 6 = 0) => /> *. smt(). + + case(ctr %/ 8 - 7 = 0) => /> *. smt(). + + case(ctr %/ 8 - 8 = 0) => /> *. smt(). + + case(ctr %/ 8 - 9 = 0) => /> *. smt(). + + case(ctr %/ 8 - 10 = 0) => /> *. smt(). + + case(ctr %/ 8 - 11 = 0) => /> *. smt(). + + case(ctr %/ 8 - 12 = 0) => /> *. smt(). + + case(ctr %/ 8 - 13 = 0) => /> *. smt(). + + case(ctr %/ 8 - 14 = 0) => /> *. smt(). + + case(ctr %/ 8 - 15 = 0) => /> *. smt(). + + case(ctr %/ 8 - 16 = 0) => /> *. smt(). + + case(ctr %/ 8 - 17 = 0) => /> *. smt(). + + case(ctr %/ 8 - 18 = 0) => /> *. smt(). 
+ + case(ctr %/ 8 - 19 = 0) => /> *. smt(). + + case(ctr %/ 8 - 20 = 0) => /> *. smt(). + + case(ctr %/ 8 - 21 = 0) => /> *. smt(). + + case(ctr %/ 8 - 22 = 0) => /> *. smt(). + + case(ctr %/ 8 - 23 = 0) => /> *. smt(). + + case(ctr %/ 8 - 24 = 0) => /> *. smt(). + + case(ctr %/ 8 - 25 = 0) => /> *. smt(). + + case(ctr %/ 8 - 26 = 0) => /> *. smt(). + + case(ctr %/ 8 - 27 = 0) => /> *. smt(). + + case(ctr %/ 8 - 28 = 0) => /> *. smt(). + + case(ctr %/ 8 - 29 = 0) => /> *. smt(). + + case(ctr %/ 8 - 30 = 0) => /> *. smt(). + + case(ctr %/ 8 - 31 = 0) => /> *. smt(). + + case(ctr %/ 8 - 32 = 0) => /> *. smt(). + smt(). +qed. + +equiv eq_spec_impl_init_points_mulx : + CurveProcedures.init_points ~ M.__init_points4 : + init{1} = inzpRep4 initr{2} + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2 /\ + res{1}.`3 = inzpRep4 res{2}.`3 /\ + res{1}.`4 = inzpRep4 res{2}.`4. + proof. + proc. + wp. unroll for{2} ^while. wp. skip. move => &1 &2 H H0 H1 H2 H3 H4 H5 H6. + split; auto => />. rewrite /H4 /H0 /H2 /H3 /Zp.one /set0_64_ /inzpRep4 => />. + rewrite /valRep4 /to_list /mkseq -iotaredE => />. + split; auto => />. rewrite /H5 /H0 /H3 /H2 /Zp.zero /set0_64_ /inzpRep4 => />. + rewrite /valRep4 /to_list /mkseq -iotaredE => />. + rewrite /H6 /H0 /H3 /H2 /Zp.zero /set0_64_ /inzpRep4 // /valRep4 /to_list /mkseq -iotaredE => />. + qed. + +(** step 4 : cswap **) +equiv eq_spec_impl_cswap_mulx : + CurveProcedures.cswap ~ M.__cswap4: + x2{1} = inzpRep4 x2{2} /\ + z2{1} = inzpRep4 z2r{2} /\ + x3{1} = inzpRep4 x3{2} /\ + z3{1} = inzpRep4 z3{2} /\ + b2i toswap{1} = to_uint toswap{2} + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2 /\ + res{1}.`3 = inzpRep4 res{2}.`3 /\ + res{1}.`4 = inzpRep4 res{2}.`4. +proof. +proc. +do 4! unroll for{2} ^while. +case: (toswap{1}). + rcondt {1} 1 => //. wp => /=. skip. + move => &1 &2 [#] 4!->> ??. + have mask_set : (set0_64.`6 - toswap{2}) = W64.onew. rewrite /set0_64_ /=. smt(W64.to_uint_cmp). + rewrite !mask_set /=. + have lxor1 : forall (x1 x2:W64.t), x1 `^` (x2 `^` x1) = x2. + move=> *. rewrite xorwC -xorwA xorwK xorw0 //. + have lxor2 : forall (x1 x2:W64.t), x1 `^` (x1 `^` x2) = x2. + move=> *. rewrite xorwA xorwK xor0w //. + rewrite !lxor1 !lxor2. + split. congr. apply Array4.ext_eq. smt(Array4.get_setE). + split. congr. apply Array4.ext_eq. smt(Array4.get_setE). + split. congr. apply Array4.ext_eq. smt(Array4.get_setE). + congr. apply Array4.ext_eq. rewrite /copy_64 => />. smt(Array4.get_setE). + rcondf {1} 1 => //. wp => /=; skip. + move => &1 &2 [#] 4!->> ??. + have mask_not_set : (set0_64.`6 - toswap{2}) = W64.zero. rewrite /set0_64_ => />. smt(). + rewrite !mask_not_set !andw0 !xorw0 !/copy_64 => />. + do split. + congr. smt(Array4.initE Array4.ext_eq Array4.set_set_if). + congr. smt(Array4.initE Array4.ext_eq Array4.set_set_if). + congr. smt(Array4.initE Array4.ext_eq Array4.set_set_if). + congr. smt(Array4.initE Array4.ext_eq Array4.set_set_if). +qed. + +(** step 5 : add_and_double **) +equiv eq_spec_impl_add_and_double_mulx : + CurveProcedures.add_and_double ~ M.__add_and_double4: + init{1} = inzpRep4 init{2} /\ + x2{1} = inzpRep4 x2{2} /\ + z2{1} = inzpRep4 z2r{2} /\ + x3{1} = inzpRep4 x3{2} /\ + z3{1} = inzpRep4 z3{2} + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2 /\ + res{1}.`3 = inzpRep4 res{2}.`3 /\ + res{1}.`4 = inzpRep4 res{2}.`4. +proof. +proc => /=; wp. + call eq_spec_impl_mul_rss_mulx; wp. + call eq_spec_impl_mul_sss_mulx; wp. + call eq_spec_impl_add_sss_mulx; wp. 
+ call eq_spec_impl_sqr_ss_mulx; wp. + call eq_spec_impl_mul_a24_ss_mulx; wp. + call eq_spec_impl_sqr_ss_mulx; wp. swap{1} 14 1. + call eq_spec_impl_mul_ssr_mulx; wp. + call eq_spec_impl_sub_ssr_mulx; wp. + call eq_spec_impl_sub_sss_mulx; wp. + call eq_spec_impl_add_sss_mulx; wp. + call eq_spec_impl_sqr_rs_mulx; wp. + call eq_spec_impl_sqr_ss_mulx; wp. + call eq_spec_impl_mul_sss_mulx; wp. + call eq_spec_impl_mul_sss_mulx; wp. + call eq_spec_impl_add_sss_mulx; wp. + call eq_spec_impl_sub_sss_mulx; wp. + call eq_spec_impl_add_ssr_mulx; wp. + call eq_spec_impl_sub_ssr_mulx; + wp. skip. by done. +qed. + +(** step 6 : montgomery_ladder_step **) +equiv eq_spec_impl_montgomery_ladder_step_mulx : + CurveProcedures.montgomery_ladder_step ~ M.__montgomery_ladder_step4: + k'{1} = pack32 (to_list k{2}) /\ + init'{1} = inzpRep4 init{2} /\ + x2{1} = inzpRep4 x2{2} /\ + z2{1} = inzpRep4 z2r{2} /\ + x3{1} = inzpRep4 x3{2} /\ + z3{1} = inzpRep4 z3{2} /\ + b2i swapped{1} = to_uint swapped{2} /\ + ctr'{1} = to_uint ctr{2} /\ + 0 <= ctr'{1} < 256 + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2 /\ + res{1}.`3 = inzpRep4 res{2}.`3 /\ + res{1}.`4 = inzpRep4 res{2}.`4 /\ + b2i res{1}.`5 = to_uint res{2}.`5. +proof. + proc => /=; wp. + call eq_spec_impl_add_and_double_mulx. wp. + call eq_spec_impl_cswap_mulx. wp. + call eq_spec_impl_ith_bit_mulx. wp. skip. + move => &1 &2 [H0] [H1] [H2] [H3] [H4] [H5] [H6] H7. split. + auto => />. rewrite H0. + move => [H8 H9] H10 H11 H12 H13 H14. + split; auto => />. rewrite /H14 /H13. + rewrite /b2i. + case: (swapped{1} ^^ H10). + move => *. smt(W64.to_uintK W64.xorw0 W64.xorwC). + move => *. smt(W64.ge2_modulus W64.to_uintK W64.of_uintK W64.xorwK). +qed. + +(** step 7 : montgomery_ladder **) +equiv eq_spec_impl_montgomery_ladder_mulx : + CurveProcedures.montgomery_ladder ~ M.__montgomery_ladder4 : + init'{1} = inzpRep4 u{2} /\ + k'{1} = pack32 (to_list k{2}) + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2. +proof. + proc. wp. sp. + unroll {1} 4. + rcondt {1} 4. auto => />. inline CurveProcedures.init_points. + wp. sp. skip. auto => />. + while( + k'{1} = pack32 (to_list k{2}) /\ + ctr{1} = to_uint ctr{2} /\ + -1 <= ctr{1} < 256 /\ + init'{1} = inzpRep4 us{2} /\ + x2{1} = inzpRep4 x2{2} /\ + x3{1} = inzpRep4 x3{2} /\ + z2{1} = inzpRep4 z2r{2} /\ + z3{1} = inzpRep4 z3{2} /\ + b2i swapped{1} = to_uint swapped{2}). + wp. sp. call eq_spec_impl_montgomery_ladder_step_mulx. skip. auto => />. + move => &1 &2 ctrR H H0 H1 H2 E3. split. + rewrite to_uintB. rewrite uleE to_uint1 => />. smt(). rewrite to_uint1 => />. + smt(W64.to_uint_cmp). + move => H3 H4 H5 H6 H7 H8 H9 H10 H11 H12. split. smt(W64.to_uint_cmp). + rewrite ultE to_uintB. rewrite uleE to_uint1. smt(). + rewrite to_uint1 to_uint0 //=. wp. + call eq_spec_impl_montgomery_ladder_step_mulx. wp. call eq_spec_impl_init_points_mulx. skip. done. +qed. + +(** step 8 : iterated square **) +equiv eq_spec_impl_it_sqr_aux_mulx : + M.__it_sqr4_x2 ~ CurveProcedures.it_sqr_aux: + inzpRep4 f{1} = a{2} /\ + W32.to_uint i{1} = l{2} /\ + 0 < l{2} + ==> + inzpRep4 res{1} = res{2}. +proof. +proc; simplify. + while( + 0 <= ii{2} /\ 0 <= W32.to_uint i{1} /\ ii{2} = W32.to_uint i{1} /\ + f{2} = inzpRep4 f{1} /\ zf{1} = (0 = W32.to_uint i{1}) + ). + wp. + symmetry. + call eq_spec_impl__sqr_rr_mulx. wp. call eq_spec_impl__sqr_rr_mulx. wp. symmetry. + skip => />. + move => &1 *. do split. smt(). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. + + rewrite to_uintB. rewrite uleE to_uint1; smt(). 
rewrite to_uint1. smt(W32.to_uint_cmp). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite to_uintB. + + rewrite uleE to_uint1; smt(). rewrite to_uint1 //. + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of to_uintB. + + rewrite uleE to_uint1. smt(). rewrite -to_uintB. rewrite uleE. smt(W32.to_uint_cmp). + + rewrite to_uintB. rewrite uleE to_uint1; smt(). rewrite to_uint1. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.of_intD). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + wp. symmetry. call eq_spec_impl__sqr_rr_mulx. wp. call eq_spec_impl__sqr_rr_mulx. wp. + symmetry. + skip => />. move => &1 H. + do split. smt(). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + rewrite to_uintB. rewrite uleE to_uint1. smt(W32.to_uint_cmp). rewrite to_uint1 //. + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). +rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). +qed. + +equiv eq_spec_impl_it_sqr_aux_mulx_test : + CurveProcedures.it_sqr_aux ~ M.__it_sqr4_x2: + a{1} = inzpRep4 f{2} /\ + l{1} = W32.to_uint i{2} /\ + 0 < l{1} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc; simplify. + while( + 0 <= ii{1} /\ 0 <= W32.to_uint i{2} /\ ii{1} = W32.to_uint i{2} /\ + f{1} = inzpRep4 f{2} /\ zf{2} = (0 = W32.to_uint i{2}) + ). + wp. call eq_spec_impl__sqr_rr_mulx. wp. call eq_spec_impl__sqr_rr_mulx. wp. + skip => />. + move => &1 *. do split. smt(). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. + + rewrite to_uintB. rewrite uleE to_uint1; smt(). rewrite to_uint1. smt(W32.to_uint_cmp). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite to_uintB. + + rewrite uleE to_uint1; smt(). rewrite to_uint1 //. + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of to_uintB. + + rewrite uleE to_uint1. smt(). rewrite -to_uintB. rewrite uleE. smt(W32.to_uint_cmp). + + rewrite to_uintB. rewrite uleE to_uint1; smt(). rewrite to_uint1. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.of_intD). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + wp. call eq_spec_impl__sqr_rr_mulx. wp. call eq_spec_impl__sqr_rr_mulx. wp. + skip => />. move => &1 H. + do split. smt(). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + rewrite to_uintB. rewrite uleE to_uint1. smt(W32.to_uint_cmp). rewrite to_uint1 //. + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). 
+ rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + rewrite /DEC_32 /rflags_of_aluop_nocf_w => />. rewrite /ZF_of => *. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). +qed. + + +lemma eq_spec_impl__it_sqr_mulx (i1: int) (i2: int): + i1 = i2 => 2 <= i1 => +equiv[ + M.__it_sqr4_x2 ~ CurveProcedures.it_sqr: + i1 = W32.to_uint i{1} /\ + i2 = i{2} /\ + W32.to_uint i{1} = i{2} /\ + inzpRep4 f{1} = f{2} + ==> + inzpRep4 res{1} = zexp res{2} (exp 2 i1) + ]. +proof. + move => I I0. + transitivity + CurveProcedures.it_sqr_aux + ( + l{2} = W32.to_uint i{1} /\ + a{2} = inzpRep4 f{1} /\ + 1 < l{2} + ==> + inzpRep4 res{1} = res{2}) + ( a{1} = f{2} /\ + l{1} = i{2} /\ + l{1} = i1 /\ + l{1} = i2 /\ + 2 <= i{2} + ==> + res{1} = zexp res{2} (exp 2 i1)). + auto => />. + move => &1 &2 *. + exists(f{2}, i{2}) => />. smt(). + move => &1 &m &2 H H0. rewrite -H0. assumption. + proc *. + call eq_spec_impl_it_sqr_aux_mulx. skip => />. smt(W32.to_uint_cmp). + + proc; inline *; simplify. + + while( + 0 <= ii{1} /\ + 0 <= ii{2} /\ + ii{1} = ii{2} /\ + 1 <= counter{2} /\ + counter{2} = i1 - ii{2} /\ + f{1} = zexp h{2} (exp 2 (counter{2}))). + wp; skip. auto => />. + move => &1 &2 H H0 H1. + smt( ZModpRing.exprM IntID.exprN IntID.exprN1 IntID.exprD_nneg). + wp. + skip => />. move => &1 &2. + do split. smt(). smt(). smt(). + move => H H1 H2 H3 H4 H5 H6. + congr. smt(). +qed. + +lemma eq_spec_impl__it_sqr_mulx_x2 (i1: int) (i2: int): + 2*i1 = i2 => 2 <= i1 => 4 <= i2 => i2 %% 2 = 0 => +equiv[ + M.__it_sqr4_x2 ~ CurveProcedures.it_sqr: + i1 = W32.to_uint i{1} /\ + i2 = i{2} /\ + 2*W32.to_uint i{1} = i{2} /\ + i{2} %% 2 = 0 /\ + inzpRep4 f{1} = f{2} + ==> + inzpRep4 res{1} = res{2} + ]. +proof. + move => I I0 I1 I2; + transitivity + CurveProcedures.it_sqr_aux + ( + l{2} = W32.to_uint i{1} /\ + a{2} = inzpRep4 f{1} /\ + 1 < l{2} + ==> + inzpRep4 res{1} = res{2}) + ( a{1} = f{2} /\ + 2*l{1} = i{2} /\ + l{1} = i1 /\ + i{2} = i2 /\ + 4 <= i{2} /\ + 2 <= l{1} + ==> + res{1} = res{2}). + auto => />. move => &1 &2 *. + exists(f{2}, i1) => />. smt(). smt(). + proc *. + call eq_spec_impl_it_sqr_aux_mulx. skip => />. smt(W32.to_uint_cmp). + proc; simplify. inline *. + async while + [ (fun r => 0%r < ii%r), (ii{1} - 1)%r ] + [ (fun r => 0%r < ii%r), (ii{1} - 1)%r ] + (0 < ii{1} /\ 0 < ii{2}) (!(0 < ii{1})) + : + ( + (ii{2} %% 2 = 0 => 2*ii{1} - 1 = ii{2}) /\ + (ii{2} %% 2 <> 0 => 2*ii{1} = ii{2}) /\ + 0 <= ii{1} /\ + 0 <= ii{2} + ). + auto => />; move => &1 &2 * /#. + auto => />; move => &1 &2 * /#. + auto => />; move => &1 &2 * /#. + auto => />; move => &2 * /#. + move => &1; auto => />. + move => v1 v2; auto => />. + while( + 0 <= ii{1} /\ + 0 <= ii{2} /\ + (ii{2} %% 2 = 0 => 2*ii{1} - 1 = ii{2}) /\ + (ii{2} %% 2 <> 0 => 2*ii{1} = ii{2}) /\ + 1 <= counter{2} /\ + f{1} = zexp h{2} (exp 2 counter{2}) + ) => //=. + auto => />; move => &1 &2 H H0 H1 H2 H3 H4 H5 H6 H7. + smt( ZModpRing.exprM IntID.exprN IntID.exprN1 IntID.exprD_nneg). + auto => />; move => &1 &2 H H0 H1 H2 H3 H4 H5. + smt( ZModpRing.exprM IntID.exprN IntID.exprN1 IntID.exprD_nneg). + while true (ii) => //. + move => H; auto => />. skip => />; move => &hr H0 H1 H2 H3 H4 H5 /#. + while true (ii) => //. move => H; auto => /> /#. skip => /> /#. + wp. skip => />. move => &1 &2 H H0 H1 H2 H3 H4 H5 H6 H7 H8 H9. smt(). +qed. 
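+
+(* The `__it_sqr4_x2` routine performs two squarings per loop iteration, so an
+   iteration count of i on the implementation side corresponds to 2*i squaring
+   steps of the specification.  The lemmas above record this in two forms: with
+   equal counters (i1 = i2) the implementation result equals the specification
+   result raised to the power 2^i1, while with i2 = 2*i1 the two results
+   coincide.  The wrapper lemmas below lift both statements to _it_sqr4_x2 and
+   _it_sqr4_x2_. *)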
+ + +lemma eq_spec_impl_it_sqr_x2_mulx (i1: int) (i2: int): + i1 = i2 => 2 <= i1 => i2 %% 2 = 0 => +equiv[ + M._it_sqr4_x2 ~ CurveProcedures.it_sqr: + i1 = W32.to_uint i{1} /\ + i2 = i{2} /\ + W32.to_uint i{1} = i{2} /\ + i{2} %% 2 = 0 /\ + inzpRep4 f{1} = f{2} + ==> + inzpRep4 res{1} = zexp res{2} (exp 2 i1) + ]. + move => *; proc *. + inline {1} 1. sp; wp. + call (eq_spec_impl__it_sqr_mulx i1 i2). skip => />. +qed. + + +lemma eq_spec_impl_it_sqr_x2__mulx (i1: int) (i2: int): + i1 = i2 => 2 <= i1 => i2 %% 2 = 0 => + equiv[ + M._it_sqr4_x2_ ~ CurveProcedures.it_sqr: + i1 = W32.to_uint i{1} /\ + i2 = i{2} /\ + W32.to_uint i{1} = i{2} /\ + i{2} %% 2 = 0 /\ + inzpRep4 _f{1} = f{2} + ==> + inzpRep4 res{1} = zexp res{2} (exp 2 i1) + ]. +proof. + move => *; proc *. + inline{1} 1. inline{1} 5. wp; sp. + call (eq_spec_impl__it_sqr_mulx i1 i2). skip => />. +qed. + + +lemma eq_spec_impl_it_sqr_x2_mulx_x2 (i1: int) (i2: int): + 2*i1 = i2 => 2 <= i1 => 4 <= i2 => i2 %% 2 = 0 => +equiv[ + M._it_sqr4_x2 ~ CurveProcedures.it_sqr: + i1 = W32.to_uint i{1} /\ + i2 = i{2} /\ + 2*W32.to_uint i{1} = i{2} /\ + i{2} %% 2 = 0 /\ + inzpRep4 f{1} = f{2} + ==> + inzpRep4 res{1} = res{2} + ]. + move => *; proc *. + inline {1} 1. sp; wp. + call (eq_spec_impl__it_sqr_mulx_x2 i1 i2). skip => />. +qed. + + +lemma eq_spec_impl_it_sqr_x2__mulx_x2 (i1: int) (i2: int): + 2*i1 = i2 => 2 <= i1 => 4 <= i2 => i2 %% 2 = 0 => + equiv[ + M._it_sqr4_x2_ ~ CurveProcedures.it_sqr: + i1 = W32.to_uint i{1} /\ + i2 = i{2} /\ + 2*W32.to_uint i{1} = i{2} /\ + i{2} %% 2 = 0 /\ + inzpRep4 _f{1} = f{2} + ==> + inzpRep4 res{1} = res{2} + ]. +proof. + move => *; proc *. + inline{1} 1. inline{1} 5. wp; sp. + call (eq_spec_impl__it_sqr_mulx_x2 i1 i2). skip => />. +qed. + + +(** step 9 : invert **) +equiv eq_spec_impl_invert_mulx : + CurveProcedures.invert ~ M.__invert4: + fs{1} = inzpRep4 f{2} + ==> res{1} = inzpRep4 res{2}. +proof. +proc. sp. auto => />. + call eq_spec_impl_mul_rsr__mulx. wp. + call eq_spec_impl_sqr_rr__mulx. wp. + symmetry; call (eq_spec_impl_it_sqr_x2__mulx_x2 2 4); wp; symmetry. + call eq_spec_impl_mul_rsr__mulx. wp. + symmetry; call (eq_spec_impl_it_sqr_x2__mulx_x2 25 50); wp; symmetry. + call eq_spec_impl_mul_rsr__mulx. wp. + symmetry; call (eq_spec_impl_it_sqr_x2__mulx_x2 50 100); wp; symmetry. + call eq_spec_impl_mul_rsr__mulx. wp. + symmetry; call (eq_spec_impl_it_sqr_x2__mulx_x2 25 50); wp; symmetry. + call eq_spec_impl_mul_rsr__mulx. wp. + symmetry; call (eq_spec_impl_it_sqr_x2__mulx_x2 5 10); wp; symmetry. + call eq_spec_impl_mul_rsr__mulx. wp. + symmetry; call (eq_spec_impl_it_sqr_x2__mulx_x2 10 20); wp; symmetry. + call eq_spec_impl_mul_rsr__mulx. wp. + symmetry; call (eq_spec_impl_it_sqr_x2__mulx_x2 5 10); wp; symmetry. + call eq_spec_impl_mul_rsr__mulx. wp. + symmetry; call (eq_spec_impl_it_sqr_x2__mulx_x2 2 4); wp; symmetry. + call eq_spec_impl_sqr_rr__mulx. wp. + call eq_spec_impl_mul_rsr__mulx. wp. + call eq_spec_impl_sqr_rr__mulx. wp. + call eq_spec_impl_mul_rsr__mulx. wp. + call eq_spec_impl_mul_rsr__mulx. wp. + call eq_spec_impl_sqr_rr__mulx. wp. + call eq_spec_impl_sqr_rr__mulx. wp. + call eq_spec_impl_sqr_rr__mulx. wp. skip. + done. +qed. + +(** step 10 : encode point **) +equiv eq_spec_impl_encode_point_mulx : CurveProcedures.encode_point ~ M.__encode_point4: + x2{1} = inzpRep4 x2{2} /\ + z2{1} = inzpRep4 z2r{2} + ==> + res{1} = pack4 (to_list res{2}). +proof. + proc. wp. + ecall {2} (ph_to_bytes_mulx (r{2})). wp. + call eq_spec_impl_mul_rsr_mulx. wp. + call eq_spec_impl_invert_mulx. + wp; skip => />. 
move => H H0 H1. + by rewrite -H1. +qed. + +(** step 11 : scalarmult **) +equiv eq_spec_impl_scalarmult_internal_mulx : + CurveProcedures.scalarmult_internal ~ M.__curve25519_internal_mulx: + k'{1} = pack32 (to_list k{2}) /\ + u''{1} = inzpRep4 u{2} + ==> + res{1} = pack4 (to_list res{2}). +proof. + proc => /=. wp. + call eq_spec_impl_encode_point_mulx. wp. + call eq_spec_impl_montgomery_ladder_mulx. wp. skip. + done. +qed. + +equiv eq_spec_impl_scalarmult_mulx : + CurveProcedures.scalarmult ~ M.__curve25519_mulx: + k'{1} = pack4 (to_list _k{2}) /\ + u'{1} = pack4 (to_list _u{2}) + ==> + res{1} = pack4 (to_list res{2}). +proof. + proc => /=. wp. + call eq_spec_impl_scalarmult_internal_mulx => />. wp. + call eq_spec_impl_decode_u_coordinate_mulx => />. wp. + call eq_spec_impl_decode_scalar_25519_mulx => />. + wp; skip => />. +qed. + +equiv eq_spec_impl_scalarmult_base_mulx : + CurveProcedures.scalarmult_base ~ M.__curve25519_mulx_base: + k'{1} = pack4 (to_list _k{2}) + ==> + res{1} = pack4 (to_list res{2}). +proof. + proc => /=; wp. + call eq_spec_impl_scalarmult_internal_mulx => />; wp. + call eq_spec_impl_decode_u_coordinate_base_mulx => />; wp. + call eq_spec_impl_decode_scalar_25519_mulx. + wp. skip => />. +qed. + +lemma eq_spec_impl_scalarmult_jade_mulx _qp _np _pp: + equiv [CurveProcedures.scalarmult ~ M.jade_scalarmult_curve25519_amd64_mulx: + qp{2} = _qp /\ + np{2} = _np /\ + pp{2} = _pp /\ + k'{1} = pack4 (to_list np{2}) /\ + u'{1} = pack4 (to_list pp{2}) + ==> + res{1} = pack4 (to_list res{2}.`1) /\ + res{2}.`2 = W64.zero]. +proof. + proc *. inline M.jade_scalarmult_curve25519_amd64_mulx; wp; sp. + call eq_spec_impl_scalarmult_mulx. skip => />. +qed. + +lemma eq_spec_impl_scalarmult_jade_base _qp _np: + equiv [CurveProcedures.scalarmult_base ~ M.jade_scalarmult_curve25519_amd64_mulx_base: + qp{2} = _qp /\ + np{2} = _np /\ + k'{1} = pack4 (to_list np{2}) + ==> + res{1} = pack4 (to_list res{2}.`1) /\ + res{2}.`2 = W64.zero]. +proof. + proc *. inline M.jade_scalarmult_curve25519_amd64_mulx_base; wp; sp. + call eq_spec_impl_scalarmult_base_mulx. skip => />. +qed. + +(* Proofs for older implementation *) +(* +lemma eq_spec_impl_scalarmult_jade_mulx mem _qp _np _pp: + equiv [CurveProcedures.scalarmult ~ M.jade_scalarmult_curve25519_amd64_mulx: + valid_ptr (W64.to_uint _qp) 32 /\ + valid_ptr (W64.to_uint _np) 32 /\ + valid_ptr (W64.to_uint _pp) 32 /\ + Glob.mem{2} = mem /\ + qp{2} = _qp /\ + np{2} = _np /\ + pp{2} = _pp /\ + k'{1} = pack4 (load_array4 (Glob.mem{2}) (W64.to_uint np{2})) /\ + u'{1} = pack4 (load_array4 (Glob.mem{2}) (W64.to_uint pp{2})) + ==> + res{1} = pack4 (load_array4 Glob.mem{2} (W64.to_uint res{2}.`1)) /\ + res{2}.`2 = W64.zero]. +proof. + proc *. inline M.jade_scalarmult_curve25519_amd64_mulx; wp; sp. + inline M.__load4 M.__store4. + do 3! unroll for{2} ^while. wp. sp. + call eq_spec_impl_scalarmult_mulx. skip => />. + move => &2 H H0 H1 H2 H3 H4. + do split. + congr; congr; rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; congr; rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + move => H5 H6 H7. + congr; rewrite /load_array4 /to_list /mkseq -iotaredE => />. 
+ congr; congr. + apply (load_store_pos Glob.mem{2} qp{2} H7 0). rewrite /valid_ptr; by do split => /> //=. done. + congr. + apply (load_store_pos Glob.mem{2} qp{2} H7 8). rewrite /valid_ptr; by do split => /> //=. done. + congr. + apply (load_store_pos Glob.mem{2} qp{2} H7 16). rewrite /valid_ptr; by do split => /> //=. done. + congr. + apply (load_store_pos Glob.mem{2} qp{2} H7 24). rewrite /valid_ptr; by do split => /> //=. done. +qed. +*) + +(* +lemma eq_spec_impl_scalarmult_jade_base mem _qp _np: + equiv [CurveProcedures.scalarmult_base ~ M.jade_scalarmult_curve25519_amd64_mulx_base: + valid_ptr (W64.to_uint _qp) 32 /\ + valid_ptr (W64.to_uint _np) 32 /\ + Glob.mem{2} = mem /\ + qp{2} = _qp /\ + np{2} = _np /\ + k'{1} = pack4 (load_array4 (Glob.mem{2}) (W64.to_uint np{2})) + ==> + res{1} = pack4 (load_array4 Glob.mem{2} (W64.to_uint res{2}.`1)) /\ + res{2}.`2 = W64.zero]. +proof. + proc *. inline M.jade_scalarmult_curve25519_amd64_mulx_base. wp. sp. + inline M.__load4 M.__store4. + do 2! unroll for{2} ^while. wp; sp. + call eq_spec_impl_scalarmult_base_mulx. skip => />. + move => &2 H H0 H1 H2. do split. + congr; congr; rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + move => H3 H4. + congr; congr; rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + apply (load_store_pos Glob.mem{2} qp{2} H4 0). rewrite /valid_ptr; by do split => /> //=. done. + apply (load_store_pos Glob.mem{2} qp{2} H4 8). rewrite /valid_ptr; by do split => /> //=. done. + apply (load_store_pos Glob.mem{2} qp{2} H4 16). rewrite /valid_ptr; by do split => /> //=. done. + apply (load_store_pos Glob.mem{2} qp{2} H4 24). rewrite /valid_ptr; by do split => /> //=. done. +qed. 
+*) diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Operations.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Operations.ec new file mode 120000 index 0000000..5b3e5c8 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Operations.ec @@ -0,0 +1 @@ +../common/Curve25519_Operations.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_PHoare.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_PHoare.ec new file mode 120000 index 0000000..fd8d683 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_PHoare.ec @@ -0,0 +1 @@ +../common/Curve25519_PHoare.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Procedures.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Procedures.ec new file mode 120000 index 0000000..e19bf68 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Procedures.ec @@ -0,0 +1 @@ +../common/Curve25519_Procedures.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Spec.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Spec.ec new file mode 120000 index 0000000..e27e330 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/Curve25519_Spec.ec @@ -0,0 +1 @@ +../common/Curve25519_Spec.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/EClib.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/EClib.ec new file mode 120000 index 0000000..904ee5a --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/EClib.ec @@ -0,0 +1 @@ +../common/EClib.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/W64limbs.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/W64limbs.ec new file mode 120000 index 0000000..ad16992 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/W64limbs.ec @@ -0,0 +1 @@ +../common/W64limbs.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/Zp_25519.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/Zp_25519.ec new file mode 120000 index 0000000..9e3e196 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/Zp_25519.ec @@ -0,0 +1 @@ +../common/Zp_25519.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/mulx/Zp_limbs.ec b/proof/crypto_scalarmult/curve25519/amd64/mulx/Zp_limbs.ec new file mode 120000 index 0000000..daf4f72 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/mulx/Zp_limbs.ec @@ -0,0 +1 @@ +../common/Zp_limbs.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/CorrectnessProof.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/CorrectnessProof.ec new file mode 100644 index 0000000..f7d33a8 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/CorrectnessProof.ec @@ -0,0 +1,1151 @@ +require import AllCore Bool List Int IntDiv StdOrder CoreMap Ring Distr BitEncoding StdRing Pervasive Logic StdBigop Zp_limbs. +from Jasmin require import JModel JMemory JWord JWord_array JUtils. +require import Curve25519_Procedures. +require import Scalarmult_s. +import Zp_25519 Zp_limbs EClib Zp. +import Curve25519_Procedures StdOrder.IntOrder EClib StdOrder.IntOrder BitEncoding.BS2Int Ring.IntID StdBigop.Bigint. +import Scalarmult_s. + +require import Array4 Array8 Array32. +require import W64limbs. 
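+
+(* Structure of this file: we first state Hoare specifications for the 4-limb
+   field arithmetic (addition, subtraction, multiplication by a24,
+   multiplication and squaring); these lemmas are currently admitted. The
+   corresponding procedures are then proved lossless, which turns each Hoare
+   statement into a pHoare statement via conseq. Finally, the pHoare
+   statements are lifted to equiv lemmas against the abstract CurveProcedures
+   specification and composed, step by step, into the correctness of scalar
+   multiplication. *)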
+ +(** hoares, lossless and phoares **) +lemma h_add_rrs_ref4 (_f _g: zp): + hoare [M.__add4_rrs : + inzpRep4 f = _f /\ inzpRep4 g = _g + ==> + inzpRep4 res = _f + _g + ]. +proof. + proc. + admit. +qed. + +lemma h_sub_rrs_ref4 (_f _g: zp): + hoare [M.__sub4_rrs : + inzpRep4 f = _f /\ inzpRep4 gs = _g + ==> + inzpRep4 res = _f - _g + ]. +proof. + proc. + admit. +qed. + +lemma h_mul_a24_ref4 (_f : zp, _a24: int): + hoare [M.__mul4_a24_rs : + inzpRep4 xa = _f /\ _a24 = to_uint a24 + ==> + inzpRep4 res = _f * inzp _a24 + ]. +proof. + proc. + admit. +qed. + +lemma h_mul_rss_ref4 (_f _g: zp): + hoare [M.__mul4_rss : + inzpRep4 xa = _f /\ inzpRep4 ya = _g + ==> + inzpRep4 res = _f * _g + ]. +proof. + proc. + admit. +qed. + + +lemma h_mul_pp_ref4 (_f _g: zp): + hoare [M._mul4_pp : + inzpRep4 xa = _f /\ inzpRep4 ya = _g + ==> + inzpRep4 res = _f * _g + ]. +proof. + proc. + admit. +qed. + +lemma h_sqr_rs_ref4 (_f: zp): + hoare [M.__sqr4_rs : + inzpRep4 xa = _f + ==> + inzpRep4 res = ZModpRing.exp _f 2 + ]. +proof. + proc. + admit. +qed. + + +lemma h_sqr_p_ref4 (_f: zp): + hoare [M._sqr4_p : + inzpRep4 xa = _f + ==> + inzpRep4 res = ZModpRing.exp _f 2 + ]. +proof. + proc. + admit. +qed. + +lemma ill_add_rrs_ref4 : islossless M.__add4_rrs. + by proc; do 2! unroll for ^while; islossless. +qed. + +lemma ph_add_rrs_ref4 (_f _g: zp): + phoare [M.__add4_rrs : + inzpRep4 f = _f /\ inzpRep4 g = _g + ==> + inzpRep4 res = _f + _g + ] = 1%r. +proof. + by conseq ill_add_rrs_ref4 (h_add_rrs_ref4 _f _g). +qed. + +lemma ill_sub_rrs_ref4 : islossless M.__sub4_rrs. + by proc; do 2! unroll for ^while; islossless. +qed. + +lemma ph_sub_rrs_ref4 (_f _g: zp): + phoare [M.__sub4_rrs : + inzpRep4 f = _f /\ inzpRep4 gs = _g + ==> + inzpRep4 res = _f - _g + ] = 1%r. +proof. + by conseq ill_sub_rrs_ref4 (h_sub_rrs_ref4 _f _g). +qed. + +lemma ill_mul_a24_ref4 : islossless M.__mul4_a24_rs by islossless. + +lemma ph_mul_a24_ref4 (_f: zp, _a24: int): + phoare [M.__mul4_a24_rs : + inzpRep4 xa = _f /\ _a24 = to_uint a24 + ==> + inzpRep4 res = _f * inzp _a24 + ] = 1%r. +proof. + by conseq ill_mul_a24_ref4 (h_mul_a24_ref4 _f _a24). +qed. + +lemma ill_mul_rss_ref4 : islossless M.__mul4_rss. +proof. + proc. + do 6! unroll for ^while. + rcondt 22. auto => />. rcondf 27; auto => />. rcondf 36; auto => />. rcondf 45; auto => />. + rcondt 60; auto => />. rcondf 68; auto => />. rcondt 72; auto => />. rcondf 80; auto => />. + rcondt 84; auto => />. rcondf 92; auto => />. rcondf 96; auto => />. rcondt 108; auto => />. + rcondf 116; auto => />. rcondt 120; auto => />. rcondf 128; auto => />. rcondt 132; auto => />. + rcondf 140; auto => />. rcondf 144; auto => />. rcondt 156; auto => />. rcondf 164; auto => />. + rcondt 168; auto => />. rcondf 176; auto => />. rcondt 180; auto => />. rcondf 188; auto => />. + rcondf 192; auto => />. + inline *. + do 2! unroll for ^while. by islossless. +qed. + +lemma ph_mul_rss_ref4 (_f _g : zp): + phoare [M.__mul4_rss : + inzpRep4 xa = _f /\ inzpRep4 ya = _g + ==> + inzpRep4 res = _f * _g] = 1%r. +proof. + by conseq ill_mul_rss_ref4 (h_mul_rss_ref4 _f _g). +qed. + +lemma ill_mul_pp_ref4 : islossless M._mul4_pp. +proof. + proc. + do 6! unroll for ^while. + rcondt 22. auto => />. rcondf 27; auto => />. rcondf 36; auto => />. rcondf 45; auto => />. + rcondt 60; auto => />. rcondf 68; auto => />. rcondt 72; auto => />. rcondf 80; auto => />. + rcondt 84; auto => />. rcondf 92; auto => />. rcondf 96; auto => />. rcondt 108; auto => />. + rcondf 116; auto => />. rcondt 120; auto => />. rcondf 128; auto => />. 
rcondt 132; auto => />. + rcondf 140; auto => />. rcondf 144; auto => />. rcondt 156; auto => />. rcondf 164; auto => />. + rcondt 168; auto => />. rcondf 176; auto => />. rcondt 180; auto => />. rcondf 188; auto => />. + rcondf 192; auto => />. + inline *. + do 3! unroll for ^while. by islossless. +qed. + +lemma ph_mul_pp_ref4 (_f _g : zp): + phoare [M._mul4_pp : + inzpRep4 xa = _f /\ inzpRep4 ya = _g + ==> + inzpRep4 res = _f * _g] = 1%r. +proof. + by conseq ill_mul_pp_ref4 (h_mul_pp_ref4 _f _g). +qed. + +lemma ill_sqr_rs_ref4 : islossless M.__sqr4_rs + by proc; inline *; do 2! unroll for ^while; islossless. + +lemma ph_sqr_rs_ref4 (_f: zp): + phoare [M.__sqr4_rs : + inzpRep4 xa = _f + ==> + inzpRep4 res = ZModpRing.exp _f 2] = 1%r. +proof. + by conseq ill_sqr_rs_ref4 (h_sqr_rs_ref4 _f). +qed. + +lemma ill_sqr_p_ref4 : islossless M._sqr4_p + by proc; inline *; do 3! unroll for ^while; islossless. + +lemma ph_sqr_p_ref4 (_f: zp): + phoare [M._sqr4_p : + inzpRep4 xa = _f + ==> + inzpRep4 res = ZModpRing.exp _f 2] = 1%r. +proof. + by conseq ill_sqr_p_ref4 (h_sqr_p_ref4 _f). +qed. + +(** step 0 : add sub mul sqr **) +equiv eq_spec_impl_add_rrs_ref4 : CurveProcedures.add ~ M.__add4_rrs: + f{1} = inzpRep4 f{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_add_rrs_ref4 (inzpRep4 f{2}) (inzpRep4 g{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_sub_rrs_ref4 : CurveProcedures.sub ~ M.__sub4_rrs: + f{1} = inzpRep4 f{2} /\ + g{1} = inzpRep4 gs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_sub_rrs_ref4 (inzpRep4 f{2}) (inzpRep4 gs{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_mul_a24_rs_ref4 : CurveProcedures.mul_a24 ~ M.__mul4_a24_rs: + f{1} = inzpRep4 xa{2} /\ + a24{1} = to_uint a24{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_mul_a24_ref4 (inzpRep4 xa{2}) (to_uint a24{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_mul_rss_ref4 : CurveProcedures.mul ~ M.__mul4_rss: + f{1} = inzpRep4 xa{2} /\ + g{1} = inzpRep4 ya{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_mul_rss_ref4 (inzpRep4 xa{2}) (inzpRep4 ya{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_sqr_ref4 : CurveProcedures.sqr ~ M.__sqr4_rs: + f{1} = inzpRep4 xa{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_sqr_rs_ref4 (inzpRep4 xa{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +(** step 0.5 : transitivity stuff **) +equiv eq_spec_impl_add_ssr_ref4 : CurveProcedures.add ~ M.__add4_ssr: + f{1} = inzpRep4 g{2} /\ + g{1} = inzpRep4 fs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline{2} (1). wp. sp. + call (eq_spec_impl_add_rrs_ref4). skip. auto => />. +qed. + +equiv eq_spec_impl_add_sss_ref4 : CurveProcedures.add ~ M.__add4_sss: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 gs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline{2} (1). wp. sp. + call eq_spec_impl_add_rrs_ref4. skip. auto => />. +qed. + +equiv eq_spec_impl_sub_sss_ref4 : CurveProcedures.sub ~ M.__sub4_sss: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 gs{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline{2} (1). wp. sp. + call eq_spec_impl_sub_rrs_ref4. skip. auto => />. +qed. 
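+
+(* __sub4_rsr has no pHoare statement of its own: we first show that it
+   computes the same function as __sub4_rrs, and then transfer the
+   specification equivalence by transitivity. *)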
+ +equiv eq_spec_impl_sub_rrs_rsr_ref4 : M.__sub4_rrs ~ M.__sub4_rsr: + f{1} = fs{2} /\ + gs{1} = g{2} + ==> + res{1} = res{2}. +proof. + proc. + do 2! unroll for{1} ^while. + do 2! unroll for{2} ^while. + wp; skip => />. +qed. + +equiv eq_spec_impl_sub_rsr_ref4 : CurveProcedures.sub ~ M.__sub4_rsr: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + transitivity + M.__sub4_rrs + ( f{1} = inzpRep4 f{2} /\ g{1} = inzpRep4 gs{2} ==> res{1} = inzpRep4 res{2}) + ( f{1} = fs{2} /\ gs{1} = g{2} ==> res{1} = res{2}). + move => &1 &2 [H] H0. + exists(fs{2}, g{2}) => />. + move => &1 &m &2 H H0. by rewrite -H0 H. + proc *; call eq_spec_impl_sub_rrs_ref4. + by skip => />. + proc *; call eq_spec_impl_sub_rrs_rsr_ref4. + by done. +qed. + +equiv eq_spec_impl_sub_ssr_ref4 : CurveProcedures.sub ~ M.__sub4_ssr: + f{1} = inzpRep4 fs{2} /\ + g{1} = inzpRep4 g{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline{2} (1). wp. sp. + call eq_spec_impl_sub_rsr_ref4. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_a24_ss_ref4 : CurveProcedures.mul_a24 ~ M.__mul4_a24_ss: + f{1} = inzpRep4 xa{2} /\ + a24{1} = to_uint a24{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline{2} (1). wp. sp. + call eq_spec_impl_mul_a24_rs_ref4. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_pp_ref4 : CurveProcedures.mul ~ M._mul4_pp: + f{1} = inzpRep4 xa{2} /\ + g{1} = inzpRep4 ya{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_mul_pp_ref4 (inzpRep4 xa{2}) (inzpRep4 ya{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_mul_ss_ref4 : CurveProcedures.mul ~ M._mul4_ss_: + f{1} = inzpRep4 xa{2} /\ + g{1} = inzpRep4 ya{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline{2} (1). wp. sp. + call eq_spec_impl_mul_pp_ref4. skip. auto => />. +qed. + +equiv eq_spec_impl_mul_sss_ref4 : CurveProcedures.mul ~ M.__mul4_sss: + f{1} = inzpRep4 xa{2} /\ + g{1} = inzpRep4 ya{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline{2} (1). wp. sp. + call eq_spec_impl_mul_rss_ref4. skip. auto => />. +qed. + +equiv eq_spec_impl_sqr_rs__ss_ref4 : M.__sqr4_ss ~ M.__sqr4_rs: + xa{1} = xa{2} + ==> + res{1} = res{2}. +proof. + proc *. inline {1} 1; sp; wp. + conseq (_: r0{1} = r{2}). + sim. +qed. + +equiv eq_spec_impl_sqr__ss_ref4 : CurveProcedures.sqr ~ M.__sqr4_ss: + f{1} = inzpRep4 xa{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + transitivity + M.__sqr4_rs + ( f{1} = inzpRep4 xa{2} ==> res{1} = inzpRep4 res{2}) + ( xa{1} = xa{2} ==> res{1} = res{2}). + move => &1 &2 H. + exists(xa{2}) => />. + move => &1 &m &2 H H0. by rewrite -H0 H. + proc *; call eq_spec_impl_sqr_ref4. + by skip => />. symmetry. + proc *; call eq_spec_impl_sqr_rs__ss_ref4. + by done. +qed. + +equiv eq_spec_impl_sqr_p_ref4 : CurveProcedures.sqr ~ M._sqr4_p: + f{1} = inzpRep4 xa{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (ph_sqr_p_ref4 (inzpRep4 xa{2})). + inline *; wp; skip => />. + move => &2 H H0 => />. by rewrite H0. +qed. + +equiv eq_spec_impl_sqr_ss_ref4 : CurveProcedures.sqr ~ M._sqr4_ss_: + f{1} = inzpRep4 xa{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline{2} (1). + unroll for{2} ^while. + wp. sp. simplify. + call eq_spec_impl_sqr_p_ref4. skip. auto => />. + move => &2. + congr. apply Array4.ext_eq. move => H [H1] H2. + smt(Array4.get_setE). +qed. + +equiv eq_spec_impl_sqr_s_ref4 : CurveProcedures.sqr ~ M._sqr4_s_: + f{1} = inzpRep4 x{2} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. 
inline{2} (1). wp. sp. + call eq_spec_impl_sqr_p_ref4. skip. auto => />. + qed. + +(** setting last bit to 0 **) +lemma eq_set_last_bit_to_zero64_ref4 x : + hoare [ + M.__decode_u_coordinate4 : + u = x + ==> + res = Curve25519_Operations.last_bit_to_zero64 x + ]. +proof. + proc; wp; skip => />. + rewrite /last_bit_to_zero64 => />; congr. + pose X := x.[3]. + rewrite /of_int /int2bs /mkseq /to_list -iotaredE => />. + rewrite andE wordP => /> k K0 K1. + rewrite map2iE // get_bits2w //. + smt(W64.initE). +qed. + +lemma ill_set_last_bit_to_zero64: islossless M.__decode_u_coordinate4 by islossless. + +lemma eq_ph_set_last_bit_to_zero64 x: + phoare [ + M.__decode_u_coordinate4 : + u = x + ==> + res = Curve25519_Operations.last_bit_to_zero64 x + ] = 1%r. +proof. + by conseq ill_set_last_bit_to_zero64 (eq_set_last_bit_to_zero64_ref4 x). +qed. + +(** to bytes **) +lemma h_to_bytes_ref4 r: + hoare [M.__tobytes4 : + r = f + ==> + pack4 (to_list res) = (W256.of_int (asint (inzpRep4 r))) + ]. +proof. + proc. + admit. +qed. + +lemma ill_to_bytes_ref4 : islossless M.__tobytes4 by islossless. + +lemma ph_to_bytes_ref4 r: + phoare [M.__tobytes4 : + r = f + ==> + pack4 (to_list res) = (W256.of_int (asint (inzpRep4 r))) + ] = 1%r. +proof. + by conseq ill_to_bytes_ref4 (h_to_bytes_ref4 r). +qed. + +(** step 1 : decode_scalar **) +equiv eq_spec_impl_decode_scalar_25519_ref4 : CurveProcedures.decode_scalar ~ M.__decode_scalar: + k'{1} = pack4 (to_list k{2}) + ==> + res{1} = pack32 (to_list res{2}). +proof. + proc; wp; auto => />. + unroll for{2} ^while => />; wp; skip => /> &2. + rewrite !/set64_direct !/get8 !/init8 => />. + rewrite pack4E pack32E. + rewrite !/to_list /mkseq -!iotaredE => /> . + rewrite !of_intE modz_small. by apply bound_abs. rewrite !bits2wE /int2bs /mkseq -!iotaredE => />. + rewrite wordP => i rgi />. + rewrite !of_listE !bits8E //= => />. + rewrite !get_setE //= !orE !andE !map2E //=. + rewrite !initiE => />. + rewrite !initiE => />. smt(). smt(). + + case(i = 0) => /> *; case(i = 1) => /> *; case(i = 2) => /> *; case(i = 254) => /> *; case(i = 255) => /> *. + + case(i %/ 8 = 0) => /> *. + + rewrite initiE => /> . smt(). rewrite initiE => />. smt(). rewrite initiE => />. smt(). smt(). + + case(i %/ 8 - 1 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 2 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 3 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 4 = 0) => /> *. + rewrite initiE => /> /#. + + case(i %/ 8 - 5 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 6 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 7 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 8 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 9 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 10 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 11 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 12 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 13 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 14 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 15 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 16 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 17 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 18 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 19 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 20 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 21 = 0) => /> *. 
+ + rewrite initiE => /> /#. + + case(i %/ 8 - 22 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 23 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 24 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 25 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 26 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 27 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 28 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 29 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 30 = 0) => /> *. + + rewrite initiE => /> /#. + + case(i %/ 8 - 31 = 0) => /> *. + + rewrite !initiE => />. smt(). + + rewrite !initiE => />. smt(). + case(i %/ 64 = 0) => /> *. smt(). smt(). + + rewrite !initiE => /> /#. smt(). +qed. + +(** step 2 : decode_u_coordinate **) +equiv eq_spec_impl_decode_u_coordinate_ref4 : CurveProcedures.decode_u_coordinate ~ M.__decode_u_coordinate4: + u'{1} = pack4 (to_list u{2}) + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + ecall {2} (eq_ph_set_last_bit_to_zero64 u{2}). + inline *; wp; skip => /> &2. + rewrite inzpRep4E. congr. + rewrite to_uint_unpack4u64 valRep4E; congr; congr. + rewrite /last_bit_to_zero64 => />. + rewrite /to_list /mkseq /to_list -iotaredE => />. + do split. + + rewrite !wordP => /> i I I0. rewrite !bits64iE => />. + + rewrite set_neqiE. smt(). + + rewrite pack4E => />. rewrite of_listE => />. + + rewrite initE => />. + + have ->: (0 <= i && i < 256) by smt(). auto => />. + + rewrite initE => />. have ->: 0 <= i %/ 64 by smt(). auto => />. + + case(i %/ 64 < 4) => /> *. smt(). smt(). + + + rewrite !wordP => /> i I I0. rewrite !bits64iE => />. + + rewrite set_neqiE. smt(). + + rewrite pack4E => />. rewrite of_listE => />. + + rewrite initE => />. + + have ->: (0 <= 64 + i && 64 + i < 256) by smt(). auto => />. + + rewrite initE => />. have ->: 0 <= (64 + i) %/ 64 by smt(). auto => />. + + case((64 + i) %/ 64 < 4) => /> *. smt(). smt(). + + + rewrite !wordP => /> i I I0. rewrite !bits64iE => />. + + rewrite set_neqiE. smt(). + + rewrite pack4E => />. rewrite of_listE => />. + + rewrite initE => />. + + have ->: (0 <= 128 + i && 128 + i < 256) by smt(). auto => />. + + rewrite initE => />. have ->: 0 <= (128 + i) %/ 64 by smt(). auto => />. + + case((128 + i) %/ 64 < 4) => /> *. smt(). smt(). + + rewrite !wordP => /> i I I0. rewrite !bits64iE => />. + + rewrite pack4E => />. rewrite of_listE => />. + rewrite !setE => />. rewrite initE => />. + have ->: (0 <= 192 + i && 192 + i < 256) by smt(). auto => />. + rewrite !initE => />. + have ->: (0 <= i && i < 64) by smt(). + have ->: (0 <= 192 + i && 192 + i < 256) by smt(). + auto => />. + case (i <> 63) => /> C. + have ->: 192 + i <> 255 by smt(). + auto => />. rewrite !initE. smt(). +qed. + +equiv eq_spec_impl_decode_u_coordinate_base_ref4 : + CurveProcedures.decode_u_coordinate_base ~ M.__decode_u_coordinate_base4: + true + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. + inline *; wp; skip => />. + rewrite inzpRep4E. congr. + rewrite to_uint_unpack4u64 valRep4E; congr; congr. + rewrite /last_bit_to_zero64 => />. + have !->: ((of_int 9))%W256.[255 <- false] = ((of_int 9))%W256. + rewrite !of_intE !bits2wE !/int2bs !/mkseq -iotaredE => />. + apply W256.ext_eq => />. move => X X0 X1. + rewrite get_setE //. case (X = 255) => /> C. + rewrite /to_list /mkseq /to_list -iotaredE => />. + qed. 
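+
+(* The next step relates the implementation's bit extraction to the
+   specification: for every index 0 <= ctr < 256, the word returned by
+   __ith_bit equals b2i of the specification's ith_bit. *)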
+ +(** step 3 : ith_bit **) +equiv eq_spec_impl_ith_bit_ref4 : CurveProcedures.ith_bit ~ M.__ith_bit : + k'{1} = pack32 (to_list k{2}) /\ + ctr{1} = to_uint ctr{2} /\ + 0 <= ctr{1} < 256 + ==> + b2i res{1} = to_uint res{2}. +proof. + proc; wp; skip => /> &2 H H0. + rewrite (W64.and_mod 3 ctr{2}) //= (W64.and_mod 6 (of_int (to_uint ctr{2} %% 8))%W64) //= !to_uint_shr //= !shr_shrw. + smt(W64.to_uint_cmp W64.of_uintK W64.to_uintK). + rewrite /zeroextu64 /truncateu8 //= !of_uintK => />. + + rewrite of_intE modz_small. apply bound_abs. smt(W8.to_uint_cmp JUtils.powS_minus JUtils.pow2_0). + rewrite bits2wE /int2bs /mkseq -iotaredE => />. + auto => />. + rewrite (modz_small (to_uint ctr{2} %% 8) W64.modulus). apply bound_abs. smt(W64.to_uint_cmp). + rewrite (modz_small (to_uint ctr{2} %% 8) 64). apply bound_abs. smt(W64.to_uint_cmp). + rewrite (modz_small (to_uint ctr{2} %% 8) W64.modulus). apply bound_abs. smt(W64.to_uint_cmp). + pose ctr := to_uint ctr{2}. + rewrite pack32E of_listE /to_list !/mkseq !initiE // -!iotaredE => />. + rewrite !initiE //=. auto => />. smt(). + rewrite !/b2i !of_intE !bits2wE !/int2bs !/mkseq //=. + rewrite -!iotaredE => />. + rewrite !to_uintE !/bs2int !/w2bits !/mkseq /big /range !/predT -!iotaredE => />. + rewrite !b2i0 => />. + rewrite !initiE => />. smt(). auto => />. + + case(ctr %/ 8 = 0) => /> *. smt(). + + case(ctr %/ 8 - 1 = 0) => /> *. smt(). + + case(ctr %/ 8 - 2 = 0) => /> *. smt(). + + case(ctr %/ 8 - 3 = 0) => /> *. smt(). + + case(ctr %/ 8 - 4 = 0) => /> *. smt(). + + case(ctr %/ 8 - 5 = 0) => /> *. smt(). + + case(ctr %/ 8 - 6 = 0) => /> *. smt(). + + case(ctr %/ 8 - 7 = 0) => /> *. smt(). + + case(ctr %/ 8 - 8 = 0) => /> *. smt(). + + case(ctr %/ 8 - 9 = 0) => /> *. smt(). + + case(ctr %/ 8 - 10 = 0) => /> *. smt(). + + case(ctr %/ 8 - 11 = 0) => /> *. smt(). + + case(ctr %/ 8 - 12 = 0) => /> *. smt(). + + case(ctr %/ 8 - 13 = 0) => /> *. smt(). + + case(ctr %/ 8 - 14 = 0) => /> *. smt(). + + case(ctr %/ 8 - 15 = 0) => /> *. smt(). + + case(ctr %/ 8 - 16 = 0) => /> *. smt(). + + case(ctr %/ 8 - 17 = 0) => /> *. smt(). + + case(ctr %/ 8 - 18 = 0) => /> *. smt(). + + case(ctr %/ 8 - 19 = 0) => /> *. smt(). + + case(ctr %/ 8 - 20 = 0) => /> *. smt(). + + case(ctr %/ 8 - 21 = 0) => /> *. smt(). + + case(ctr %/ 8 - 22 = 0) => /> *. smt(). + + case(ctr %/ 8 - 23 = 0) => /> *. smt(). + + case(ctr %/ 8 - 24 = 0) => /> *. smt(). + + case(ctr %/ 8 - 25 = 0) => /> *. smt(). + + case(ctr %/ 8 - 26 = 0) => /> *. smt(). + + case(ctr %/ 8 - 27 = 0) => /> *. smt(). + + case(ctr %/ 8 - 28 = 0) => /> *. smt(). + + case(ctr %/ 8 - 29 = 0) => /> *. smt(). + + case(ctr %/ 8 - 30 = 0) => /> *. smt(). + + case(ctr %/ 8 - 31 = 0) => /> *. smt(). + + case(ctr %/ 8 - 32 = 0) => /> *. smt(). + smt(). +qed. + +equiv eq_spec_impl_init_points_ref4 : + CurveProcedures.init_points ~ M.__init_points4 : + init{1} = inzpRep4 initr{2} + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2 /\ + res{1}.`3 = inzpRep4 res{2}.`3 /\ + res{1}.`4 = inzpRep4 res{2}.`4. +proof. + proc. + wp. unroll for{2} ^while. wp. skip. move => &1 &2 H H0 H1 H2 H3 H4 H5 H6. + split; auto => />. rewrite /H4 /H0 /H2 /H3 /Zp.one /set0_64_ /inzpRep4 => />. + rewrite /valRep4 /to_list /mkseq -iotaredE => />. + split; auto => />. rewrite /H5 /H0 /H3 /H2 /Zp.zero /set0_64_ /inzpRep4 => />. + rewrite /valRep4 /to_list /mkseq -iotaredE => />. + rewrite /H6 /H0 /H3 /H2 /Zp.zero /set0_64_ /inzpRep4 // /valRep4 /to_list /mkseq -iotaredE => />. +qed. 
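+
+(* The next step covers the branchless conditional swap: the implementation
+   derives a 64-bit mask from the swap word and exchanges the limbs with
+   XOR/AND, so the proof case-splits on the specification's boolean flag and
+   handles the two mask values (all ones / all zeros) separately. *)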
+ +(** step 4 : cswap **) +equiv eq_spec_impl_cswap_ref4 : + CurveProcedures.cswap ~ M.__cswap4: + x2{1} = inzpRep4 x2{2} /\ + z2{1} = inzpRep4 z2r{2} /\ + x3{1} = inzpRep4 x3{2} /\ + z3{1} = inzpRep4 z3{2} /\ + b2i toswap{1} = to_uint toswap{2} + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2 /\ + res{1}.`3 = inzpRep4 res{2}.`3 /\ + res{1}.`4 = inzpRep4 res{2}.`4. +proof. +proc. +do 4! unroll for{2} ^while. +case: (toswap{1}). + rcondt {1} 1 => //. wp => /=. skip. + move => &1 &2 [#] 4!->> ??. + have mask_set : (set0_64.`6 - toswap{2}) = W64.onew. rewrite /set0_64_ /=. smt(W64.to_uint_cmp). + rewrite !mask_set /=. + have lxor1 : forall (x1 x2:W64.t), x1 `^` (x2 `^` x1) = x2. + move=> *. rewrite xorwC -xorwA xorwK xorw0 //. + have lxor2 : forall (x1 x2:W64.t), x1 `^` (x1 `^` x2) = x2. + move=> *. rewrite xorwA xorwK xor0w //. + rewrite !lxor1 !lxor2. + split. congr. apply Array4.ext_eq. smt(Array4.get_setE). + split. congr. apply Array4.ext_eq. smt(Array4.get_setE). + split. congr. apply Array4.ext_eq. smt(Array4.get_setE). + congr. apply Array4.ext_eq. rewrite /copy_64 => />. smt(Array4.get_setE). + rcondf {1} 1 => //. wp => /=; skip. + move => &1 &2 [#] 4!->> ??. + have mask_not_set : (set0_64.`6 - toswap{2}) = W64.zero. rewrite /set0_64_ => />. smt(). + rewrite !mask_not_set !andw0 !xorw0 !/copy_64 => />. + do split. + congr. smt(Array4.initE Array4.ext_eq Array4.set_set_if). + congr. smt(Array4.initE Array4.ext_eq Array4.set_set_if). + congr. smt(Array4.initE Array4.ext_eq Array4.set_set_if). + congr. smt(Array4.initE Array4.ext_eq Array4.set_set_if). +qed. + +(** step 5 : add_and_double **) +equiv eq_spec_impl_add_and_double_ref4 : + CurveProcedures.add_and_double ~ M.__add_and_double4: + init{1} = inzpRep4 init{2} /\ + x2{1} = inzpRep4 x2{2} /\ + z2{1} = inzpRep4 z2r{2} /\ + x3{1} = inzpRep4 x3{2} /\ + z3{1} = inzpRep4 z3{2} + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2 /\ + res{1}.`3 = inzpRep4 res{2}.`3 /\ + res{1}.`4 = inzpRep4 res{2}.`4. +proof. +proc => /=; wp. + call eq_spec_impl_mul_rss_ref4; wp. + call eq_spec_impl_mul_sss_ref4; wp. + call eq_spec_impl_add_sss_ref4; wp. + call eq_spec_impl_sqr__ss_ref4; wp. + call eq_spec_impl_mul_a24_ss_ref4; wp. + call eq_spec_impl_sqr__ss_ref4; wp. + call eq_spec_impl_sub_sss_ref4; wp. + call eq_spec_impl_mul_sss_ref4; wp. + call eq_spec_impl_sub_sss_ref4; wp. + call eq_spec_impl_add_sss_ref4; wp. + call eq_spec_impl_sqr__ss_ref4; wp. + call eq_spec_impl_sqr__ss_ref4; wp. + call eq_spec_impl_mul_sss_ref4; wp. + call eq_spec_impl_mul_sss_ref4; wp. + call eq_spec_impl_add_sss_ref4; wp. + call eq_spec_impl_sub_sss_ref4; wp. + call eq_spec_impl_add_ssr_ref4; wp. + call eq_spec_impl_sub_ssr_ref4; wp. + done. +qed. + +(** step 6 : montgomery_ladder_step **) +equiv eq_spec_impl_montgomery_ladder_step_ref4 : + CurveProcedures.montgomery_ladder_step ~ M.__montgomery_ladder_step4: + k'{1} = pack32 (to_list k{2}) /\ + init'{1} = inzpRep4 init{2} /\ + x2{1} = inzpRep4 x2{2} /\ + z2{1} = inzpRep4 z2r{2} /\ + x3{1} = inzpRep4 x3{2} /\ + z3{1} = inzpRep4 z3{2} /\ + b2i swapped{1} = to_uint swapped{2} /\ + ctr'{1} = to_uint ctr{2} /\ + 0 <= ctr'{1} < 256 + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2 /\ + res{1}.`3 = inzpRep4 res{2}.`3 /\ + res{1}.`4 = inzpRep4 res{2}.`4 /\ + b2i res{1}.`5 = to_uint res{2}.`5. +proof. + proc => /=; wp. + call eq_spec_impl_add_and_double_ref4. wp. + call eq_spec_impl_cswap_ref4. wp. + call eq_spec_impl_ith_bit_ref4. wp; skip. 
+ move => &1 &2 [H0] [H1] [H2] [H3] [H4] [H5] [H6] H7. split. + auto => />. rewrite H0. + move => [H8 H9] H10 H11 H12 H13 H14. + split; auto => />. rewrite /H14 /H13. + rewrite /b2i. + case: (swapped{1} ^^ H10). + move => *. smt(W64.to_uintK W64.xorw0 W64.xorwC). + move => *. smt(W64.ge2_modulus W64.to_uintK W64.of_uintK W64.xorwK). +qed. + +(** step 7 : montgomery_ladder **) +equiv eq_spec_impl_montgomery_ladder_ref4 : + CurveProcedures.montgomery_ladder ~ M.__montgomery_ladder4 : + init'{1} = inzpRep4 u{2} /\ + k'{1} = pack32 (to_list k{2}) + ==> + res{1}.`1 = inzpRep4 res{2}.`1 /\ + res{1}.`2 = inzpRep4 res{2}.`2. +proof. + proc. wp. sp. + unroll {1} 4. + rcondt {1} 4. auto => />. inline CurveProcedures.init_points. + wp. sp. skip. auto => />. + while( + k'{1} = pack32 (to_list k{2}) /\ + ctr{1} = to_uint ctr{2} /\ + -1 <= ctr{1} < 256 /\ + init'{1} = inzpRep4 us{2} /\ + x2{1} = inzpRep4 x2{2} /\ + x3{1} = inzpRep4 x3{2} /\ + z2{1} = inzpRep4 z2r{2} /\ + z3{1} = inzpRep4 z3{2} /\ + b2i swapped{1} = to_uint swapped{2}). + wp. sp. call eq_spec_impl_montgomery_ladder_step_ref4. skip. auto => />. + move => &1 &2 ctrR H H0 H1 H2 E3. split. + rewrite to_uintB. rewrite uleE to_uint1 => />. smt(). rewrite to_uint1 => />. + smt(W64.to_uint_cmp). + move => H3 H4 H5 H6 H7 H8 H9 H10 H11 H12. split. smt(W64.to_uint_cmp). + rewrite ultE to_uintB. rewrite uleE to_uint1. smt(). + rewrite to_uint1 to_uint0 //=. wp. + call eq_spec_impl_montgomery_ladder_step_ref4. wp. call eq_spec_impl_init_points_ref4. skip. done. +qed. + +(** step 8 : iterated square **) +equiv eq_spec_impl_it_sqr_ref4 : + CurveProcedures.it_sqr ~ M._it_sqr4_p: + f{1} = inzpRep4 x{2} /\ + i{1} = to_uint i{2} /\ + i{1} <= W32.modulus /\ + 2 <= to_uint i{2} /\ + 2 <= i{1} + ==> + res{1} = inzpRep4 res{2}. +proof. +proc. simplify. wp. sp. + while (h{1} = inzpRep4 x{2} /\ + ii{1} = to_uint i{2} /\ + ii{1} <= W32.modulus /\ + 0 <= ii{1} + ). + wp. call eq_spec_impl_sqr_p_ref4. conseq(_: _ ==> h{1} = inzpRep4 x{2}). + move => &1 &2 [[H][ H0] [H1] H2 [H3] H4 H5]. split. apply H5. + rewrite /DEC_32 /rflags_of_aluop_nocf_w /ZF_of => /=. + move => H6 H7 H8 H9. split. split. apply H9. split. + rewrite to_uintB. rewrite uleE => />. by smt(). rewrite to_uint1 H0 //. + split. move: H1. smt(). move: H2. smt(). split. rewrite H0. move => H10. + smt(W32.of_uintK W32.to_uintK W32.of_intN W32.to_uintN W32.of_intD). + smt(W32.of_uintK W32.to_uintK W32.of_intN W32.to_uintN W32.of_intD). + skip. auto => />. wp. + rewrite /DEC_32 /rflags_of_aluop_nocf_w /ZF_of => /=. + call eq_spec_impl_sqr_p_ref4. + skip. auto => />. move => &2 H H0. split. split. + rewrite to_uintB. rewrite uleE => />. move: H. smt(). + rewrite to_uint1 //. split. move: H0. smt(). move: H. smt(). + split. move => H1. + smt(W32.ge2_modulus W32.of_uintK W32.to_uintK W32.to_uintN W32.of_intD). + move => H1. move: H. smt(). +qed. + +equiv eq_spec_impl_it_sqr_s_ref4 : + CurveProcedures.it_sqr ~ M._it_sqr4_s_: + f{1} = inzpRep4 x{2} /\ + i{1} = to_uint i{2} /\ + 2 <= to_uint i{2} /\ + i{1} <= W32.modulus /\ + 2 <= i{1} ==> + res{1} = inzpRep4 res{2}. + proof. + proc *. inline M._it_sqr4_s_. wp. sp. + call eq_spec_impl_it_sqr_ref4. skip. auto => />. +qed. + +equiv eq_spec_impl_it_sqr_ss_ref4 : + CurveProcedures.it_sqr ~ M._it_sqr4_ss_: + f{1} = inzpRep4 x{2} /\ + i{1} = to_uint i{2} /\ + 2 <= to_uint i{2} /\ + i{1} <= W32.modulus /\ + 2 <= i{1} + ==> + res{1} = inzpRep4 res{2}. +proof. + proc *. inline M._it_sqr4_ss_. + unroll for{2} ^while. wp. sp. + call eq_spec_impl_it_sqr_ref4. skip. 
auto => />. move => &2 H H0. congr. + apply Array4.ext_eq. move => H1 [H2] H3. smt(Array4.get_setE). +qed. + + +(** step 9 : invert **) +equiv eq_spec_impl_invert_ref4 : + CurveProcedures.invert ~ M.__invert4 : + fs{1} = inzpRep4 fs{2} + ==> res{1} = inzpRep4 res{2}. +proof. + transitivity + CurveProcedures.invert_helper + ( fs{1} = fs{2} ==> res{1} = res{2}) + ( fs{1} = inzpRep4 fs{2} ==> res{1} = inzpRep4 res{2}). + move => &1 &2 H; exists(fs{1}) => />. + move => &1 &m &2 => />. + proc *. symmetry; call eq_proc_proc_invert; skip => />. + proc => /=; wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_sqr_s_ref4; wp. + call (eq_spec_impl_it_sqr_s_ref4). wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_it_sqr_s_ref4; wp. + call eq_spec_impl_mul_ss_ref4. wp. + call eq_spec_impl_it_sqr_ss_ref4. wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_it_sqr_ss_ref4. wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_it_sqr_s_ref4. wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_it_sqr_ss_ref4. wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_it_sqr_ss_ref4. wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_it_sqr_s_ref4. wp. + call eq_spec_impl_sqr_ss_ref4; wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_sqr_ss_ref4; wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_mul_ss_ref4; wp. + call eq_spec_impl_sqr_s_ref4; wp. + call eq_spec_impl_sqr_ss_ref4; wp. + call eq_spec_impl_sqr_ss_ref4. wp. skip. + done. +qed. + +(** step 10 : encode point **) +equiv eq_spec_impl_encode_point_ref4 : CurveProcedures.encode_point ~ M.__encode_point4: + x2{1} = inzpRep4 x2{2} /\ + z2{1} = inzpRep4 z2r{2} + ==> + res{1} = pack4 (to_list res{2}). +proof. + proc => /=; wp. + ecall {2} (ph_to_bytes_ref4 (r{2})). wp. + call eq_spec_impl_mul_rss_ref4. wp. + call eq_spec_impl_invert_ref4. + wp; skip => /> &2 H H0 H1 H2. + by rewrite -H2. +qed. +equiv eq_spec_impl_scalarmult_internal_ref4 : + CurveProcedures.scalarmult_internal ~ M.__curve25519_internal_ref4: + k'{1} = pack32 (to_list k{2}) /\ + u''{1} = inzpRep4 u{2} + ==> + res{1} = pack4 (to_list res{2}). +proof. + proc => /=; wp. + call eq_spec_impl_encode_point_ref4; wp. + call eq_spec_impl_montgomery_ladder_ref4. wp. skip. + done. +qed. + +(** step 11 : scalarmult **) +equiv eq_spec_impl_scalarmult_ref4 : + CurveProcedures.scalarmult ~ M._curve25519_ref4: + k'{1} = pack4 (to_list _k{2}) /\ + u'{1} = pack4 (to_list _u{2}) + ==> + res{1} = pack4 (to_list res{2}). +proof. + proc => /=; wp. + call eq_spec_impl_scalarmult_internal_ref4 => />; wp. + call eq_spec_impl_decode_u_coordinate_ref4 => />; wp. + call eq_spec_impl_decode_scalar_25519_ref4 => />. + wp; skip => />. +qed. + +equiv eq_spec_impl_scalarmult_base_ref4 : + CurveProcedures.scalarmult_base ~ M._curve25519_ref4_base: + k'{1} = pack4 (to_list _k{2}) + ==> + res{1} = pack4 (to_list res{2}). +proof. + proc => /=; wp. + call eq_spec_impl_scalarmult_internal_ref4; wp. + call eq_spec_impl_decode_u_coordinate_base_ref4; wp. + call eq_spec_impl_decode_scalar_25519_ref4. + wp. skip. move => *. smt(Zp_limbs.valRep4ToPack_xy). +qed. + +lemma eq_spec_impl_scalarmult_jade_ref4 _qp _np _pp: + equiv [CurveProcedures.scalarmult ~ M.jade_scalarmult_curve25519_amd64_ref4: + qp{2} = _qp /\ + np{2} = _np /\ + pp{2} = _pp /\ + k'{1} = pack4 (to_list np{2}) /\ + u'{1} = pack4 (to_list pp{2}) + ==> + res{1} = pack4 (to_list res{2}.`1) /\ + res{2}.`2 = W64.zero]. +proof. + proc *. inline M.jade_scalarmult_curve25519_amd64_ref4; wp. 
+ call (eq_spec_impl_scalarmult_ref4); wp; skip => />. +qed. + +lemma eq_spec_impl_scalarmult_jade_base_ref4 _qp _np: + equiv [CurveProcedures.scalarmult_base ~ M.jade_scalarmult_curve25519_amd64_ref4_base: + qp{2} = _qp /\ + np{2} = _np /\ + k'{1} = pack4 (to_list np{2}) + ==> + res{1} = pack4 (to_list res{2}.`1) /\ + res{2}.`2 = W64.zero]. +proof. + proc *. inline M.jade_scalarmult_curve25519_amd64_ref4_base. wp. sp. + call (eq_spec_impl_scalarmult_base_ref4). skip. done. +qed. + + + + +(* Below are proofs for an older implementation that utilises ptrs *) +(* +lemma eq_spec_impl_scalarmult_ptr_ref4 mem _rp _kp _up : + equiv [CurveProcedures.scalarmult ~ M.__curve25519_ref4_ptr: + valid_ptr (W64.to_uint _up) 32 /\ + valid_ptr (W64.to_uint _kp) 32 /\ + valid_ptr (W64.to_uint _rp) 32 /\ + Glob.mem{2} = mem /\ + rp{2} = _rp /\ + kp{2} = _kp /\ + up{2} = _up /\ + u'{1} = pack4 (load_array4 (mem) (W64.to_uint _up)) /\ + k'{1} = pack4 (load_array4 (mem) (W64.to_uint _kp)) + ==> + res{1} = pack4 (load_array4 Glob.mem{2} (W64.to_uint res{2}.`1)) /\ + res{2}.`2 = tt + ]. +proof. + proc *. + inline M.__curve25519_ref4_ptr. wp. sp. + inline M.__load4 M.__store4. + do 3! unroll for{2} ^while. + sp. wp. auto => />. + call eq_spec_impl_scalarmult_ref4. skip. auto => />. + move => &2 H H0 H1 H2 H3 H4. + do split. + congr. congr. + rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr. congr. rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + move => H5 H6 H7. + congr. congr. rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + apply (load_store_pos Glob.mem{2} rp{2} H7 0). + rewrite /valid_ptr; split => />. done. + apply (load_store_pos Glob.mem{2} rp{2} H7 8). + rewrite /valid_ptr; split => />. done. + apply (load_store_pos Glob.mem{2} rp{2} H7 16). + rewrite /valid_ptr; split => />. done. + apply (load_store_pos Glob.mem{2} rp{2} H7 24). + rewrite /valid_ptr; split => />. done. +qed. +*) + +(* +lemma eq_spec_impl_scalarmult_base_ptr_ref4 mem _rp _kp : + equiv [CurveProcedures.scalarmult_base ~ M.__curve25519_ref4_base_ptr: + valid_ptr (W64.to_uint _rp) 32 /\ + valid_ptr (W64.to_uint _kp) 32 /\ + Glob.mem{2} = mem /\ + rp{2} = _rp /\ + kp{2} = _kp /\ + k'{1} = pack4 (load_array4 (Glob.mem{2}) (W64.to_uint _kp)) + ==> + res{1} = pack4 (load_array4 Glob.mem{2} (W64.to_uint res{2}.`1)) /\ res{2}.`2 = tt]. +proof. + proc *. inline M.__curve25519_ref4_base_ptr M.__load4 M.__store4. + do 2! unroll for{2} ^while. + wp; call eq_spec_impl_scalarmult_base_ref4; wp; skip => />. + move => H H0 H1 H2. + do split. + congr; congr. + rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + congr; rewrite !to_uintD_small !to_uint_small => />. smt(). + move => H3 H4. + congr; congr. + rewrite /load_array4 /to_list /mkseq -iotaredE => />. + do split. + apply (load_store_pos mem _rp H4 0); rewrite /valid_ptr. smt(). smt(). + apply (load_store_pos mem _rp H4 8); rewrite /valid_ptr. smt(). smt(). + apply (load_store_pos mem _rp H4 16); rewrite /valid_ptr. smt(). smt(). 
+ apply (load_store_pos mem _rp H4 24); rewrite /valid_ptr. smt(). smt(). +qed. +*) diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Operations.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Operations.ec new file mode 120000 index 0000000..5b3e5c8 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Operations.ec @@ -0,0 +1 @@ +../common/Curve25519_Operations.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_PHoare.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_PHoare.ec new file mode 120000 index 0000000..fd8d683 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_PHoare.ec @@ -0,0 +1 @@ +../common/Curve25519_PHoare.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Procedures.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Procedures.ec new file mode 120000 index 0000000..e19bf68 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Procedures.ec @@ -0,0 +1 @@ +../common/Curve25519_Procedures.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Spec.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Spec.ec new file mode 120000 index 0000000..e27e330 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/Curve25519_Spec.ec @@ -0,0 +1 @@ +../common/Curve25519_Spec.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/EClib.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/EClib.ec new file mode 120000 index 0000000..904ee5a --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/EClib.ec @@ -0,0 +1 @@ +../common/EClib.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/W64limbs.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/W64limbs.ec new file mode 120000 index 0000000..ad16992 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/W64limbs.ec @@ -0,0 +1 @@ +../common/W64limbs.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/Zp_25519.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/Zp_25519.ec new file mode 120000 index 0000000..9e3e196 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/Zp_25519.ec @@ -0,0 +1 @@ +../common/Zp_25519.ec \ No newline at end of file diff --git a/proof/crypto_scalarmult/curve25519/amd64/ref4/Zp_limbs.ec b/proof/crypto_scalarmult/curve25519/amd64/ref4/Zp_limbs.ec new file mode 120000 index 0000000..daf4f72 --- /dev/null +++ b/proof/crypto_scalarmult/curve25519/amd64/ref4/Zp_limbs.ec @@ -0,0 +1 @@ +../common/Zp_limbs.ec \ No newline at end of file