Skip to content

Commit

Permalink
k256: add impl of ReduceNonZero<U256> for Scalar (#501)
Browse files Browse the repository at this point in the history
  • Loading branch information
fjarri authored Jan 4, 2022
1 parent f60fc7f commit b4ebe62
Show file tree
Hide file tree
Showing 3 changed files with 107 additions and 7 deletions.
100 changes: 95 additions & 5 deletions k256/src/arithmetic/scalar.rs
Original file line number Diff line number Diff line change
Expand Up @@ -589,6 +589,15 @@ impl Reduce<U512> for Scalar {
}
}

/// Reduces a 256-bit integer into a guaranteed non-zero `Scalar`.
///
/// Strategy: reduce `w` modulo `ORDER - 1`, then add 1, mapping the
/// result into `[1, ORDER - 1]` so it can never be zero. A single
/// conditional subtraction suffices for the reduction here — NOTE(review):
/// this relies on `ORDER` being close enough to `2^256` that
/// `w - (ORDER - 1) < ORDER - 1` always holds; confirm against the
/// curve-order constant. All steps are branch-free (constant-time).
impl ReduceNonZero<U256> for Scalar {
fn from_uint_reduced_nonzero(w: U256) -> Self {
const ORDER_MINUS_ONE: U256 = ORDER.wrapping_sub(&U256::ONE);
// Subtract-with-borrow: the returned borrow limb is non-zero
// (top bit set) exactly when `w < ORDER - 1`.
let (r, underflow) = w.sbb(&ORDER_MINUS_ONE, Limb::ZERO);
// Lift the borrow's top bit into a constant-time `Choice`.
let underflow = Choice::from((underflow.0 >> (Limb::BIT_SIZE - 1)) as u8);
// Keep `w` when it was already below `ORDER - 1`, otherwise take the
// subtracted value `r`; the final +1 makes the result non-zero.
Self(U256::conditional_select(&w, &r, !underflow).wrapping_add(&U256::ONE))
}
}

impl ReduceNonZero<U512> for Scalar {
fn from_uint_reduced_nonzero(w: U512) -> Self {
WideScalar(w).reduce_nonzero()
Expand Down Expand Up @@ -641,14 +650,16 @@ impl<'de> Deserialize<'de> for Scalar {
mod tests {
use super::Scalar;
use crate::arithmetic::dev::{biguint_to_bytes, bytes_to_biguint};
use crate::{NonZeroScalar, ORDER};
use elliptic_curve::{
bigint::U512,
bigint::{ArrayEncoding, U256, U512},
ff::{Field, PrimeField},
generic_array::GenericArray,
ops::Reduce,
IsHigh,
};
use num_bigint::{BigUint, ToBigUint};
use num_traits::Zero;
use proptest::prelude::*;

impl From<&BigUint> for Scalar {
Expand Down Expand Up @@ -762,13 +773,92 @@ mod tests {
assert_eq!((a - &a).is_zero().unwrap_u8(), 1);
}

/// Checks that the `Reduce<U256>` and `ReduceNonZero<U256>` impls map
/// 32-byte big-endian inputs into the expected ranges.
#[test]
fn from_bytes_reduced() {
    // Run 32 big-endian bytes through the given reduction impl.
    fn parse<T: Reduce<U256>>(bytes: &[u8]) -> T {
        T::from_be_bytes_reduced(GenericArray::clone_from_slice(bytes))
    }

    let modulus = Scalar::modulus_as_biguint();

    // Regular reduction: results land in [0, modulus), and inputs
    // congruent to zero mod the order reduce to zero.
    let val = parse::<Scalar>(&[0xffu8; 32]).to_biguint().unwrap();
    assert!(val < modulus);

    let val = parse::<Scalar>(&[0u8; 32]).to_biguint().unwrap();
    assert!(val.is_zero());

    let val = parse::<Scalar>(&ORDER.to_be_byte_array())
        .to_biguint()
        .unwrap();
    assert!(val.is_zero());

    // Non-zero reduction: results land in [0, modulus) and are never
    // zero, even for inputs that would normally reduce to zero.
    let val = parse::<NonZeroScalar>(&[0xffu8; 32]).to_biguint().unwrap();
    assert!(val < modulus);

    let val = parse::<NonZeroScalar>(&[0u8; 32]).to_biguint().unwrap();
    assert!(val < modulus);
    assert!(!val.is_zero());

    let val = parse::<NonZeroScalar>(&ORDER.to_be_byte_array())
        .to_biguint()
        .unwrap();
    assert!(val < modulus);
    assert!(!val.is_zero());

    let val = parse::<NonZeroScalar>(&(ORDER.wrapping_sub(&U256::ONE)).to_be_byte_array())
        .to_biguint()
        .unwrap();
    assert!(val < modulus);
    assert!(!val.is_zero());
}

/// Checks that the `Reduce<U512>` and `ReduceNonZero<U512>` impls map
/// wide (64-byte) big-endian inputs into the expected ranges.
///
/// Fix: the original hand-rolled an all-0xff reduction check before
/// defining the `reduce` helper, duplicating the identical
/// `reduce::<Scalar>(&[0xffu8; 64])` assertion below; the duplicate
/// has been removed.
#[test]
fn from_wide_bytes_reduced() {
    // Left-pad `arr` with zeros to 64 bytes, then run it through the
    // given wide-reduction impl.
    fn reduce<T: Reduce<U512>>(arr: &[u8]) -> T {
        let mut full_arr = [0u8; 64];
        full_arr[(64 - arr.len())..64].copy_from_slice(arr);
        T::from_be_bytes_reduced(GenericArray::clone_from_slice(&full_arr))
    }

    let m = Scalar::modulus_as_biguint();

    // Regular reduction: results land in [0, m), and inputs congruent
    // to zero mod the order reduce to zero.
    let s = reduce::<Scalar>(&[0xffu8; 64]).to_biguint().unwrap();
    assert!(s < m);

    let s = reduce::<Scalar>(&[0u8; 64]).to_biguint().unwrap();
    assert!(s.is_zero());

    let s = reduce::<Scalar>(&ORDER.to_be_byte_array())
        .to_biguint()
        .unwrap();
    assert!(s.is_zero());

    // Non-zero reduction: results land in [0, m) and are never zero,
    // even for inputs that would normally reduce to zero.
    let s = reduce::<NonZeroScalar>(&[0xffu8; 64]).to_biguint().unwrap();
    assert!(s < m);

    let s = reduce::<NonZeroScalar>(&[0u8; 64]).to_biguint().unwrap();
    assert!(s < m);
    assert!(!s.is_zero());

    let s = reduce::<NonZeroScalar>(&ORDER.to_be_byte_array())
        .to_biguint()
        .unwrap();
    assert!(s < m);
    assert!(!s.is_zero());

    let s = reduce::<NonZeroScalar>(&(ORDER.wrapping_sub(&U256::ONE)).to_be_byte_array())
        .to_biguint()
        .unwrap();
    assert!(s < m);
    assert!(!s.is_zero());
}

prop_compose! {
Expand Down
7 changes: 6 additions & 1 deletion k256/src/arithmetic/scalar/wide32.rs
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,11 @@ impl WideScalar {
} else {
NEG_MODULUS[0]
};
let modulus = if modulus_minus_one {
ORDER.wrapping_sub(&U256::ONE)
} else {
ORDER
};

let w = self.0.to_uint_array();
let n0 = w[8];
Expand Down Expand Up @@ -392,7 +397,7 @@ impl WideScalar {

// Final reduction of r.
let r = U256::from([r0, r1, r2, r3, r4, r5, r6, r7]);
let (r2, underflow) = r.sbb(&ORDER, Limb::ZERO);
let (r2, underflow) = r.sbb(&modulus, Limb::ZERO);
let high_bit = Choice::from(c as u8);
let underflow = Choice::from((underflow.0 >> 31) as u8);
Scalar(U256::conditional_select(&r, &r2, !underflow | high_bit))
Expand Down
7 changes: 6 additions & 1 deletion k256/src/arithmetic/scalar/wide64.rs
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,11 @@ impl WideScalar {
} else {
NEG_MODULUS[0]
};
let modulus = if modulus_minus_one {
ORDER.wrapping_sub(&U256::ONE)
} else {
ORDER
};

let w = self.0.to_uint_array();
let n0 = w[4];
Expand Down Expand Up @@ -200,7 +205,7 @@ impl WideScalar {

// Final reduction of r.
let r = U256::from([r0, r1, r2, r3]);
let (r2, underflow) = r.sbb(&ORDER, Limb::ZERO);
let (r2, underflow) = r.sbb(&modulus, Limb::ZERO);
let high_bit = Choice::from(c as u8);
let underflow = Choice::from((underflow.0 >> 63) as u8);
Scalar(U256::conditional_select(&r, &r2, !underflow | high_bit))
Expand Down

0 comments on commit b4ebe62

Please sign in to comment.