feat: using WithHash<T> in SharedMutable + fixing slot allocation
benesjan committed Feb 4, 2025
1 parent df24234 commit 5e7fc3f
Showing 6 changed files with 148 additions and 167 deletions.
174 changes: 45 additions & 129 deletions noir-projects/aztec-nr/aztec/src/state_vars/shared_mutable.nr
@@ -1,20 +1,19 @@
use dep::protocol_types::{
address::AztecAddress,
hash::{poseidon2_hash, poseidon2_hash_with_separator},
traits::{FromField, Packable, ToField},
utils::arrays::array_concat,
};

use crate::context::{PrivateContext, PublicContext, UnconstrainedContext};
use crate::oracle::storage::storage_read;
use crate::state_vars::{
shared_mutable::{
scheduled_delay_change::ScheduledDelayChange, scheduled_value_change::ScheduledValueChange,
use dep::protocol_types::traits::Packable;

use crate::{
context::{PrivateContext, PublicContext, UnconstrainedContext},
state_vars::{
shared_mutable::{
scheduled_delay_change::ScheduledDelayChange,
scheduled_value_change::ScheduledValueChange,
shared_mutable_values::SharedMutableValues,
},
storage::Storage,
},
storage::Storage,
utils::with_hash::WithHash,
};
use dep::std::mem::zeroed;

pub(crate) mod shared_mutable_values;
pub(crate) mod scheduled_delay_change;
pub(crate) mod scheduled_value_change;
mod test;
@@ -24,21 +23,11 @@ pub struct SharedMutable<T, let INITIAL_DELAY: u32, Context> {
storage_slot: Field,
}

// Separators used to derive distinct storage slots for the different values within the same state variable
global VALUE_CHANGE_SEPARATOR: u32 = 0;
global DELAY_CHANGE_SEPARATOR: u32 = 1;
global HASH_SEPARATOR: u32 = 2;

// This will make the Aztec macros require that T implements the Serialize<N> trait, and allocate N storage slots to
// this state variable. This is incorrect, since what we actually store is:
// - a ScheduledValueChange<T>, which requires 1 + 2 * M storage slots, where M is the serialization length of T
// - a ScheduledDelayChange, which requires another storage slot
//
// TODO https://github.com/AztecProtocol/aztec-packages/issues/5736: change the storage allocation scheme so that we
// can actually use it here
// This will make the Aztec macros require that T implements the Packable<N> and Eq traits, and allocate `N` storage
// slots to this state variable.
impl<T, let INITIAL_DELAY: u32, Context, let N: u32> Storage<N> for SharedMutable<T, INITIAL_DELAY, Context>
where
T: Packable<N>,
WithHash<SharedMutableValues<T, INITIAL_DELAY>, _>: Packable<N>,
{
fn get_storage_slot(self) -> Field {
self.storage_slot
@@ -56,39 +45,27 @@ where
// future, so that they can guarantee the value will not have possibly changed by then (because of the delay).
// The delay for changing a value is initially equal to INITIAL_DELAY, but can be changed by calling
// `schedule_delay_change`.
impl<T, let INITIAL_DELAY: u32, Context> SharedMutable<T, INITIAL_DELAY, Context>
impl<T, let INITIAL_DELAY: u32, let N: u32, Context> SharedMutable<T, INITIAL_DELAY, Context>
where
T: ToField + FromField + Eq,
T: Packable<N> + Eq,
{
pub fn new(context: Context, storage_slot: Field) -> Self {
assert(storage_slot != 0, "Storage slot 0 not allowed. Storage slots must start from 1.");
Self { context, storage_slot }
}

// Since we can't rely on the native storage allocation scheme, we hash the storage slot to get a unique location in
// which we can safely store as much data as we need.
// See https://github.com/AztecProtocol/aztec-packages/issues/5492 and
// https://github.com/AztecProtocol/aztec-packages/issues/5736
// We store three things in public storage:
// - a ScheduledValueChange
// - a ScheduledDelayChange
// - the hash of both of these (via `hash_scheduled_data`)
fn get_value_change_storage_slot(self) -> Field {
poseidon2_hash_with_separator([self.storage_slot], VALUE_CHANGE_SEPARATOR)
SharedMutableValues::<T, INITIAL_DELAY>::get_value_change_storage_slot(self.storage_slot)
}

fn get_delay_change_storage_slot(self) -> Field {
poseidon2_hash_with_separator([self.storage_slot], DELAY_CHANGE_SEPARATOR)
}

fn get_hash_storage_slot(self) -> Field {
poseidon2_hash_with_separator([self.storage_slot], HASH_SEPARATOR)
SharedMutableValues::<T, INITIAL_DELAY>::get_delay_change_storage_slot(self.storage_slot)
}
}

impl<T, let INITIAL_DELAY: u32> SharedMutable<T, INITIAL_DELAY, &mut PublicContext>
impl<T, let INITIAL_DELAY: u32, let N: u32> SharedMutable<T, INITIAL_DELAY, &mut PublicContext>
where
T: ToField + FromField + Eq,
T: Packable<N> + Eq,
{

pub fn schedule_value_change(self, new_value: T) {
@@ -147,24 +124,22 @@ where
value_change: ScheduledValueChange<T>,
delay_change: ScheduledDelayChange<INITIAL_DELAY>,
) {
// Whenever we write to public storage, we write both the value change and delay change as well as the hash of
// them both. This guarantees that the hash is always kept up to date.
// While this makes for more costly writes, it also makes private proofs much simpler because they only need to
// produce a historical proof for the hash, which results in a single inclusion proof (as opposed to 4 in the
// best case scenario in which T is a single field). Private shared mutable reads are assumed to be much more
// frequent than public writes, so this tradeoff makes sense.
self.context.storage_write(self.get_value_change_storage_slot(), value_change);
self.context.storage_write(self.get_delay_change_storage_slot(), delay_change);
self.context.storage_write(
self.get_hash_storage_slot(),
SharedMutable::hash_scheduled_data(value_change, delay_change),
);
// Whenever we write to public storage, we write both the value change and delay change to storage at once.
// We do so by wrapping them in a single struct (`SharedMutableValues`). Then we wrap the resulting struct in
// `WithHash`.
// Wrapping in `WithHash` makes for more costly writes but it also makes private proofs much simpler because
// they only need to produce a historical proof for the hash, which results in a single inclusion proof (as
// opposed to 4 in the best case scenario in which T is a single field). Private shared mutable reads are
// assumed to be much more frequent than public writes, so this tradeoff makes sense.
let values = WithHash::new(SharedMutableValues::new(value_change, delay_change));

self.context.storage_write(self.storage_slot, values);
}
}

impl<T, let INITIAL_DELAY: u32> SharedMutable<T, INITIAL_DELAY, &mut PrivateContext>
impl<T, let INITIAL_DELAY: u32, let N: u32> SharedMutable<T, INITIAL_DELAY, &mut PrivateContext>
where
T: ToField + FromField + Eq,
T: Packable<N> + Eq,
{
pub fn get_current_value(self) -> T {
// When reading the current value in private we construct a historical state proof for the public value.
@@ -200,83 +175,24 @@ where

let historical_block_number = header.global_variables.block_number as u32;

// We could simply produce historical inclusion proofs for both the ScheduledValueChange and
// ScheduledDelayChange, but that'd require one full sibling path per storage slot (since, due to kernel siloing,
// the storage is not contiguous), and in the best case in which T is a single field that'd be 4 slots.
// Instead, we get an oracle to provide us the correct values for both the value and delay changes, and prove
// inclusion of their hash, which is both a much smaller proof (a single slot) and independent of the size of T.
/// Safety: The hints are checked to be a preimage of a hash obtained from constrained context.
let (value_change_hint, delay_change_hint) = unsafe {
get_public_storage_hints(address, self.storage_slot, historical_block_number)
};

// Ideally the following would be simply public_storage::read_historical, but we can't implement that yet.
let hash = header.public_storage_historical_read(self.get_hash_storage_slot(), address);

if hash != 0 {
assert_eq(
hash,
SharedMutable::hash_scheduled_data(value_change_hint, delay_change_hint),
"Hint values do not match hash",
);
} else {
// The hash slot can only hold a zero if it is uninitialized, meaning no value or delay change was ever
// scheduled. Therefore, the hints must then correspond to uninitialized scheduled changes.
assert_eq(
value_change_hint,
ScheduledValueChange::unpack(zeroed()),
"Non-zero value change for zero hash",
);
assert_eq(
delay_change_hint,
ScheduledDelayChange::unpack(zeroed()),
"Non-zero delay change for zero hash",
);
};

(value_change_hint, delay_change_hint, historical_block_number)
}
let values: SharedMutableValues<T, INITIAL_DELAY> =
WithHash::historical_public_storage_read(header, address, self.storage_slot);

fn hash_scheduled_data(
value_change: ScheduledValueChange<T>,
delay_change: ScheduledDelayChange<INITIAL_DELAY>,
) -> Field {
let concatenated: [Field; 4] = array_concat(value_change.pack(), delay_change.pack());
poseidon2_hash(concatenated)
(values.svc, values.sdc, historical_block_number)
}
}

impl<T, let INITIAL_DELAY: u32> SharedMutable<T, INITIAL_DELAY, UnconstrainedContext>
impl<T, let INITIAL_DELAY: u32, let N: u32> SharedMutable<T, INITIAL_DELAY, UnconstrainedContext>
where
T: ToField + FromField + Eq,
T: Packable<N> + Eq,
{
pub unconstrained fn get_current_value(self) -> T {
let block_number = self.context.block_number() as u32;
self.read_value_change().get_current_at(block_number)
}
let value_change: ScheduledValueChange<T> = WithHash::unconstrained_public_storage_read(
self.context,
self.get_value_change_storage_slot(),
);

unconstrained fn read_value_change(self) -> ScheduledValueChange<T> {
self.context.storage_read(self.get_value_change_storage_slot())
let block_number = self.context.block_number() as u32;
value_change.get_current_at(block_number)
}
}

unconstrained fn get_public_storage_hints<T, let INITIAL_DELAY: u32>(
address: AztecAddress,
storage_slot: Field,
block_number: u32,
) -> (ScheduledValueChange<T>, ScheduledDelayChange<INITIAL_DELAY>)
where
T: ToField + FromField + Eq,
{
// This function cannot be part of the &mut PrivateContext impl because that'd mean that by passing `self` we'd also
// be passing a mutable reference to an unconstrained function, which is not allowed. We therefore create a dummy
// state variable here so that we can access the methods to compute storage slots. This will all be removed in the
// future once we do proper storage slot allocation (#5492).
let dummy: SharedMutable<T, INITIAL_DELAY, ()> = SharedMutable::new((), storage_slot);

(
storage_read(address, dummy.get_value_change_storage_slot(), block_number),
storage_read(address, dummy.get_delay_change_storage_slot(), block_number),
)
}
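
A note on the new layout: the comments in `write_values` and in the private read path above say that the value change and delay change are now packed together into `SharedMutableValues`, wrapped in `WithHash`, and written starting at `self.storage_slot`, so that a private reader only needs a historical inclusion proof for a single hash rather than for every packed field. `WithHash` itself is not shown in this commit; the following is a hypothetical, simplified sketch (`WithHashSketch` is an invented name, and the trailing-hash layout is an assumption) of how such a wrapper could pack its contents, written in the style of the traits used above:

use crate::utils::array;
use dep::protocol_types::{hash::poseidon2_hash, traits::Packable, utils::arrays::array_concat};

// Hypothetical wrapper, not the real WithHash: the inner value's packed fields are followed
// by a single Poseidon2 hash of those fields.
struct WithHashSketch<T> {
    inner: T,
}

impl<T, let N: u32> Packable<N + 1> for WithHashSketch<T>
where
    T: Packable<N>,
{
    fn pack(self) -> [Field; N + 1] {
        let packed = self.inner.pack();
        // The trailing field is the hash of all preceding fields, so a private reader can
        // prove inclusion of this one field instead of every packed field.
        array_concat(packed, [poseidon2_hash(packed)])
    }

    fn unpack(fields: [Field; N + 1]) -> Self {
        let packed: [Field; N] = array::subarray(fields, 0);
        assert_eq(fields[N], poseidon2_hash(packed), "Hash does not match packed fields");
        Self { inner: T::unpack(packed) }
    }
}

Under this assumed layout, a SharedMutableValues whose T packs into a single field would occupy 4 + 1 = 5 contiguous storage slots: delay change, pre, post, block_of_change, and the hash.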
(next changed file)
@@ -3,6 +3,8 @@ use std::cmp::min;

mod test;

pub(crate) global SCHEDULED_DELAY_CHANGE_PCKD_LEN: u32 = 1;

// This data structure is used by SharedMutable to store the minimum delay with which a ScheduledValueChange object can
// schedule a change.
// This delay is initially equal to INITIAL_DELAY, and can be safely mutated to any other value over time. This mutation
@@ -125,8 +127,8 @@ impl<let INITIAL_DELAY: u32> ScheduledDelayChange<INITIAL_DELAY> {
}
}

impl<let INITIAL_DELAY: u32> Packable<1> for ScheduledDelayChange<INITIAL_DELAY> {
fn pack(self) -> [Field; 1] {
impl<let INITIAL_DELAY: u32> Packable<SCHEDULED_DELAY_CHANGE_PCKD_LEN> for ScheduledDelayChange<INITIAL_DELAY> {
fn pack(self) -> [Field; SCHEDULED_DELAY_CHANGE_PCKD_LEN] {
// We pack all three u32 values into a single U128, which is made up of two u64 limbs.
// Low limb: [ pre_inner: u32 | post_inner: u32 ]
// High limb: [ empty | pre_is_some: u8 | post_is_some: u8 | block_of_change: u32 ]
Expand All @@ -142,7 +144,7 @@ impl<let INITIAL_DELAY: u32> Packable<1> for ScheduledDelayChange<INITIAL_DELAY>
[packed.to_integer()]
}

fn unpack(input: [Field; 1]) -> Self {
fn unpack(input: [Field; SCHEDULED_DELAY_CHANGE_PCKD_LEN]) -> Self {
let packed = U128::from_integer(input[0]);

// We use division and modulo to clear the bits that correspond to other values when unpacking.
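
The `pack`/`unpack` pair above squeezes pre_inner, post_inner, the pre/post is_some flags and block_of_change into a single U128, and (per the comment, with the unpack body truncated in this view) recovers them with division and modulo. Below is a minimal sketch of that divide-and-modulo pattern, reduced to two u32 values in a single u64 limb; the helper names and the test are hypothetical and not part of the commit:

// Simplified sketch, not the real implementation: two u32 values in one u64 limb,
// recovered with division and modulo.
fn pack_pair(hi: u32, lo: u32) -> u64 {
    // hi occupies the upper 32 bits, lo the lower 32 bits.
    (hi as u64) * 0x100000000 + (lo as u64)
}

fn unpack_pair(limb: u64) -> (u32, u32) {
    // Division clears the low bits, modulo clears the high bits.
    let hi = (limb / 0x100000000) as u32;
    let lo = (limb % 0x100000000) as u32;
    (hi, lo)
}

#[test]
fn pack_pair_roundtrip() {
    let (hi, lo) = unpack_pair(pack_pair(7, 42));
    assert_eq(hi, 7);
    assert_eq(lo, 42);
}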
(next changed file)
@@ -1,4 +1,5 @@
use dep::protocol_types::traits::{FromField, Packable, ToField};
use crate::utils::array;
use dep::protocol_types::traits::Packable;
use std::cmp::min;

mod test;
@@ -133,19 +134,28 @@ impl<T> ScheduledValueChange<T> {
}
}

impl<T> Packable<3> for ScheduledValueChange<T>
impl<T, let N: u32> Packable<2 * N + 1> for ScheduledValueChange<T>
where
T: ToField + FromField,
T: Packable<N>,
{
fn pack(self) -> [Field; 3] {
[self.pre.to_field(), self.post.to_field(), self.block_of_change.to_field()]
fn pack(self) -> [Field; 2 * N + 1] {
let mut packed = [0; 2 * N + 1];
let pre_packed = self.pre.pack();
let post_packed = self.post.pack();

for i in 0..N {
packed[i] = pre_packed[i];
packed[N + i] = post_packed[i];
}
packed[2 * N] = self.block_of_change.to_field();
packed
}

fn unpack(input: [Field; 3]) -> Self {
fn unpack(input: [Field; 2 * N + 1]) -> Self {
Self {
pre: FromField::from_field(input[0]),
post: FromField::from_field(input[1]),
block_of_change: FromField::from_field(input[2]),
pre: T::unpack(array::subarray(input, 0)),
post: T::unpack(array::subarray(input, N)),
block_of_change: input[2 * N] as u32,
}
}
}
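
The generic `Packable<2 * N + 1>` impl above lays the fields out as pre (N fields), then post (N fields), then block_of_change (1 field), which is exactly what the subarray calls in `unpack` rely on. A worked example with hypothetical values, for a T that packs into N = 2 fields:

// Hypothetical example, not part of the commit: the layout for N = 2 is
// index:  0     1     2      3      4
// value: [pre0, pre1, post0, post1, block_of_change]
#[test]
fn scheduled_value_change_layout_example() {
    let pre = [10, 11];
    let post = [20, 21];
    let block_of_change: u32 = 7;

    // Mirrors `pack` above for N = 2.
    let mut packed = [0; 5];
    for i in 0..2 {
        packed[i] = pre[i];
        packed[2 + i] = post[i];
    }
    packed[4] = block_of_change as Field;

    // `unpack` reads pre from input[0..N], post from input[N..2 * N] and block_of_change
    // from input[2 * N].
    assert_eq(packed, [10, 11, 20, 21, 7]);
}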
(next changed file)
@@ -0,0 +1,55 @@
use crate::state_vars::shared_mutable::{
scheduled_delay_change::{SCHEDULED_DELAY_CHANGE_PCKD_LEN, ScheduledDelayChange},
scheduled_value_change::ScheduledValueChange,
};
use crate::utils::array;
use dep::protocol_types::{traits::Packable, utils::arrays::array_concat};
use std::meta::derive;

/// SharedMutableValues is just a wrapper around ScheduledValueChange and ScheduledDelayChange that allows us to
/// wrap both of these values in WithHash. WithHash allows for efficient reads of the values in private.
///
/// Note that the WithHash optimization does not work in public (due to there being no unconstrained execution
/// there). But we also want to be able to read the values efficiently in public, and we want to be able to read
/// each value separately. For that reason we expose `get_delay_change_storage_slot` and
/// `get_value_change_storage_slot`, which point to the correct locations in storage. This is "hacky", as we pack
/// and store the values together, but there is no way around it.
#[derive(Eq)]
pub(crate) struct SharedMutableValues<T, let INITIAL_DELAY: u32> {
svc: ScheduledValueChange<T>,
sdc: ScheduledDelayChange<INITIAL_DELAY>,
}

impl<T, let INITIAL_DELAY: u32> SharedMutableValues<T, INITIAL_DELAY> {
pub(crate) fn new(
svc: ScheduledValueChange<T>,
sdc: ScheduledDelayChange<INITIAL_DELAY>,
) -> Self {
SharedMutableValues { svc, sdc }
}

pub fn get_delay_change_storage_slot(shared_mutable_storage_slot: Field) -> Field {
shared_mutable_storage_slot
}

pub fn get_value_change_storage_slot(shared_mutable_storage_slot: Field) -> Field {
shared_mutable_storage_slot + (SCHEDULED_DELAY_CHANGE_PCKD_LEN as Field)
}
}

// We pack to `2 * N + 1 + SCHEDULED_DELAY_CHANGE_PCKD_LEN` fields because ScheduledValueChange contains T twice
// + 1 field for block_of_change + SCHEDULED_DELAY_CHANGE_PCKD_LEN fields for ScheduledDelayChange
impl<T, let INITIAL_DELAY: u32, let N: u32> Packable<2 * N + 1 + SCHEDULED_DELAY_CHANGE_PCKD_LEN> for SharedMutableValues<T, INITIAL_DELAY>
where
T: Packable<N>,
{
fn pack(self) -> [Field; 2 * N + 1 + SCHEDULED_DELAY_CHANGE_PCKD_LEN] {
array_concat(self.sdc.pack(), self.svc.pack())
}

fn unpack(fields: [Field; 2 * N + 1 + SCHEDULED_DELAY_CHANGE_PCKD_LEN]) -> Self {
SharedMutableValues {
sdc: Packable::unpack(array::subarray(fields, 0)),
svc: Packable::unpack(array::subarray(fields, SCHEDULED_DELAY_CHANGE_PCKD_LEN)),
}
}
}
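
Putting the two getters and the Packable impl together, the public storage layout for a SharedMutable whose T packs into N fields is: the packed ScheduledDelayChange (SCHEDULED_DELAY_CHANGE_PCKD_LEN = 1 field) at the state variable's own slot, immediately followed by the packed ScheduledValueChange<T> (2 * N + 1 fields). SharedMutable then wraps the whole struct in WithHash, whose additional layout is not shown in this commit. A small hypothetical test of the slot arithmetic (the slot value 42 and the INITIAL_DELAY of 10 are made up, and since the struct is pub(crate) this would only compile next to it):

#[test]
fn slot_layout_example() {
    let base = 42;
    // The delay change sits at the SharedMutable's own storage slot...
    assert_eq(
        SharedMutableValues::<Field, 10>::get_delay_change_storage_slot(base),
        base,
    );
    // ...and the value change starts right after the delay change's single packed field.
    assert_eq(
        SharedMutableValues::<Field, 10>::get_value_change_storage_slot(base),
        base + 1,
    );
}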
(next changed file)
@@ -344,7 +344,7 @@ unconstrained fn test_get_current_value_in_private_bad_zero_hash_value_hints() {
let _ = state_var.get_current_value();
}

#[test(should_fail_with = "Non-zero delay change for zero hash")]
#[test(should_fail_with = "Non-zero value change for zero hash")]
unconstrained fn test_get_current_value_in_private_bad_zero_hash_delay_hints() {
let mut env = setup();

(remaining changed file not loaded in this view)
