Test utils: allow queueing >2 persistence update results
valentinewallace committed Dec 22, 2022
1 parent 41d1730 commit c116cd4
Showing 2 changed files with 22 additions and 19 deletions.
lightning/src/ln/chanmon_update_fail_tests.rs (2 changes: 1 addition & 1 deletion)
@@ -1959,7 +1959,7 @@ fn test_path_paused_mpp() {
 	// Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
 	// (for the path 0 -> 2 -> 3) fails.
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
-	chanmon_cfgs[0].persister.set_next_update_ret(Some(ChannelMonitorUpdateStatus::InProgress));
+	chanmon_cfgs[0].persister.set_next_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
 	// Now check that we get the right return value, indicating that the first path succeeded but
 	// the second got a MonitorUpdateInProgress err. This implies
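The test above only queues a single extra status, but with this change each call to set_next_update_ret appends one more entry, so a test can stage an arbitrary sequence of results. A minimal sketch, not part of the commit, assuming the usual chanmon_cfgs functional-test setup:

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);       // queue = [Completed]
	chanmon_cfgs[0].persister.set_next_update_ret(ChannelMonitorUpdateStatus::InProgress); // queue = [Completed, InProgress]
	chanmon_cfgs[0].persister.set_next_update_ret(ChannelMonitorUpdateStatus::InProgress); // queue = [Completed, InProgress, InProgress]
	// Successive monitor persistence calls now return Completed, then InProgress, and then
	// keep returning InProgress, since the last queued status is never popped.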
lightning/src/util/test_utils.rs (39 changes: 21 additions & 18 deletions)
@@ -224,10 +224,9 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 }
 
 pub struct TestPersister {
-	pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
-	/// If this is set to Some(), after the next return, we'll always return this until update_ret
-	/// is changed:
-	pub next_update_ret: Mutex<Option<chain::ChannelMonitorUpdateStatus>>,
+	/// The queue of update statuses we'll return. If only one is queued, we'll always return it. If
+	/// none are queued, ::Completed will always be returned.
+	pub update_rets: Mutex<VecDeque<chain::ChannelMonitorUpdateStatus>>,
 	/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
 	/// MonitorUpdateId here.
 	pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
@@ -238,35 +237,39 @@ pub struct TestPersister {
 impl TestPersister {
 	pub fn new() -> Self {
 		Self {
-			update_ret: Mutex::new(chain::ChannelMonitorUpdateStatus::Completed),
-			next_update_ret: Mutex::new(None),
+			update_rets: Mutex::new(VecDeque::new()),
 			chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
 			offchain_monitor_updates: Mutex::new(HashMap::new()),
 		}
 	}
 
+	/// Clear the queue of update statuses and set the one we'll always return.
 	pub fn set_update_ret(&self, ret: chain::ChannelMonitorUpdateStatus) {
-		*self.update_ret.lock().unwrap() = ret;
+		let mut update_rets = self.update_rets.lock().unwrap();
+		update_rets.clear();
+		update_rets.push_front(ret);
 	}
 
-	pub fn set_next_update_ret(&self, next_ret: Option<chain::ChannelMonitorUpdateStatus>) {
-		*self.next_update_ret.lock().unwrap() = next_ret;
+	/// Queue an update status to return.
+	pub fn set_next_update_ret(&self, next_ret: chain::ChannelMonitorUpdateStatus) {
+		self.update_rets.lock().unwrap().push_back(next_ret);
 	}
 }
 impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
 	fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-		let ret = self.update_ret.lock().unwrap().clone();
-		if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
-			*self.update_ret.lock().unwrap() = next_ret;
-		}
-		ret
+		let mut update_rets = self.update_rets.lock().unwrap();
+		if update_rets.len() > 1 { return update_rets.pop_front().unwrap() }
+		else if update_rets.len() == 1 { return *update_rets.front().clone().unwrap() }
+		chain::ChannelMonitorUpdateStatus::Completed
 	}
 
 	fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-		let ret = self.update_ret.lock().unwrap().clone();
-		if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
-			*self.update_ret.lock().unwrap() = next_ret;
-		}
+		let ret = {
+			let mut update_rets = self.update_rets.lock().unwrap();
+			if update_rets.len() > 1 { update_rets.pop_front().unwrap() }
+			else if update_rets.len() == 1 { *update_rets.front().clone().unwrap() }
+			else { chain::ChannelMonitorUpdateStatus::Completed }
+		};
 		if update.is_none() {
 			self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
 		} else {
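For reference, a small self-contained model of the queue behavior the new TestPersister code implements. The types and names here (QueuedPersister, UpdateStatus, next_ret) are stand-ins invented for illustration, not the actual LDK types; UpdateStatus plays the role of chain::ChannelMonitorUpdateStatus.

	use std::collections::VecDeque;
	use std::sync::Mutex;

	// Stand-in for chain::ChannelMonitorUpdateStatus, just to model the queue semantics.
	#[derive(Clone, Copy, Debug, PartialEq)]
	enum UpdateStatus { Completed, InProgress }

	struct QueuedPersister { update_rets: Mutex<VecDeque<UpdateStatus>> }

	impl QueuedPersister {
		// Mirrors the logic added to persist_new_channel/update_persisted_channel:
		// pop while more than one status is queued, keep returning the last one,
		// and fall back to Completed when nothing is queued.
		fn next_ret(&self) -> UpdateStatus {
			let mut update_rets = self.update_rets.lock().unwrap();
			if update_rets.len() > 1 { return update_rets.pop_front().unwrap(); }
			if update_rets.len() == 1 { return *update_rets.front().unwrap(); }
			UpdateStatus::Completed
		}
	}

	fn main() {
		let p = QueuedPersister { update_rets: Mutex::new(VecDeque::new()) };
		p.update_rets.lock().unwrap().extend([UpdateStatus::InProgress, UpdateStatus::Completed]);
		assert_eq!(p.next_ret(), UpdateStatus::InProgress); // popped from the front
		assert_eq!(p.next_ret(), UpdateStatus::Completed);  // last entry, kept in the queue
		assert_eq!(p.next_ret(), UpdateStatus::Completed);  // still returned on later calls
	}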
