Skip to content

Commit

Permalink
Decay historical liquidity tracking when no new data is added
Browse files Browse the repository at this point in the history
To avoid scoring based on incredibly stale historical liquidity data,
we add a new half-life here which is used to (very slowly) decay the
historical liquidity tracking buckets when no new updates arrive.
  • Loading branch information
TheBlueMatt committed Aug 22, 2022
1 parent 5cc2182 commit 116e7b9
Showing 1 changed file with 40 additions and 6 deletions.
46 changes: 40 additions & 6 deletions lightning/src/routing/scoring.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ use util::logger::Logger;
use util::time::Time;

use prelude::*;
use core::fmt;
use core::{cmp, fmt};
use core::cell::{RefCell, RefMut};
use core::convert::TryInto;
use core::ops::{Deref, DerefMut};
Expand Down Expand Up @@ -429,6 +429,16 @@ pub struct ProbabilisticScoringParameters {
/// [`liquidity_penalty_multiplier_msat`]: Self::liquidity_penalty_multiplier_msat
pub historical_liquidity_penalty_amount_multiplier_msat: u64,

/// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
/// tracking can simply live on with increasingly stale data. Instead, when a channel has not
/// seen a liquidity estimate update for this amount of time, the historical datapoints are
/// decayed by half.
///
/// Note that 16 or more half lives guarantees that all historical data will be wiped.
///
/// Default value: 14 days
pub no_updates_historical_half_life: Duration,

/// Manual penalties used for the given nodes. Allows to set a particular penalty for a given
/// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
/// considered during path finding.
Expand Down Expand Up @@ -502,6 +512,11 @@ impl HistoricalBucketRangeTracker {
}
}
}
fn decay_data(&mut self, half_lives: u32) {
for e in self.buckets.iter_mut() {
*e = e.checked_shr(half_lives).unwrap_or(0);
}
}
}

impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
Expand Down Expand Up @@ -638,6 +653,7 @@ impl ProbabilisticScoringParameters {
liquidity_penalty_amount_multiplier_msat: 0,
historical_liquidity_penalty_multiplier_msat: 0,
historical_liquidity_penalty_amount_multiplier_msat: 0,
no_updates_historical_half_life: Duration::from_secs(60 * 60 * 24 * 14),
manual_node_penalties: HashMap::new(),
anti_probing_penalty_msat: 0,
considered_impossible_penalty_msat: 0,
Expand All @@ -663,6 +679,7 @@ impl Default for ProbabilisticScoringParameters {
liquidity_penalty_amount_multiplier_msat: 192,
historical_liquidity_penalty_multiplier_msat: 10_000,
historical_liquidity_penalty_amount_multiplier_msat: 64,
no_updates_historical_half_life: Duration::from_secs(60 * 60 * 24 * 14),
manual_node_penalties: HashMap::new(),
anti_probing_penalty_msat: 250,
considered_impossible_penalty_msat: 1_0000_0000_000,
Expand Down Expand Up @@ -802,14 +819,25 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
// sending less than 1/16th of a channel's capacity, or 1/8th if we used the top of the
// bucket.
let mut total_valid_points_tracked = 0;
let decays = self.now.duration_since(*self.last_updated).as_secs()
.checked_div(params.no_updates_historical_half_life.as_secs())
.map(|decays| cmp::min(decays, u32::max_value() as u64) as u32)
.unwrap_or(u32::max_value());
// Rather than actually decaying the individual buckets, which would lose precision, we
// simply track whether any buckets have data which wouldn't be decayed to zero, and if
// there are none, treat it as if we had no data.
let mut is_fully_decayed = true;
for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
if min_bucket.checked_shr(decays).unwrap_or(0) > 0 { is_fully_decayed = false; }
for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
if max_bucket.checked_shr(decays).unwrap_or(0) > 0 { is_fully_decayed = false; }
}
}
if total_valid_points_tracked == 0 {
// If we don't have any valid points, redo the non-historical calculation with no
// liquidity bounds tracked and the historical penalty multipliers.
if total_valid_points_tracked.checked_shr(decays).unwrap_or(0) < 32*32 || is_fully_decayed {
// If we don't have any valid points (or, once decayed, we have less than a full
// point), redo the non-historical calculation with no liquidity bounds tracked and
// the historical penalty multipliers.
let max_capacity = self.capacity_msat.saturating_sub(amount_msat).saturating_add(1);
let negative_log10_times_2048 =
approx::negative_log10_times_2048(max_capacity, self.capacity_msat.saturating_add(1));
Expand Down Expand Up @@ -917,6 +945,12 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
}

fn update_history_buckets(&mut self) {
let half_lives = self.now.duration_since(*self.last_updated).as_secs()
.checked_div(self.params.no_updates_historical_half_life.as_secs())
.map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
self.min_liquidity_offset_history.decay_data(half_lives);
self.max_liquidity_offset_history.decay_data(half_lives);

debug_assert!(*self.min_liquidity_offset_msat <= self.capacity_msat);
self.min_liquidity_offset_history.track_datapoint(
(self.min_liquidity_offset_msat.saturating_sub(1) * 8 / self.capacity_msat)
Expand All @@ -935,8 +969,8 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
} else {
self.decayed_offset_msat(*self.max_liquidity_offset_msat)
};
*self.last_updated = self.now;
self.update_history_buckets();
*self.last_updated = self.now;
}

/// Adjusts the upper bound of the channel liquidity balance in this direction.
Expand All @@ -947,8 +981,8 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
} else {
self.decayed_offset_msat(*self.min_liquidity_offset_msat)
};
*self.last_updated = self.now;
self.update_history_buckets();
*self.last_updated = self.now;
}
}

Expand Down

0 comments on commit 116e7b9

Please sign in to comment.