Skip to content

Commit

Permalink
Revert "(XXX: Migration) Reduce bucket size somewhat"
Browse files Browse the repository at this point in the history
This reverts commit 31c12a6.
  • Loading branch information
TheBlueMatt committed Dec 18, 2024
1 parent 935ff14 commit 1945337
Showing 1 changed file with 15 additions and 17 deletions.
32 changes: 15 additions & 17 deletions lightning/src/routing/scoring.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1695,40 +1695,38 @@ mod bucketed_history {
buckets: [u16; 32],
}

/// Buckets are stored in fixed point numbers with a 4 bit fractional part. Thus, the value
/// "one" is 16, or this constant.
pub const BUCKET_FIXED_POINT_ONE: u16 = 16;
/// Buckets are stored in fixed point numbers with a 5 bit fractional part. Thus, the value
/// "one" is 32, or this constant.
///
/// Each tracked datapoint adds this amount to the relevant bucket (via `saturating_add`),
/// while every update first decays all buckets by 2047/2048, which together keep bucket
/// values bounded within the `u16` range.
pub const BUCKET_FIXED_POINT_ONE: u16 = 32;

impl HistoricalBucketRangeTracker {
pub(super) fn new() -> Self { Self { buckets: [0; 32] } }
fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
// We have 32 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
// we spend in each bucket as a 13-bit fixed-point number with a 4 bit fractional part.
// we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
//
// Each time we update our liquidity estimate, we add 16 (1.0 in our fixed-point system) to
// Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
// the buckets for the current min and max liquidity offset positions.
//
// We then decay each bucket by multiplying by 511/512 (avoiding dividing by a
// non-power-of-two). This ensures we can't actually overflow the u13 - when we get to
// 8,176 adding 16 and decaying by 511/512 leaves us back at 8,176.
// We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
// non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
// 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
//
// In total, this allows us to track data for the last 1,000 or so payments across a given
// channel per bucket.
// In total, this allows us to track data for the last 8,000 or so payments across a given
// channel.
//
// These constants are a balance - we try to fit in 2 bytes per bucket to reduce
// overhead and must fit in 13 bits to allow us to square bucket weights without
// overflowing into a 128-bit integer to track total points. We also need to balance
// having more bits in the decimal part (to ensure decay isn't too non-linear) with
// having too few bits in the mantissa, causing us to not store very many datapoints.
// These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
// and need to balance having more bits in the decimal part (to ensure decay isn't too
// non-linear) with having too few bits in the mantissa, causing us to not store very many
// datapoints.
//
// The constants were picked experimentally, selecting a decay amount that restricts us
// from overflowing buckets without having to cap them manually.

let pos: u16 = amount_to_pos(liquidity_offset_msat, capacity_msat);
if pos < POSITION_TICKS {
for e in self.buckets.iter_mut() {
*e = ((*e as u32) * 511 / 512) as u16;
debug_assert!(*e < (1 << 11));
*e = ((*e as u32) * 2047 / 2048) as u16;
}
let bucket = pos_to_bucket(pos);
self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
Expand Down

0 comments on commit 1945337

Please sign in to comment.