Modularize Normalisation a bit
JasonLG1979 committed May 12, 2023
1 parent 626a51a commit 1416b09
Showing 1 changed file with 166 additions and 133 deletions.
299 changes: 166 additions & 133 deletions playback/src/player.rs
@@ -61,12 +61,10 @@ struct PlayerInternal {
sink_status: SinkStatus,
sink_event_callback: Option<SinkEventCallback>,
volume_getter: Box<dyn VolumeGetter + Send>,
normaliser: Normaliser,
event_senders: Vec<mpsc::UnboundedSender<PlayerEvent>>,
converter: Converter,

normalisation_integrator: f64,
normalisation_peak: f64,

auto_normalise_as_album: bool,
}

@@ -215,6 +213,166 @@ pub fn coefficient_to_duration(coefficient: f64) -> Duration {
Duration::from_secs_f64(-1.0 / f64::ln(coefficient) / SAMPLES_PER_SECOND as f64)
}
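
For reference, the context line above is the inverse of the usual one-pole smoothing coefficient; solving the shown expression for the coefficient gives the relation below, with $f_s$ denoting SAMPLES_PER_SECOND:

```latex
\text{duration} \;=\; \frac{-1}{\ln(\text{coefficient}) \cdot f_s}
\qquad\Longleftrightarrow\qquad
\text{coefficient} \;=\; e^{-1 / (\text{duration} \cdot f_s)}
```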

struct DynamicNormalisation {
threshold_db: f64,
attack_cf: f64,
release_cf: f64,
knee_db: f64,
integrator: f64,
peak: f64,
}

impl DynamicNormalisation {
fn normalise(&mut self, samples: &[f64], volume: f64, factor: f64) -> Vec<f64> {
samples
.iter()
.map(|sample| {
let mut sample = sample * factor;

// Feedforward limiter in the log domain
// After: Giannoulis, D., Massberg, M., & Reiss, J.D. (2012). Digital Dynamic
// Range Compressor Design—A Tutorial and Analysis. Journal of The Audio
// Engineering Society, 60, 399-408.

// Some tracks have samples that are precisely 0.0. That's silence
// and we know we don't need to limit that, in which case we can spare
// the CPU cycles.
//
// Also, calling `ratio_to_db(0.0)` returns `inf` and would get the
// peak detector stuck. Also catch the unlikely case where a sample
// is decoded as `NaN` or some other non-normal value.
let limiter_db = if sample.is_normal() {
// step 1-4: half-wave rectification and conversion into dB
// and gain computer with soft knee and subtractor
let bias_db = ratio_to_db(sample.abs()) - self.threshold_db;
let knee_boundary_db = bias_db * 2.0;

if knee_boundary_db < -self.knee_db {
0.0
} else if knee_boundary_db.abs() <= self.knee_db {
// The textbook equation:
// ratio_to_db(sample.abs()) - (ratio_to_db(sample.abs()) - (bias_db + knee_db / 2.0).powi(2) / (2.0 * knee_db))
// Simplifies to:
// ((2.0 * bias_db) + knee_db).powi(2) / (8.0 * knee_db)
// Which in our case further simplifies to:
// (knee_boundary_db + knee_db).powi(2) / (8.0 * knee_db)
// because knee_boundary_db is 2.0 * bias_db.
(knee_boundary_db + self.knee_db).powi(2) / (8.0 * self.knee_db)
} else {
// Textbook:
// ratio_to_db(sample.abs()) - threshold_db, which is already our bias_db.
bias_db
}
} else {
0.0
};

// Spare the CPU unless (1) the limiter is engaged, (2) we
// were in attack or (3) we were in release, and that attack/
// release wasn't finished yet.
if limiter_db > 0.0 || self.integrator > 0.0 || self.peak > 0.0 {
// step 5: smooth, decoupled peak detector
// Textbook:
// release_cf * integrator + (1.0 - release_cf) * limiter_db
// Simplifies to:
// release_cf * integrator - release_cf * limiter_db + limiter_db
self.integrator = limiter_db.max(
self.release_cf * self.integrator - self.release_cf * limiter_db
+ limiter_db,
);
// Textbook:
// attack_cf * peak + (1.0 - attack_cf) * integrator
// Simplifies to:
// attack_cf * peak - attack_cf * integrator + integrator
self.peak = self.attack_cf * self.peak - self.attack_cf * self.integrator
+ self.integrator;

// step 6: make-up gain applied later (volume attenuation)
// Applying the standard normalisation factor here won't work,
// because there are tracks with peaks as high as 6 dB above
// the default threshold, so that would clip.

// steps 7-8: conversion into level and multiplication into gain stage
sample *= db_to_ratio(-self.peak);
}

sample * volume
})
.collect()
}
}
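
As a worked check on the simplification described in the knee-region comment (this is only the algebra written out, not new behaviour), write $x$ for ratio_to_db(sample.abs()), $T$ for threshold_db, $W$ for knee_db, $b = x - T$ (bias_db) and $k = 2b$ (knee_boundary_db). The textbook attenuation inside the knee then reduces exactly to the expression used in the code:

```latex
x - y \;=\; \frac{\left(x - T + \tfrac{W}{2}\right)^{2}}{2W}
      \;=\; \frac{\left(b + \tfrac{W}{2}\right)^{2}}{2W}
      \;=\; \frac{(2b + W)^{2}}{8W}
      \;=\; \frac{(k + W)^{2}}{8W}
```

The peak-detector lines are the same kind of rewrite: release_cf * integrator + (1.0 - release_cf) * limiter_db distributes to release_cf * integrator - release_cf * limiter_db + limiter_db, and likewise for the attack smoothing.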

enum Normaliser {
No,
Basic,
Dynamic(DynamicNormalisation),
}

impl Normaliser {
fn new(config: &PlayerConfig) -> Self {
if config.normalisation {
debug!("Normalisation Type: {:?}", config.normalisation_type);
debug!(
"Normalisation Pregain: {:.1} dB",
config.normalisation_pregain_db
);
debug!(
"Normalisation Threshold: {:.1} dBFS",
config.normalisation_threshold_dbfs
);
debug!("Normalisation Method: {:?}", config.normalisation_method);

if config.normalisation_method == NormalisationMethod::Dynamic {
// as_millis() has rounding errors (truncates)
debug!(
"Normalisation Attack: {:.0} ms",
coefficient_to_duration(config.normalisation_attack_cf).as_secs_f64() * 1000.
);
debug!(
"Normalisation Release: {:.0} ms",
coefficient_to_duration(config.normalisation_release_cf).as_secs_f64() * 1000.
);

Normaliser::Dynamic(DynamicNormalisation {
threshold_db: config.normalisation_threshold_dbfs,
attack_cf: config.normalisation_attack_cf,
release_cf: config.normalisation_release_cf,
knee_db: config.normalisation_knee_db,
integrator: 0.0,
peak: 0.0,
})
} else {
Normaliser::Basic
}
} else {
Normaliser::No
}
}

fn normalise(&mut self, samples: &[f64], volume: f64, factor: f64) -> Vec<f64> {
match self {
Normaliser::Dynamic(d) => d.normalise(samples, volume, factor),
Normaliser::No => {
if volume < 1.0 {
samples.iter().map(|sample| sample * volume).collect()
} else {
samples.to_vec()
}
}
Normaliser::Basic => {
if volume < 1.0 || factor < 1.0 {
samples
.iter()
.map(|sample| sample * factor * volume)
.collect()
} else {
samples.to_vec()
}
}
}
}
}
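
The net effect of the refactor is that the normalisation mode is decided once, when the player is built, and the per-packet hot path becomes a single match on the resulting enum instead of repeated config checks. Below is a minimal, self-contained sketch of that pattern; the Config fields and the Dynamic payload are simplified stand-ins for illustration, not the actual librespot types.

```rust
// Simplified stand-ins; the real code uses PlayerConfig, NormalisationMethod
// and DynamicNormalisation from playback/src/player.rs.
struct Config {
    normalisation: bool,
    dynamic: bool,
}

enum Normaliser {
    No,
    Basic,
    // The real Dynamic variant carries the full limiter state (threshold,
    // knee, attack/release coefficients, integrator, peak).
    Dynamic { peak: f64 },
}

impl Normaliser {
    // Resolve the mode once from the config, outside the audio loop.
    fn new(config: &Config) -> Self {
        if !config.normalisation {
            Normaliser::No
        } else if config.dynamic {
            Normaliser::Dynamic { peak: 0.0 }
        } else {
            Normaliser::Basic
        }
    }

    // Per-packet path: one match, no repeated config checks.
    fn normalise(&mut self, samples: &[f64], volume: f64, factor: f64) -> Vec<f64> {
        match self {
            Normaliser::No => samples.iter().map(|s| s * volume).collect(),
            Normaliser::Basic => samples.iter().map(|s| s * factor * volume).collect(),
            Normaliser::Dynamic { peak } => samples
                .iter()
                .map(|s| {
                    // Track the running peak as a stand-in for the real limiter.
                    *peak = peak.max(s.abs() * factor);
                    s * factor * volume
                })
                .collect(),
        }
    }
}

fn main() {
    let config = Config { normalisation: true, dynamic: false };
    let mut normaliser = Normaliser::new(&config);
    // 0.5 plays the role of normalisation_factor, 0.9 of the volume attenuation.
    let out = normaliser.normalise(&[0.2, -0.4, 0.8], 0.9, 0.5);
    println!("{out:?}"); // approximately [0.09, -0.18, 0.36]
}
```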

#[derive(Clone, Copy, Debug)]
pub struct NormalisationData {
track_gain_db: f64,
@@ -330,31 +488,7 @@ impl Player {
let (cmd_tx, cmd_rx) = mpsc::unbounded_channel();
let (event_sender, event_receiver) = mpsc::unbounded_channel();

if config.normalisation {
debug!("Normalisation Type: {:?}", config.normalisation_type);
debug!(
"Normalisation Pregain: {:.1} dB",
config.normalisation_pregain_db
);
debug!(
"Normalisation Threshold: {:.1} dBFS",
config.normalisation_threshold_dbfs
);
debug!("Normalisation Method: {:?}", config.normalisation_method);

if config.normalisation_method == NormalisationMethod::Dynamic {
// as_millis() has rounding errors (truncates)
debug!(
"Normalisation Attack: {:.0} ms",
coefficient_to_duration(config.normalisation_attack_cf).as_secs_f64() * 1000.
);
debug!(
"Normalisation Release: {:.0} ms",
coefficient_to_duration(config.normalisation_release_cf).as_secs_f64() * 1000.
);
debug!("Normalisation Knee: {} dB", config.normalisation_knee_db);
}
}
let normaliser = Normaliser::new(&config);

let handle = thread::spawn(move || {
debug!("new Player[{}]", session.session_id());
@@ -372,12 +506,10 @@ impl Player {
sink_status: SinkStatus::Closed,
sink_event_callback: None,
volume_getter,
normaliser,
event_senders: [event_sender].to_vec(),
converter,

normalisation_peak: 0.0,
normalisation_integrator: 0.0,

auto_normalise_as_album: false,
};

@@ -1325,108 +1457,9 @@ impl PlayerInternal {
// always be 1.0 (no change).
let volume = self.volume_getter.attenuation_factor();

// For the basic normalisation method, a normalisation factor of 1.0 indicates that
// there is nothing to normalise (all samples should pass unaltered). For the
// dynamic method, there may still be peaks that we want to shave off.
// No matter the case we apply volume attenuation last if there is any.
if !self.config.normalisation {
if volume < 1.0 {
for sample in data.iter_mut() {
*sample *= volume;
}
}
} else if self.config.normalisation_method == NormalisationMethod::Basic
&& (normalisation_factor < 1.0 || volume < 1.0)
{
for sample in data.iter_mut() {
*sample *= normalisation_factor * volume;
}
} else if self.config.normalisation_method == NormalisationMethod::Dynamic {
// zero-cost shorthands
let threshold_db = self.config.normalisation_threshold_dbfs;
let knee_db = self.config.normalisation_knee_db;
let attack_cf = self.config.normalisation_attack_cf;
let release_cf = self.config.normalisation_release_cf;

for sample in data.iter_mut() {
*sample *= normalisation_factor;

// Feedforward limiter in the log domain
// After: Giannoulis, D., Massberg, M., & Reiss, J.D. (2012). Digital Dynamic
// Range Compressor Design—A Tutorial and Analysis. Journal of The Audio
// Engineering Society, 60, 399-408.

// Some tracks have samples that are precisely 0.0. That's silence
// and we know we don't need to limit that, in which case we can spare
// the CPU cycles.
//
// Also, calling `ratio_to_db(0.0)` returns `inf` and would get the
// peak detector stuck. Also catch the unlikely case where a sample
// is decoded as `NaN` or some other non-normal value.
let limiter_db = if sample.is_normal() {
// step 1-4: half-wave rectification and conversion into dB
// and gain computer with soft knee and subtractor
let bias_db = ratio_to_db(sample.abs()) - threshold_db;
let knee_boundary_db = bias_db * 2.0;

if knee_boundary_db < -knee_db {
0.0
} else if knee_boundary_db.abs() <= knee_db {
// The textbook equation:
// ratio_to_db(sample.abs()) - (ratio_to_db(sample.abs()) - (bias_db + knee_db / 2.0).powi(2) / (2.0 * knee_db))
// Simplifies to:
// ((2.0 * bias_db) + knee_db).powi(2) / (8.0 * knee_db)
// Which in our case further simplifies to:
// (knee_boundary_db + knee_db).powi(2) / (8.0 * knee_db)
// because knee_boundary_db is 2.0 * bias_db.
(knee_boundary_db + knee_db).powi(2) / (8.0 * knee_db)
} else {
// Textbook:
// ratio_to_db(sample.abs()) - threshold_db, which is already our bias_db.
bias_db
}
} else {
0.0
};

// Spare the CPU unless (1) the limiter is engaged, (2) we
// were in attack or (3) we were in release, and that attack/
// release wasn't finished yet.
if limiter_db > 0.0
|| self.normalisation_integrator > 0.0
|| self.normalisation_peak > 0.0
{
// step 5: smooth, decoupled peak detector
// Textbook:
// release_cf * self.normalisation_integrator + (1.0 - release_cf) * limiter_db
// Simplifies to:
// release_cf * self.normalisation_integrator - release_cf * limiter_db + limiter_db
self.normalisation_integrator = f64::max(
limiter_db,
release_cf * self.normalisation_integrator
- release_cf * limiter_db
+ limiter_db,
);
// Textbook:
// attack_cf * self.normalisation_peak + (1.0 - attack_cf) * self.normalisation_integrator
// Simplifies to:
// attack_cf * self.normalisation_peak - attack_cf * self.normalisation_integrator + self.normalisation_integrator
self.normalisation_peak = attack_cf * self.normalisation_peak
- attack_cf * self.normalisation_integrator
+ self.normalisation_integrator;

// step 6: make-up gain applied later (volume attenuation)
// Applying the standard normalisation factor here won't work,
// because there are tracks with peaks as high as 6 dB above
// the default threshold, so that would clip.

// steps 7-8: conversion into level and multiplication into gain stage
*sample *= db_to_ratio(-self.normalisation_peak);
}

*sample *= volume;
}
}
*data = self
.normaliser
.normalise(data, volume, normalisation_factor);
}

if let Err(e) = self.sink.write(packet, &mut self.converter) {
