Skip to content

Commit

Permalink
Merge branch 'main' into deploy
Browse files Browse the repository at this point in the history
  • Loading branch information
microwavedcola1 committed Apr 18, 2024
2 parents 32d7ba8 + a87be8e commit fb6311e
Show file tree
Hide file tree
Showing 77 changed files with 3,600 additions and 930 deletions.
30 changes: 27 additions & 3 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,33 @@ Update this for each program release and mainnet deployment.

## not on mainnet

### v0.23.0, 2024-3-
### v0.24.0, 2024-4-

- Allow skipping banks and invalid oracles when computing health (#891)

This is only possible when we know for sure that the operation would not put the account into the negative health zone.

- Add support for Raydium CLMM as oracle fallback (#856)

- Add a `TokenBalanceLog` when charging collateral fees (#894)

- Withdraw instruction: remove overflow error and return appropriate error message instead (#910)

- Banks: add more safety checks (#895)

- Add a health check instruction (#913)

Assert in a transaction that operations run on a mango account do not reduce its health below a specified amount.

- Add a sequence check instruction (#909)

Assert that a transaction was emitted and run with a correct view of the current mango state.

## mainnet

### v0.23.0, 2024-3-8

Deployment: Mar 8, 2024 at 12:10:52 Central European Standard Time, https://explorer.solana.com/tx/6MXGookZoYGMYb7tWrrmgZzVA13HJimHNqwHRVFeqL9YpQD7YasH1pQn4MSQTK1o13ixKTGFxwZsviUzmHzzP9m

- Allow disabling asset liquidations for tokens (#867)

Expand All @@ -26,8 +52,6 @@ Update this for each program release and mainnet deployment.
- Flash loan: Add a "swap without flash loan fees" option (#882)
- Cleanup, tests and minor (#878, #875, #854, #838, #895)

## mainnet

### v0.22.0, 2024-3-3

Deployment: Mar 3, 2024 at 23:52:08 Central European Standard Time, https://explorer.solana.com/tx/3MpEMU12Pv7RpSnwfShoM9sbyr41KAEeJFCVx9ypkq8nuK8Q5vm7CRLkdhH3u91yQ4k44a32armZHaoYguX6NqsY
Expand Down
20 changes: 19 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

13 changes: 6 additions & 7 deletions RELEASING.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,19 +7,18 @@
- 4MangoMjqJ2firMokCjjGgoK8d4MXcrgL7XJaL3w6fVg is the address of the Mango v4 Program
- FP4PxqHTVzeG2c6eZd7974F9WvKUSdBeduUK3rjYyvBw is the address of the Mango v4 Program Governance

- Check out the latest version of the `dev` branch
- Assuming there's a release branch (like release/program-v0.22.0)
with a completed audit and an updated changelog.

- Update the changelog

git log program-v0.11.0..HEAD -- programs/mango-v4/
- Check out the release branch

- Make sure the version is bumped in programs/mango-v4/Cargo.toml

- Update the idl ./update-local-idl.sh
- Update the idl ./update-local-idl.sh and verify that there's no difference

- Run the tests to double check
- Run the tests to double check there are no failures

- Tag and push
- Tag (`git tag program-v0.xy.z HEAD`) and push it (`git push <tag>`)

- Do a verifiable build

Expand Down
Binary file added audits/Audit_OtterSec_Mango_v0.22.0.pdf
Binary file not shown.
Binary file added audits/Audit_OtterSec_Mango_v0.23.0.pdf
Binary file not shown.
Binary file added audits/Audit_OtterSec_Mango_v0.24.0.pdf
Binary file not shown.
7 changes: 4 additions & 3 deletions bin/cli/src/save_snapshot.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,12 +28,13 @@ pub async fn save_snapshot(

let group_context = MangoGroupContext::new_from_rpc(client.rpc_async(), mango_group).await?;

let oracles_and_vaults = group_context
let extra_accounts = group_context
.tokens
.values()
.map(|value| value.oracle)
.chain(group_context.perp_markets.values().map(|p| p.oracle))
.chain(group_context.tokens.values().flat_map(|value| value.vaults))
.chain(group_context.address_lookup_tables.iter().copied())
.unique()
.filter(|pk| *pk != Pubkey::default())
.collect::<Vec<Pubkey>>();
Expand All @@ -55,7 +56,7 @@ pub async fn save_snapshot(
serum_programs,
open_orders_authority: mango_group,
},
oracles_and_vaults.clone(),
extra_accounts.clone(),
account_update_sender.clone(),
);

Expand All @@ -75,7 +76,7 @@ pub async fn save_snapshot(
snapshot_interval: Duration::from_secs(6000),
min_slot: first_websocket_slot + 10,
},
oracles_and_vaults,
extra_accounts,
account_update_sender,
);
tokio::spawn(async move {
Expand Down
1 change: 1 addition & 0 deletions bin/liquidator/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -49,3 +49,4 @@ tokio-stream = { version = "0.1.9"}
tokio-tungstenite = "0.16.1"
tracing = "0.1"
regex = "1.9.5"
hdrhistogram = "7.5.4"
84 changes: 82 additions & 2 deletions bin/liquidator/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -287,6 +287,8 @@ async fn main() -> anyhow::Result<()> {

let mut metric_account_update_queue_len =
metrics.register_u64("account_update_queue_length".into());
let mut metric_chain_update_latency =
metrics.register_latency("in-memory chain update".into());
let mut metric_mango_accounts = metrics.register_u64("mango_accounts".into());

let mut mint_infos = HashMap::<TokenIndex, Pubkey>::new();
Expand All @@ -299,13 +301,23 @@ async fn main() -> anyhow::Result<()> {
.recv()
.await
.expect("channel not closed");
let current_time = Instant::now();
metric_account_update_queue_len.set(account_update_receiver.len() as u64);

message.update_chain_data(&mut chain_data.write().unwrap());

match message {
Message::Account(account_write) => {
let mut state = shared_state.write().unwrap();
let reception_time = account_write.reception_time;
state.oldest_chain_event_reception_time = Some(
state
.oldest_chain_event_reception_time
.unwrap_or(reception_time),
);

metric_chain_update_latency.push(current_time - reception_time);

if is_mango_account(&account_write.account, &mango_group).is_some() {
// e.g. to render debug logs RUST_LOG="liquidator=debug"
debug!(
Expand All @@ -320,8 +332,21 @@ async fn main() -> anyhow::Result<()> {
}
Message::Snapshot(snapshot) => {
let mut state = shared_state.write().unwrap();
let mut reception_time = None;

// Track all mango account pubkeys
for update in snapshot.iter() {
reception_time = Some(
update
.reception_time
.min(reception_time.unwrap_or(update.reception_time)),
);
state.oldest_chain_event_reception_time = Some(
state
.oldest_chain_event_reception_time
.unwrap_or(update.reception_time),
);

if is_mango_account(&update.account, &mango_group).is_some() {
state.mango_accounts.insert(update.pubkey);
}
Expand All @@ -335,6 +360,11 @@ async fn main() -> anyhow::Result<()> {
oracles.insert(perp_market.oracle);
}
}

if reception_time.is_some() {
metric_chain_update_latency
.push(current_time - reception_time.unwrap());
}
metric_mango_accounts.set(state.mango_accounts.len() as u64);

state.one_snapshot_done = true;
Expand Down Expand Up @@ -374,35 +404,82 @@ async fn main() -> anyhow::Result<()> {
let liquidation_job = tokio::spawn({
let mut interval =
mango_v4_client::delay_interval(Duration::from_millis(cli.check_interval_ms));
let mut metric_liquidation_check = metrics.register_latency("liquidation_check".into());
let mut metric_liquidation_start_end =
metrics.register_latency("liquidation_start_end".into());

let mut liquidation_start_time = None;
let mut tcs_start_time = None;

let shared_state = shared_state.clone();
async move {
loop {
interval.tick().await;

let account_addresses = {
let state = shared_state.write().unwrap();
let mut state = shared_state.write().unwrap();
if !state.one_snapshot_done {
// discard first latency info as it will skew data too much
state.oldest_chain_event_reception_time = None;
continue;
}
if state.oldest_chain_event_reception_time.is_none()
&& liquidation_start_time.is_none()
{
// no new update, skip computing
continue;
}

state.mango_accounts.iter().cloned().collect_vec()
};

liquidation.errors.update();
liquidation.oracle_errors.update();

if liquidation_start_time.is_none() {
liquidation_start_time = Some(Instant::now());
}

let liquidated = liquidation
.maybe_liquidate_one(account_addresses.iter())
.await;

if !liquidated {
// This will be incorrect if we liquidate the last checked account
// (We will wait for next full run, skewing latency metrics)
// Probability is very low, might not need to be fixed

let mut state = shared_state.write().unwrap();
let reception_time = state.oldest_chain_event_reception_time.unwrap();
let current_time = Instant::now();

state.oldest_chain_event_reception_time = None;

metric_liquidation_check.push(current_time - reception_time);
metric_liquidation_start_end
.push(current_time - liquidation_start_time.unwrap());
liquidation_start_time = None;
}

let mut took_tcs = false;
if !liquidated && cli.take_tcs == BoolArg::True {
tcs_start_time = Some(tcs_start_time.unwrap_or(Instant::now()));

took_tcs = liquidation
.maybe_take_token_conditional_swap(account_addresses.iter())
.await
.unwrap_or_else(|err| {
error!("error during maybe_take_token_conditional_swap: {err}");
false
})
});

if !took_tcs {
let current_time = Instant::now();
let mut metric_tcs_start_end =
metrics.register_latency("tcs_start_end".into());
metric_tcs_start_end.push(current_time - tcs_start_time.unwrap());
tcs_start_time = None;
}
}

if liquidated || took_tcs {
Expand Down Expand Up @@ -483,6 +560,9 @@ struct SharedState {

/// Is the first snapshot done? Only start checking account health when it is.
one_snapshot_done: bool,

/// Oldest chain event not processed yet
oldest_chain_event_reception_time: Option<Instant>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
Expand Down
38 changes: 38 additions & 0 deletions bin/liquidator/src/metrics.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use hdrhistogram::Histogram;
use std::time::Duration;
use {
std::collections::HashMap,
std::sync::{atomic, Arc, Mutex, RwLock},
Expand All @@ -10,6 +12,7 @@ enum Value {
U64(Arc<atomic::AtomicU64>),
I64(Arc<atomic::AtomicI64>),
String(Arc<Mutex<String>>),
Latency(Arc<Mutex<Histogram<u64>>>),
}

#[derive(Debug)]
Expand Down Expand Up @@ -49,6 +52,18 @@ impl MetricU64 {
}
}

/// A latency metric: an HDR histogram of duration samples, stored as
/// nanoseconds, shared behind an `Arc<Mutex<..>>` so clones record into
/// the same underlying histogram.
#[derive(Clone)]
pub struct MetricLatency {
    value: Arc<Mutex<Histogram<u64>>>,
}

impl MetricLatency {
    /// Record a single latency sample.
    ///
    /// Converts the duration to whole nanoseconds; panics if the value
    /// does not fit in `u64` or if the histogram rejects the sample.
    pub fn push(&mut self, duration: std::time::Duration) {
        let nanos = u64::try_from(duration.as_nanos()).unwrap();
        self.value
            .lock()
            .unwrap()
            .record(nanos)
            .expect("latency error");
    }
}

#[derive(Clone)]
pub struct MetricI64 {
value: Arc<atomic::AtomicI64>,
Expand Down Expand Up @@ -110,6 +125,19 @@ impl Metrics {
}
}

/// Get-or-create a latency histogram registered under `name`.
///
/// The histogram is created with 3 significant figures on first use;
/// subsequent calls with the same name return a handle to the same
/// underlying histogram. Panics if `name` was previously registered as
/// a different metric type.
pub fn register_latency(&self, name: String) -> MetricLatency {
    let mut registry = self.registry.write().unwrap();
    let slot = registry.entry(name).or_insert_with(|| {
        Value::Latency(Arc::new(Mutex::new(Histogram::<u64>::new(3).unwrap())))
    });
    let value = match slot {
        Value::Latency(v) => v.clone(),
        _ => panic!("bad metric type"),
    };
    MetricLatency { value }
}

pub fn register_string(&self, name: String) -> MetricString {
let mut registry = self.registry.write().unwrap();
let value = registry
Expand Down Expand Up @@ -187,6 +215,16 @@ pub fn start() -> Metrics {
);
}
}
Value::Latency(v) => {
let hist = v.lock().unwrap();

info!(
"metric: {}: 99'th percentile: {:?}, 99,9'th percentile: {:?}",
name,
Duration::from_nanos(hist.value_at_quantile(0.99)),
Duration::from_nanos(hist.value_at_quantile(0.999))
);
}
}
}
}
Expand Down
Loading

0 comments on commit fb6311e

Please sign in to comment.