Don't sleep after the last try and rename retry_count to try_count #28

Open · wants to merge 3 commits into main
51 changes: 33 additions & 18 deletions src/lock.rs
@@ -64,7 +64,7 @@ pub enum LockError {
 #[derive(Debug, Clone)]
 pub struct LockManager {
     lock_manager_inner: Arc<LockManagerInner>,
-    retry_count: u32,
+    try_count: u32,
     retry_delay: Duration,
 }

@@ -141,7 +141,7 @@ impl LockManager {
                 servers: clients,
                 quorum,
             }),
-            retry_count: DEFAULT_RETRY_COUNT,
+            try_count: DEFAULT_RETRY_COUNT,
             retry_delay: DEFAULT_RETRY_DELAY,
         }
     }
@@ -153,15 +153,22 @@ impl LockManager {
         Ok(buf.to_vec())
     }
 
-    /// Set retry count and retry delay.
+    /// Set total number of tries and maximum retry delay.
     ///
     /// Retries will be delayed by a random amount of time between `0` and `delay`.
     ///
-    /// Retry count defaults to `3`.
-    /// Retry delay defaults to `200`.
-    pub fn set_retry(&mut self, count: u32, delay: Duration) {
-        self.retry_count = count;
+    /// Try count defaults to `3`.
+    /// Retry delay defaults to `200 ms`.
+    pub fn set_retry_policy(&mut self, count: u32, delay: Duration) {
+        self.try_count = count;
         self.retry_delay = delay;
     }
 
+    #[deprecated(note = "Please use `set_retry_policy` instead")]
+    pub fn set_retry(&mut self, count: u32, delay: Duration) {
+        self.set_retry_policy(count, delay);
+    }
+
     async fn lock_instance(
         client: &redis::Client,
         resource: &[u8],
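
For orientation, a minimal usage sketch of the renamed method (a hypothetical caller; it assumes `LockManager::new` takes a list of Redis URIs, as in the test further down):

```rust
use std::time::Duration;

// Hypothetical setup; the addresses are placeholders.
fn configure(addresses: Vec<String>) -> LockManager {
    let mut rl = LockManager::new(addresses);
    // At most 5 tries in total; each retry first sleeps a random 0..200 ms.
    rl.set_retry_policy(5, Duration::from_millis(200));
    rl
}
```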
@@ -235,7 +242,9 @@ impl LockManager {
         T: Fn(&'a Client) -> Fut,
         Fut: Future<Output = bool>,
     {
-        for _ in 0..self.retry_count {
+        let mut current_try = 1;
+
+        loop {
             let start_time = Instant::now();
             let n = join_all(self.lock_manager_inner.servers.iter().map(&lock))
                 .await
@@ -271,13 +280,18 @@ impl LockManager {
                 .await;
             }
 
-            let retry_delay: u64 = self
-                .retry_delay
-                .as_millis()
-                .try_into()
-                .map_err(|_| LockError::TtlTooLarge)?;
-            let n = thread_rng().gen_range(0..retry_delay);
-            tokio::time::sleep(Duration::from_millis(n)).await
+            if current_try < self.try_count {
+                current_try += 1;
+                let retry_delay: u64 = self
+                    .retry_delay
+                    .as_millis()
+                    .try_into()
+                    .map_err(|_| LockError::TtlTooLarge)?;
+                let n = thread_rng().gen_range(0..retry_delay);
+                tokio::time::sleep(Duration::from_millis(n)).await
+            } else {
+                break;
+            }
         }
 
         Err(LockError::Unavailable)
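
The behavioral change is easier to see outside the Redis plumbing. A minimal, self-contained sketch of the same control flow (synchronous, with a stand-in `attempt` function; not the crate's actual API):

```rust
use rand::{thread_rng, Rng};
use std::{thread, time::Duration};

// Stand-in for a single lock attempt against the Redis servers.
fn attempt() -> bool {
    false
}

// Tries up to `try_count` times; sleeps a random 0..max_delay_ms only
// *between* tries, never after the final failed one (the point of this change).
fn acquire_with_retries(try_count: u32, max_delay_ms: u64) -> Result<(), &'static str> {
    let mut current_try = 1;
    loop {
        if attempt() {
            return Ok(());
        }
        if current_try < try_count {
            current_try += 1;
            let n = thread_rng().gen_range(0..max_delay_ms);
            thread::sleep(Duration::from_millis(n));
        } else {
            break; // last try failed: return immediately, no pointless sleep
        }
    }
    Err("unavailable")
}
```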
@@ -323,10 +337,11 @@ impl LockManager {
     /// Acquire the lock for the given resource and the requested TTL.
     ///
     /// If it succeeds, a `Lock` instance is returned,
-    /// including the value and the validity time
+    /// including the value and the validity time.
     ///
-    /// If it fails. `None` is returned.
+    /// If it fails, `LockError::Unavailable` is returned.
     /// A user should retry after a short wait time.
+    /// See `LockManager::set_retry_policy` for automatic retries.
     ///
     /// May return `LockError::TtlTooLarge` if `ttl` is too large.
     pub async fn lock(&self, resource: &[u8], ttl: Duration) -> Result<Lock, LockError> {
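
A short caller-side sketch of the contract described above (illustrative only; the resource name and TTL are made up):

```rust
use std::time::Duration;

async fn lock_example(rl: &LockManager) {
    match rl.lock(b"my-resource", Duration::from_millis(1000)).await {
        Ok(_lock) => {
            // Lock acquired; `_lock` carries the value and the validity time.
            // ... do the protected work here, then release the lock ...
        }
        Err(LockError::Unavailable) => {
            // Every try failed. Either retry after a short wait at the call
            // site, or raise the try count via `set_retry_policy`.
        }
        Err(_other) => {
            // e.g. LockError::TtlTooLarge
        }
    }
}
```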
@@ -863,7 +878,7 @@ mod tests {
 
         let mut rl = LockManager::new(addresses.clone());
         // Set a high retry count to ensure retries happen
-        rl.set_retry(10, Duration::from_millis(10)); // Retry 10 times with 10 milliseconds delay
+        rl.set_retry_policy(10, Duration::from_millis(10)); // 10 tries with up to 10 ms delay between tries
 
         let key = rl.get_unique_lock_id()?;
