// p2p_chat/sync/retry.rs

//! This module provides a flexible retry mechanism with exponential backoff and jitter.
use anyhow::Result;
use std::time::Duration;
use tokio::time::sleep;
5
/// Defines a policy for retrying failed operations.
///
/// This is a plain-data configuration type: construct it directly, via
/// [`RetryPolicy::new`], or with a preset such as `RetryPolicy::fast_mailbox`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RetryPolicy {
    /// The maximum number of attempts to make.
    pub max_attempts: u32,
    /// The base delay between retries.
    pub base_delay: Duration,
    /// The maximum delay between retries.
    pub max_delay: Duration,
}
15
16impl RetryPolicy {
17    /// Creates a new `RetryPolicy`.
18    ///
19    /// # Arguments
20    ///
21    /// * `max_attempts` - The maximum number of attempts.
22    /// * `base_delay` - The initial delay before the first retry.
23    /// * `max_delay` - The maximum allowed delay between retries.
24    pub fn new(max_attempts: u32, base_delay: Duration, max_delay: Duration) -> Self {
25        Self {
26            max_attempts,
27            base_delay,
28            max_delay,
29        }
30    }
31
32    /// Creates a fast retry policy suitable for responsive mailbox operations.
33    pub fn fast_mailbox() -> Self {
34        Self::new(4, Duration::from_millis(50), Duration::from_millis(500))
35    }
36
37    /// Calculates an exponential backoff delay based on the attempt number.
38    ///
39    /// The delay increases exponentially with each attempt, up to `max_delay`.
40    ///
41    /// # Arguments
42    ///
43    /// * `attempt` - The current attempt number (0-indexed).
44    pub fn exponential_backoff(&self, attempt: u32) -> Duration {
45        let delay = self.base_delay.as_millis() as u64 * 2_u64.pow(attempt);
46        Duration::from_millis(delay.min(self.max_delay.as_millis() as u64))
47    }
48
49    /// Calculates an exponential backoff delay with added jitter.
50    ///
51    /// Jitter helps to prevent "thundering herd" problems when many clients
52    /// retry simultaneously.
53    ///
54    /// # Arguments
55    ///
56    /// * `attempt` - The current attempt number (0-indexed).
57    pub fn exponential_backoff_with_jitter(&self, attempt: u32) -> Duration {
58        let base_delay = self.exponential_backoff(attempt);
59        let jitter_ms = rand::random::<u64>() % (base_delay.as_millis() as u64 / 4 + 1);
60        Duration::from_millis(base_delay.as_millis() as u64 + jitter_ms)
61    }
62
63    /// Retries an asynchronous operation using the defined retry policy.
64    ///
65    /// The operation `op` will be retried `max_attempts` times, with exponential
66    /// backoff and jitter between attempts.
67    ///
68    /// # Arguments
69    ///
70    /// * `op` - A closure that returns a `Future` representing the operation to retry.
71    ///
72    /// # Returns
73    ///
74    /// The `Result` of the operation if successful, or the last error encountered.
75    pub async fn retry_with_jitter<F, T, Fut>(&self, mut op: F) -> Result<T>
76    where
77        F: FnMut() -> Fut,
78        Fut: std::future::Future<Output = Result<T>>,
79    {
80        for attempt in 0..self.max_attempts {
81            match op().await {
82                Ok(result) => return Ok(result),
83                Err(e) => {
84                    if attempt + 1 >= self.max_attempts {
85                        return Err(e);
86                    }
87                    let delay = self.exponential_backoff_with_jitter(attempt);
88                    sleep(delay).await;
89                }
90            }
91        }
92        unreachable!() // This should not be reachable as either Ok is returned or Err is returned after max_attempts
93    }
94}