diff --git a/Cargo.lock b/Cargo.lock index 5722032b8..26fb919af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3086,9 +3086,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" diff --git a/cSpell.json b/cSpell.json index 6d8b68c92..da11cd29a 100644 --- a/cSpell.json +++ b/cSpell.json @@ -36,6 +36,7 @@ "Containerfile", "curr", "Cyberneering", + "dashmap", "datagram", "datetime", "debuginfo", diff --git a/packages/torrent-repository-benchmarks/src/benches/asyn.rs b/packages/torrent-repository-benchmarks/src/benches/asyn.rs index d36de9695..737a99f3c 100644 --- a/packages/torrent-repository-benchmarks/src/benches/asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/asyn.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; -use torrust_tracker::core::torrent::UpdateTorrentAsync; +use torrust_tracker::core::torrent::repository::tokio_sync::RepositoryTokioRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -12,7 +12,8 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -38,8 +39,8 @@ where // Add one torrent ten thousand times in parallel (depending on the set worker threads) pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -88,8 +89,8 @@ where // Add ten thousand torrents in parallel (depending on the set worker threads) pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -133,8 +134,8 @@ where // Async update ten thousand torrents in parallel (depending on the set worker threads) pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where - T: Send + Sync + 'static, - RepositoryTokioRwLock: RepositoryAsync + UpdateTorrentAsync, + T: Default + Send + Sync + 'static, + RepositoryTokioRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync.rs b/packages/torrent-repository-benchmarks/src/benches/sync.rs index 3dee93421..ea694a38c 100644 --- 
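The benchmark signatures in this hunk drop the old `RepositoryAsync`/`RepositorySync` bounds and now only ask for `Default` plus the relevant update trait on the concrete repository wrapper (the generic arguments were lost in this rendering of the diff). A minimal sketch of that bound pattern, using simplified stand-in types and a placeholder `update` body rather than the crate's real API:

```rust
use std::time::{Duration, Instant};

// Stand-ins for the crate's repository/update types; only the bound pattern matters here.
pub trait UpdateTorrentAsync {
    fn update(&self) -> impl std::future::Future<Output = ()> + Send;
}

#[derive(Default)]
pub struct RepositoryTokioRwLock<T> {
    torrents: tokio::sync::RwLock<Vec<T>>,
}

impl<T: Send + Sync> UpdateTorrentAsync for RepositoryTokioRwLock<T> {
    async fn update(&self) {
        // Placeholder for "insert or update a peer and recompute stats".
        self.torrents.write().await.clear();
    }
}

// The harness is generic over the entry type `T`; it only needs the wrapper to be
// constructible with `Default` and to expose the async update.
pub async fn add_one_torrent<T>(samples: usize) -> Duration
where
    T: Default,
    RepositoryTokioRwLock<T>: UpdateTorrentAsync + Default,
{
    let start = Instant::now();
    for _ in 0..samples {
        let repo = RepositoryTokioRwLock::<T>::default();
        repo.update().await;
    }
    start.elapsed()
}
```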
a/packages/torrent-repository-benchmarks/src/benches/sync.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; -use torrust_tracker::core::torrent::UpdateTorrentSync; +use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentSync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -14,7 +14,7 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste #[must_use] pub fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -39,7 +39,7 @@ where pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -85,7 +85,7 @@ where pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -128,7 +128,7 @@ where pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentSync, + RepositoryStdRwLock: UpdateTorrentSync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs index 11ce6ed0c..8efed9856 100644 --- a/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs +++ b/packages/torrent-repository-benchmarks/src/benches/sync_asyn.rs @@ -3,8 +3,8 @@ use std::time::Duration; use clap::Parser; use futures::stream::FuturesUnordered; -use torrust_tracker::core::torrent::repository_sync::{RepositoryStdRwLock, RepositorySync}; -use torrust_tracker::core::torrent::UpdateTorrentAsync; +use torrust_tracker::core::torrent::repository::std_sync::RepositoryStdRwLock; +use torrust_tracker::core::torrent::repository::UpdateTorrentAsync; use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::args::Args; @@ -14,7 +14,7 @@ use crate::benches::utils::{generate_unique_info_hashes, get_average_and_adjuste #[must_use] pub async fn add_one_torrent(samples: usize) -> (Duration, Duration) where - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let mut results: Vec = Vec::with_capacity(samples); @@ -41,7 +41,7 @@ where pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = 
Vec::with_capacity(samples); @@ -91,7 +91,7 @@ where pub async fn add_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); @@ -136,7 +136,7 @@ where pub async fn update_multiple_torrents_in_parallel(runtime: &tokio::runtime::Runtime, samples: usize) -> (Duration, Duration) where T: Send + Sync + 'static, - RepositoryStdRwLock: RepositorySync + UpdateTorrentAsync, + RepositoryStdRwLock: UpdateTorrentAsync + Default, { let args = Args::parse(); let mut results: Vec = Vec::with_capacity(samples); diff --git a/packages/torrent-repository-benchmarks/src/main.rs b/packages/torrent-repository-benchmarks/src/main.rs index 4a293b832..d7291afe2 100644 --- a/packages/torrent-repository-benchmarks/src/main.rs +++ b/packages/torrent-repository-benchmarks/src/main.rs @@ -1,7 +1,7 @@ use clap::Parser; use torrust_torrent_repository_benchmarks::args::Args; use torrust_torrent_repository_benchmarks::benches::{asyn, sync, sync_asyn}; -use torrust_tracker::core::torrent::{Entry, EntryMutexStd, EntryMutexTokio}; +use torrust_tracker::core::torrent::entry::{Entry, MutexStd, MutexTokio}; #[allow(clippy::too_many_lines)] #[allow(clippy::print_literal)] @@ -68,22 +68,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - sync::add_one_torrent::(1_000_000) + sync::add_one_torrent::(1_000_000) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -92,22 +92,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) + rt.block_on(sync_asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(sync_asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -116,22 +116,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - 
rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); println!(); @@ -140,22 +140,22 @@ fn main() { println!( "{}: Avg/AdjAvg: {:?}", "add_one_torrent", - rt.block_on(asyn::add_one_torrent::(1_000_000)) + rt.block_on(asyn::add_one_torrent::(1_000_000)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_one_torrent_in_parallel", - rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_one_torrent_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "add_multiple_torrents_in_parallel", - rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::add_multiple_torrents_in_parallel::(&rt, 10)) ); println!( "{}: Avg/AdjAvg: {:?}", "update_multiple_torrents_in_parallel", - rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) + rt.block_on(asyn::update_multiple_torrents_in_parallel::(&rt, 10)) ); } } diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index b80b11987..b3dcdd48e 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -56,6 +56,8 @@ use self::error::Error; use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::info_hash::InfoHash; +pub type PersistentTorrents = Vec<(InfoHash, u32)>; + struct Builder where T: Database, @@ -125,7 +127,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn load_persistent_torrents(&self) -> Result, Error>; + async fn load_persistent_torrents(&self) -> Result; /// It saves the torrent metrics data into the database. /// diff --git a/src/core/mod.rs b/src/core/mod.rs index 56b30f955..b070f90db 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -102,11 +102,11 @@ //! //! pub struct AnnounceData { //! pub peers: Vec, -//! pub swarm_stats: SwarmStats, +//! pub swarm_stats: SwarmMetadata, //! pub policy: AnnouncePolicy, // the tracker announce policy. //! } //! -//! pub struct SwarmStats { +//! pub struct SwarmMetadata { //! pub completed: u32, // The number of peers that have ever completed downloading //! pub seeders: u32, // The number of active peers that have completed downloading (seeders) //! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) @@ -232,16 +232,11 @@ //! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) //! } //! -//! pub struct SwarmStats { -//! pub completed: u32, // The number of peers that have ever completed downloading -//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) -//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) -//! } //! ``` //! //! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". //! -//! `SwarmStats` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmStats` +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` //! is used for the rest of cases. //! //! 
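The `databases/mod.rs` hunk above introduces a `PersistentTorrents` alias; the `load_persistent_torrents` return type lost its generic arguments in this rendering, but from the old `Result<Vec<(InfoHash, u32)>, Error>` it presumably becomes `Result<PersistentTorrents, Error>`. A self-contained sketch with stand-in `InfoHash`/`Error` types (the free function is illustrative only; the real method lives on the `Database` trait):

```rust
// Stand-ins so the sketch compiles on its own; the real types live in the tracker crate.
#[derive(Clone, Copy)]
pub struct InfoHash(pub [u8; 20]);
pub struct Error;

/// The alias added in `databases/mod.rs`: each persisted torrent is its info-hash
/// plus the number of completed downloads.
pub type PersistentTorrents = Vec<(InfoHash, u32)>;

/// Presumed shape of the updated loader: the old
/// `Result<Vec<(InfoHash, u32)>, Error>` expressed through the alias.
pub async fn load_persistent_torrents() -> Result<PersistentTorrents, Error> {
    Ok(vec![(InfoHash([0u8; 20]), 0)])
}
```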
Refer to [`torrent`] module for more details about these data structures. @@ -439,14 +434,13 @@ pub mod services; pub mod statistics; pub mod torrent; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; use std::time::Duration; use derive_more::Constructor; -use futures::future::join_all; use log::debug; use tokio::sync::mpsc::error::SendError; use torrust_tracker_configuration::{AnnouncePolicy, Configuration}; @@ -455,10 +449,11 @@ use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; -use self::torrent::repository_asyn::{RepositoryAsync, RepositoryTokioRwLock}; -use self::torrent::{Entry, UpdateTorrentAsync}; +use self::torrent::entry::{Entry, ReadInfo, ReadPeers}; +use self::torrent::repository::tokio_sync::RepositoryTokioRwLock; +use self::torrent::repository::{Repository, UpdateTorrentAsync}; use crate::core::databases::Database; -use crate::core::torrent::{SwarmMetadata, SwarmStats}; +use crate::core::torrent::SwarmMetadata; use crate::shared::bit_torrent::info_hash::InfoHash; /// The maximum number of returned peers for a torrent. @@ -515,9 +510,9 @@ pub struct TrackerPolicy { pub struct AnnounceData { /// The list of peers that are downloading the same torrent. /// It excludes the peer that made the request. - pub peers: Vec, + pub peers: Vec>, /// Swarm statistics - pub stats: SwarmStats, + pub stats: SwarmMetadata, pub policy: AnnouncePolicy, } @@ -685,10 +680,8 @@ impl Tracker { /// It returns the data for a `scrape` response. async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - let torrents = self.torrents.get_torrents().await; - - match torrents.get(info_hash) { - Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + match self.torrents.get(info_hash).await { + Some(torrent_entry) => torrent_entry.get_stats(), None => SwarmMetadata::default(), } } @@ -704,47 +697,25 @@ impl Tracker { pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; - let mut torrents = self.torrents.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { - continue; - } - - let torrent_entry = torrent::Entry { - peers: BTreeMap::default(), - completed, - }; - - torrents.insert(info_hash, torrent_entry); - } + self.torrents.import_persistent(&persistent_torrents).await; Ok(()) } - async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { - let read_lock = self.torrents.get_torrents().await; - - match read_lock.get(info_hash) { + async fn get_torrent_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec> { + match self.torrents.get(info_hash).await { None => vec![], - Some(entry) => entry - .get_peers_for_peer(peer, TORRENT_PEERS_LIMIT) - .into_iter() - .copied() - .collect(), + Some(entry) => entry.get_peers_for_peer(peer, Some(TORRENT_PEERS_LIMIT)), } } /// # Context: Tracker /// /// Get all torrent peers for a given torrent - pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec { - let read_lock = self.torrents.get_torrents().await; - - match read_lock.get(info_hash) { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + match self.torrents.get(info_hash).await { None => vec![], - Some(entry) => 
entry.get_peers(TORRENT_PEERS_LIMIT).into_iter().copied().collect(), + Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), } } @@ -753,11 +724,15 @@ impl Tracker { /// needed for a `announce` request response. /// /// # Context: Tracker - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats { + pub async fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> torrent::SwarmMetadata { // code-review: consider splitting the function in two (command and query segregation). // `update_torrent_with_peer` and `get_stats` - let (stats, stats_updated) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let (stats_updated, stats) = self.torrents.update_torrent_with_peer_and_get_stats(info_hash, peer).await; if self.policy.persistent_torrent_completed_stat && stats_updated { let completed = stats.downloaded; @@ -777,71 +752,18 @@ impl Tracker { /// # Panics /// Panics if unable to get the torrent metrics. pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { - let arc_torrents_metrics = Arc::new(tokio::sync::Mutex::new(TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, - torrents: 0, - })); - - let db = self.torrents.get_torrents().await.clone(); - - let futures = db - .values() - .map(|torrent_entry| { - let torrent_entry = torrent_entry.clone(); - let torrents_metrics = arc_torrents_metrics.clone(); - - async move { - tokio::spawn(async move { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrents_metrics.lock().await.seeders += u64::from(seeders); - torrents_metrics.lock().await.completed += u64::from(completed); - torrents_metrics.lock().await.leechers += u64::from(leechers); - torrents_metrics.lock().await.torrents += 1; - }) - .await - .expect("Error torrent_metrics spawn"); - } - }) - .collect::>(); - - join_all(futures).await; - - let torrents_metrics = Arc::try_unwrap(arc_torrents_metrics).expect("Could not unwrap arc_torrents_metrics"); - - torrents_metrics.into_inner() + self.torrents.get_metrics().await } /// Remove inactive peers and (optionally) peerless torrents /// /// # Context: Tracker pub async fn cleanup_torrents(&self) { - let mut torrents_lock = self.torrents.get_torrents_mut().await; - // If we don't need to remove torrents we will use the faster iter if self.policy.remove_peerless_torrents { - let mut cleaned_torrents_map: BTreeMap = BTreeMap::new(); - - for (info_hash, torrent_entry) in &mut *torrents_lock { - torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout); - - if torrent_entry.peers.is_empty() { - continue; - } - - if self.policy.persistent_torrent_completed_stat && torrent_entry.completed == 0 { - continue; - } - - cleaned_torrents_map.insert(*info_hash, torrent_entry.clone()); - } - - *torrents_lock = cleaned_torrents_map; + self.torrents.remove_peerless_torrents(&self.policy).await; } else { - for torrent_entry in (*torrents_lock).values_mut() { - torrent_entry.remove_inactive_peers(self.policy.max_peer_timeout); - } + self.torrents.remove_inactive_peers(self.policy.max_peer_timeout).await; } } @@ -1093,6 +1015,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; @@ -1233,7 +1156,7 @@ mod tests { let peers = tracker.get_torrent_peers(&info_hash).await; - assert_eq!(peers, vec![peer]); + 
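With this hunk the `Tracker` stops reaching into the torrent map directly and delegates everything (`get`, `get_metrics`, `import_persistent`, peer and torrent cleanup) to the repository. A reduced sketch of that delegation shape; the stand-in trait here has fewer methods than the crate's `Repository`, no concrete repository implementation is included, and making the tracker generic is only to keep the example self-contained:

```rust
use std::future::Future;
use std::sync::Arc;

// Simplified stand-ins; the real trait is generic over the entry type and lives in
// `core::torrent::repository`.
pub struct InfoHash(pub [u8; 20]);
#[derive(Clone, Default)]
pub struct Entry {
    pub completed: u32,
}
#[derive(Debug, Default)]
pub struct TorrentsMetrics {
    pub seeders: u64,
    pub completed: u64,
    pub leechers: u64,
    pub torrents: u64,
}

pub trait Repository: Default {
    fn get(&self, key: &InfoHash) -> impl Future<Output = Option<Arc<Entry>>> + Send;
    fn get_metrics(&self) -> impl Future<Output = TorrentsMetrics> + Send;
    fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl Future<Output = ()> + Send;
}

pub struct Tracker<R: Repository> {
    pub torrents: R,
}

impl<R: Repository> Tracker<R> {
    // The tracker only forwards; the locking strategy (std vs tokio, coarse vs
    // per-entry) is now an implementation detail of the chosen repository.
    pub async fn get_torrents_metrics(&self) -> TorrentsMetrics {
        self.torrents.get_metrics().await
    }

    pub async fn cleanup_torrents(&self, max_peer_timeout: u32) {
        self.torrents.remove_inactive_peers(max_peer_timeout).await;
    }
}
```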
assert_eq!(peers, vec![Arc::new(peer)]); } #[tokio::test] @@ -1275,6 +1198,8 @@ mod tests { mod handling_an_announce_request { + use std::sync::Arc; + use crate::core::tests::the_tracker::{ peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, }; @@ -1400,7 +1325,7 @@ mod tests { let mut peer = sample_peer_2(); let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.peers, vec![previously_announced_peer]); + assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } mod it_should_update_the_swarm_stats_for_the_torrent { @@ -1755,7 +1680,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; - use crate::core::torrent::repository_asyn::RepositoryAsync; + use crate::core::torrent::repository::Repository; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { @@ -1774,14 +1699,15 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - tracker.torrents.get_torrents_mut().await.remove(&info_hash); + tracker.torrents.remove(&info_hash).await; tracker.load_torrents_from_database().await.unwrap(); - let torrents = tracker.torrents.get_torrents().await; - assert!(torrents.contains_key(&info_hash)); - - let torrent_entry = torrents.get(&info_hash).unwrap(); + let torrent_entry = tracker + .torrents + .get(&info_hash) + .await + .expect("it should be able to get entry"); // It persists the number of completed peers. assert_eq!(torrent_entry.completed, 1); diff --git a/src/core/peer.rs b/src/core/peer.rs index 16aa1fe56..eb2b7b759 100644 --- a/src/core/peer.rs +++ b/src/core/peer.rs @@ -22,6 +22,7 @@ //! 
``` use std::net::{IpAddr, SocketAddr}; use std::panic::Location; +use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde::Serialize; @@ -85,6 +86,58 @@ pub struct Peer { pub event: AnnounceEvent, } +pub trait ReadInfo { + fn is_seeder(&self) -> bool; + fn get_event(&self) -> AnnounceEvent; + fn get_id(&self) -> Id; + fn get_updated(&self) -> DurationSinceUnixEpoch; + fn get_address(&self) -> SocketAddr; +} + +impl ReadInfo for Peer { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + +impl ReadInfo for Arc { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + impl Peer { #[must_use] pub fn is_seeder(&self) -> bool { diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index eca6cbf3b..b265066f0 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -9,7 +9,8 @@ use std::sync::Arc; use serde::Deserialize; use crate::core::peer::Peer; -use crate::core::torrent::repository_asyn::RepositoryAsync; +use crate::core::torrent::entry::{self, ReadInfo}; +use crate::core::torrent::repository::Repository; use crate::core::Tracker; use crate::shared::bit_torrent::info_hash::InfoHash; @@ -94,41 +95,37 @@ impl Default for Pagination { /// It returns all the information the tracker has about one torrent in a [Info] struct. pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let db = tracker.torrents.get_torrents().await; - - let torrent_entry_option = db.get(info_hash); + let torrent_entry_option = tracker.torrents.get(info_hash).await; let torrent_entry = torrent_entry_option?; - let (seeders, completed, leechers) = torrent_entry.get_stats(); + let stats = entry::ReadInfo::get_stats(&torrent_entry); - let peers = torrent_entry.get_all_peers(); + let peers = entry::ReadPeers::get_peers(&torrent_entry, None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); Some(Info { info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), peers, }) } /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
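Peer lists are now returned as `Vec<Arc<Peer>>` and a single `Option<usize>` limit replaces the old unlimited/limited method pairs, so handing peers to announce and scrape handlers clones pointers instead of copying whole `Peer` structs. A self-contained sketch of that filter-and-limit pattern with a deliberately reduced `Peer` (only the address field; the map key type and helper name are made up for the example):

```rust
use std::collections::BTreeMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;

// Reduced peer; the real one also carries the id, transfer counters and announce event.
#[derive(Clone, Copy, Debug)]
pub struct Peer {
    pub peer_addr: SocketAddr,
}

// The swarm stores shared pointers, so returning a peer list clones the `Arc`s
// (a refcount bump) instead of copying every `Peer` value.
pub fn peers_excluding(
    swarm: &BTreeMap<u32, Arc<Peer>>,
    client: &Peer,
    limit: Option<usize>,
) -> Vec<Arc<Peer>> {
    let filtered = swarm
        .values()
        .filter(|p| p.peer_addr != client.peer_addr)
        .cloned();
    match limit {
        Some(n) => filtered.take(n).collect(),
        None => filtered.collect(),
    }
}

fn main() {
    let addr = |port| SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port);
    let mut swarm = BTreeMap::new();
    swarm.insert(1, Arc::new(Peer { peer_addr: addr(6881) }));
    swarm.insert(2, Arc::new(Peer { peer_addr: addr(6882) }));
    let client = Peer { peer_addr: addr(6881) };
    // The requesting client is filtered out; only the other peer is returned.
    assert_eq!(peers_excluding(&swarm, &client, Some(74)).len(), 1);
}
```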
-pub async fn get_torrents_page(tracker: Arc, pagination: &Pagination) -> Vec { - let db = tracker.torrents.get_torrents().await; - +pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { let mut basic_infos: Vec = vec![]; - for (info_hash, torrent_entry) in db.iter().skip(pagination.offset as usize).take(pagination.limit as usize) { - let (seeders, completed, leechers) = torrent_entry.get_stats(); + for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination).await { + let stats = entry::ReadInfo::get_stats(&torrent_entry); basic_infos.push(BasicInfo { - info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + info_hash, + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), }); } @@ -137,19 +134,15 @@ pub async fn get_torrents_page(tracker: Arc, pagination: &Pagination) - /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Vec { - let db = tracker.torrents.get_torrents().await; - let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(entry) = db.get(info_hash) { - let (seeders, completed, leechers) = entry.get_stats(); - + if let Some(stats) = tracker.torrents.get(info_hash).await.map(|t| t.get_stats()) { basic_infos.push(BasicInfo { info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), }); } } @@ -254,7 +247,7 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } @@ -270,7 +263,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) .await; - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, @@ -302,7 +295,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } @@ -326,7 +319,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents_page(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -356,7 +349,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) .await; - let torrents = get_torrents_page(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, diff --git a/src/core/torrent/entry.rs b/src/core/torrent/entry.rs new file mode 100644 index 000000000..619cce9b3 --- /dev/null +++ b/src/core/torrent/entry.rs @@ -0,0 +1,241 @@ +use std::fmt::Debug; +use std::sync::Arc; 
+use std::time::Duration; + +use aquatic_udp_protocol::AnnounceEvent; +use serde::{Deserialize, Serialize}; + +use super::SwarmMetadata; +use crate::core::peer::{self, ReadInfo as _}; +use crate::core::TrackerPolicy; +use crate::shared::clock::{Current, TimeNow}; + +/// A data structure containing all the information about a torrent in the tracker. +/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. +#[derive(Serialize, Deserialize, Clone, Debug, Default)] +pub struct Entry { + /// The swarm: a network of peers that are all trying to download the torrent associated to this entry + #[serde(skip)] + pub peers: std::collections::BTreeMap>, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub completed: u32, +} + +pub type MutexStd = Arc>; +pub type MutexTokio = Arc>; + +pub trait ReadInfo { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_stats(&self) -> SwarmMetadata; + + /// Returns True if Still a Valid Entry according to the Tracker Policy + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool; +} + +pub trait ReadPeers { + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> Vec>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec>; +} + +pub trait ReadAsync { + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_peer( + &self, + client: &peer::Peer, + limit: Option, + ) -> impl std::future::Future>> + Send; +} + +pub trait Update { + /// It updates a peer and returns true if the number of complete downloads have increased. + /// + /// The number of peers that have complete downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool; + + // It preforms a combined operation of `insert_or_update_peer` and `get_stats`. 
+ fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata); + + /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds + fn remove_inactive_peers(&mut self, max_peer_timeout: u32); +} + +pub trait UpdateSync { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool; + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata); + fn remove_inactive_peers(&self, max_peer_timeout: u32); +} + +pub trait UpdateAsync { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> impl std::future::Future + Send; + + fn insert_or_update_peer_and_get_stats( + &self, + peer: &peer::Peer, + ) -> impl std::future::Future + std::marker::Send; + + fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; +} + +impl ReadInfo for Entry { + #[allow(clippy::cast_possible_truncation)] + fn get_stats(&self) -> SwarmMetadata { + let complete: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; + let incomplete: u32 = self.peers.len() as u32 - complete; + + SwarmMetadata { + downloaded: self.completed, + complete, + incomplete, + } + } + + fn is_not_zombie(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.completed > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.peers.is_empty() { + return false; + } + + true + } +} + +impl ReadPeers for Entry { + fn get_peers(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer.get_address() != client.get_address()) + // Limit the number of peers on the result + .take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer.get_address() != client.get_address()) + .cloned() + .collect(), + } + } +} + +impl ReadPeers for MutexStd { + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_peer(client, limit) + } +} + +impl ReadAsync for MutexTokio { + async fn get_peers(&self, limit: Option) -> Vec> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_peer(&self, client: &peer::Peer, limit: Option) -> Vec> { + self.lock().await.get_peers_for_peer(client, limit) + } +} + +impl Update for Entry { + fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { + let mut did_torrent_stats_change: bool = false; + + match peer.get_event() { + AnnounceEvent::Stopped => { + drop(self.peers.remove(&peer.get_id())); + } + AnnounceEvent::Completed => { + let peer_old = self.peers.insert(peer.get_id(), Arc::new(*peer)); + // Don't count if peer was not previously known and not already completed. 
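The entry API is defined once on the bare `Entry` (`Update`) and then mirrored on the `Arc<Mutex<_>>` wrappers (`UpdateSync`/`UpdateAsync`), so call sites do not care which flavor they hold, and the combined insert-and-get-stats runs under a single lock acquisition. A minimal illustration of that mirroring pattern with generic stand-in names (not the crate's traits):

```rust
use std::sync::{Arc, Mutex};

// The mutating API is defined on the bare value...
pub trait Counter {
    fn bump(&mut self) -> u32;
}

// ...and mirrored on the shared wrapper, which takes the lock internally.
pub trait CounterShared {
    fn bump(&self) -> u32;
}

#[derive(Default)]
pub struct Completed(u32);

impl Counter for Completed {
    fn bump(&mut self) -> u32 {
        self.0 += 1;
        self.0
    }
}

impl CounterShared for Arc<Mutex<Completed>> {
    fn bump(&self) -> u32 {
        self.lock().expect("it should lock the entry").bump()
    }
}

fn main() {
    let shared: Arc<Mutex<Completed>> = Arc::default();
    assert_eq!(shared.bump(), 1);
    assert_eq!(shared.bump(), 2);
}
```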
+ if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.completed += 1; + did_torrent_stats_change = true; + } + } + _ => { + drop(self.peers.insert(peer.get_id(), Arc::new(*peer))); + } + } + + did_torrent_stats_change + } + + fn insert_or_update_peer_and_get_stats(&mut self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let changed = self.insert_or_update_peer(peer); + let stats = self.get_stats(); + (changed, stats) + } + + fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { + let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); + self.peers.retain(|_, peer| peer.get_updated() > current_cutoff); + } +} + +impl UpdateSync for MutexStd { + fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the entry").insert_or_update_peer(peer) + } + + fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock() + .expect("it should lock the entry") + .insert_or_update_peer_and_get_stats(peer) + } + + fn remove_inactive_peers(&self, max_peer_timeout: u32) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(max_peer_timeout); + } +} + +impl UpdateAsync for MutexTokio { + async fn insert_or_update_peer(&self, peer: &peer::Peer) -> bool { + self.lock().await.insert_or_update_peer(peer) + } + + async fn insert_or_update_peer_and_get_stats(&self, peer: &peer::Peer) -> (bool, SwarmMetadata) { + self.lock().await.insert_or_update_peer_and_get_stats(peer) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + self.lock().await.remove_inactive_peers(max_peer_timeout); + } +} diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 49c1f61f8..608765cf8 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -27,49 +27,11 @@ //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! -//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmStats`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). -pub mod repository_asyn; -pub mod repository_sync; +//! > **NOTICE**: that both [`SwarmMetadata`] and [`SwarmMetadata`] contain the same information. [`SwarmMetadata`] is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). +pub mod entry; +pub mod repository; -use std::sync::Arc; -use std::time::Duration; - -use aquatic_udp_protocol::AnnounceEvent; use derive_more::Constructor; -use serde::{Deserialize, Serialize}; - -use super::peer::{self, Peer}; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::shared::clock::{Current, TimeNow}; - -pub trait UpdateTorrentSync { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool); -} - -pub trait UpdateTorrentAsync { - fn update_torrent_with_peer_and_get_stats( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - ) -> impl std::future::Future + Send; -} - -/// A data structure containing all the information about a torrent in the tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. 
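The new `remove_inactive_peers` computes a cutoff from the crate's clock abstraction (`Current::sub`) and retains only peers updated after it. The same retention rule written against `std::time`, with a hypothetical `peer id -> last announce` map standing in for the real peer structs:

```rust
use std::collections::BTreeMap;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Same cutoff idea as `remove_inactive_peers`, but using the standard clock instead of
// the crate's `Current`/`DurationSinceUnixEpoch` types. The value stands in for the
// peer's `updated` field (time of last announce since the Unix epoch).
pub fn remove_inactive(peers: &mut BTreeMap<u32, Duration>, max_peer_timeout: u32) {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    // Anything older than `now - max_peer_timeout` is dropped; saturating subtraction
    // avoids underflow when the timeout exceeds the clock value.
    let cutoff = now.saturating_sub(Duration::from_secs(u64::from(max_peer_timeout)));
    peers.retain(|_, last_announce| *last_announce > cutoff);
}

fn main() {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default();
    let mut peers = BTreeMap::new();
    peers.insert(1, now); // just announced
    peers.insert(2, now.saturating_sub(Duration::from_secs(600))); // stale
    remove_inactive(&mut peers, 120);
    assert_eq!(peers.len(), 1);
}
```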
-/// The tracker keeps one entry like this for every torrent. -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct Entry { - /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] - pub peers: std::collections::BTreeMap, - /// The number of peers that have ever completed downloading the torrent associated to this entry - pub completed: u32, -} - -pub type EntryMutexTokio = Arc>; -pub type EntryMutexStd = Arc>; /// Swarm statistics for one torrent. /// Swarm metadata dictionary in the scrape response. @@ -92,122 +54,6 @@ impl SwarmMetadata { } } -/// [`SwarmStats`] has the same form as [`SwarmMetadata`] -pub type SwarmStats = SwarmMetadata; - -impl Entry { - #[must_use] - pub fn new() -> Entry { - Entry { - peers: std::collections::BTreeMap::new(), - completed: 0, - } - } - - /// It updates a peer and returns true if the number of complete downloads have increased. - /// - /// The number of peers that have complete downloading is synchronously updated when peers are updated. - /// That's the total torrent downloads counter. - pub fn insert_or_update_peer(&mut self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; - - match peer.event { - AnnounceEvent::Stopped => { - let _: Option = self.peers.remove(&peer.peer_id); - } - AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.peer_id, *peer); - // Don't count if peer was not previously known and not already completed. - if peer_old.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.completed += 1; - did_torrent_stats_change = true; - } - } - _ => { - let _: Option = self.peers.insert(peer.peer_id, *peer); - } - } - - did_torrent_stats_change - } - - /// Get all swarm peers. - #[must_use] - pub fn get_all_peers(&self) -> Vec<&peer::Peer> { - self.peers.values().collect() - } - - /// Get swarm peers, limiting the result. - #[must_use] - pub fn get_peers(&self, limit: usize) -> Vec<&peer::Peer> { - self.peers.values().take(limit).collect() - } - - /// It returns the list of peers for a given peer client. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - #[must_use] - pub fn get_all_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - .collect() - } - - /// It returns the list of peers for a given peer client, limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. 
- #[must_use] - pub fn get_peers_for_peer(&self, client: &Peer, limit: usize) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - // Limit the number of peers on the result - .take(limit) - .collect() - } - - /// It returns the swarm metadata (statistics) as a tuple: - /// - /// `(seeders, completed, leechers)` - #[allow(clippy::cast_possible_truncation)] - #[must_use] - pub fn get_stats(&self) -> (u32, u32, u32) { - let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; - let leechers: u32 = self.peers.len() as u32 - seeders; - (seeders, self.completed, leechers) - } - - /// It returns the swarm metadata (statistics) as an struct - #[must_use] - pub fn get_swarm_metadata(&self) -> SwarmMetadata { - // code-review: consider using always this function instead of `get_stats`. - let (seeders, completed, leechers) = self.get_stats(); - SwarmMetadata { - complete: seeders, - downloaded: completed, - incomplete: leechers, - } - } - - /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds - pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); - self.peers.retain(|_, peer| peer.updated > current_cutoff); - } -} - -impl Default for Entry { - fn default() -> Self { - Self::new() - } -} - #[cfg(test)] mod tests { @@ -215,11 +61,12 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::Sub; + use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::core::torrent::Entry; + use crate::core::torrent::entry::{self, ReadInfo, ReadPeers, Update}; use crate::core::{peer, TORRENT_PEERS_LIMIT}; use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; @@ -291,59 +138,59 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = Entry::new(); + let torrent_entry = entry::Entry::default(); - assert_eq!(torrent_entry.get_all_peers().len(), 0); + assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); - assert_eq!(torrent_entry.get_all_peers().len(), 1); + assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); + assert_eq!(torrent_entry.get_peers(None).len(), 1); } #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer - assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); + assert_eq!(torrent_entry.get_peers(None), vec![Arc::new(torrent_peer)]); } #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = 
AnnounceEvent::Completed; // Update the peer torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); + assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); } #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer torrent_peer.event = AnnounceEvent::Stopped; // Update the peer torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer in the torrent entry - assert_eq!(torrent_entry.get_all_peers().len(), 0); + assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -357,7 +204,7 @@ mod tests { #[test] fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -369,20 +216,20 @@ mod tests { #[test] fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add peer // Get peers excluding the one we have just added - let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer); + let peers = torrent_entry.get_peers_for_peer(&torrent_peer, None); assert_eq!(peers.len(), 0); } #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); @@ -399,7 +246,7 @@ mod tests { torrent_entry.insert_or_update_peer(&torrent_peer_2); // Get peers for peer 1 - let peers = torrent_entry.get_all_peers_for_peer(&torrent_peer_1); + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1, None); // The peer 2 using the same IP but different port should be included assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); @@ -416,7 +263,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { @@ -426,35 +273,35 @@ mod tests { torrent_entry.insert_or_update_peer(&torrent_peer); } - let peers = torrent_entry.get_peers(TORRENT_PEERS_LIMIT); + let peers = torrent_entry.get_peers(Some(TORRENT_PEERS_LIMIT)); assert_eq!(peers.len(), 74); } #[test] fn 
torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_seeder = a_torrent_seeder(); torrent_entry.insert_or_update_peer(&torrent_seeder); // Add seeder - assert_eq!(torrent_entry.get_stats().0, 1); + assert_eq!(torrent_entry.get_stats().complete, 1); } #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_leecher = a_torrent_leecher(); torrent_entry.insert_or_update_peer(&torrent_leecher); // Add leecher - assert_eq!(torrent_entry.get_stats().2, 1); + assert_eq!(torrent_entry.get_stats().incomplete, 1); } #[test] fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.insert_or_update_peer(&torrent_peer); // Add the peer @@ -462,28 +309,28 @@ mod tests { torrent_peer.event = AnnounceEvent::Completed; torrent_entry.insert_or_update_peer(&torrent_peer); // Update the peer - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().complete; assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); } #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. // It's the first event announced from this peer. 
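The tests in this hunk switch from the old tuple returned by `get_stats()` to `SwarmMetadata` fields; per the removed `get_swarm_metadata`, the old `(seeders, completed, leechers)` positions map to `complete`, `downloaded` and `incomplete` respectively. A tiny sketch of that mapping (`from_legacy_tuple` is illustrative only, not crate API):

```rust
// BEP 48 names, as used by the scrape response and now by `get_stats()`.
#[derive(Debug, Default, PartialEq, Eq)]
pub struct SwarmMetadata {
    pub complete: u32,   // active seeders
    pub downloaded: u32, // peers that ever completed the download
    pub incomplete: u32, // active leechers
}

// How the old tuple positions line up with the new field names.
pub fn from_legacy_tuple((seeders, completed, leechers): (u32, u32, u32)) -> SwarmMetadata {
    SwarmMetadata {
        complete: seeders,
        downloaded: completed,
        incomplete: leechers,
    }
}

fn main() {
    assert_eq!(
        from_legacy_tuple((1, 2, 3)),
        SwarmMetadata { complete: 1, downloaded: 2, incomplete: 3 }
    );
}
```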
torrent_entry.insert_or_update_peer(&torrent_peer_announcing_complete_event); // Add the peer - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().downloaded; assert_eq!(number_of_peers_with_completed_torrent, 0); } #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = Entry::new(); + let mut torrent_entry = entry::Entry::default(); let timeout = 120u32; diff --git a/src/core/torrent/repository/mod.rs b/src/core/torrent/repository/mod.rs new file mode 100644 index 000000000..3af33aebe --- /dev/null +++ b/src/core/torrent/repository/mod.rs @@ -0,0 +1,30 @@ +use super::SwarmMetadata; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub mod std_sync; +pub mod tokio_sync; + +pub trait Repository: Default { + fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; + fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn remove_inactive_peers(&self, max_peer_timeout: u32) -> impl std::future::Future + Send; + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; +} + +pub trait UpdateTorrentSync { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata); +} + +pub trait UpdateTorrentAsync { + fn update_torrent_with_peer_and_get_stats( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + ) -> impl std::future::Future + Send; +} diff --git a/src/core/torrent/repository/std_sync.rs b/src/core/torrent/repository/std_sync.rs new file mode 100644 index 000000000..ba38db6ed --- /dev/null +++ b/src/core/torrent/repository/std_sync.rs @@ -0,0 +1,365 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::executor::block_on; +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync, UpdateTorrentSync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata}; +use crate::core::{peer, TorrentsMetrics}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +#[derive(Default)] +pub struct RepositoryStdRwLock { + torrents: std::sync::RwLock>, +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentAsync for RepositoryStdRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry = if let 
Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + torrent_entry.insert_or_update_peer_and_get_stats(peer).await + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexTokio::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents(); + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) + }); + + block_on(join_all(futures)); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl UpdateTorrentSync for RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); + + let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { + existing_torrent_entry + } else { + let mut torrents_lock = self.get_torrents_mut(); + let entry = torrents_lock + .entry(*info_hash) + .or_insert(Arc::new(std::sync::Mutex::new(Entry::default()))); + entry.clone() + }; + + 
torrent_entry.insert_or_update_peer_and_get_stats(peer) + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().expect("it should lock the entry").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents(); + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + }) + }); + + block_on(join_all(futures)); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} + +impl RepositoryStdRwLock { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it should get the write lock") + } +} + +impl UpdateTorrentSync for RepositoryStdRwLock { + fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut torrents = self.torrents.write().unwrap(); + + let torrent_entry = match torrents.entry(*info_hash) { + std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::default()), + std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + torrent_entry.insert_or_update_peer_and_get_stats(peer) + } +} +impl Repository for RepositoryStdRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = 
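// A note on the `block_on(join_all(..))` and `blocking_lock*()` calls in the
// std-RwLock flavours above: tokio documents that the blocking lock methods on
// its Mutex panic when invoked from inside an async execution context, and
// `futures::executor::block_on` parks the current thread, so those paths are only
// safe when driven from outside the runtime (a plain thread or `spawn_blocking`).
// A minimal illustration of the two forms, with a toy counter:
use std::sync::Arc;
use tokio::sync::Mutex;

fn add_from_blocking_context(counter: &Arc<Mutex<u64>>) {
    // Fine on a plain thread or inside `tokio::task::spawn_blocking`;
    // panics if called from within an async task.
    *counter.blocking_lock() += 1;
}

async fn add_from_async_context(counter: &Arc<Mutex<u64>>) {
    // The async counterpart, as used by the tokio-RwLock flavour further below.
    *counter.lock().await += 1;
}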
self.get_torrents(); + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + block_on(join_all(futures)); + + *metrics.blocking_lock_owned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = Entry { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut(); + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &crate::core::TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository/tokio_sync.rs b/src/core/torrent/repository/tokio_sync.rs new file mode 100644 index 000000000..83edf1188 --- /dev/null +++ b/src/core/torrent/repository/tokio_sync.rs @@ -0,0 +1,378 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use futures::future::join_all; + +use super::{Repository, UpdateTorrentAsync}; +use crate::core::databases::PersistentTorrents; +use crate::core::services::torrent::Pagination; +use crate::core::torrent::entry::{Entry, ReadInfo, Update, UpdateAsync, UpdateSync}; +use crate::core::torrent::{entry, SwarmMetadata}; +use crate::core::{peer, TorrentsMetrics, TrackerPolicy}; +use crate::shared::bit_torrent::info_hash::InfoHash; + +#[derive(Default)] +pub struct RepositoryTokioRwLock { + torrents: tokio::sync::RwLock>, +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>( + &'a self, + ) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_torrent; + { + let db = self.torrents.read().await; + maybe_torrent = db.get(info_hash).cloned(); + } + + let torrent = if let Some(torrent) = maybe_torrent { + torrent + } else { + let entry = entry::MutexTokio::default(); + let mut db = self.torrents.write().await; + db.insert(*info_hash, entry.clone()); + entry + }; + + 
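// A note on the `drop(db.values_mut().map(|e| e.remove_inactive_peers(..)))` line
// above: `Iterator::map` is lazy, so dropping the iterator without consuming it
// never runs the closure. If the intent is to visit every entry, the eager form is
// a plain loop; a sketch with a toy entry type standing in for the tracker's:
use std::collections::BTreeMap;

#[derive(Default)]
struct ToyEntry {
    peers: Vec<u32>, // seconds since each peer was last seen, for the sketch
}

impl ToyEntry {
    fn remove_inactive_peers(&mut self, max_peer_timeout: u32) {
        self.peers.retain(|last_seen| *last_seen <= max_peer_timeout);
    }
}

fn remove_inactive_peers(db: &mut BTreeMap<u64, ToyEntry>, max_peer_timeout: u32) {
    for entry in db.values_mut() {
        entry.remove_inactive_peers(max_peer_timeout);
    }
}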
torrent.insert_or_update_peer_and_get_stats(peer).await + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().await.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexTokio::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents().await; + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { entry.lock().await.remove_inactive_peers(max_peer_timeout) }) + }); + + join_all(futures).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.blocking_lock().is_not_zombie(policy)); + } +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let maybe_torrent; + { + let db = self.torrents.read().await; + maybe_torrent = db.get(info_hash).cloned(); + } + + let torrent = if let Some(torrent) = maybe_torrent { + torrent + } else { + let entry = entry::MutexStd::default(); + let mut db = self.torrents.write().await; + db.insert(*info_hash, entry.clone()); + entry + }; + + torrent.insert_or_update_peer_and_get_stats(peer) + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, entry::MutexStd)> { + let db = 
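// Illustrative alternative shape for the `get_metrics` aggregation above (toy
// types, not the crate's code): instead of having every spawned task bump a shared
// `Arc<Mutex<..>>` field by field, each task can return its stats and the awaiting
// side folds them, so no shared lock is needed at all.
use futures::future::join_all;

#[derive(Clone, Copy)]
struct ToyStats {
    complete: u32,
    downloaded: u32,
    incomplete: u32,
}

#[derive(Default, Debug)]
struct ToyMetrics {
    seeders: u64,
    completed: u64,
    leechers: u64,
    torrents: u64,
}

async fn aggregate(all_stats: Vec<ToyStats>) -> ToyMetrics {
    // One task per entry, mirroring the fan-out above, but returning the stats.
    let handles = all_stats.into_iter().map(|stats| tokio::spawn(async move { stats }));

    let mut metrics = ToyMetrics::default();
    for joined in join_all(handles).await {
        let stats = joined.expect("the task should not panic");
        metrics.seeders += u64::from(stats.complete);
        metrics.completed += u64::from(stats.downloaded);
        metrics.leechers += u64::from(stats.incomplete);
        metrics.torrents += 1;
    }
    metrics
}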
self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.lock().expect("it should lock the entry").get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = entry::MutexStd::new( + Entry { + peers: BTreeMap::default(), + completed: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let db = self.get_torrents().await; + + let futures = db.values().map(|e| { + let entry = e.clone(); + tokio::spawn(async move { + entry + .lock() + .expect("it should get lock for entry") + .remove_inactive_peers(max_peer_timeout); + }) + }); + + join_all(futures).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.lock().expect("it should lock entry").is_not_zombie(policy)); + } +} + +impl RepositoryTokioRwLock { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl UpdateTorrentAsync for RepositoryTokioRwLock { + async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (bool, SwarmMetadata) { + let mut db = self.torrents.write().await; + + let torrent = db.entry(*info_hash).or_insert(Entry::default()); + + torrent.insert_or_update_peer_and_get_stats(peer) + } +} + +impl Repository for RepositoryTokioRwLock { + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Entry)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let db = self.get_torrents().await; + let metrics: Arc> = Arc::default(); + + let futures = db.values().map(|e| { + let metrics = metrics.clone(); + let 
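// Illustrative sketch (toy types) of the coarse-grained variant used by the
// plain-`Entry` flavour above: the whole map is held under the write lock for the
// read-modify-write, which is the simplest form but serialises all updates,
// whereas the mutex-per-entry flavours earlier only write-lock the map to insert
// and then update under the entry's own lock.
use std::collections::BTreeMap;

#[derive(Default)]
struct ToyCoarseRepo {
    torrents: tokio::sync::RwLock<BTreeMap<u64, u32>>,
}

impl ToyCoarseRepo {
    async fn bump(&self, key: u64) -> u32 {
        // Exclusive lock over the whole map for the duration of the update.
        let mut db = self.torrents.write().await;
        let counter = db.entry(key).or_insert(0);
        *counter += 1;
        *counter
    }
}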
entry = e.clone(); + + tokio::spawn(async move { + let stats = entry.get_stats(); + metrics.lock().await.seeders += u64::from(stats.complete); + metrics.lock().await.completed += u64::from(stats.downloaded); + metrics.lock().await.leechers += u64::from(stats.incomplete); + metrics.lock().await.torrents += 1; + }) + }); + + join_all(futures).await; + + *metrics.lock_owned().await + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = Entry { + peers: BTreeMap::default(), + completed: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, max_peer_timeout: u32) { + let mut db = self.get_torrents_mut().await; + + drop(db.values_mut().map(|e| e.remove_inactive_peers(max_peer_timeout))); + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.is_not_zombie(policy)); + } +} diff --git a/src/core/torrent/repository_asyn.rs b/src/core/torrent/repository_asyn.rs deleted file mode 100644 index ad10f85b4..000000000 --- a/src/core/torrent/repository_asyn.rs +++ /dev/null @@ -1,187 +0,0 @@ -use std::sync::Arc; - -use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync}; -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait RepositoryAsync: Default { - fn get_torrents<'a>( - &'a self, - ) -> impl std::future::Future>> + Send - where - std::collections::BTreeMap: 'a; - - fn get_torrents_mut<'a>( - &'a self, - ) -> impl std::future::Future>> + Send - where - std::collections::BTreeMap: 'a; -} - -pub struct RepositoryTokioRwLock { - torrents: tokio::sync::RwLock>, -} -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: 
tokio::sync::RwLock::default(), - } - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().await.get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut().await; - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: tokio::sync::RwLock::default(), - } - } -} - -impl UpdateTorrentAsync for RepositoryTokioRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let (stats, stats_updated) = { - let mut torrents_lock = self.torrents.write().await; - let torrent_entry = torrents_lock.entry(*info_hash).or_insert(Entry::new()); - let stats_updated = torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} - -impl RepositoryAsync for RepositoryTokioRwLock { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>(&'a self) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl Default for RepositoryTokioRwLock { - fn default() -> Self { - Self { - torrents: tokio::sync::RwLock::default(), - } - } -} diff --git a/src/core/torrent/repository_sync.rs b/src/core/torrent/repository_sync.rs deleted file mode 100644 index 3b01eb8be..000000000 --- a/src/core/torrent/repository_sync.rs +++ /dev/null @@ -1,177 +0,0 @@ -use std::sync::{Arc, RwLock}; - -use super::{EntryMutexStd, EntryMutexTokio, UpdateTorrentAsync, UpdateTorrentSync}; -use crate::core::peer; -use crate::core::torrent::{Entry, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; - -pub trait RepositorySync: Default { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a; - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a; -} - -pub struct RepositoryStdRwLock { - torrents: 
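// A note on the return-type change visible in the deleted code above: the old
// implementations returned `(SwarmStats, bool)`, assembled from the positional
// tuple of `get_stats()`, while the new traits return `(bool, SwarmMetadata)` with
// named fields. A hypothetical call site (toy types mirroring the field names used
// in this diff, not the crate's actual handlers) reads the result like this:
struct ToySwarmMetadata {
    downloaded: u32,
    complete: u32,
    incomplete: u32,
}

fn handle_update_result(stats_updated: bool, stats: ToySwarmMetadata) {
    // Old call sites destructured `(stats, stats_updated)` and indexed `stats.0/.1/.2`;
    // new ones receive the flag first and read the metadata by field name.
    if stats_updated {
        println!(
            "seeders: {}, completed: {}, leechers: {}",
            stats.complete, stats.downloaded, stats.incomplete
        );
    }
}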
std::sync::RwLock>, -} - -impl UpdateTorrentAsync for RepositoryStdRwLock { - async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(tokio::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().await; - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let maybe_existing_torrent_entry = self.get_torrents().get(info_hash).cloned(); - - let torrent_entry: Arc> = if let Some(existing_torrent_entry) = maybe_existing_torrent_entry { - existing_torrent_entry - } else { - let mut torrents_lock = self.get_torrents_mut(); - let entry = torrents_lock - .entry(*info_hash) - .or_insert(Arc::new(std::sync::Mutex::new(Entry::new()))); - entry.clone() - }; - - let (stats, stats_updated) = { - let mut torrent_entry_lock = torrent_entry.lock().unwrap(); - let stats_updated = torrent_entry_lock.insert_or_update_peer(peer); - let stats = torrent_entry_lock.get_stats(); - - (stats, stats_updated) - }; - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} - -impl UpdateTorrentSync for RepositoryStdRwLock { - fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> (SwarmStats, bool) { - let mut torrents = self.torrents.write().unwrap(); - - let torrent_entry = match torrents.entry(*info_hash) { - std::collections::btree_map::Entry::Vacant(vacant) => vacant.insert(Entry::new()), - std::collections::btree_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = 
torrent_entry.insert_or_update_peer(peer); - let stats = torrent_entry.get_stats(); - - ( - SwarmStats { - downloaded: stats.1, - complete: stats.0, - incomplete: stats.2, - }, - stats_updated, - ) - } -} -impl RepositorySync for RepositoryStdRwLock { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Default for RepositoryStdRwLock { - fn default() -> Self { - Self { - torrents: RwLock::default(), - } - } -} diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index dcb92dec3..999580da7 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -82,7 +82,7 @@ pub async fn get_torrents_handler(State(tracker): State>, paginatio torrent_list_response( &get_torrents_page( tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), ) .await, ) diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index b1b474ea9..619632ae4 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -79,7 +79,7 @@ impl From for Normal { incomplete: data.stats.incomplete.into(), interval: data.policy.interval.into(), min_interval: data.policy.interval_min.into(), - peers: data.peers.into_iter().collect(), + peers: data.peers.iter().map(AsRef::as_ref).copied().collect(), } } } @@ -116,7 +116,7 @@ pub struct Compact { impl From for Compact { fn from(data: AnnounceData) -> Self { - let compact_peers: Vec = data.peers.into_iter().collect(); + let compact_peers: Vec = data.peers.iter().map(AsRef::as_ref).copied().collect(); let (peers, peers6): (Vec>, Vec>) = compact_peers.into_iter().collect(); @@ -313,12 +313,13 @@ impl FromIterator> for CompactPeersEncoded { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use torrust_tracker_configuration::AnnouncePolicy; use crate::core::peer::fixture::PeerBuilder; use crate::core::peer::Id; - use crate::core::torrent::SwarmStats; + use crate::core::torrent::SwarmMetadata; use crate::core::AnnounceData; use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; @@ -350,8 +351,8 @@ mod tests { )) .build(); - let peers = vec![peer_ipv4, peer_ipv6]; - let stats = SwarmStats::new(333, 333, 444); + let peers = vec![Arc::new(peer_ipv4), Arc::new(peer_ipv6)]; + let stats = SwarmMetadata::new(333, 333, 444); AnnounceData::new(peers, stats, policy) } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index b791defd7..b53697eed 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -98,7 +98,7 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::core::peer::Peer; - use crate::core::torrent::SwarmStats; + use crate::core::torrent::SwarmMetadata; use crate::core::{statistics, AnnounceData, Tracker}; use crate::servers::http::v1::services::announce::invoke; use 
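// Illustrative sketch of the `iter().map(AsRef::as_ref).copied()` change above:
// the announce data now carries `Arc`-wrapped peers (the updated test fixture
// wraps them with `Arc::new`), and because `Arc<T>: AsRef<T>` and the peer type is
// `Copy`, the response can still collect plain peer values. Toy `Copy` type
// standing in for the tracker's peer:
use std::sync::Arc;

#[derive(Clone, Copy, Debug, PartialEq)]
struct ToyPeer {
    port: u16,
}

fn flatten(peers: &[Arc<ToyPeer>]) -> Vec<ToyPeer> {
    // `AsRef::as_ref` borrows the pointee out of each Arc; `copied` turns the
    // borrowed `&ToyPeer` into an owned value without cloning the Arc itself.
    peers.iter().map(AsRef::as_ref).copied().collect()
}

fn demo() {
    let peers = vec![Arc::new(ToyPeer { port: 6881 })];
    assert_eq!(flatten(&peers), vec![ToyPeer { port: 6881 }]);
}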
crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; @@ -113,7 +113,7 @@ mod tests { let expected_announce_data = AnnounceData { peers: vec![], - stats: SwarmStats { + stats: SwarmMetadata { downloaded: 0, complete: 1, incomplete: 0, diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 91a371a7b..f42e11424 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -642,7 +642,7 @@ mod tests { .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } #[tokio::test] @@ -770,6 +770,7 @@ mod tests { mod from_a_loopback_ip { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; @@ -809,7 +810,7 @@ mod tests { .with_peer_addr(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } } } @@ -863,7 +864,7 @@ mod tests { .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } #[tokio::test]
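// A note on the `assert_eq!(peers[0], Arc::new(expected_peer))` assertions above:
// `Arc<T>` implements `PartialEq` by comparing the pointed-to values, so wrapping
// the expected peer in a fresh `Arc` still compares by value, not by allocation.
// A minimal demonstration:
use std::sync::Arc;

fn arc_equality_demo() {
    let stored: Arc<u32> = Arc::new(7);
    let expected = 7u32;
    // Two distinct allocations, equal contents.
    assert_eq!(stored, Arc::new(expected));
    // Pointer identity is a separate question, answered by `Arc::ptr_eq`.
    assert!(!Arc::ptr_eq(&stored, &Arc::new(expected)));
}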