Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore(kad): remove deprecated module record #4035

Merged
merged 8 commits into from
Oct 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions examples/distributed-key-value-store/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
use async_std::io;
use futures::{prelude::*, select};
use libp2p::kad;
use libp2p::kad::record::store::MemoryStore;
use libp2p::kad::store::MemoryStore;
use libp2p::kad::Mode;
use libp2p::{
mdns, noise,
Expand Down Expand Up @@ -148,7 +148,7 @@ fn handle_input_line(kademlia: &mut kad::Behaviour<MemoryStore>, line: String) {
Some("GET") => {
let key = {
match args.next() {
Some(key) => kad::record::Key::new(&key),
Some(key) => kad::RecordKey::new(&key),
None => {
eprintln!("Expected key");
return;
Expand All @@ -160,7 +160,7 @@ fn handle_input_line(kademlia: &mut kad::Behaviour<MemoryStore>, line: String) {
Some("GET_PROVIDERS") => {
let key = {
match args.next() {
Some(key) => kad::record::Key::new(&key),
Some(key) => kad::RecordKey::new(&key),
None => {
eprintln!("Expected key");
return;
Expand All @@ -172,7 +172,7 @@ fn handle_input_line(kademlia: &mut kad::Behaviour<MemoryStore>, line: String) {
Some("PUT") => {
let key = {
match args.next() {
Some(key) => kad::record::Key::new(&key),
Some(key) => kad::RecordKey::new(&key),
None => {
eprintln!("Expected key");
return;
Expand Down Expand Up @@ -201,7 +201,7 @@ fn handle_input_line(kademlia: &mut kad::Behaviour<MemoryStore>, line: String) {
Some("PUT_PROVIDER") => {
let key = {
match args.next() {
Some(key) => kad::record::Key::new(&key),
Some(key) => kad::RecordKey::new(&key),
None => {
eprintln!("Expected key");
return;
Expand Down
2 changes: 1 addition & 1 deletion examples/file-sharing/src/network.rs
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,7 @@ impl EventLoop {
#[derive(NetworkBehaviour)]
struct Behaviour {
request_response: request_response::cbor::Behaviour<FileRequest, FileResponse>,
kademlia: kad::Behaviour<kad::record::store::MemoryStore>,
kademlia: kad::Behaviour<kad::store::MemoryStore>,
}

#[derive(Debug)]
Expand Down
4 changes: 2 additions & 2 deletions misc/server/src/behaviour.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ pub(crate) struct Behaviour {
relay: relay::Behaviour,
ping: ping::Behaviour,
identify: identify::Behaviour,
pub(crate) kademlia: Toggle<kad::Behaviour<kad::record::store::MemoryStore>>,
pub(crate) kademlia: Toggle<kad::Behaviour<kad::store::MemoryStore>>,
autonat: Toggle<autonat::Behaviour>,
}

Expand All @@ -39,7 +39,7 @@ impl Behaviour {
kademlia_config.set_provider_record_ttl(Some(Duration::from_secs(0)));
let mut kademlia = kad::Behaviour::with_config(
pub_key.to_peer_id(),
kad::record::store::MemoryStore::new(pub_key.to_peer_id()),
kad::store::MemoryStore::new(pub_key.to_peer_id()),
kademlia_config,
);
let bootaddr = Multiaddr::from_str("/dnsaddr/bootstrap.libp2p.io").unwrap();
Expand Down
2 changes: 2 additions & 0 deletions protocols/kad/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
See [PR 4659](https://github.com/libp2p/rust-libp2p/pull/4659).
- Emit `ModeChanged` event whenever we automatically reconfigure the mode.
See [PR 4503](https://github.com/libp2p/rust-libp2p/pull/4503).
- Make previously "deprecated" `record` module private.
See [PR 4035](https://github.com/libp2p/rust-libp2p/pull/4035).

## 0.44.6

Expand Down
64 changes: 32 additions & 32 deletions protocols/kad/src/behaviour.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ use crate::jobs::*;
use crate::kbucket::{self, Distance, KBucketsTable, NodeStatus};
use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig};
use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState};
use crate::record_priv::{
use crate::record::{
self,
store::{self, RecordStore},
ProviderRecord, Record,
Expand Down Expand Up @@ -148,7 +148,7 @@ pub enum BucketInserts {
/// This can be used for e.g. signature verification or validating
/// the accompanying [`Key`].
///
/// [`Key`]: crate::record_priv::Key
/// [`Key`]: crate::record::Key
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum StoreInserts {
/// Whenever a (provider) record is received,
Expand Down Expand Up @@ -680,7 +680,7 @@ where
///
/// The result of this operation is delivered in a
/// [`Event::OutboundQueryProgressed{QueryResult::GetRecord}`].
pub fn get_record(&mut self, key: record_priv::Key) -> QueryId {
pub fn get_record(&mut self, key: record::Key) -> QueryId {
let record = if let Some(record) = self.store.get(&key) {
if record.is_expired(Instant::now()) {
self.store.remove(&key);
Expand Down Expand Up @@ -830,7 +830,7 @@ where
/// This is a _local_ operation. However, it also has the effect that
/// the record will no longer be periodically re-published, allowing the
/// record to eventually expire throughout the DHT.
pub fn remove_record(&mut self, key: &record_priv::Key) {
pub fn remove_record(&mut self, key: &record::Key) {
if let Some(r) = self.store.get(key) {
if r.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) {
self.store.remove(key)
Expand Down Expand Up @@ -900,7 +900,7 @@ where
///
/// The results of the (repeated) provider announcements sent by this node are
/// reported via [`Event::OutboundQueryProgressed{QueryResult::StartProviding}`].
pub fn start_providing(&mut self, key: record_priv::Key) -> Result<QueryId, store::Error> {
pub fn start_providing(&mut self, key: record::Key) -> Result<QueryId, store::Error> {
// Note: We store our own provider records locally without local addresses
// to avoid redundant storage and outdated addresses. Instead these are
// acquired on demand when returning a `ProviderRecord` for the local node.
Expand Down Expand Up @@ -928,7 +928,7 @@ where
///
/// This is a local operation. The local node will still be considered as a
/// provider for the key by other nodes until these provider records expire.
pub fn stop_providing(&mut self, key: &record_priv::Key) {
pub fn stop_providing(&mut self, key: &record::Key) {
self.store
.remove_provider(key, self.kbuckets.local_key().preimage());
}
Expand All @@ -937,7 +937,7 @@ where
///
/// The result of this operation is
/// reported via [`Event::OutboundQueryProgressed{QueryResult::GetProviders}`].
pub fn get_providers(&mut self, key: record_priv::Key) -> QueryId {
pub fn get_providers(&mut self, key: record::Key) -> QueryId {
let providers: HashSet<_> = self
.store
.providers(&key)
Expand Down Expand Up @@ -1122,7 +1122,7 @@ where
}

/// Collects all peers who are known to be providers of the value for a given `Multihash`.
fn provider_peers(&mut self, key: &record_priv::Key, source: &PeerId) -> Vec<KadPeer> {
fn provider_peers(&mut self, key: &record::Key, source: &PeerId) -> Vec<KadPeer> {
let kbuckets = &mut self.kbuckets;
let connected = &mut self.connected_peers;
let listen_addresses = &self.listen_addresses;
Expand Down Expand Up @@ -1179,7 +1179,7 @@ where
}

/// Starts an iterative `ADD_PROVIDER` query for the given key.
fn start_add_provider(&mut self, key: record_priv::Key, context: AddProviderContext) {
fn start_add_provider(&mut self, key: record::Key, context: AddProviderContext) {
let info = QueryInfo::AddProvider {
context,
key: key.clone(),
Expand Down Expand Up @@ -1519,7 +1519,7 @@ where
get_closest_peers_stats,
},
} => {
let mk_result = |key: record_priv::Key| {
let mk_result = |key: record::Key| {
if success.len() >= quorum.get() {
Ok(PutRecordOk { key })
} else {
Expand Down Expand Up @@ -1817,7 +1817,7 @@ where
}

/// Processes a provider record received from a peer.
fn provider_received(&mut self, key: record_priv::Key, provider: KadPeer) {
fn provider_received(&mut self, key: record::Key, provider: KadPeer) {
if &provider.node_id != self.kbuckets.local_key().preimage() {
let record = ProviderRecord {
key,
Expand Down Expand Up @@ -2792,22 +2792,22 @@ pub enum GetRecordOk {
pub enum GetRecordError {
#[error("the record was not found")]
NotFound {
key: record_priv::Key,
key: record::Key,
closest_peers: Vec<PeerId>,
},
#[error("the quorum failed; needed {quorum} peers")]
QuorumFailed {
key: record_priv::Key,
key: record::Key,
records: Vec<PeerRecord>,
quorum: NonZeroUsize,
},
#[error("the request timed out")]
Timeout { key: record_priv::Key },
Timeout { key: record::Key },
}

impl GetRecordError {
/// Gets the key of the record for which the operation failed.
pub fn key(&self) -> &record_priv::Key {
pub fn key(&self) -> &record::Key {
match self {
GetRecordError::QuorumFailed { key, .. } => key,
GetRecordError::Timeout { key, .. } => key,
Expand All @@ -2817,7 +2817,7 @@ impl GetRecordError {

/// Extracts the key of the record for which the operation failed,
/// consuming the error.
pub fn into_key(self) -> record_priv::Key {
pub fn into_key(self) -> record::Key {
match self {
GetRecordError::QuorumFailed { key, .. } => key,
GetRecordError::Timeout { key, .. } => key,
Expand All @@ -2832,22 +2832,22 @@ pub type PutRecordResult = Result<PutRecordOk, PutRecordError>;
/// The successful result of [`Behaviour::put_record`].
#[derive(Debug, Clone)]
pub struct PutRecordOk {
pub key: record_priv::Key,
pub key: record::Key,
}

/// The error result of [`Behaviour::put_record`].
#[derive(Debug, Clone, Error)]
pub enum PutRecordError {
#[error("the quorum failed; needed {quorum} peers")]
QuorumFailed {
key: record_priv::Key,
key: record::Key,
/// [`PeerId`]s of the peers the record was successfully stored on.
success: Vec<PeerId>,
quorum: NonZeroUsize,
},
#[error("the request timed out")]
Timeout {
key: record_priv::Key,
key: record::Key,
/// [`PeerId`]s of the peers the record was successfully stored on.
success: Vec<PeerId>,
quorum: NonZeroUsize,
Expand All @@ -2856,7 +2856,7 @@ pub enum PutRecordError {

impl PutRecordError {
/// Gets the key of the record for which the operation failed.
pub fn key(&self) -> &record_priv::Key {
pub fn key(&self) -> &record::Key {
match self {
PutRecordError::QuorumFailed { key, .. } => key,
PutRecordError::Timeout { key, .. } => key,
Expand All @@ -2865,7 +2865,7 @@ impl PutRecordError {

/// Extracts the key of the record for which the operation failed,
/// consuming the error.
pub fn into_key(self) -> record_priv::Key {
pub fn into_key(self) -> record::Key {
match self {
PutRecordError::QuorumFailed { key, .. } => key,
PutRecordError::Timeout { key, .. } => key,
Expand Down Expand Up @@ -2934,7 +2934,7 @@ pub type GetProvidersResult = Result<GetProvidersOk, GetProvidersError>;
#[derive(Debug, Clone)]
pub enum GetProvidersOk {
FoundProviders {
key: record_priv::Key,
key: record::Key,
/// The new set of providers discovered.
providers: HashSet<PeerId>,
},
Expand All @@ -2948,22 +2948,22 @@ pub enum GetProvidersOk {
pub enum GetProvidersError {
#[error("the request timed out")]
Timeout {
key: record_priv::Key,
key: record::Key,
closest_peers: Vec<PeerId>,
},
}

impl GetProvidersError {
/// Gets the key for which the operation failed.
pub fn key(&self) -> &record_priv::Key {
pub fn key(&self) -> &record::Key {
match self {
GetProvidersError::Timeout { key, .. } => key,
}
}

/// Extracts the key for which the operation failed,
/// consuming the error.
pub fn into_key(self) -> record_priv::Key {
pub fn into_key(self) -> record::Key {
match self {
GetProvidersError::Timeout { key, .. } => key,
}
Expand All @@ -2976,26 +2976,26 @@ pub type AddProviderResult = Result<AddProviderOk, AddProviderError>;
/// The successful result of publishing a provider record.
#[derive(Debug, Clone)]
pub struct AddProviderOk {
pub key: record_priv::Key,
pub key: record::Key,
}

/// The possible errors when publishing a provider record.
#[derive(Debug, Clone, Error)]
pub enum AddProviderError {
#[error("the request timed out")]
Timeout { key: record_priv::Key },
Timeout { key: record::Key },
}

impl AddProviderError {
/// Gets the key for which the operation failed.
pub fn key(&self) -> &record_priv::Key {
pub fn key(&self) -> &record::Key {
match self {
AddProviderError::Timeout { key, .. } => key,
}
}

/// Extracts the key for which the operation failed,
/// consuming the error.
pub fn into_key(self) -> record_priv::Key {
pub fn into_key(self) -> record::Key {
match self {
AddProviderError::Timeout { key, .. } => key,
}
Expand Down Expand Up @@ -3094,7 +3094,7 @@ pub enum QueryInfo {
/// A (repeated) query initiated by [`Behaviour::get_providers`].
GetProviders {
/// The key for which to search for providers.
key: record_priv::Key,
key: record::Key,
/// The number of providers found so far.
providers_found: usize,
/// Current index of events.
Expand All @@ -3104,7 +3104,7 @@ pub enum QueryInfo {
/// A (repeated) query initiated by [`Behaviour::start_providing`].
AddProvider {
/// The record key.
key: record_priv::Key,
key: record::Key,
/// The current phase of the query.
phase: AddProviderPhase,
/// The execution context of the query.
Expand All @@ -3125,7 +3125,7 @@ pub enum QueryInfo {
/// A (repeated) query initiated by [`Behaviour::get_record`].
GetRecord {
/// The key to look for.
key: record_priv::Key,
key: record::Key,
/// Current index of events.
step: ProgressStep,
/// Did we find at least one record?
Expand Down
10 changes: 5 additions & 5 deletions protocols/kad/src/behaviour/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
use super::*;

use crate::kbucket::Distance;
use crate::record_priv::{store::MemoryStore, Key};
use crate::record::{store::MemoryStore, Key};
use crate::{K_VALUE, SHA_256_MH};
use futures::{executor::block_on, future::poll_fn, prelude::*};
use futures_timer::Delay;
Expand Down Expand Up @@ -449,7 +449,7 @@ fn get_record_not_found() {
.map(|(_addr, swarm)| swarm)
.collect::<Vec<_>>();

let target_key = record_priv::Key::from(random_multihash());
let target_key = record::Key::from(random_multihash());
let qid = swarms[0].behaviour_mut().get_record(target_key.clone());

block_on(poll_fn(move |ctx| {
Expand Down Expand Up @@ -858,7 +858,7 @@ fn get_record_many() {
/// network where X is equal to the configured replication factor.
#[test]
fn add_provider() {
fn prop(keys: Vec<record_priv::Key>, seed: Seed) {
fn prop(keys: Vec<record::Key>, seed: Seed) {
let mut rng = StdRng::from_seed(seed.0);
let replication_factor =
NonZeroUsize::new(rng.gen_range(1..(K_VALUE.get() / 2) + 1)).unwrap();
Expand Down Expand Up @@ -1372,7 +1372,7 @@ fn network_behaviour_on_address_change() {

#[test]
fn get_providers_single() {
fn prop(key: record_priv::Key) {
fn prop(key: record::Key) {
let (_, mut single_swarm) = build_node();
single_swarm
.behaviour_mut()
Expand Down Expand Up @@ -1426,7 +1426,7 @@ fn get_providers_single() {
}

fn get_providers_limit<const N: usize>() {
fn prop<const N: usize>(key: record_priv::Key) {
fn prop<const N: usize>(key: record::Key) {
let mut swarms = build_nodes(3);

// Let first peer know of second peer and second peer know of third peer.
Expand Down
Loading