
Commit 37e2859

ClusterState: Accepts &dyn SerializeRow instead of SerializedValues
There is no easy way for users to create SerializedValues, which makes the current APIs cumbersome to use. Instead, these methods should accept `&dyn SerializeRow` and perform serialization based on table metadata. Worth noting that they can now return a serialization error, so the error type had to be changed to PartitionKeyError, because TokenCalculationError has no variant for SerializationError.
1 parent af5f53a commit 37e2859
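
A minimal caller-side sketch of the new call shape, mirroring the test changes below. It is a fragment, not a full program: the connected `session`, the keyspace `ks`, and a table `t2` with a single int partition-key column are assumed setup, not part of this change.

```rust
// Sketch of the new &dyn SerializeRow-based API (assumed setup: a connected
// `session` and a keyspace `ks` containing table `t2` whose partition key is
// a single int column).
let cluster_state = session.get_cluster_state();

// Any SerializeRow implementor works as the partition key, e.g. a tuple;
// serialization against the table's partition-key column specs now happens
// inside the call, so no manual SerializedValues construction is needed.
let token = cluster_state
    .compute_token(&ks, "t2", &(17_i32,))
    .expect("compute_token can now fail with PartitionKeyError");
```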

2 files changed: +17 -19 lines

scylla/src/client/session_test.rs (+3 -10)
@@ -218,9 +218,6 @@ async fn test_prepared_statement() {
         .unwrap();
 
     let values = (17_i32, 16_i32, "I'm prepared!!!");
-    let serialized_values_complex_pk = prepared_complex_pk_statement
-        .serialize_values(&values)
-        .unwrap();
 
     session
         .execute_unpaged(&prepared_statement, &values)
@@ -245,12 +242,9 @@ async fn test_prepared_statement() {
     let prepared_token = Murmur3Partitioner
         .hash_one(&prepared_statement.compute_partition_key(&values).unwrap());
     assert_eq!(token, prepared_token);
-    let mut pk = SerializedValues::new();
-    pk.add_value(&17_i32, &ColumnType::Native(NativeType::Int))
-        .unwrap();
     let cluster_state_token = session
         .get_cluster_state()
-        .compute_token(&ks, "t2", &pk)
+        .compute_token(&ks, "t2", &(values.0,))
         .unwrap();
     assert_eq!(token, cluster_state_token);
 }
@@ -272,7 +266,7 @@ async fn test_prepared_statement() {
     assert_eq!(token, prepared_token);
     let cluster_state_token = session
         .get_cluster_state()
-        .compute_token(&ks, "complex_pk", &serialized_values_complex_pk)
+        .compute_token(&ks, "complex_pk", &values)
         .unwrap();
     assert_eq!(token, cluster_state_token);
 }
@@ -608,7 +602,6 @@ async fn test_token_calculation() {
         s.push('a');
     }
     let values = (&s,);
-    let serialized_values = prepared_statement.serialize_values(&values).unwrap();
     session
         .execute_unpaged(&prepared_statement, &values)
         .await
@@ -631,7 +624,7 @@ async fn test_token_calculation() {
     assert_eq!(token, prepared_token);
     let cluster_state_token = session
         .get_cluster_state()
-        .compute_token(&ks, "t3", &serialized_values)
+        .compute_token(&ks, "t3", &values)
         .unwrap();
     assert_eq!(token, cluster_state_token);
 }

scylla/src/cluster/state.rs (+14 -9)
@@ -1,15 +1,15 @@
 use crate::errors::ConnectionPoolError;
 use crate::network::{Connection, PoolConfig, VerifiedKeyspaceName};
 use crate::policies::host_filter::HostFilter;
-use crate::prepared_statement::TokenCalculationError;
+use crate::prepared_statement::PartitionKeyError;
 use crate::routing::locator::tablets::{RawTablet, Tablet, TabletsInfo};
 use crate::routing::locator::ReplicaLocator;
 use crate::routing::partitioner::{calculate_token_for_partition_key, PartitionerName};
 use crate::routing::{Shard, Token};
 
 use itertools::Itertools;
 use scylla_cql::frame::response::result::TableSpec;
-use scylla_cql::serialize::row::SerializedValues;
+use scylla_cql::serialize::row::{RowSerializationContext, SerializeRow, SerializedValues};
 use std::collections::{HashMap, HashSet};
 use std::sync::Arc;
 use tracing::{debug, warn};
@@ -203,17 +203,22 @@ impl ClusterState {
         &self,
         keyspace: &str,
         table: &str,
-        partition_key: &SerializedValues,
-    ) -> Result<Token, TokenCalculationError> {
-        let partitioner = self
+        partition_key: &dyn SerializeRow,
+    ) -> Result<Token, PartitionKeyError> {
+        let table = self
             .keyspaces
             .get(keyspace)
-            .and_then(|k| k.tables.get(table))
+            .and_then(|k| k.tables.get(table));
+        let values = SerializedValues::from_serializable(
+            &RowSerializationContext::from_specs(table.unwrap().pk_column_specs.as_slice()),
+            partition_key,
+        )?;
+        let partitioner = table
             .and_then(|t| t.partitioner.as_deref())
            .and_then(PartitionerName::from_str)
             .unwrap_or_default();
 
-        calculate_token_for_partition_key(partition_key, &partitioner)
+        calculate_token_for_partition_key(&values, &partitioner).map_err(PartitionKeyError::from)
     }
 
     /// Access to replicas owning a given token
@@ -250,8 +255,8 @@ impl ClusterState {
         &self,
         keyspace: &str,
         table: &str,
-        partition_key: &SerializedValues,
-    ) -> Result<Vec<(Arc<Node>, Shard)>, TokenCalculationError> {
+        partition_key: &dyn SerializeRow,
+    ) -> Result<Vec<(Arc<Node>, Shard)>, PartitionKeyError> {
         let token = self.compute_token(keyspace, table, partition_key)?;
         Ok(self.get_token_endpoints(keyspace, table, token))
     }
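
For illustration, a hedged sketch of the new error path from the caller's side (same assumed `session` and table `ks.t2` as in the example above; the mismatched value is made up for demonstration):

```rust
// Sketch: with &dyn SerializeRow, a value that does not match the
// partition-key column types surfaces as Err(PartitionKeyError) from
// compute_token itself, rather than from a separate, fallible
// prepared_statement.serialize_values() step beforehand.
let result = session
    .get_cluster_state()
    .compute_token(&ks, "t2", &("not an int",));

// The serialization failure is reported through the method's own error type.
assert!(result.is_err());
```

The same value-based calling convention applies to the second changed method in this file, which resolves the partition key to its replica endpoints and now shares the PartitionKeyError error type.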
