Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Improve group by hash performance: avoid group-key/-state clones for hash-groupby #4651

Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 35 additions & 16 deletions datafusion/core/src/physical_plan/aggregates/hash.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

//! Defines the execution plan for the hash aggregate operation

use std::collections::VecDeque;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::vec;
Expand Down Expand Up @@ -91,7 +92,7 @@ struct GroupedHashAggregateStreamInner {
schema: SchemaRef,
input: SendableRecordBatchStream,
mode: AggregateMode,
accumulators: Accumulators,
accumulators: Option<Accumulators>,
aggregate_expressions: Vec<Vec<Arc<dyn PhysicalExpr>>>,

aggr_expr: Vec<Arc<dyn AggregateExpr>>,
Expand Down Expand Up @@ -133,15 +134,15 @@ impl GroupedHashAggregateStream {
group_by,
baseline_metrics,
aggregate_expressions,
accumulators: Accumulators {
accumulators: Some(Accumulators {
memory_consumer: MemoryConsumerProxy::new(
"GroupBy Hash Accumulators",
MemoryConsumerId::new(partition),
Arc::clone(&context.runtime_env().memory_manager),
),
map: RawTable::with_capacity(0),
group_states: Vec::with_capacity(0),
},
}),
random_state: Default::default(),
finished: false,
};
Expand All @@ -157,13 +158,15 @@ impl GroupedHashAggregateStream {
let result = match this.input.next().await {
Some(Ok(batch)) => {
let timer = elapsed_compute.timer();
let accumulators =
this.accumulators.as_mut().expect("not yet finished");
let result = group_aggregate_batch(
&this.mode,
&this.random_state,
&this.group_by,
&this.aggr_expr,
batch,
&mut this.accumulators,
accumulators,
&this.aggregate_expressions,
);

Expand All @@ -174,7 +177,7 @@ impl GroupedHashAggregateStream {
// overshooting a bit. Also this means we either store the whole record batch or not.
let result = match result {
Ok(allocated) => {
this.accumulators.memory_consumer.alloc(allocated).await
accumulators.memory_consumer.alloc(allocated).await
}
Err(e) => Err(e),
};
Expand All @@ -190,7 +193,8 @@ impl GroupedHashAggregateStream {
let timer = this.baseline_metrics.elapsed_compute().timer();
let result = create_batch_from_map(
&this.mode,
&this.accumulators,
std::mem::take(&mut this.accumulators)
.expect("not yet finished"),
this.group_by.expr.len(),
&this.schema,
)
Expand Down Expand Up @@ -475,7 +479,7 @@ impl std::fmt::Debug for Accumulators {
/// ```
fn create_batch_from_map(
mode: &AggregateMode,
accumulators: &Accumulators,
accumulators: Accumulators,
num_group_expr: usize,
output_schema: &Schema,
) -> ArrowResult<RecordBatch> {
Expand All @@ -498,14 +502,26 @@ fn create_batch_from_map(
}
}

// make group states mutable
let (mut group_by_values_vec, mut accumulator_set_vec): (Vec<_>, Vec<_>) =
accumulators
.group_states
.into_iter()
.map(|group_state| {
(
VecDeque::from(group_state.group_by_values.to_vec()),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🤔 maybe we could always use a VecDeque here and avoid this copy too 🤔

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's not a copy: due to move semantics, `VecDeque::from(Vec<T>)` is an O(1) conversion — the Rust standard library reuses the vector's allocated backing buffer rather than reallocating.

Copy link
Contributor Author

@crepererum crepererum Dec 16, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

VecDeque::from(group_state.accumulator_set),
)
})
.unzip();

// First, output all group by exprs
let mut columns = (0..num_group_expr)
.map(|i| {
.map(|_| {
ScalarValue::iter_to_array(
accumulators
.group_states
.iter()
.map(|group_state| group_state.group_by_values[i].clone()),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👍

group_by_values_vec
.iter_mut()
.map(|x| x.pop_front().expect("invalid group_by_values")),
)
})
.collect::<Result<Vec<_>>>()?;
Expand All @@ -516,8 +532,8 @@ fn create_batch_from_map(
match mode {
AggregateMode::Partial => {
let res = ScalarValue::iter_to_array(
accumulators.group_states.iter().map(|group_state| {
group_state.accumulator_set[x]
accumulator_set_vec.iter().map(|accumulator_set| {
accumulator_set[x]
.state()
.map(|x| x[y].clone())
.expect("unexpected accumulator state in hash aggregate")
Expand All @@ -528,8 +544,11 @@ fn create_batch_from_map(
}
AggregateMode::Final | AggregateMode::FinalPartitioned => {
let res = ScalarValue::iter_to_array(
accumulators.group_states.iter().map(|group_state| {
group_state.accumulator_set[x].evaluate().unwrap()
accumulator_set_vec.iter_mut().map(|x| {
x.pop_front()
.expect("invalid accumulator_set")
.evaluate()
.unwrap()
}),
)?;
columns.push(res);
Expand Down