Allow non-btree operator pushdown in UPDATE/DELETE queries on compressed chunks

When pushing down expressions into the compressed scan, we assumed
all valid expressions used btree operators and dropped any that did not.
This patch changes the behaviour to keep those expressions and use
them as heap filters on the compressed scan for UPDATE and DELETE
on compressed chunks.
svenklemm committed Feb 4, 2025
1 parent 253a7cb commit 0819713
Showing 2 changed files with 50 additions and 32 deletions.
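
To illustrate the commit message: for a non-btree operator such as <> there is no btree strategy to look up, so the filter's comparison proc can be plugged into a scankey directly and evaluated as a heap filter on the compressed scan. The following is a minimal sketch, not the committed code; it assumes PostgreSQL's skey/lsyscache APIs, and the helper name init_non_btree_filter_scankey is hypothetical.

#include "postgres.h"
#include "access/attnum.h"
#include "access/skey.h"
#include "access/stratnum.h"
#include "utils/lsyscache.h"

/*
 * Build a heap-filter scankey for a non-btree operator (e.g. <>).
 * There is no btree strategy, so the scankey is initialized straight
 * from the operator's underlying comparison function.
 */
static void
init_non_btree_filter_scankey(ScanKey key, AttrNumber attno, Oid opno, Datum value)
{
	/* get_opcode() maps the operator Oid (e.g. <> on text) to its proc */
	RegProcedure proc = get_opcode(opno);

	ScanKeyEntryInitialize(key,
						   0,				/* flags */
						   attno,			/* attribute to filter on */
						   InvalidStrategy, /* no btree strategy for <> */
						   InvalidOid,		/* subtype */
						   InvalidOid,		/* collation (sketch only) */
						   proc,			/* comparison function */
						   value);			/* constant to compare against */
}
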
66 changes: 44 additions & 22 deletions tsl/src/compression/compression_scankey.c
@@ -17,7 +17,7 @@

static Oid deduce_filter_subtype(BatchFilter *filter, Oid att_typoid);
static bool create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
StrategyNumber strategy, Oid subtype,
StrategyNumber strategy, Oid subtype, Oid opcode,
ScanKeyData *scankeys, int *num_scankeys,
Bitmapset **null_columns, Datum value, bool is_null_check,
bool is_array_op);
@@ -191,6 +191,7 @@ build_heap_scankeys(Oid hypertable_relid, Relation in_rel, Relation out_rel,
attname,
BTEqualStrategyNumber,
InvalidOid,
InvalidOid,
scankeys,
&key_index,
null_columns,
@@ -212,6 +213,7 @@ build_heap_scankeys(Oid hypertable_relid, Relation in_rel, Relation out_rel,
column_segment_min_name(index),
BTLessEqualStrategyNumber,
InvalidOid,
InvalidOid,
scankeys,
&key_index,
null_columns,
@@ -223,6 +225,7 @@ build_heap_scankeys(Oid hypertable_relid, Relation in_rel, Relation out_rel,
column_segment_max_name(index),
BTGreaterEqualStrategyNumber,
InvalidOid,
InvalidOid,
scankeys,
&key_index,
null_columns,
@@ -461,6 +464,7 @@ build_update_delete_scankeys(Relation in_rel, List *heap_filters, int *num_scank
NameStr(filter->column_name),
filter->strategy,
deduce_filter_subtype(filter, typoid),
filter->opcode,
scankeys,
&key_index,
null_columns,
@@ -482,9 +486,9 @@ build_update_delete_scankeys(Relation in_rel, List *heap_filters, int *num_scank

static bool
create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
StrategyNumber strategy, Oid subtype, ScanKeyData *scankeys,
int *num_scankeys, Bitmapset **null_columns, Datum value,
bool is_null_check, bool is_array_op)
StrategyNumber strategy, Oid subtype, Oid opcode,
ScanKeyData *scankeys, int *num_scankeys, Bitmapset **null_columns,
Datum value, bool is_null_check, bool is_array_op)
{
AttrNumber cmp_attno = get_attnum(in_rel->rd_id, segment_filter_col_name);
Assert(cmp_attno != InvalidAttrNumber);
@@ -510,30 +514,48 @@ create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
return false;
}

Oid atttypid = in_rel->rd_att->attrs[AttrNumberGetAttrOffset(cmp_attno)].atttypid;

TypeCacheEntry *tce = lookup_type_cache(atttypid, TYPECACHE_BTREE_OPFAMILY);
if (!OidIsValid(tce->btree_opf))
elog(ERROR, "no btree opfamily for type \"%s\"", format_type_be(atttypid));

Oid opr = get_opfamily_member(tce->btree_opf, atttypid, atttypid, strategy);

Oid opr;
/*
* Fall back to btree operator input type when it is binary compatible with
* the column type and no operator for column type could be found.
* All btree operators will have a valid strategy here. For
* non-btree operators e.g. <> we directly take the opcode
* here. We could do the same for btree in certain cases
* but some filters get transformed to min/max filters and
* won't keep the initial opcode so we would need to disambiguate
* between them.
*/
if (!OidIsValid(opr) && IsBinaryCoercible(atttypid, tce->btree_opintype))
if (strategy == InvalidStrategy)
{
opr =
get_opfamily_member(tce->btree_opf, tce->btree_opintype, tce->btree_opintype, strategy);
opr = opcode;
}
else
{
Oid atttypid = in_rel->rd_att->attrs[AttrNumberGetAttrOffset(cmp_attno)].atttypid;

/* No operator could be found so we can't create the scankey. */
if (!OidIsValid(opr))
return false;
TypeCacheEntry *tce = lookup_type_cache(atttypid, TYPECACHE_BTREE_OPFAMILY);
if (!OidIsValid(tce->btree_opf))
elog(ERROR, "no btree opfamily for type \"%s\"", format_type_be(atttypid));

opr = get_opfamily_member(tce->btree_opf, atttypid, atttypid, strategy);

/*
* Fall back to btree operator input type when it is binary compatible with
* the column type and no operator for column type could be found.
*/
if (!OidIsValid(opr) && IsBinaryCoercible(atttypid, tce->btree_opintype))
{
opr = get_opfamily_member(tce->btree_opf,
tce->btree_opintype,
tce->btree_opintype,
strategy);
}

/* No operator could be found so we can't create the scankey. */
if (!OidIsValid(opr))
return false;

opr = get_opcode(opr);
}

opr = get_opcode(opr);
Assert(OidIsValid(opr));
/* We should never end up here but: no opcode, no optimization */
if (!OidIsValid(opr))
return false;
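
Taken together, the operator-resolution path in the hunks above reduces to roughly the following simplified sketch. It is not a drop-in copy of create_segment_filter_scankey; the helper name resolve_filter_proc is hypothetical, and the PostgreSQL calls are the ones already visible in the diff.

#include "postgres.h"
#include "access/stratnum.h"
#include "parser/parse_coerce.h"
#include "utils/lsyscache.h"
#include "utils/typcache.h"

/*
 * Resolve the comparison proc for a filter: non-btree filters carry their
 * proc directly (strategy == InvalidStrategy), btree filters go through the
 * opfamily lookup with a binary-coercible fallback before get_opcode().
 * Returns InvalidOid when no usable operator exists, in which case the
 * caller simply skips building the scankey.
 */
static RegProcedure
resolve_filter_proc(Oid atttypid, StrategyNumber strategy, Oid opcode)
{
	if (strategy == InvalidStrategy)
	{
		/* Non-btree operator, e.g. <>: the filter already carries the proc. */
		return opcode;
	}

	TypeCacheEntry *tce = lookup_type_cache(atttypid, TYPECACHE_BTREE_OPFAMILY);

	if (!OidIsValid(tce->btree_opf))
		elog(ERROR, "no btree opfamily for scankey column type %u", atttypid);

	Oid opr = get_opfamily_member(tce->btree_opf, atttypid, atttypid, strategy);

	/* Fall back to the opfamily input type when it is binary compatible. */
	if (!OidIsValid(opr) && IsBinaryCoercible(atttypid, tce->btree_opintype))
		opr = get_opfamily_member(tce->btree_opf,
								  tce->btree_opintype,
								  tce->btree_opintype,
								  strategy);

	/* No operator found: no scankey can be built. */
	if (!OidIsValid(opr))
		return InvalidOid;

	return get_opcode(opr);
}
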
16 changes: 6 additions & 10 deletions tsl/test/shared/expected/compression_dml.out
@@ -489,14 +489,12 @@ BEGIN;
:ANALYZE DELETE FROM direct_delete WHERE reading <> 'r2';
QUERY PLAN
Custom Scan (HypertableModify) (actual rows=0 loops=1)
Batches decompressed: 8
Tuples decompressed: 8
Batches deleted: 4
-> Delete on direct_delete (actual rows=0 loops=1)
Delete on _hyper_X_X_chunk direct_delete_1
-> Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=4 loops=1)
-> Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=0 loops=1)
Filter: (reading <> 'r2'::text)
Rows Removed by Filter: 4
(8 rows)
(6 rows)

-- 4 tuples should still be there
SELECT count(*) FROM direct_delete;
@@ -569,14 +567,12 @@ BEGIN;
:ANALYZE DELETE FROM direct_delete WHERE reading NOT IN ('r1');
QUERY PLAN
Custom Scan (HypertableModify) (actual rows=0 loops=1)
Batches decompressed: 8
Tuples decompressed: 8
Batches deleted: 4
-> Delete on direct_delete (actual rows=0 loops=1)
Delete on _hyper_X_X_chunk direct_delete_1
-> Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=4 loops=1)
-> Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=0 loops=1)
Filter: (reading <> 'r1'::text)
Rows Removed by Filter: 4
(8 rows)
(6 rows)

-- 4 tuples should still be there
SELECT count(*) FROM direct_delete;