Add error for DELETE triggers with transition tables
DELETE triggers with transition tables cannot be supported on compressed
tables that do not use the hypercore table access method, since a delete
can remove an entire compressed segment without building a transition
table. We therefore generate an error for this case.

This commit also adds a bunch of tests for triggers with transition
tables on normal compressed tables and extends the hypercore trigger
tests.
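
To sketch the user-visible behavior, a minimal hypothetical example (the
names "metrics" and "log_deleted_rows" are placeholders; a compressed
hypertable not using the hypercore access method is assumed):

-- Hypothetical: "metrics" is a compressed hypertable that does not use
-- the hypercore table access method.
create trigger metrics_delete_trg
after delete on metrics
referencing old table as old_table
for each statement execute function log_deleted_rows();
ERROR: DELETE triggers with transition tables not supported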
mkindahl committed Jan 15, 2025
1 parent d1348af commit 9de9eec
Showing 9 changed files with 496 additions and 15 deletions.
4 changes: 4 additions & 0 deletions src/hypertable.c
@@ -240,6 +240,7 @@ Hypertable *
ts_hypertable_from_tupleinfo(const TupleInfo *ti)
{
Hypertable *h = MemoryContextAllocZero(ti->mctx, sizeof(Hypertable));
char relkind;

ts_hypertable_formdata_fill(&h->fd, ti);
h->main_table_relid =
@@ -249,6 +250,9 @@ ts_hypertable_from_tupleinfo(const TupleInfo *ti)
ts_subspace_store_init(h->space, ti->mctx, ts_guc_max_cached_chunks_per_hypertable);
h->chunk_sizing_func = get_chunk_sizing_func_oid(&h->fd);

if (OidIsValid(h->main_table_relid))
ts_get_rel_info(h->main_table_relid, &h->amoid, &relkind);

if (ts_guc_enable_chunk_skipping)
{
h->range_space =
1 change: 1 addition & 0 deletions src/hypertable.h
@@ -51,6 +51,7 @@ typedef struct Hypertable
FormData_hypertable fd;
Oid main_table_relid;
Oid chunk_sizing_func;
Oid amoid;
Hyperspace *space;
SubspaceStore *chunk_cache;
ChunkRangeSpace *range_space;
17 changes: 17 additions & 0 deletions src/process_utility.c
@@ -4482,6 +4482,23 @@ process_create_trigger_start(ProcessUtilityArgs *args)
errmsg("ROW triggers with transition tables are not supported on hypertables")));
}

/*
* We currently cannot support delete triggers with transition tables on
* compressed tables that are not using the hypercore table access method,
* since deleting a complete segment will not build a transition table for
* the delete.
*/
if (stmt->transitionRels && TRIGGER_FOR_DELETE(tgtype) &&
TS_HYPERTABLE_HAS_COMPRESSION_ENABLED(ht) && !ts_is_hypercore_am(ht->amoid))
{
ts_cache_release(hcache);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("DELETE triggers with transition tables not supported"),
errdetail("Compressed hypertables not using \"hypercore\" access method are not "
"supported if the trigger use transition tables.")));
}

add_hypertable_to_process_args(args, ht);

/*
42 changes: 41 additions & 1 deletion tsl/src/compression/create.c
@@ -860,7 +860,6 @@ validate_hypertable_for_compression(Hypertable *ht)
"cannot compress tables with reserved column prefix '%s'",
COMPRESSION_COLUMN_METADATA_PREFIX);
}
table_close(rel, AccessShareLock);

if (row_size > MaxHeapTupleSize)
{
@@ -871,6 +870,47 @@
row_size,
MaxHeapTupleSize)));
}

/*
* Check that all triggers are ok for compressed tables.
*/
Relation pg_trigger = table_open(TriggerRelationId, AccessShareLock);
HeapTuple tuple;

ScanKeyData key;
ScanKeyInit(&key,
Anum_pg_trigger_tgrelid,
BTEqualStrategyNumber,
F_OIDEQ,
ObjectIdGetDatum(ht->main_table_relid));

SysScanDesc scan = systable_beginscan(pg_trigger, TriggerRelidNameIndexId, true, NULL, 1, &key);

while (HeapTupleIsValid(tuple = systable_getnext(scan)))
{
bool oldtable_isnull;
Form_pg_trigger trigrec = (Form_pg_trigger) GETSTRUCT(tuple);

/*
* We currently cannot support transition tables for DELETE triggers
* on compressed tables that are not using the hypercore table access
* method, since deleting a complete segment will not build a
* transition table for the delete.
*/
fastgetattr(tuple, Anum_pg_trigger_tgoldtable, pg_trigger->rd_att, &oldtable_isnull);
if (!oldtable_isnull && !TRIGGER_FOR_ROW(trigrec->tgtype) &&
TRIGGER_FOR_DELETE(trigrec->tgtype) && !ts_is_hypercore_am(ht->amoid))
ereport(ERROR,
errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("DELETE triggers with transition tables not supported"),
errdetail(
"Compressed hypertables not using \"hypercore\" access method are not "
"supported if the trigger use transition tables."));
}

systable_endscan(scan);
table_close(pg_trigger, AccessShareLock);
table_close(rel, AccessShareLock);
}

/*
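The catalog scan added in validate_hypertable_for_compression can be
approximated in SQL. A minimal sketch (the table name "test2" is only an
example) that lists the triggers this check rejects, i.e. statement-level
DELETE triggers declaring an OLD transition table:

-- tgoldtable is non-null when the trigger declares an OLD transition
-- table; tgtype bit 0 set means row-level, bit 3 set means DELETE.
select tgname
from pg_trigger
where tgrelid = 'test2'::regclass
  and tgoldtable is not null
  and (tgtype & 1) = 0  -- statement-level, not FOR EACH ROW
  and (tgtype & 8) = 8; -- fires on DELETE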
193 changes: 193 additions & 0 deletions tsl/test/expected/compression_trigger.out
@@ -0,0 +1,193 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- This is copied from hypercore_trigger.sql
set client_min_messages to warning;
create table readings(
metric_id serial,
created_at timestamptz not null unique,
location_id smallint, --segmentby attribute with index
owner_id bigint, --segmentby attribute without index
device_id bigint, --non-segmentby attribute
temp float8,
humidity float4
);
select create_hypertable('readings', by_range('created_at'));
create_hypertable
-------------------
(1,t)
(1 row)

select setseed(1);
setseed
---------

(1 row)

insert into readings(created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table readings set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
timescaledb.compress_segmentby = 'location_id, owner_id'
);
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

create table saved_rows (like readings, new_row bool not null, kind text);
create function save_transition_table() returns trigger as $$
begin
case tg_op
when 'INSERT' then
insert into saved_rows select n.*, true, tg_op from new_table n;
when 'DELETE' then
insert into saved_rows select o.*, false, tg_op from old_table o;
when 'UPDATE' then
insert into saved_rows select n.*, true, tg_op from new_table n;
insert into saved_rows select o.*, false, tg_op from old_table o;
end case;
return null;
end;
$$ language plpgsql;
create trigger save_insert_transition_table_trg
after insert on readings
referencing new table as new_table
for each statement execute function save_transition_table();
insert into readings(created_at, location_id, device_id, owner_id, temp, humidity)
values ('2022-06-01 00:01:23', 999, 666, 111, 3.14, 3.14),
('2022-06-01 00:02:23', 999, 666, 112, 3.14, 3.14);
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | INSERT
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14 | t | INSERT
(2 rows)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

copy readings(created_at, location_id, device_id, owner_id, temp, humidity) from stdin with (format csv);
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | INSERT
(1 row)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

create trigger save_update_transition_table_trg
after update on readings
referencing new table as new_table old table as old_table
for each statement execute function save_transition_table();
select * from readings where location_id = 999 order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity
-----------+------------------------------+-------------+----------+-----------+------+----------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14
(3 rows)

update readings set humidity = 99.99 where location_id = 999;
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 99.99 | t | UPDATE
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | UPDATE
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 99.99 | t | UPDATE
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14 | f | UPDATE
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 99.99 | t | UPDATE
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | UPDATE
(6 rows)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

-- This is not supported since it is possible to delete an entire
-- segment without executing the trigger.
\set ON_ERROR_STOP 0
create trigger save_delete_transition_table_trg
after delete on readings
referencing old table as old_table
for each statement execute function save_transition_table();
ERROR: DELETE triggers with transition tables not supported
\set ON_ERROR_STOP 1
-- Test that we get an error when enabling compression if there is a
-- delete trigger with a transition table. We allow transition tables
-- for update and insert triggers.
create table test2(
created_at timestamptz not null unique,
location_id bigint,
temp float8
);
select create_hypertable('test2', by_range('created_at'));
create_hypertable
-------------------
(3,t)
(1 row)

create trigger save_test2_insert_trg
after insert on test2
referencing new table as new_table
for each statement execute function save_transition_table();
create trigger save_test2_update_trg
after update on test2
referencing new table as new_table old table as old_table
for each statement execute function save_transition_table();
create trigger save_test2_delete_trg
after delete on test2
referencing old table as old_table
for each statement execute function save_transition_table();
-- This should fail
\set ON_ERROR_STOP 0
alter table test2 set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
timescaledb.compress_segmentby = 'location_id'
);
ERROR: DELETE triggers with transition tables not supported
\set ON_ERROR_STOP 1
-- drop the delete trigger
drop trigger save_test2_delete_trg on test2;
-- This should now succeed.
alter table test2 set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
timescaledb.compress_segmentby = 'location_id'
);