Add error for DELETE triggers with transition tables
DELETE triggers with transition tables cannot be supported on compressed tables that do not use the hypercore table access method: a delete can remove an entire compressed segment, and no transition table is built in that case, so we now raise an error instead. This commit also adds a set of tests for triggers with transition tables on normal compressed tables and extends the hypercore trigger tests.
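In practice the check fires in both directions exercised by the tests below: creating a statement-level DELETE trigger that references a transition table on a hypertable with (non-hypercore) compression enabled fails, and enabling compression on a table that already has such a trigger fails with the same error. A minimal sketch of the second case, using illustrative table and function names (the error text is the one added by this commit):

    create table events(created_at timestamptz not null unique, temp float8);
    select create_hypertable('events', by_range('created_at'));
    -- a trivial statement-level trigger function; the REFERENCING clause
    -- on the trigger is what requests the transition table
    create function note_delete() returns trigger as $$
    begin
        return null;
    end;
    $$ language plpgsql;
    create trigger events_delete_trg
        after delete on events
        referencing old table as old_table
        for each statement execute function note_delete();
    -- Fails with: ERROR: DELETE triggers with transition tables not supported
    alter table events set (timescaledb.compress, timescaledb.compress_orderby = 'created_at');

Dropping the DELETE trigger makes the ALTER TABLE succeed, as the test below demonstrates; with the hypercore table access method, DELETE triggers with transition tables remain supported.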
Showing 9 changed files with 496 additions and 15 deletions.
@@ -0,0 +1,193 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- This is copied from hypercore_trigger.sql
set client_min_messages to warning;
create table readings(
    metric_id serial,
    created_at timestamptz not null unique,
    location_id smallint,  --segmentby attribute with index
    owner_id bigint,       --segmentby attribute without index
    device_id bigint,      --non-segmentby attribute
    temp float8,
    humidity float4
);
select create_hypertable('readings', by_range('created_at'));
create_hypertable
-------------------
(1,t)
(1 row)

select setseed(1);
setseed
---------

(1 row)

insert into readings(created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table readings set (
    timescaledb.compress,
    timescaledb.compress_orderby = 'created_at',
    timescaledb.compress_segmentby = 'location_id, owner_id'
);
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

create table saved_rows (like readings, new_row bool not null, kind text);
create function save_transition_table() returns trigger as $$
begin
    case tg_op
        when 'INSERT' then
            insert into saved_rows select n.*, true, tg_op from new_table n;
        when 'DELETE' then
            insert into saved_rows select o.*, false, tg_op from old_table o;
        when 'UPDATE' then
            insert into saved_rows select n.*, true, tg_op from new_table n;
            insert into saved_rows select o.*, false, tg_op from old_table o;
    end case;
    return null;
end;
$$ language plpgsql;
create trigger save_insert_transition_table_trg
    after insert on readings
    referencing new table as new_table
    for each statement execute function save_transition_table();
insert into readings(created_at, location_id, device_id, owner_id, temp, humidity)
values ('2022-06-01 00:01:23', 999, 666, 111, 3.14, 3.14),
       ('2022-06-01 00:02:23', 999, 666, 112, 3.14, 3.14);
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | INSERT
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14 | t | INSERT
(2 rows)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

copy readings(created_at, location_id, device_id, owner_id, temp, humidity) from stdin with (format csv);
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | INSERT
(1 row)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

create trigger save_update_transition_table_trg
    after update on readings
    referencing new table as new_table old table as old_table
    for each statement execute function save_transition_table();
select * from readings where location_id = 999 order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity
-----------+------------------------------+-------------+----------+-----------+------+----------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14
(3 rows)

update readings set humidity = 99.99 where location_id = 999;
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 99.99 | t | UPDATE
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | UPDATE
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 99.99 | t | UPDATE
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14 | f | UPDATE
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 99.99 | t | UPDATE
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | UPDATE
(6 rows)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

-- This is not supported since it is possible to delete an entire
-- segment without executing the trigger.
\set ON_ERROR_STOP 0
create trigger save_delete_transition_table_trg
    after delete on readings
    referencing old table as old_table
    for each statement execute function save_transition_table();
ERROR: DELETE triggers with transition tables not supported
\set ON_ERROR_STOP 1
-- Test that we get an error when enabling compression and have a
-- delete trigger with a transition table. We allow transition tables
-- for update and insert triggers.
create table test2(
    created_at timestamptz not null unique,
    location_id bigint,
    temp float8
);
select create_hypertable('test2', by_range('created_at'));
create_hypertable
-------------------
(3,t)
(1 row)

create trigger save_test2_insert_trg
    after insert on test2
    referencing new table as new_table
    for each statement execute function save_transition_table();
create trigger save_test2_update_trg
    after update on test2
    referencing new table as new_table old table as old_table
    for each statement execute function save_transition_table();
create trigger save_test2_delete_trg
    after delete on test2
    referencing old table as old_table
    for each statement execute function save_transition_table();
-- This should fail
\set ON_ERROR_STOP 0
alter table test2 set (
    timescaledb.compress,
    timescaledb.compress_orderby = 'created_at',
    timescaledb.compress_segmentby = 'location_id'
);
ERROR: DELETE triggers with transition tables not supported
\set ON_ERROR_STOP 1
-- drop the delete trigger
drop trigger save_test2_delete_trg on test2;
-- This should now succeed.
alter table test2 set (
    timescaledb.compress,
    timescaledb.compress_orderby = 'created_at',
    timescaledb.compress_segmentby = 'location_id'
);