Fix unique index detection for compressed chunks #6454

Merged: 1 commit, Dec 21, 2023

2 changes: 2 additions & 0 deletions .unreleased/pr_6454
@@ -0,0 +1,2 @@
Fixes: #6454 Fix unique expression indexes on compressed chunks
Thanks: @aarondglover for reporting an issue with unique expression indexes on compressed chunks
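
For context, the class of index this PR fixes detection for: a unique index whose key contains an expression rather than only plain columns. A minimal sketch of the shape involved (illustrative table and index names, not part of the patch):

-- A unique *expression* index: lower(device) is an expression, so
-- PostgreSQL's relcache key-column bitmap ignores the whole index,
-- and the old detection reported "no unique index" on the relation.
CREATE TABLE readings(device text, ts timestamptz NOT NULL);
CREATE UNIQUE INDEX readings_uniq ON readings (lower(device), ts);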
29 changes: 26 additions & 3 deletions src/indexing.c
@@ -274,10 +274,33 @@ indexing_create_and_verify_hypertable_indexes(const Hypertable *ht, bool create_
 bool TSDLLEXPORT
 ts_indexing_relation_has_primary_or_unique_index(Relation htrel)
 {
-	Bitmapset *key_attrs = RelationGetIndexAttrBitmap(htrel, INDEX_ATTR_BITMAP_KEY);
-	bool result = bms_num_members(key_attrs) > 0;
+	List *indexoidlist = RelationGetIndexList(htrel);
+	ListCell *lc;
+	bool result = false;
+
+	if (OidIsValid(htrel->rd_pkindex))
+		return true;
+
+	foreach (lc, indexoidlist)
+	{
+		Oid indexoid = lfirst_oid(lc);
+		HeapTuple index_tuple;
+		Form_pg_index index;
+
+		index_tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexoid));
+		if (!HeapTupleIsValid(index_tuple)) /* should not happen */
+			elog(ERROR,
+				 "cache lookup failed for index %u in \"%s\" ",
+				 indexoid,
+				 RelationGetRelationName(htrel));
+		index = (Form_pg_index) GETSTRUCT(index_tuple);
+		result = index->indisunique;
+		ReleaseSysCache(index_tuple);
+		if (result)
+			break;
+	}
 
-	bms_free(key_attrs);
+	list_free(indexoidlist);
 	return result;
 }
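
Why the rewrite works: RelationGetIndexAttrBitmap(htrel, INDEX_ATTR_BITMAP_KEY) builds its bitmap only from unique indexes with neither expressions nor predicates (the only kind a foreign key may reference), so bms_num_members() returned 0 for a relation whose sole unique index is an expression index. Iterating over the relation's pg_index entries and testing indisunique catches every unique index regardless of shape. Roughly the same check expressed as a catalog query, for illustration ('some_chunk' is a placeholder relation name):

-- Does any unique index exist on the relation, expression or not?
SELECT EXISTS (
    SELECT 1
    FROM pg_index
    WHERE indrelid = 'some_chunk'::regclass
      AND indisunique
) AS has_unique_index;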

33 changes: 33 additions & 0 deletions tsl/test/shared/expected/compress_unique_index.out
@@ -0,0 +1,33 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- test unique expression indexes
CREATE TABLE compress_unique(offset_timestamp timestamptz not null, meter_id text, meter_channel_id text, timestamp timestamptz);
SELECT table_name FROM create_hypertable('compress_unique','offset_timestamp');
   table_name    
-----------------
 compress_unique
(1 row)

CREATE UNIQUE INDEX uniq_expr ON compress_unique USING btree (lower((meter_id)::text), meter_channel_id, offset_timestamp, "timestamp");
ALTER TABLE compress_unique SET (timescaledb.compress,timescaledb.compress_segmentby='meter_id,meter_channel_id');
WARNING: column "timestamp" should be used for segmenting or ordering
INSERT INTO compress_unique VALUES ('2000-01-01','m1','c1','2000-01-01');
INSERT INTO compress_unique VALUES ('2000-01-01','m1','c2','2000-01-01');
SELECT compress_chunk(show_chunks('compress_unique')) IS NOT NULL AS compress;
 compress 
----------
 t
(1 row)

-- should fail
\set ON_ERROR_STOP 0
INSERT INTO compress_unique VALUES ('2000-01-01','m1','c2','2000-01-01');
ERROR: duplicate key value violates unique constraint "_hyper_X_X_chunk_uniq_expr"
\set ON_ERROR_STOP 1
SELECT * FROM compress_unique ORDER BY compress_unique;
offset_timestamp | meter_id | meter_channel_id | timestamp
------------------------------+----------+------------------+------------------------------
Sat Jan 01 00:00:00 2000 PST | m1 | c1 | Sat Jan 01 00:00:00 2000 PST
Sat Jan 01 00:00:00 2000 PST | m1 | c2 | Sat Jan 01 00:00:00 2000 PST
(2 rows)

DROP TABLE compress_unique;
1 change: 1 addition & 0 deletions tsl/test/shared/sql/CMakeLists.txt
@@ -2,6 +2,7 @@ set(TEST_FILES_SHARED
     cagg_compression.sql
     classify_relation.sql
     compat.sql
+    compress_unique_index.sql
     constify_timestamptz_op_interval.sql
     constraint_aware_append.sql
     constraint_exclusion_prepared.sql
26 changes: 26 additions & 0 deletions tsl/test/shared/sql/compress_unique_index.sql
@@ -0,0 +1,26 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

-- test unique expression indexes

CREATE TABLE compress_unique(offset_timestamp timestamptz not null, meter_id text, meter_channel_id text, timestamp timestamptz);
SELECT table_name FROM create_hypertable('compress_unique','offset_timestamp');

CREATE UNIQUE INDEX uniq_expr ON compress_unique USING btree (lower((meter_id)::text), meter_channel_id, offset_timestamp, "timestamp");
ALTER TABLE compress_unique SET (timescaledb.compress,timescaledb.compress_segmentby='meter_id,meter_channel_id');

INSERT INTO compress_unique VALUES ('2000-01-01','m1','c1','2000-01-01');
INSERT INTO compress_unique VALUES ('2000-01-01','m1','c2','2000-01-01');

SELECT compress_chunk(show_chunks('compress_unique')) IS NOT NULL AS compress;

-- should fail
\set ON_ERROR_STOP 0
INSERT INTO compress_unique VALUES ('2000-01-01','m1','c2','2000-01-01');
\set ON_ERROR_STOP 1

SELECT * FROM compress_unique ORDER BY compress_unique;

DROP TABLE compress_unique;
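
A note on the WARNING in the expected output: TimescaleDB warns when a unique-index column appears in neither compress_segmentby nor compress_orderby, since keeping unique-index columns in those settings makes uniqueness checks on compressed chunks cheaper. A sketch of how the warning could be avoided when first enabling compression (same table as above; that this exact orderby silences the warning is an assumption, and compression settings typically must be chosen before chunks are compressed):

-- Assumption: adding "timestamp" to compress_orderby addresses the
-- warning; the partition column offset_timestamp is part of the
-- orderby by default.
ALTER TABLE compress_unique
  SET (timescaledb.compress,
       timescaledb.compress_segmentby = 'meter_id,meter_channel_id',
       timescaledb.compress_orderby = 'offset_timestamp,"timestamp"');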
