Skip to content

Commit

Permalink
Updates to work with SingleStore 8.0 and 8.1
Browse files Browse the repository at this point in the history
Summary:
Previously, some tests were failing due to the 'data_conversion_compatibility_level' variable introduced in 8.0.24. In this diff we:
- fixed all failing tests
- changed or removed tests that expected different behavior from SingleStore's AUTO_INCREMENT logic (it only guarantees that auto-generated values are unique)
- made some changes to CI (switched from clusters to workspaces, removed the 'data_conversion_compatibility_level' restriction, dropped testing against SingleStore 7.5 and 7.6)

Test Plan: https://app.circleci.com/pipelines/github/memsql/SingleStoreNETConnector/326/workflows/1732c57c-af85-4673-ae2e-674a4b4d110c

Reviewers: pmishchenko-ua, adrian

Reviewed By: pmishchenko-ua

Subscribers: engineering-list

JIRA Issues: PLAT-6469

Differential Revision: https://grizzly.internal.memcompute.com/D66279
  • Loading branch information
okramarenko committed Jan 22, 2024
1 parent f744d6f commit 6cf6551
Show file tree
Hide file tree
Showing 14 changed files with 145 additions and 290 deletions.
3 changes: 2 additions & 1 deletion .circleci/SideBySide/config.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
{
"Data": {
"ConnectionString": "server=SINGLESTORE_HOST;user id=SQL_USER_NAME;password=SQL_USER_PASSWORD;port=3306;database=singlestoretest",
"UnsupportedFeatures": "CachingSha2Password,Ed25519,QueryAttributes,Tls11,Tls13,UuidToBin,UnixDomainSocket,Sha256Password,GlobalLog"
"UnsupportedFeatures": "CachingSha2Password,Ed25519,QueryAttributes,Tls11,Tls13,UuidToBin,UnixDomainSocket,Sha256Password,GlobalLog",
"ManagedService": true
}
}
21 changes: 6 additions & 15 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@ parameters:
connector-version:
type: string
default: "1.1.4"
dotnet-version:
type: string
default: "7.0.404"

orbs:
win: circleci/[email protected]
Expand Down Expand Up @@ -50,12 +53,12 @@ jobs:
- run:
name: Build project binaries
command: |
choco upgrade dotnet-sdk
choco install dotnet-sdk --version=<< pipeline.parameters.dotnet-version >>
dotnet.exe build -c Release
- run:
name: Start SingleStore for SideBySide tests
command: |
pip install pymysql
pip install singlestoredb
python.exe .circleci\s2ms_cluster.py start singlestoretest
- run:
name: Fill test config
Expand Down Expand Up @@ -121,7 +124,7 @@ jobs:
- run:
name: Build project binaries
command: |
choco upgrade dotnet-sdk
choco install dotnet-sdk --version=<< pipeline.parameters.dotnet-version >>
dotnet.exe build
- run:
name: Creating CI Artifacts directory
Expand Down Expand Up @@ -156,18 +159,6 @@ workflows:
parameters:
singlestore_image:
- singlestore/cluster-in-a-box:alma-7.8.9-e94a66258d-4.0.7-1.13.9
- test-ubuntu:
name: Test 7.6 cluster-in-a-box
matrix:
parameters:
singlestore_image:
- singlestore/cluster-in-a-box:centos-7.6.9-7d7e13942a-4.0.3-1.13.4
- test-ubuntu:
name: Test 7.5 cluster-in-a-box
matrix:
parameters:
singlestore_image:
- singlestore/cluster-in-a-box:centos-7.5.12-3112a491c2-4.0.0-1.12.5
- test-windows:
name: Test S2MS on Windows
publish:
Expand Down
11 changes: 3 additions & 8 deletions .circleci/fill_test_config.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
import json
import os


CLUSTER_ID_FILE = "CLUSTER_ID"
HOSTNAME_TMPL = "svc-{}-ddl.aws-frankfurt-1.svc.singlestore.com"
from s2ms_cluster import WORKSPACE_ENDPOINT_FILE

NET_FRAMEWORKS = ["net462", "net472", "netcoreapp3.1", "net6.0", "net7.0"]

Expand All @@ -14,10 +11,8 @@
if home_dir is None:
home_dir = os.getenv("HOME")

with open(CLUSTER_ID_FILE, "r") as f:
cluster_id = f.read().strip()

hostname = HOSTNAME_TMPL.format(cluster_id)
with open(WORKSPACE_ENDPOINT_FILE, "r") as f:
hostname = f.read()
password = os.getenv("SQL_USER_PASSWORD")

with open("./.circleci/SideBySide/config.json", "r") as f_in:
Expand Down
146 changes: 63 additions & 83 deletions .circleci/s2ms_cluster.py
Original file line number Diff line number Diff line change
@@ -1,112 +1,90 @@
import json
import os
import pymysql
import requests
from requests.adapters import HTTPAdapter
import singlestoredb as s2
import uuid
import sys
from time import sleep
import time
from typing import Dict, Optional
from urllib3 import Retry

BASE_URL = "https://api.singlestore.com"
CLUSTERS_PATH = "/v0beta/clusters"

SQL_USER_PASSWORD = os.getenv("SQL_USER_PASSWORD") # project UI env-var reference
S2MS_API_KEY = os.getenv("S2MS_API_KEY") # project UI env-var reference

HEADERS = {
"Authorization": f"Bearer {S2MS_API_KEY}",
"Content-Type": "application/json",
"Accept": "application/json"
}
WORKSPACE_GROUP_BASE_NAME = ".NET-connector-ci-test-cluster"
WORKSPACE_NAME = "tests"

CLUSTER_NAME = ".NET-connector-ci-test-cluster"
AWS_EU_CENTRAL_REGION = "7e7ffd27-20f7-44b6-87e6-e72828a81ac7"
AUTO_TERMINATE_MINUTES = 60

PAYLOAD_FOR_CREATE = {
"name": CLUSTER_NAME,
"regionID": AWS_EU_CENTRAL_REGION,
"adminPassword": SQL_USER_PASSWORD,
"expiresAt": f"{AUTO_TERMINATE_MINUTES}m",
"firewallRanges": [
"0.0.0.0/0"
],
"size": "S-00"
}
HOSTNAME_TMPL = "svc-{}-ddl.aws-frankfurt-1.svc.singlestore.com"
CLUSTER_ID_FILE = "CLUSTER_ID"
WORKSPACE_ENDPOINT_FILE = "WORKSPACE_ENDPOINT_FILE"
WORKSPACE_GROUP_ID_FILE = "WORKSPACE_GROUP_ID_FILE"

TOTAL_RETRIES = 5
S2MS_REQUEST_TIMEOUT = 60


def request_with_retry(request_method, url, data=None, headers=HEADERS):
try:
with requests.Session() as s:
retries = Retry(
total=TOTAL_RETRIES,
backoff_factor=0.2,
status_forcelist=[500, 502, 503, 504])

s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
def retry(func):
for i in range(TOTAL_RETRIES):
try:
return func()
except Exception as e:
if i == TOTAL_RETRIES - 1:
raise
print(f"Attempt {i+1} failed with error: {e}.")

return s.request(request_method, url, data=data, headers=headers, timeout=S2MS_REQUEST_TIMEOUT)
except requests.exceptions.RequestException as e:
raise SystemExit(e)

def create_workspace(workspace_manager):
for reg in workspace_manager.regions:
if 'US' in reg.name:
region = reg
break

def create_cluster() -> str:
cl_id_response = request_with_retry("POST", BASE_URL + CLUSTERS_PATH, data=json.dumps(PAYLOAD_FOR_CREATE))
cl_id_response.raise_for_status()
return cl_id_response.json()["clusterID"]
w_group_name = WORKSPACE_GROUP_BASE_NAME + "-" + uuid.uuid4().hex
def create_workspace_group():
return workspace_manager.create_workspace_group(
name=w_group_name,
region=region.id,
firewall_ranges=["0.0.0.0/0"],
admin_password=SQL_USER_PASSWORD,
expires_at="60m"
)
workspace_group = retry(create_workspace_group)

with open(WORKSPACE_GROUP_ID_FILE, "w") as f:
f.write(workspace_group.id)
print("Created workspace group {}".format(w_group_name))

def get_cluster_info(cluster_id: str) -> Dict:
cl_id = request_with_retry("GET", BASE_URL + CLUSTERS_PATH + f"/{cluster_id}")
return cl_id.json()
workspace = workspace_group.create_workspace(name=WORKSPACE_NAME, size="S-00", wait_on_active=True, wait_timeout=600)

with open(WORKSPACE_ENDPOINT_FILE, "w") as f:
f.write(workspace.endpoint)

def is_cluster_active(cluster_id: str) -> bool:
cl_info = get_cluster_info(cluster_id)
return cl_info["state"] == "Active"
return workspace


def wait_start(cluster_id: str) -> None:
print(f"Waiting for cluster {cluster_id} to be available for connection..", end="", flush=True)
time_wait = 0
while (not is_cluster_active(cluster_id) and time_wait < 600):
print(".", end="", flush=True)
sleep(5)
time_wait += 5
if time_wait < 600:
print("\nCluster is active!")
else:
print(f"\nTimeout error: can't connect to {cluster_id} for more than 10 minutes!")
def terminate_workspace(workspace_manager) -> None:
with open(WORKSPACE_GROUP_ID_FILE, "r") as f:
workspace_group_id = f.read()
workspace_group = workspace_manager.get_workspace_group(workspace_group_id)

for workspace in workspace_group.workspaces:
workspace.terminate(wait_on_terminated=True)
workspace_group.terminate()

def terminate_cluster(cluster_id: str) -> None:
request_with_retry("DELETE", BASE_URL + CLUSTERS_PATH + f"/{cluster_id}")

def check_and_update_connection(create_db: Optional[str] = None):
with open(WORKSPACE_GROUP_ID_FILE, "r") as f:
workspace_group_id = f.read()
workspace_group = workspace_manager.get_workspace_group(workspace_group_id)
workspace = workspace_group.workspaces[0]

def check_connection(cluster_id: str, create_db: Optional[str] = None):
conn = pymysql.connect(
user="admin",
password=SQL_USER_PASSWORD,
host=HOSTNAME_TMPL.format(cluster_id),
port=3306)
def connect_to_workspace():
return workspace.connect(user="admin", password=SQL_USER_PASSWORD, port=3306)
conn = retry(connect_to_workspace)

cur = conn.cursor()
try:
cur.execute("SELECT NOW():>TEXT")
res = cur.fetchall()
print(f"Successfully connected to {cluster_id} at {res[0][0]}")
print(f"Successfully connected to {workspace.id} at {res[0][0]}")

if create_db is not None:
cur.execute(f"DROP DATABASE IF EXISTS {create_db}")
cur.execute(f"CREATE DATABASE {create_db}")
cur.execute("SET GLOBAL data_conversion_compatibility_level = '6.0'")
finally:
cur.close()
conn.close()
Expand All @@ -121,16 +99,18 @@ def check_connection(cluster_id: str, create_db: Optional[str] = None):
if len(sys.argv) > 2:
db_name = sys.argv[2]

workspace_manager = s2.manage_workspaces(access_token=S2MS_API_KEY)

if command == "start":
new_cl_id = create_cluster()
with open(CLUSTER_ID_FILE, "w") as f:
f.write(new_cl_id)
wait_start(new_cl_id)
check_connection(new_cl_id, db_name)
create_workspace(workspace_manager)
check_and_update_connection(db_name)
exit(0)

if command == "terminate":
with open(CLUSTER_ID_FILE, "r") as f:
cl_id = f.read()
terminate_cluster(cl_id)
terminate_workspace(workspace_manager)
exit(0)

if command == "update":
check_and_update_connection(db_name)
exit(0)

1 change: 0 additions & 1 deletion .circleci/setup_cluster.sh
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ singlestore-wait-start() {
sleep 0.2
done
mysql -u root -h 127.0.0.1 -P 3306 -p"${SQL_USER_PASSWORD}" -e "create database if not exists singlestoretest" >/dev/null 2>/dev/null
mysql -u root -h 127.0.0.1 -P 3306 -p"${SQL_USER_PASSWORD}" -e "set global data_conversion_compatibility_level='6.0'" >/dev/null 2>/dev/null

echo ". Success!"
}
Expand Down
1 change: 1 addition & 0 deletions src/SingleStoreConnector/Core/ServerVersions.cs
Original file line number Diff line number Diff line change
Expand Up @@ -22,4 +22,5 @@ internal static class S2Versions
{
public static readonly Version SupportsUtf8Mb4 = new(7, 5, 0);
public static readonly Version SupportsResetConnection = new(7, 5, 0);
public static readonly Version HasDataConversionCompatibilityLevelParameter = new(8, 0, 0);
}
2 changes: 2 additions & 0 deletions tests/SideBySide/AppConfig.cs
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,8 @@ private static IConfiguration BuildConfiguration()

public static bool SupportsJson => SupportedFeatures.HasFlag(ServerFeatures.Json);

public static bool ManagedService => Config.GetValue<bool>("Data:ManagedService");

public static string SingleStoreBulkLoaderCsvFile => Config.GetValue<string>("Data:SingleStoreBulkLoaderCsvFile");
public static string SingleStoreBulkLoaderLocalCsvFile => Config.GetValue<string>("Data:SingleStoreBulkLoaderLocalCsvFile");
public static string SingleStoreBulkLoaderTsvFile => Config.GetValue<string>("Data:SingleStoreBulkLoaderTsvFile");
Expand Down
17 changes: 14 additions & 3 deletions tests/SideBySide/BulkLoaderAsync.cs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
using SingleStoreConnector.Core;
using Xunit.Sdk;

namespace SideBySide;
Expand Down Expand Up @@ -586,9 +587,19 @@ public async Task BulkCopyDataTableWithWarnings()
{
DestinationTableName = "bulk_load_data_table",
};
var result = await bulkCopy.WriteToServerAsync(dataTable);
Assert.Equal(2, result.RowsInserted);
Assert.Empty(result.Warnings);

// Starting with version 8.0, SingleStore has 'data_conversion_compatibility_level' variable that controls the way
// certain data conversions are performed, so it won't allow the truncation of the data described in this test
if (connection.Session.S2ServerVersion.Version.CompareTo(S2Versions.HasDataConversionCompatibilityLevelParameter) >= 0)
{
await Assert.ThrowsAsync<SingleStoreException>(async () => await bulkCopy.WriteToServerAsync(dataTable));
}
else
{
var result = await bulkCopy.WriteToServerAsync(dataTable);
Assert.Equal(2, result.RowsInserted);
Assert.Empty(result.Warnings);
}

// SingleStore doesn't show warnings on data conversion in LOAD DATA
// Assert.Equal(2, result.Warnings.Count);
Expand Down
17 changes: 14 additions & 3 deletions tests/SideBySide/BulkLoaderSync.cs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
using SingleStoreConnector.Core;
using Xunit.Sdk;

namespace SideBySide;
Expand Down Expand Up @@ -1150,9 +1151,19 @@ public void BulkCopyDataTableWithWarnings()
{
DestinationTableName = "bulk_load_data_table",
};
var result = bulkCopy.WriteToServer(dataTable);
Assert.Equal(2, result.RowsInserted);
Assert.Empty(result.Warnings);

// Starting with version 8.0, SingleStore has 'data_conversion_compatibility_level' variable that controls the way
// certain data conversions are performed, so it won't allow the truncation of the data described in this test
if (connection.Session.S2ServerVersion.Version.CompareTo(S2Versions.HasDataConversionCompatibilityLevelParameter) >= 0)
{
Assert.Throws<SingleStoreException>(() => bulkCopy.WriteToServer(dataTable));
}
else
{
var result = bulkCopy.WriteToServer(dataTable);
Assert.Equal(2, result.RowsInserted);
Assert.Empty(result.Warnings);
}

// SingleStore doesn't show warnings on data conversion in LOAD DATA
// Assert.Equal(2, result.Warnings.Count);
Expand Down
12 changes: 0 additions & 12 deletions tests/SideBySide/DataAdapterTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -81,10 +81,6 @@ public void Fill()
Assert.Single(ds.Tables);
Assert.Equal(3, ds.Tables[0].Rows.Count);

Assert.Equal(1L, ds.Tables[0].Rows[0]["id"]);
Assert.Equal(2L, ds.Tables[0].Rows[1]["id"]);
Assert.Equal(3L, ds.Tables[0].Rows[2]["id"]);

Assert.Equal(DBNull.Value, ds.Tables[0].Rows[0]["int_value"]);
Assert.Equal(0, ds.Tables[0].Rows[1]["int_value"]);
Assert.Equal(1, ds.Tables[0].Rows[2]["int_value"]);
Expand All @@ -105,10 +101,6 @@ public void LoadDataTable()

Assert.Equal(3, dt.Rows.Count);

Assert.Equal(1L, dt.Rows[0]["id"]);
Assert.Equal(2L, dt.Rows[1]["id"]);
Assert.Equal(3L, dt.Rows[2]["id"]);

Assert.Equal(DBNull.Value, dt.Rows[0]["int_value"]);
Assert.Equal(0, dt.Rows[1]["int_value"]);
Assert.Equal(1, dt.Rows[2]["int_value"]);
Expand Down Expand Up @@ -153,16 +145,12 @@ public void InsertWithDataSet()
using var cmd2 = new SingleStoreCommand("SELECT id, int_value, text_value FROM data_adapter ORDER BY id", m_connection);
using var dr2 = cmd2.ExecuteReader();
Assert.True(dr2.Read());
Assert.Equal(1L, dr2[0]);

Assert.True(dr2.Read());
Assert.Equal(2L, dr2[0]);

Assert.True(dr2.Read());
Assert.Equal(3L, dr2[0]);

Assert.True(dr2.Read());
Assert.Equal(4L, dr2[0]);
Assert.Equal(4, dr2[1]);
Assert.Equal("four", dr2[2]);
}
Expand Down
Loading

0 comments on commit 6cf6551

Please sign in to comment.