From 20ef757dae6ee19966eae3ee4b332336891e4bac Mon Sep 17 00:00:00 2001
From: Jiaan Geng
Date: Fri, 24 Dec 2021 16:51:29 +0800
Subject: [PATCH 1/8] [SPARK-37734][BUILD][SQL] Upgrade h2 from 1.4.195 to 2.0.202

---
 sql/core/pom.xml                              |  2 +-
 .../org/apache/spark/sql/jdbc/JDBCSuite.scala | 38 +++++++++++--------
 .../spark/sql/jdbc/JDBCWriteSuite.scala       | 19 ++++++----
 3 files changed, 34 insertions(+), 25 deletions(-)

diff --git a/sql/core/pom.xml b/sql/core/pom.xml
index 15157a1d3546e..92fb4eb17e359 100644
--- a/sql/core/pom.xml
+++ b/sql/core/pom.xml
@@ -153,7 +153,7 @@
     <dependency>
       <groupId>com.h2database</groupId>
       <artifactId>h2</artifactId>
-      <version>1.4.195</version>
+      <version>2.0.202</version>
       <scope>test</scope>
     </dependency>
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 87bdc2e721ad8..091310f02b8ad 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -24,7 +24,8 @@ import java.util.{Calendar, GregorianCalendar, Properties, TimeZone}
 
 import scala.collection.JavaConverters._
 
-import org.h2.jdbc.JdbcSQLException
+import org.h2.api.ErrorCode
+import org.h2.jdbc.JdbcSQLSyntaxErrorException
 import org.mockito.ArgumentMatchers._
 import org.mockito.Mockito._
 import org.scalatest.{BeforeAndAfter, PrivateMethodTester}
@@ -177,14 +178,19 @@ class JDBCSuite extends QueryTest
       """.stripMargin.replaceAll("\n", " "))
 
     conn.prepareStatement("CREATE TABLE test.timezone (tz TIMESTAMP WITH TIME ZONE) " +
-      "AS SELECT '1999-01-08 04:05:06.543543543 GMT-08:00'")
+      "AS SELECT '1999-01-08 04:05:06.123456789-08:00'")
       .executeUpdate()
     conn.commit()
 
-    conn.prepareStatement("CREATE TABLE test.array (ar ARRAY) " +
-      "AS SELECT '(1, 2, 3)'")
-      .executeUpdate()
-    conn.commit()
+    try {
+      conn.prepareStatement("CREATE TABLE test.array (ar ARRAY) " +
+        "AS SELECT '(1, 2, 3)'")
+        .executeUpdate()
+      conn.commit()
+    } catch {
+      case e: JdbcSQLSyntaxErrorException =>
+        assert(e.getMessage.contains(Integer.toString(ErrorCode.SYNTAX_ERROR_2)))
+    }
 
     conn.prepareStatement("create table test.flttypes (a DOUBLE, b REAL, c DECIMAL(38, 18))"
       ).executeUpdate()
@@ -635,10 +641,12 @@ class JDBCSuite extends QueryTest
 
   test("H2 string types") {
     val rows = sql("SELECT * FROM strtypes").collect()
-    assert(rows(0).getAs[Array[Byte]](0).sameElements(testBytes))
+    // TODO [99, -122, -121, -56, -51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+// assert(rows(0).getAs[Array[Byte]](0).sameElements(testBytes))
     assert(rows(0).getString(1).equals("Sensitive"))
     assert(rows(0).getString(2).equals("Insensitive"))
-    assert(rows(0).getString(3).equals("Twenty-byte CHAR"))
+    // TODO "Twenty-byte CHAR "
+// assert(rows(0).getString(3).equals("Twenty-byte CHAR"))
     assert(rows(0).getAs[Array[Byte]](4).sameElements(testBytes))
     assert(rows(0).getString(5).equals("I am a clob!"))
   }
@@ -732,15 +740,13 @@ class JDBCSuite extends QueryTest
   test("Pass extra properties via OPTIONS") {
     // We set rowId to false during setup, which means that _ROWID_ column should be absent from
     // all tables. If rowId is true (default), the query below doesn't throw an exception.
-    intercept[JdbcSQLException] {
-      sql(
-        s"""
-          |CREATE OR REPLACE TEMPORARY VIEW abc
-          |USING org.apache.spark.sql.jdbc
-          |OPTIONS (url '$url', dbtable '(SELECT _ROWID_ FROM test.people)',
-          | user 'testUser', password 'testPass')
+    sql(
+      s"""
+        |CREATE OR REPLACE TEMPORARY VIEW abc
+        |USING org.apache.spark.sql.jdbc
+        |OPTIONS (url '$url', dbtable '(SELECT _ROWID_ FROM test.people)',
+        | user 'testUser', password 'testPass')
       """.stripMargin.replaceAll("\n", " "))
-    }
   }
 
   test("Remap types via JdbcDialects") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
index efa2773bfd692..a2d2887c6a7bb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
@@ -227,7 +227,7 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
     JdbcDialects.registerDialect(testH2Dialect)
     val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
 
-    val m = intercept[org.h2.jdbc.JdbcSQLException] {
+    val m = intercept[org.h2.jdbc.JdbcSQLSyntaxErrorException] {
       df.write.option("createTableOptions", "ENGINE tableEngineName")
         .jdbc(url1, "TEST.CREATETBLOPTS", properties)
     }.getMessage
@@ -326,7 +326,7 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
 
   test("save errors if wrong user/password combination") {
     val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2)
-    val e = intercept[org.h2.jdbc.JdbcSQLException] {
+    val e = intercept[org.h2.jdbc.JdbcSQLInvalidAuthorizationSpecException] {
       df.write.format("jdbc")
         .option("dbtable", "TEST.SAVETEST")
         .option("url", url1)
@@ -427,7 +427,7 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
     // verify the data types of the created table by reading the database catalog of H2
     val query =
       """
-        |(SELECT column_name, type_name, character_maximum_length
+        |(SELECT column_name, data_type, character_maximum_length
         | FROM information_schema.columns WHERE table_name = 'DBCOLTYPETEST')
       """.stripMargin
     val rows = spark.read.jdbc(url1, query, properties).collect()
@@ -436,7 +436,7 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
       val typeName = row.getString(1)
       // For CHAR and VARCHAR, we also compare the max length
       if (typeName.contains("CHAR")) {
-        val charMaxLength = row.getInt(2)
+        val charMaxLength = row.getLong(2)
         assert(expectedTypes(row.getString(0)) == s"$typeName($charMaxLength)")
       } else {
         assert(expectedTypes(row.getString(0)) == typeName)
@@ -452,15 +452,16 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
     val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
 
     // out-of-order
-    val expected1 = Map("id" -> "BIGINT", "first#name" -> "VARCHAR(123)", "city" -> "CHAR(20)")
+    val expected1 = Map("id" -> "BIGINT", "first#name" -> "CHARACTER VARYING(123)", "city" -> "CHARACTER(20)")
     testUserSpecifiedColTypes(df, "`first#name` VARCHAR(123), id BIGINT, city CHAR(20)", expected1)
     // partial schema
-    val expected2 = Map("id" -> "INTEGER", "first#name" -> "VARCHAR(123)", "city" -> "CHAR(20)")
+    val expected2 = Map("id" -> "INTEGER", "first#name" -> "CHARACTER VARYING(123)", "city" -> "CHARACTER(20)")
     testUserSpecifiedColTypes(df, "`first#name` VARCHAR(123), city CHAR(20)", expected2)
 
     withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
       // should still respect the original column names
-      val expected = Map("id" -> "INTEGER", "first#name" -> "VARCHAR(123)", "city" -> "CLOB")
+      val expected = Map("id" -> "INTEGER", "first#name" -> "CHARACTER VARYING(123)",
+        "city" -> "CHARACTER LARGE OBJECT(9223372036854775807)")
       testUserSpecifiedColTypes(df, "`FiRsT#NaMe` VARCHAR(123)", expected)
     }
 
@@ -470,7 +471,9 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
       StructField("First#Name", StringType) ::
       StructField("city", StringType) :: Nil)
     val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
-    val expected = Map("id" -> "INTEGER", "First#Name" -> "VARCHAR(123)", "city" -> "CLOB")
+    val expected =
+      Map("id" -> "INTEGER", "First#Name" -> "CHARACTER VARYING(123)",
+        "city" -> "CHARACTER LARGE OBJECT(9223372036854775807)")
     testUserSpecifiedColTypes(df, "`First#Name` VARCHAR(123)", expected)
   }
 }

From 367988d636b979a1dc159272ac9febf126f88ee7 Mon Sep 17 00:00:00 2001
From: Jiaan Geng
Date: Fri, 24 Dec 2021 17:32:31 +0800
Subject: [PATCH 2/8] Update code

---
 .../scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
index a2d2887c6a7bb..79952e5a6c288 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala
@@ -452,10 +452,12 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter {
     val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
 
     // out-of-order
-    val expected1 = Map("id" -> "BIGINT", "first#name" -> "CHARACTER VARYING(123)", "city" -> "CHARACTER(20)")
+    val expected1 =
+      Map("id" -> "BIGINT", "first#name" -> "CHARACTER VARYING(123)", "city" -> "CHARACTER(20)")
     testUserSpecifiedColTypes(df, "`first#name` VARCHAR(123), id BIGINT, city CHAR(20)", expected1)
     // partial schema
-    val expected2 = Map("id" -> "INTEGER", "first#name" -> "CHARACTER VARYING(123)", "city" -> "CHARACTER(20)")
+    val expected2 =
+      Map("id" -> "INTEGER", "first#name" -> "CHARACTER VARYING(123)", "city" -> "CHARACTER(20)")
     testUserSpecifiedColTypes(df, "`first#name` VARCHAR(123), city CHAR(20)", expected2)
 
     withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {

From d3d96a16d8cf47f8bf9cf948f9262099d69aeea0 Mon Sep 17 00:00:00 2001
From: Jiaan Geng
Date: Fri, 24 Dec 2021 19:07:33 +0800
Subject: [PATCH 3/8] Update code

---
 .../scala/org/apache/spark/sql/jdbc/JDBCSuite.scala | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 091310f02b8ad..c7fdc200fd5d9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -178,7 +178,7 @@ class JDBCSuite extends QueryTest
       """.stripMargin.replaceAll("\n", " "))
 
     conn.prepareStatement("CREATE TABLE test.timezone (tz TIMESTAMP WITH TIME ZONE) " +
-      "AS SELECT '1999-01-08 04:05:06.123456789-08:00'")
+      "AS SELECT '1999-01-08 04:05:06.543543543-08:00'")
       .executeUpdate()
     conn.commit()
 
@@ -670,7 +670,7 @@ class JDBCSuite extends QueryTest
     assert(cal.get(Calendar.MINUTE) === 22)
     assert(cal.get(Calendar.SECOND) === 33)
     assert(cal.get(Calendar.MILLISECOND) === 543)
-    assert(rows(0).getAs[java.sql.Timestamp](2).getNanos === 543543000)
+    assert(rows(0).getAs[java.sql.Timestamp](2).getNanos === 543544000)
   }
 
   test("SPARK-34357: test TIME types") {
@@ -1072,7 +1072,7 @@ class JDBCSuite extends QueryTest
     val rows = jdbcDf.where($"B" > date && $"C" > timestamp).collect()
     assert(rows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
     assert(rows(0).getAs[java.sql.Timestamp](2)
-      === java.sql.Timestamp.valueOf("2002-02-20 11:22:33.543543"))
+      === java.sql.Timestamp.valueOf("2002-02-20 11:22:33.543544"))
   }
 
   test("SPARK-33867: Test DataFrame.where for LocalDate and Instant") {
@@ -1084,7 +1084,7 @@ class JDBCSuite extends QueryTest
       val rows = jdbcDf.where($"B" > date && $"C" > timestamp).collect()
       assert(rows(0).getAs[LocalDate](1) === LocalDate.parse("1996-01-01"))
       // 8 hour difference since saved time was America/Los_Angeles and Instant is GMT
-      assert(rows(0).getAs[Instant](2) === Instant.parse("2002-02-20T19:22:33.543543Z"))
+      assert(rows(0).getAs[Instant](2) === Instant.parse("2002-02-20T19:22:33.543544Z"))
     }
   }
 
@@ -1383,7 +1383,7 @@ class JDBCSuite extends QueryTest
     e = intercept[SQLException] {
       spark.read.jdbc(urlWithUserAndPass, "TEST.ARRAY", new Properties()).collect()
     }.getMessage
-    assert(e.contains("Unsupported type ARRAY"))
+    assert(e.contains(Integer.toString(ErrorCode.SYNTAX_ERROR_2)))
   }
 
   test("SPARK-19318: Connection properties keys should be case-sensitive.") {

From 6526f3e9257b07a296f629b3888032a5beca2722 Mon Sep 17 00:00:00 2001
From: Jiaan Geng
Date: Mon, 27 Dec 2021 16:21:10 +0800
Subject: [PATCH 4/8] Update code

---
 .../org/apache/spark/sql/jdbc/JDBCSuite.scala | 31 +++++++------------
 1 file changed, 12 insertions(+), 19 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index c7fdc200fd5d9..67a540faea3fb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -25,7 +25,6 @@ import java.util.{Calendar, GregorianCalendar, Properties, TimeZone}
 import scala.collection.JavaConverters._
 
 import org.h2.api.ErrorCode
-import org.h2.jdbc.JdbcSQLSyntaxErrorException
 import org.mockito.ArgumentMatchers._
 import org.mockito.Mockito._
 import org.scalatest.{BeforeAndAfter, PrivateMethodTester}
@@ -55,7 +54,8 @@ class JDBCSuite extends QueryTest
   val urlWithUserAndPass = "jdbc:h2:mem:testdb0;user=testUser;password=testPass"
   var conn: java.sql.Connection = null
 
-  val testBytes = Array[Byte](99.toByte, 134.toByte, 135.toByte, 200.toByte, 205.toByte)
+  val testBytes = Array[Byte](99.toByte, 134.toByte, 135.toByte, 200.toByte, 205.toByte) ++
+    Array.fill(15)(0.toByte)
 
   val testH2Dialect = new JdbcDialect {
     override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2")
@@ -163,7 +163,7 @@ class JDBCSuite extends QueryTest
         |OPTIONS (url '$url', dbtable 'TEST.STRTYPES', user 'testUser', password 'testPass')
       """.stripMargin.replaceAll("\n", " "))
 
-    conn.prepareStatement("create table test.timetypes (a TIME, b DATE, c TIMESTAMP)"
+    conn.prepareStatement("create table test.timetypes (a TIME, b DATE, c TIMESTAMP(7))"
       ).executeUpdate()
     conn.prepareStatement("insert into test.timetypes values ('12:34:56', " +
       "'1996-01-01', '2002-02-20 11:22:33.543543543')").executeUpdate()
@@ -182,15 +182,10 @@ class JDBCSuite extends QueryTest
       .executeUpdate()
     conn.commit()
 
-    try {
-      conn.prepareStatement("CREATE TABLE test.array (ar ARRAY) " +
-        "AS SELECT '(1, 2, 3)'")
-        .executeUpdate()
-      conn.commit()
-    } catch {
-      case e: JdbcSQLSyntaxErrorException =>
-        assert(e.getMessage.contains(Integer.toString(ErrorCode.SYNTAX_ERROR_2)))
-    }
+    conn.prepareStatement("CREATE TABLE test.array_table (ar INTEGER ARRAY) " +
+      "AS SELECT ARRAY[1, 2, 3]")
+      .executeUpdate()
+    conn.commit()
 
     conn.prepareStatement("create table test.flttypes (a DOUBLE, b REAL, c DECIMAL(38, 18))"
       ).executeUpdate()
@@ -641,12 +636,10 @@ class JDBCSuite extends QueryTest
 
   test("H2 string types") {
     val rows = sql("SELECT * FROM strtypes").collect()
-    // TODO [99, -122, -121, -56, -51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
-// assert(rows(0).getAs[Array[Byte]](0).sameElements(testBytes))
+    assert(rows(0).getAs[Array[Byte]](0).sameElements(testBytes))
     assert(rows(0).getString(1).equals("Sensitive"))
     assert(rows(0).getString(2).equals("Insensitive"))
-    // TODO "Twenty-byte CHAR "
-// assert(rows(0).getString(3).equals("Twenty-byte CHAR"))
+    assert(rows(0).getString(3).equals("Twenty-byte CHAR "))
     assert(rows(0).getAs[Array[Byte]](4).sameElements(testBytes))
     assert(rows(0).getString(5).equals("I am a clob!"))
   }
@@ -670,7 +663,7 @@ class JDBCSuite extends QueryTest
     assert(cal.get(Calendar.MINUTE) === 22)
     assert(cal.get(Calendar.SECOND) === 33)
     assert(cal.get(Calendar.MILLISECOND) === 543)
-    assert(rows(0).getAs[java.sql.Timestamp](2).getNanos === 543544000)
+    assert(rows(0).getAs[java.sql.Timestamp](2).getNanos === 543543000)
   }
 
   test("SPARK-34357: test TIME types") {
@@ -1072,7 +1065,7 @@ class JDBCSuite extends QueryTest
     val rows = jdbcDf.where($"B" > date && $"C" > timestamp).collect()
     assert(rows(0).getAs[java.sql.Date](1) === java.sql.Date.valueOf("1996-01-01"))
     assert(rows(0).getAs[java.sql.Timestamp](2)
-      === java.sql.Timestamp.valueOf("2002-02-20 11:22:33.543544"))
+      === java.sql.Timestamp.valueOf("2002-02-20 11:22:33.543543"))
   }
 
   test("SPARK-33867: Test DataFrame.where for LocalDate and Instant") {
@@ -1084,7 +1077,7 @@ class JDBCSuite extends QueryTest
       val rows = jdbcDf.where($"B" > date && $"C" > timestamp).collect()
      assert(rows(0).getAs[LocalDate](1) === LocalDate.parse("1996-01-01"))
       // 8 hour difference since saved time was America/Los_Angeles and Instant is GMT
-      assert(rows(0).getAs[Instant](2) === Instant.parse("2002-02-20T19:22:33.543544Z"))
+      assert(rows(0).getAs[Instant](2) === Instant.parse("2002-02-20T19:22:33.543543Z"))
     }
   }
 

From 15292c7c3fc25721fd56da9d4f593ac057b7b67c Mon Sep 17 00:00:00 2001
From: Jiaan Geng
Date: Mon, 27 Dec 2021 16:41:59 +0800
Subject: [PATCH 5/8] Update code

---
 .../test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 67a540faea3fb..ec9a414f187e7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -24,7 +24,6 @@ import java.util.{Calendar, GregorianCalendar, Properties, TimeZone}
 
 import scala.collection.JavaConverters._
 
-import org.h2.api.ErrorCode
 import org.mockito.ArgumentMatchers._
 import org.mockito.Mockito._
 import org.scalatest.{BeforeAndAfter, PrivateMethodTester}
@@ -182,7 +181,7 @@ class JDBCSuite extends QueryTest
       .executeUpdate()
     conn.commit()
 
-    conn.prepareStatement("CREATE TABLE test.array_table (ar INTEGER ARRAY) " +
+    conn.prepareStatement("CREATE TABLE test.array_table (ar Integer ARRAY) " +
       "AS SELECT ARRAY[1, 2, 3]")
       .executeUpdate()
     conn.commit()
@@ -1374,9 +1373,9 @@ class JDBCSuite extends QueryTest
     }.getMessage
     assert(e.contains("Unsupported type TIMESTAMP_WITH_TIMEZONE"))
     e = intercept[SQLException] {
-      spark.read.jdbc(urlWithUserAndPass, "TEST.ARRAY", new Properties()).collect()
+      spark.read.jdbc(urlWithUserAndPass, "TEST.ARRAY_TABLE", new Properties()).collect()
     }.getMessage
-    assert(e.contains(Integer.toString(ErrorCode.SYNTAX_ERROR_2)))
+    assert(e.contains("Unsupported type ARRAY"))
   }
 
   test("SPARK-19318: Connection properties keys should be case-sensitive.") {

From b5fefa09d37e57bc0004681efa1bb7e4c7cd09ed Mon Sep 17 00:00:00 2001
From: Jiaan Geng
Date: Wed, 29 Dec 2021 14:04:11 +0800
Subject: [PATCH 6/8] Update code

---
 .../scala/org/apache/spark/sql/jdbc/JDBCSuite.scala | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index ec9a414f187e7..f4b18f1adfdec 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -87,7 +87,6 @@ class JDBCSuite extends QueryTest
     val properties = new Properties()
     properties.setProperty("user", "testUser")
     properties.setProperty("password", "testPass")
-    properties.setProperty("rowId", "false")
 
     conn = DriverManager.getConnection(url, properties)
     conn.prepareStatement("create schema test").executeUpdate()
@@ -729,18 +728,6 @@ class JDBCSuite extends QueryTest
     assert(math.abs(rows(0).getDouble(1) - 1.00000023841859331) < 1e-12)
   }
 
-  test("Pass extra properties via OPTIONS") {
-    // We set rowId to false during setup, which means that _ROWID_ column should be absent from
-    // all tables. If rowId is true (default), the query below doesn't throw an exception.
-    sql(
-      s"""
-        |CREATE OR REPLACE TEMPORARY VIEW abc
-        |USING org.apache.spark.sql.jdbc
-        |OPTIONS (url '$url', dbtable '(SELECT _ROWID_ FROM test.people)',
-        | user 'testUser', password 'testPass')
-      """.stripMargin.replaceAll("\n", " "))
-  }
-
   test("Remap types via JdbcDialects") {
     JdbcDialects.registerDialect(testH2Dialect)
     val df = spark.read.jdbc(urlWithUserAndPass, "TEST.PEOPLE", new Properties())

From 9ebfaa41525b8bed271f8f6bb2f9edbd60be0be5 Mon Sep 17 00:00:00 2001
From: Jiaan Geng
Date: Fri, 31 Dec 2021 09:18:17 +0800
Subject: [PATCH 7/8] Update code

---
 sql/core/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/core/pom.xml b/sql/core/pom.xml
index 92fb4eb17e359..7842ab36bb1b7 100644
--- a/sql/core/pom.xml
+++ b/sql/core/pom.xml
@@ -153,7 +153,7 @@
     <dependency>
       <groupId>com.h2database</groupId>
      <artifactId>h2</artifactId>
-      <version>2.0.202</version>
+      <version>2.0.204</version>
       <scope>test</scope>
     </dependency>

From e1c6e34d85056ec5199815e13784fc2bce2ce449 Mon Sep 17 00:00:00 2001
From: Jiaan Geng
Date: Thu, 6 Jan 2022 19:07:17 +0800
Subject: [PATCH 8/8] Update code

---
 .../src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
index 9d37a85a2c916..4a935ee9870b1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
@@ -421,7 +421,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
         "PushedGroupByColumns: []"
       checkKeywordsExistsInExplain(df, expected_plan_fragment)
     }
-    checkAnswer(df, Seq(Row(2, 1.0)))
+    checkAnswer(df, Seq(Row(2, 1.5)))
   }
 
   test("partitioned scan with aggregate push-down: complete push-down only") {
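
Note (illustrative, not part of the patch series): the test changes above all trace back to behavior differences between H2 1.4.x and 2.x that the upgrade surfaces — the single JdbcSQLException is split into per-state subclasses such as JdbcSQLSyntaxErrorException and JdbcSQLInvalidAuthorizationSpecException, INFORMATION_SCHEMA.COLUMNS reports DATA_TYPE with standard names like CHARACTER VARYING and a long-valued CHARACTER_MAXIMUM_LENGTH, and CHAR/BINARY values come back padded to their declared length (hence the padded expected string and the 15 extra zero bytes in testBytes). A minimal standalone Scala sketch of the padding point, assuming only the H2 2.x driver on the classpath; the table, column, and object names here are invented for illustration:

import java.sql.DriverManager

object H2PaddingSketch {
  def main(args: Array[String]): Unit = {
    // In-memory H2 2.x database; URL and credentials are illustrative only.
    val conn = DriverManager.getConnection("jdbc:h2:mem:sketch;user=testUser;password=testPass")
    try {
      val stmt = conn.createStatement()
      stmt.executeUpdate("CREATE TABLE padded (c CHAR(20), b BINARY(20))")
      stmt.executeUpdate("INSERT INTO padded VALUES ('Twenty-byte CHAR', X'6386')")
      val rs = stmt.executeQuery("SELECT c, b FROM padded")
      rs.next()
      // H2 2.x pads CHAR(20) with spaces and BINARY(20) with trailing zero bytes,
      // so both values come back with length 20 rather than their inserted length.
      println(rs.getString(1).length) // expected: 20
      println(rs.getBytes(2).length)  // expected: 20
    } finally {
      conn.close()
    }
  }
}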