From 54012bff3a7fdf8d766f85769ed8dd13401ca9ae Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 09:16:24 -0700
Subject: [PATCH 01/11] Enable lint checking for files in db_engine_specs that
 have few to no lint issues

---
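Note: the header removed throughout this patch, # pylint: disable=C,R,W,
suppressed every convention (C), refactoring (R), and warning (W) message for
the whole module. The series replaces that blanket opt-out with narrow,
per-line pragmas naming the one check that actually fires. A sketch of the
before/after, drawn from the clickhouse change below:

    from superset.db_engine_specs.base import BaseEngineSpec

    # before: three whole message categories silenced module-wide
    # pylint: disable=C,R,W

    # after: one named check, silenced only on the line that needs it
    class ClickHouseEngineSpec(BaseEngineSpec):  # pylint: disable=abstract-method
        """Dialect for ClickHouse analytical DB."""

The abstract-method disables are needed because these specs intentionally
leave some BaseEngineSpec hooks unimplemented.
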
 superset/db_engine_specs/athena.py     | 1 -
 superset/db_engine_specs/clickhouse.py | 3 +--
 superset/db_engine_specs/db2.py        | 2 +-
 superset/db_engine_specs/drill.py      | 1 -
 superset/db_engine_specs/druid.py      | 1 -
 superset/db_engine_specs/gsheets.py    | 1 -
 superset/db_engine_specs/impala.py     | 1 -
 superset/db_engine_specs/kylin.py      | 3 +--
 superset/db_engine_specs/mssql.py      | 1 -
 superset/db_engine_specs/oracle.py     | 1 -
 superset/db_engine_specs/postgres.py   | 3 +--
 superset/db_engine_specs/redshift.py   | 1 -
 superset/db_engine_specs/snowflake.py  | 1 -
 superset/db_engine_specs/sqlite.py     | 3 +--
 superset/db_engine_specs/teradata.py   | 1 -
 superset/db_engine_specs/vertica.py    | 1 -
 16 files changed, 5 insertions(+), 20 deletions(-)

diff --git a/superset/db_engine_specs/athena.py b/superset/db_engine_specs/athena.py
index e516664b76a4c..8213bdb43acde 100644
--- a/superset/db_engine_specs/athena.py
+++ b/superset/db_engine_specs/athena.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 
 from superset.db_engine_specs.base import BaseEngineSpec
diff --git a/superset/db_engine_specs/clickhouse.py b/superset/db_engine_specs/clickhouse.py
index e72f8752ea7b0..e5bfdbf609aaf 100644
--- a/superset/db_engine_specs/clickhouse.py
+++ b/superset/db_engine_specs/clickhouse.py
@@ -14,13 +14,12 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 
 from superset.db_engine_specs.base import BaseEngineSpec
 
 
-class ClickHouseEngineSpec(BaseEngineSpec):
+class ClickHouseEngineSpec(BaseEngineSpec):  # pylint: disable=abstract-method
     """Dialect for ClickHouse analytical DB."""
 
     engine = "clickhouse"
diff --git a/superset/db_engine_specs/db2.py b/superset/db_engine_specs/db2.py
index 93cb76f71bb20..4ec21bc89637f 100644
--- a/superset/db_engine_specs/db2.py
+++ b/superset/db_engine_specs/db2.py
@@ -1,3 +1,4 @@
+
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -14,7 +15,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 
 from superset.db_engine_specs.base import BaseEngineSpec, LimitMethod
diff --git a/superset/db_engine_specs/drill.py b/superset/db_engine_specs/drill.py
index 9ebe877030f67..ce3e57302b24d 100644
--- a/superset/db_engine_specs/drill.py
+++ b/superset/db_engine_specs/drill.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 from urllib import parse
 
diff --git a/superset/db_engine_specs/druid.py b/superset/db_engine_specs/druid.py
index 78a2a64610fc2..273f19426e521 100644
--- a/superset/db_engine_specs/druid.py
+++ b/superset/db_engine_specs/druid.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from superset.db_engine_specs.base import BaseEngineSpec
 
 
diff --git a/superset/db_engine_specs/gsheets.py b/superset/db_engine_specs/gsheets.py
index d7b3bc7a1aa0b..698728e098ffe 100644
--- a/superset/db_engine_specs/gsheets.py
+++ b/superset/db_engine_specs/gsheets.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from superset.db_engine_specs.sqlite import SqliteEngineSpec
 
 
diff --git a/superset/db_engine_specs/impala.py b/superset/db_engine_specs/impala.py
index 4feb3fc0dc51c..6fba983928b43 100644
--- a/superset/db_engine_specs/impala.py
+++ b/superset/db_engine_specs/impala.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 from typing import List
 
diff --git a/superset/db_engine_specs/kylin.py b/superset/db_engine_specs/kylin.py
index edde71dedc68f..acea219185029 100644
--- a/superset/db_engine_specs/kylin.py
+++ b/superset/db_engine_specs/kylin.py
@@ -14,13 +14,12 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 
 from superset.db_engine_specs.base import BaseEngineSpec
 
 
-class KylinEngineSpec(BaseEngineSpec):
+class KylinEngineSpec(BaseEngineSpec):  # pylint: disable=abstract-method
     """Dialect for Apache Kylin"""
 
     engine = "kylin"
diff --git a/superset/db_engine_specs/mssql.py b/superset/db_engine_specs/mssql.py
index 4e5a4fe8987d7..1a6aea8349a97 100644
--- a/superset/db_engine_specs/mssql.py
+++ b/superset/db_engine_specs/mssql.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 import re
 from typing import List, Optional, Tuple
diff --git a/superset/db_engine_specs/oracle.py b/superset/db_engine_specs/oracle.py
index 3b42f9d4364e6..b200e45db923c 100644
--- a/superset/db_engine_specs/oracle.py
+++ b/superset/db_engine_specs/oracle.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 
 from superset.db_engine_specs.base import LimitMethod
diff --git a/superset/db_engine_specs/postgres.py b/superset/db_engine_specs/postgres.py
index 5b8988021d4a0..bda62f39a9445 100644
--- a/superset/db_engine_specs/postgres.py
+++ b/superset/db_engine_specs/postgres.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 from typing import List, Optional, Tuple, TYPE_CHECKING
 
@@ -24,7 +23,7 @@
 
 if TYPE_CHECKING:
     # prevent circular imports
-    from superset.models.core import Database
+    from superset.models.core import Database  # pylint: disable=unused-import
 
 
 class PostgresBaseEngineSpec(BaseEngineSpec):
diff --git a/superset/db_engine_specs/redshift.py b/superset/db_engine_specs/redshift.py
index af4dc5416f814..5c9f4bc467622 100644
--- a/superset/db_engine_specs/redshift.py
+++ b/superset/db_engine_specs/redshift.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from superset.db_engine_specs.postgres import PostgresBaseEngineSpec
 
 
diff --git a/superset/db_engine_specs/snowflake.py b/superset/db_engine_specs/snowflake.py
index 8a1edc7744e05..4d691241dfc23 100644
--- a/superset/db_engine_specs/snowflake.py
+++ b/superset/db_engine_specs/snowflake.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from urllib import parse
 
 from superset.db_engine_specs.postgres import PostgresBaseEngineSpec
diff --git a/superset/db_engine_specs/sqlite.py b/superset/db_engine_specs/sqlite.py
index ff7074b340975..cd7e85db10918 100644
--- a/superset/db_engine_specs/sqlite.py
+++ b/superset/db_engine_specs/sqlite.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 from typing import List, TYPE_CHECKING
 
@@ -25,7 +24,7 @@
 
 if TYPE_CHECKING:
     # prevent circular imports
-    from superset.models.core import Database
+    from superset.models.core import Database  # pylint: disable=unused-import
 
 
 class SqliteEngineSpec(BaseEngineSpec):
diff --git a/superset/db_engine_specs/teradata.py b/superset/db_engine_specs/teradata.py
index fc6304908af40..bbc8475feda2d 100644
--- a/superset/db_engine_specs/teradata.py
+++ b/superset/db_engine_specs/teradata.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from superset.db_engine_specs.base import BaseEngineSpec, LimitMethod
 
 
diff --git a/superset/db_engine_specs/vertica.py b/superset/db_engine_specs/vertica.py
index c3f7b8b0753bb..e5f8901220984 100644
--- a/superset/db_engine_specs/vertica.py
+++ b/superset/db_engine_specs/vertica.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from superset.db_engine_specs.postgres import PostgresBaseEngineSpec
 
 

From 61122cf95cb76629113f1fa96217145c2455bce6 Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 09:17:26 -0700
Subject: [PATCH 02/11] Enable lint and fix issues in db_engine_specs/mysql.py

---
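Note: two checks drive this patch. len-as-condition flags len(x) used as a
boolean, since an empty string is already falsy; and broad-except flags the
bare `except Exception`, which stays (annotated) because error-message
extraction here is deliberately best-effort. A minimal sketch of the
truthiness equivalence:

    for datatype in ("", "VARCHAR"):
        assert bool(len(datatype)) == bool(datatype)
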
 superset/db_engine_specs/mysql.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/superset/db_engine_specs/mysql.py b/superset/db_engine_specs/mysql.py
index 467800bceb89c..361f4ecca8442 100644
--- a/superset/db_engine_specs/mysql.py
+++ b/superset/db_engine_specs/mysql.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 from typing import Any, Dict, Optional
 from urllib import parse
@@ -77,7 +76,7 @@ def get_datatype(cls, type_code: Any) -> Optional[str]:
         datatype = type_code
         if isinstance(type_code, int):
             datatype = cls.type_code_map.get(type_code)
-        if datatype and isinstance(datatype, str) and len(datatype):
+        if isinstance(datatype, str) and datatype:
             return datatype
         return None
 
@@ -92,7 +91,7 @@ def _extract_error_message(cls, e):
         try:
             if isinstance(e.args, tuple) and len(e.args) > 1:
                 message = e.args[1]
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             pass
         return message
 

From 6d69c81c0823001bd683ba22731d56b69e0d594e Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 09:18:24 -0700
Subject: [PATCH 03/11] Enable pylint and fix lint issues for db_engine_specs/pinot.py

---
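Note: besides dropping the blanket disable, the single-letter loop variable
is renamed to satisfy pylint's minimum name length. The loop itself relies
on Python's for/else, where the else branch runs only if the inner loop
finished without hitting break. A self-contained sketch of the
identity-based filtering pattern (stand-in objects, not real SQLAlchemy
expressions):

    a, b, c = object(), object(), object()
    select_exprs, groupby_exprs = [a, b, c], [b]
    kept = []
    for sel in select_exprs:
        for gr in groupby_exprs:
            if sel is gr:  # identity, not equality
                break
        else:  # inner loop did not break: keep the expression
            kept.append(sel)
    assert kept == [a, c]
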
 superset/db_engine_specs/pinot.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/superset/db_engine_specs/pinot.py b/superset/db_engine_specs/pinot.py
index af3fc2b255c35..ccd0200f80376 100644
--- a/superset/db_engine_specs/pinot.py
+++ b/superset/db_engine_specs/pinot.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from typing import Dict, List, Optional
 
 from sqlalchemy.sql.expression import ColumnClause, ColumnElement
@@ -22,7 +21,7 @@
 from superset.db_engine_specs.base import BaseEngineSpec, TimestampExpression
 
 
-class PinotEngineSpec(BaseEngineSpec):
+class PinotEngineSpec(BaseEngineSpec):  # pylint: disable=abstract-method
     engine = "pinot"
     allows_subqueries = False
     allows_joins = False
@@ -66,10 +65,10 @@ def make_select_compatible(
         # Pinot does not want the group by expr's to appear in the select clause
         select_sans_groupby = []
         # We want identity and not equality, so doing the filtering manually
-        for s in select_exprs:
+        for sel in select_exprs:
             for gr in groupby_exprs:
-                if s is gr:
+                if sel is gr:
                     break
             else:
-                select_sans_groupby.append(s)
+                select_sans_groupby.append(sel)
         return select_sans_groupby

From 398317540fe9a70d11f9e7c60dfb206ccee57f39 Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 09:19:17 -0700
Subject: [PATCH 04/11] Enable lint and fix issues for db_engine_specs/hive.py

---
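Note: the logging-format-interpolation disables keep the existing .format()
calls; pylint prefers lazy %-style arguments so the message string is only
built if the record is actually emitted. A rough equivalent of the flagged
call, with stand-in values:

    import logging
    stages, current_job, total_jobs = {1: 50}, 1, 2
    logging.info(
        "Progress detail: %s, current job %s, total jobs: %s",
        stages, current_job, total_jobs,
    )

The get_tracking_url change addresses inconsistent-return-statements: the
method now returns None explicitly when no tracking URL is found in the logs.
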
 superset/db_engine_specs/hive.py | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/superset/db_engine_specs/hive.py b/superset/db_engine_specs/hive.py
index 0b765a9b4af34..130092cb3d982 100644
--- a/superset/db_engine_specs/hive.py
+++ b/superset/db_engine_specs/hive.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from datetime import datetime
 import logging
 import os
@@ -99,7 +98,7 @@ def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
             return []
 
     @classmethod
-    def create_table_from_csv(cls, form, table):
+    def create_table_from_csv(cls, form, table):  # pylint: disable=too-many-locals
         """Uploads a csv file and creates a superset datasource in Hive."""
 
         def convert_to_hive_type(col_type):
@@ -223,7 +222,7 @@ def progress(cls, log_lines):
                 reduce_progress = int(match.groupdict()["reduce_progress"])
                 stages[stage_number] = (map_progress + reduce_progress) / 2
         logging.info(
-            "Progress detail: {}, "
+            "Progress detail: {}, "  # pylint: disable=logging-format-interpolation
             "current job {}, "
             "total jobs: {}".format(stages, current_job, total_jobs)
         )
@@ -239,9 +238,10 @@ def get_tracking_url(cls, log_lines):
         for line in log_lines:
             if lkp in line:
                 return line.split(lkp)[1]
+        return None
 
     @classmethod
-    def handle_cursor(cls, cursor, query, session):
+    def handle_cursor(cls, cursor, query, session):  # pylint: disable=too-many-locals
         """Updates progress information"""
         from pyhive import hive  # pylint: disable=no-name-in-module
 
@@ -302,33 +302,33 @@ def get_columns(
         return inspector.get_columns(table_name, schema)
 
     @classmethod
-    def where_latest_partition(
+    def where_latest_partition(  # pylint: disable=too-many-arguments
         cls,
         table_name: str,
         schema: Optional[str],
         database,
-        qry: Select,
+        query: Select,
         columns: Optional[List] = None,
     ) -> Optional[Select]:
         try:
             col_names, values = cls.latest_partition(
                 table_name, schema, database, show_first=True
             )
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             # table is not partitioned
             return None
         if values is not None and columns is not None:
             for col_name, value in zip(col_names, values):
-                for c in columns:
-                    if c.get("name") == col_name:
-                        qry = qry.where(Column(col_name) == value)
+                for clm in columns:
+                    if clm.get("name") == col_name:
+                        query = query.where(Column(col_name) == value)
 
-            return qry
+            return query
         return None
 
     @classmethod
     def _get_fields(cls, cols: List[dict]) -> List[ColumnClause]:
-        return BaseEngineSpec._get_fields(cols)
+        return BaseEngineSpec._get_fields(cols)  # pylint: disable=protected-access
 
     @classmethod
     def latest_sub_partition(cls, table_name, schema, database, **kwargs):
@@ -343,11 +343,13 @@ def _latest_partition_from_df(cls, df) -> Optional[List[str]]:
         return None
 
     @classmethod
-    def _partition_query(cls, table_name, limit=0, order_by=None, filters=None):
+    def _partition_query(  # pylint: disable=too-many-arguments
+        cls, table_name, database, limit=0, order_by=None, filters=None
+        ):
         return f"SHOW PARTITIONS {table_name}"
 
     @classmethod
-    def select_star(
+    def select_star(  # pylint: disable=too-many-arguments
         cls,
         database,
         table_name: str,
@@ -413,6 +415,6 @@ def get_configuration_for_impersonation(
         return configuration
 
     @staticmethod
-    def execute(cursor, query: str, async_: bool = False):
+    def execute(cursor, query: str, async_: bool = False):  # pylint: disable=arguments-differ
         kwargs = {"async": async_}
         cursor.execute(query, **kwargs)

From ea4fe10f1ab6fbc418c09b05d303261f018c5327 Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 09:20:19 -0700
Subject: [PATCH 05/11] Enable lint and fix issues for db_engine_specs/presto.py

---
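Note: most renames here are lint-driven. UPPER_CASE is reserved for
module-level constants, so the local SQL_QUERY_MUTATOR becomes
sql_query_mutator; the longest private helpers are abbreviated
(_split_array_columns_by_process_state -> _split_ary_cols_by_proc_state,
_convert_data_list_to_array_data_dict -> _convert_data_lst_to_ary_dict) to
keep call sites inside the line limit; and the apparently unused
_consolidate_array_data_into_data helper is deleted outright. The regex
change is a real fix: without the r prefix, \w in a plain string literal is
an invalid escape sequence. A quick check of the corrected pattern:

    import re
    pattern = re.compile(r"(?P<type>\w+)\((?P<children>.*)\)")
    match = pattern.match("array(varchar)")
    assert match and match.group("type") == "array"
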
 superset/db_engine_specs/presto.py | 117 ++++++++++-------------------
 1 file changed, 39 insertions(+), 78 deletions(-)

diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py
index f79f5d205783d..636a2a9f540fd 100644
--- a/superset/db_engine_specs/presto.py
+++ b/superset/db_engine_specs/presto.py
@@ -14,7 +14,6 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
 from collections import defaultdict, deque, OrderedDict
 from contextlib import closing
 from datetime import datetime
@@ -42,7 +41,7 @@
 
 if TYPE_CHECKING:
     # prevent circular imports
-    from superset.models.core import Database
+    from superset.models.core import Database  # pylint: disable=unused-import
 
 QueryStatus = utils.QueryStatus
 config = app.config
@@ -80,7 +79,7 @@ def get_children(column: Dict[str, str]) -> List[Dict[str, str]]:
     :param column: dictionary representing a Presto column
     :return: list of dictionaries representing children columns
     """
-    pattern = re.compile("(?P<type>\w+)\((?P<children>.*)\)")
+    pattern = re.compile(r"(?P<type>\w+)\((?P<children>.*)\)")
     match = pattern.match(column["type"])
     if not match:
         raise Exception(f"Unable to parse column type {column['type']}")
@@ -157,7 +156,8 @@ def get_view_names(
             return []
 
         if schema:
-            sql = "SELECT table_name FROM information_schema.views WHERE table_schema=%(schema)s"
+            sql = "SELECT table_name FROM information_schema.views" \
+                "WHERE table_schema=%(schema)s"
             params = {"schema": schema}
         else:
             sql = "SELECT table_name FROM information_schema.views"
@@ -220,7 +220,7 @@ def _split_data_type(cls, data_type: str, delimiter: str) -> List[str]:
         )
 
     @classmethod
-    def _parse_structural_column(
+    def _parse_structural_column(  # pylint: disable=too-many-locals,too-many-branches
         cls, parent_column_name: str, parent_data_type: str, result: List[dict]
     ) -> None:
         """
@@ -243,7 +243,7 @@ def _parse_structural_column(
             inner_types = cls._split_data_type(data_type, r"\)")
             for inner_type in inner_types:
                 # We have finished parsing multiple structural data types
-                if not inner_type and len(stack) > 0:
+                if not inner_type and stack:
                     stack.pop()
                 elif cls._has_nested_data_types(inner_type):
                     # split on comma , to get individual data types
@@ -283,11 +283,11 @@ def _parse_structural_column(
                     if not (inner_type.endswith("array") or inner_type.endswith("row")):
                         stack.pop()
                 # We have an array of row objects (i.e. array(row(...)))
-                elif "array" == inner_type or "row" == inner_type:
+                elif inner_type == "array" or inner_type == "row":
                     # Push a dummy object to represent the structural data type
                     stack.append(("", inner_type))
                 # We have an array of a basic data types(i.e. array(varchar)).
-                elif len(stack) > 0:
+                elif stack:
                     # Because it is an array of a basic data type. We have finished
                     # parsing the structural data type and can move on.
                     stack.pop()
@@ -348,7 +348,7 @@ def get_columns(
                     column_type = presto_type_map[column.Type]()
             except KeyError:
                 logging.info(
-                    "Did not recognize type {} of column {}".format(
+                    "Did not recognize type {} of column {}".format(  # pylint: disable=logging-format-interpolation
                         column.Type, column.Column
                     )
                 )
@@ -439,7 +439,7 @@ def _filter_out_array_nested_cols(
         return filtered_cols, array_cols
 
     @classmethod
-    def select_star(
+    def select_star( # pylint: disable=too-many-arguments
         cls,
         database,
         table_name: str,
@@ -476,7 +476,7 @@ def select_star(
         )
 
     @classmethod
-    def estimate_statement_cost(
+    def estimate_statement_cost(  # pylint: disable=too-many-locals
         cls, statement: str, database, cursor, user_name: str
     ) -> Dict[str, str]:
         """
@@ -490,9 +490,9 @@ def estimate_statement_cost(
         parsed_query = ParsedQuery(statement)
         sql = parsed_query.stripped()
 
-        SQL_QUERY_MUTATOR = config.get("SQL_QUERY_MUTATOR")
-        if SQL_QUERY_MUTATOR:
-            sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)
+        sql_query_mutator = config.get("SQL_QUERY_MUTATOR")
+        if sql_query_mutator:
+            sql = sql_query_mutator(sql, user_name, security_manager, database)
 
         sql = f"EXPLAIN (TYPE IO, FORMAT JSON) {sql}"
         cursor.execute(sql)
@@ -579,7 +579,7 @@ def get_all_datasource_names(
             None,
         )
         datasource_names: List[utils.DatasourceName] = []
-        for unused, row in datasource_df.iterrows():
+        for _unused, row in datasource_df.iterrows():
             datasource_names.append(
                 utils.DatasourceName(
                     schema=row["table_schema"], table=row["table_name"]
@@ -599,7 +599,7 @@ def _build_column_hierarchy(
                be root nodes
         :param column_hierarchy: dictionary representing the graph
         """
-        if len(columns) == 0:
+        if not columns:
             return
         root = columns.pop(0)
         root_info = {"type": root["type"], "children": []}
@@ -697,7 +697,7 @@ def _expand_row_data(cls, datum: dict, column: str, column_hierarchy: dict) -> N
                     datum[row_child] = ""
 
     @classmethod
-    def _split_array_columns_by_process_state(
+    def _split_ary_cols_by_proc_state(
         cls, array_columns: List[str], array_column_hierarchy: dict, datum: dict
     ) -> Tuple[List[str], Set[str]]:
         """
@@ -727,7 +727,7 @@ def _split_array_columns_by_process_state(
         return array_columns_to_process, unprocessed_array_columns
 
     @classmethod
-    def _convert_data_list_to_array_data_dict(
+    def _convert_data_lst_to_ary_dict(
         cls, data: List[dict], array_columns_to_process: List[str]
     ) -> dict:
         """
@@ -755,7 +755,7 @@ def _convert_data_list_to_array_data_dict(
         return array_data_dict
 
     @classmethod
-    def _process_array_data(
+    def _process_array_data(  # pylint: disable=too-many-locals,too-many-branches
         cls, data: List[dict], all_columns: List[dict], array_column_hierarchy: dict
     ) -> dict:
         """
@@ -793,16 +793,17 @@ def _process_array_data(
         # Determine what columns are ready to be processed. This is necessary for
         # array columns that contain rows with nested arrays. We first process
         # the outer arrays before processing inner arrays.
-        array_columns_to_process, unprocessed_array_columns = cls._split_array_columns_by_process_state(
-            array_columns, array_column_hierarchy, data[0]
-        )
+        array_columns_to_process, unprocessed_array_columns = \
+            cls._split_ary_cols_by_proc_state(
+                array_columns, array_column_hierarchy, data[0]
+            )
 
         # Pull out array data that is ready to be processed into a dictionary.
-        all_array_data = cls._convert_data_list_to_array_data_dict(
+        all_array_data = cls._convert_data_lst_to_ary_dict(
             data, array_columns_to_process
         )
 
-        for original_data_index, expanded_array_data in all_array_data.items():
+        for expanded_array_data in all_array_data.values():
             for array_column in array_columns:
                 if array_column in unprocessed_array_columns:
                     continue
@@ -841,47 +842,6 @@ def _process_array_data(
                             array_value[array_child] = ""
         return all_array_data
 
-    @classmethod
-    def _consolidate_array_data_into_data(
-        cls, data: List[dict], array_data: dict
-    ) -> None:
-        """
-        Consolidate data given a list representing rows of data and a dictionary
-        representing expanded array data
-        Example:
-          Original data set = [
-              {'ColumnA': [1, 2], 'ColumnB': [3]},
-              {'ColumnA': [11, 22], 'ColumnB': [33]}
-          ]
-          array_data = {
-              0: [
-                  {'ColumnA': 1, 'ColumnB': 3},
-                  {'ColumnA': 2, 'ColumnB': ''},
-              ],
-              1: [
-                  {'ColumnA': 11, 'ColumnB': 33},
-                  {'ColumnA': 22, 'ColumnB': ''},
-              ],
-          }
-          Final data set = [
-               {'ColumnA': 1, 'ColumnB': 3},
-               {'ColumnA': 2, 'ColumnB': ''},
-               {'ColumnA': 11, 'ColumnB': 33},
-               {'ColumnA': 22, 'ColumnB': ''},
-          ]
-        :param data: list representing rows of data
-        :param array_data: dictionary representing expanded array data
-        :return: list where data and array_data are combined
-        """
-        data_index = 0
-        original_data_index = 0
-        while data_index < len(data):
-            data[data_index].update(array_data[original_data_index][0])
-            array_data[original_data_index].pop(0)
-            data[data_index + 1 : data_index + 1] = array_data[original_data_index]
-            data_index = data_index + len(array_data[original_data_index]) + 1
-            original_data_index = original_data_index + 1
-
     @classmethod
     def _remove_processed_array_columns(
         cls, unprocessed_array_columns: Set[str], array_column_hierarchy: dict
@@ -899,7 +859,7 @@ def _remove_processed_array_columns(
                 del array_column_hierarchy[array_column]
 
     @classmethod
-    def expand_data(
+    def expand_data(  # pylint: disable=too-many-locals
         cls, columns: List[dict], data: List[dict]
     ) -> Tuple[List[dict], List[dict], List[dict]]:
         """
@@ -926,7 +886,8 @@ def expand_data(
         if not is_feature_enabled("PRESTO_EXPAND_DATA"):
             return columns, data, []
 
-        # process each column, unnesting ARRAY types and expanding ROW types into new columns
+        # process each column, unnesting ARRAY types and
+        # expanding ROW types into new columns
         to_process = deque((column, 0) for column in columns)
         all_columns: List[dict] = []
         expanded_columns = []
@@ -937,10 +898,10 @@ def expand_data(
                 all_columns.append(column)
 
             # When unnesting arrays we need to keep track of how many extra rows
-            # were added, for each original row. This is necessary when we expand multiple
-            # arrays, so that the arrays after the first reuse the rows added by
-            # the first. every time we change a level in the nested arrays we
-            # reinitialize this.
+            # were added, for each original row. This is necessary when we expand
+            # multiple arrays, so that the arrays after the first reuse the rows
+            # added by the first. every time we change a level in the nested arrays
+            # we reinitialize this.
             if level != current_array_level:
                 unnested_rows: Dict[int, int] = defaultdict(int)
                 current_array_level = level
@@ -1085,7 +1046,7 @@ def handle_cursor(cls, cursor, query, session):
                 if total_splits and completed_splits:
                     progress = 100 * (completed_splits / total_splits)
                     logging.info(
-                        "Query {} progress: {} / {} "
+                        "Query {} progress: {} / {} "  # pylint: disable=logging-format-interpolation
                         "splits".format(query_id, completed_splits, total_splits)
                     )
                     if progress > query.progress:
@@ -1111,14 +1072,14 @@ def _extract_error_message(cls, e):
         if (
             type(e).__name__ == "DatabaseError"
             and hasattr(e, "args")
-            and len(e.args) > 0
+            and e.args
         ):
             error_dict = e.args[0]
             return error_dict.get("message")
         return utils.error_msg_from_exception(e)
 
     @classmethod
-    def _partition_query(
+    def _partition_query(  # pylint: disable=too-many-arguments,too-many-locals
         cls, table_name, database, limit=0, order_by=None, filters=None
     ):
         """Returns a partition query
@@ -1170,7 +1131,7 @@ def _partition_query(
         return sql
 
     @classmethod
-    def where_latest_partition(
+    def where_latest_partition(  # pylint: disable=too-many-arguments
         cls,
         table_name: str,
         schema: str,
@@ -1182,7 +1143,7 @@ def where_latest_partition(
             col_names, values = cls.latest_partition(
                 table_name, schema, database, show_first=True
             )
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             # table is not partitioned
             return None
 
@@ -1196,7 +1157,7 @@ def where_latest_partition(
         return query
 
     @classmethod
-    def _latest_partition_from_df(cls, df) -> Optional[List[str]]:
+    def _latest_partition_from_df(cls, df) -> Optional[List[str]]:  # pylint: disable=invalid-name
         if not df.empty:
             return df.to_records(index=False)[0].item()
         return None
@@ -1264,7 +1225,7 @@ def latest_sub_partition(cls, table_name, schema, database, **kwargs):
         """
         indexes = database.get_indexes(table_name, schema)
         part_fields = indexes[0]["column_names"]
-        for k in kwargs.keys():
+        for k in kwargs.keys():  # pylint: disable=consider-iterating-dictionary
             if k not in k in part_fields:
                 msg = "Field [{k}] is not part of the portioning key"
                 raise SupersetTemplateException(msg)

From c70fda460d7b8ea93631e5fa944c237bb41cd22a Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 09:52:36 -0700
Subject: [PATCH 06/11] Re-enable lint on base.py, fix/disable specific
 failures, including one bad method signature

---
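Note: base.py keeps one file-level pragma (unused-argument), since the base
class defines hooks whose parameters only matter to subclasses. The db ->
database renames, applied to every override (hive, presto, sqlite) in the
same commit to keep signatures aligned, satisfy pylint's argument-name rule,
which rejects names shorter than three characters. A sketch of the rule
(the regex approximates pylint's default snake_case pattern, it is not part
of this patch):

    import re
    ARGUMENT_RGX = re.compile(r"[a-z_][a-z0-9_]{2,30}$")
    assert not ARGUMENT_RGX.match("db")    # too short: invalid-name
    assert ARGUMENT_RGX.match("database")  # accepted
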
 superset/db_engine_specs/base.py   | 51 +++++++++++++++---------------
 superset/db_engine_specs/hive.py   |  4 +--
 superset/db_engine_specs/presto.py |  4 +--
 superset/db_engine_specs/sqlite.py | 20 ++++++------
 4 files changed, 40 insertions(+), 39 deletions(-)

diff --git a/superset/db_engine_specs/base.py b/superset/db_engine_specs/base.py
index bd7b1d1de099d..94647eeae9685 100644
--- a/superset/db_engine_specs/base.py
+++ b/superset/db_engine_specs/base.py
@@ -14,7 +14,7 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=C,R,W
+# pylint: disable=unused-argument
 from contextlib import closing
 from datetime import datetime
 import hashlib
@@ -42,10 +42,10 @@
 
 if TYPE_CHECKING:
     # prevent circular imports
-    from superset.models.core import Database
+    from superset.models.core import Database  # pylint: disable=unused-import
 
 
-class TimeGrain(NamedTuple):
+class TimeGrain(NamedTuple): # pylint: disable=too-few-public-methods
     name: str  # TODO: redundant field, remove
     label: str
     function: str
@@ -79,7 +79,7 @@ class TimeGrain(NamedTuple):
 }
 
 
-class TimestampExpression(ColumnClause):
+class TimestampExpression(ColumnClause):  # pylint: disable=abstract-method,too-many-ancestors,too-few-public-methods
     def __init__(self, expr: str, col: ColumnClause, **kwargs):
         """Sqlalchemy class that can be can be used to render native column elements
         respeting engine-specific quoting rules as part of a string-based expression.
@@ -106,7 +106,7 @@ def compile_timegrain_expression(
     return element.name.replace("{col}", compiler.process(element.col, **kw))
 
 
-class LimitMethod(object):
+class LimitMethod(object):  # pylint: disable=too-few-public-methods
     """Enum the ways that limits can be applied"""
 
     FETCH_MANY = "fetch_many"
@@ -114,7 +114,7 @@ class LimitMethod(object):
     FORCE_LIMIT = "force_limit"
 
 
-class BaseEngineSpec:
+class BaseEngineSpec:  # pylint: disable=too-many-public-methods
     """Abstract class for database engine specific configurations"""
 
     engine = "base"  # str as defined in sqlalchemy.engine.engine
@@ -128,7 +128,7 @@ class BaseEngineSpec:
     force_column_alias_quotes = False
     arraysize = 0
     max_column_name_length = 0
-    try_remove_schema_from_table_name = True
+    try_remove_schema_from_table_name = True  # pylint: disable=invalid-name
 
     @classmethod
     def get_allow_cost_estimate(cls, version: str = None) -> bool:
@@ -287,7 +287,7 @@ def get_datatype(cls, type_code: Any) -> Optional[str]:
         :param type_code: Type code from cursor description
         :return: String representation of type code
         """
-        if isinstance(type_code, str) and len(type_code):
+        if isinstance(type_code, str) and type_code != "":
             return type_code.upper()
         return None
 
@@ -375,7 +375,7 @@ def csv_to_df(**kwargs) -> pd.DataFrame:
         return df
 
     @classmethod
-    def df_to_sql(cls, df: pd.DataFrame, **kwargs):
+    def df_to_sql(cls, df: pd.DataFrame, **kwargs):  # pylint: disable=invalid-name
         """ Upload data from a Pandas DataFrame to a database. For
         regular engines this calls the DataFrame.to_sql() method. Can be
         overridden for engines that don't work well with to_sql(), e.g.
@@ -449,35 +449,35 @@ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
 
     @classmethod
     def get_all_datasource_names(
-        cls, db, datasource_type: str
+        cls, database, datasource_type: str
     ) -> List[utils.DatasourceName]:
         """Returns a list of all tables or views in database.
 
-        :param db: Database instance
+        :param database: Database instance
         :param datasource_type: Datasource_type can be 'table' or 'view'
         :return: List of all datasources in database or schema
         """
         # TODO: Fix circular import caused by importing Database
-        schemas = db.get_all_schema_names(
-            cache=db.schema_cache_enabled,
-            cache_timeout=db.schema_cache_timeout,
+        schemas = database.get_all_schema_names(
+            cache=database.schema_cache_enabled,
+            cache_timeout=database.schema_cache_timeout,
             force=True,
         )
         all_datasources: List[utils.DatasourceName] = []
         for schema in schemas:
             if datasource_type == "table":
-                all_datasources += db.get_all_table_names_in_schema(
+                all_datasources += database.get_all_table_names_in_schema(
                     schema=schema,
                     force=True,
-                    cache=db.table_cache_enabled,
-                    cache_timeout=db.table_cache_timeout,
+                    cache=database.table_cache_enabled,
+                    cache_timeout=database.table_cache_timeout,
                 )
             elif datasource_type == "view":
-                all_datasources += db.get_all_view_names_in_schema(
+                all_datasources += database.get_all_view_names_in_schema(
                     schema=schema,
                     force=True,
-                    cache=db.table_cache_enabled,
-                    cache_timeout=db.table_cache_timeout,
+                    cache=database.table_cache_enabled,
+                    cache_timeout=database.table_cache_timeout,
                 )
             else:
                 raise Exception(f"Unsupported datasource_type: {datasource_type}")
@@ -588,7 +588,7 @@ def get_columns(
         return inspector.get_columns(table_name, schema)
 
     @classmethod
-    def where_latest_partition(
+    def where_latest_partition(  # pylint: disable=too-many-arguments
         cls,
         table_name: str,
         schema: Optional[str],
@@ -615,7 +615,7 @@ def _get_fields(cls, cols):
         return [column(c.get("name")) for c in cols]
 
     @classmethod
-    def select_star(
+    def select_star(  # pylint: disable=too-many-arguments,too-many-locals
         cls,
         database,
         table_name: str,
@@ -727,7 +727,7 @@ def modify_url_for_impersonation(cls, url, impersonate_user: bool, username: str
             url.username = username
 
     @classmethod
-    def get_configuration_for_impersonation(
+    def get_configuration_for_impersonation(  # pylint: disable=invalid-name
         cls, uri: str, impersonate_user: bool, username: str
     ) -> Dict[str, str]:
         """
@@ -830,8 +830,9 @@ def column_datatype_to_string(
         cls, sqla_column_type: TypeEngine, dialect: Dialect
     ) -> str:
         """
-        Convert sqlalchemy column type to string representation. Can be overridden to remove
-        unnecessary details, especially collation info (see mysql, mssql).
+        Convert sqlalchemy column type to string representation.
+        Can be overridden to remove unnecessary details, especially
+        collation info (see mysql, mssql).
 
         :param sqla_column_type: SqlAlchemy column type
         :param dialect: Sqlalchemy dialect
diff --git a/superset/db_engine_specs/hive.py b/superset/db_engine_specs/hive.py
index 130092cb3d982..9bbbb872cb450 100644
--- a/superset/db_engine_specs/hive.py
+++ b/superset/db_engine_specs/hive.py
@@ -80,9 +80,9 @@ def patch(cls):
 
     @classmethod
     def get_all_datasource_names(
-        cls, db, datasource_type: str
+        cls, database, datasource_type: str
     ) -> List[utils.DatasourceName]:
-        return BaseEngineSpec.get_all_datasource_names(db, datasource_type)
+        return BaseEngineSpec.get_all_datasource_names(database, datasource_type)
 
     @classmethod
     def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py
index 636a2a9f540fd..2922f0f2e7261 100644
--- a/superset/db_engine_specs/presto.py
+++ b/superset/db_engine_specs/presto.py
@@ -569,9 +569,9 @@ def epoch_to_dttm(cls) -> str:
 
     @classmethod
     def get_all_datasource_names(
-        cls, db, datasource_type: str
+        cls, database, datasource_type: str
     ) -> List[utils.DatasourceName]:
-        datasource_df = db.get_df(
+        datasource_df = database.get_df(
             "SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S "
             "ORDER BY concat(table_schema, '.', table_name)".format(
                 datasource_type.upper()
diff --git a/superset/db_engine_specs/sqlite.py b/superset/db_engine_specs/sqlite.py
index cd7e85db10918..95dead2685706 100644
--- a/superset/db_engine_specs/sqlite.py
+++ b/superset/db_engine_specs/sqlite.py
@@ -49,27 +49,27 @@ def epoch_to_dttm(cls) -> str:
 
     @classmethod
     def get_all_datasource_names(
-        cls, db, datasource_type: str
+        cls, database, datasource_type: str
     ) -> List[utils.DatasourceName]:
-        schemas = db.get_all_schema_names(
-            cache=db.schema_cache_enabled,
-            cache_timeout=db.schema_cache_timeout,
+        schemas = database.get_all_schema_names(
+            cache=database.schema_cache_enabled,
+            cache_timeout=database.schema_cache_timeout,
             force=True,
         )
         schema = schemas[0]
         if datasource_type == "table":
-            return db.get_all_table_names_in_schema(
+            return database.get_all_table_names_in_schema(
                 schema=schema,
                 force=True,
-                cache=db.table_cache_enabled,
-                cache_timeout=db.table_cache_timeout,
+                cache=database.table_cache_enabled,
+                cache_timeout=database.table_cache_timeout,
             )
         elif datasource_type == "view":
-            return db.get_all_view_names_in_schema(
+            return database.get_all_view_names_in_schema(
                 schema=schema,
                 force=True,
-                cache=db.table_cache_enabled,
-                cache_timeout=db.table_cache_timeout,
+                cache=database.table_cache_enabled,
+                cache_timeout=database.table_cache_timeout,
             )
         else:
             raise Exception(f"Unsupported datasource_type: {datasource_type}")

From c6b007edc0e367d7dfd03bd9e696661e247b9d00 Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 10:36:49 -0700
Subject: [PATCH 07/11] Make flake8 happy after a number of pylint fixes

---
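Note: the friction here is between tools. pylint accepts a disable pragma
anywhere on the line, but flake8's E261 requires at least two spaces before
an inline #, and the closing paren of the hive signature needed standard
continuation indentation. For example:

    x = 1 # flagged by flake8 (E261)
    x = 1  # accepted
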
 superset/db_engine_specs/base.py   | 2 +-
 superset/db_engine_specs/hive.py   | 2 +-
 superset/db_engine_specs/presto.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/superset/db_engine_specs/base.py b/superset/db_engine_specs/base.py
index 94647eeae9685..94d9dffdaecf3 100644
--- a/superset/db_engine_specs/base.py
+++ b/superset/db_engine_specs/base.py
@@ -45,7 +45,7 @@
     from superset.models.core import Database  # pylint: disable=unused-import
 
 
-class TimeGrain(NamedTuple): # pylint: disable=too-few-public-methods
+class TimeGrain(NamedTuple):  # pylint: disable=too-few-public-methods
     name: str  # TODO: redundant field, remove
     label: str
     function: str
diff --git a/superset/db_engine_specs/hive.py b/superset/db_engine_specs/hive.py
index 9bbbb872cb450..07c195086566e 100644
--- a/superset/db_engine_specs/hive.py
+++ b/superset/db_engine_specs/hive.py
@@ -345,7 +345,7 @@ def _latest_partition_from_df(cls, df) -> Optional[List[str]]:
     @classmethod
     def _partition_query(  # pylint: disable=too-many-arguments
         cls, table_name, database, limit=0, order_by=None, filters=None
-        ):
+    ):
         return f"SHOW PARTITIONS {table_name}"
 
     @classmethod
diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py
index 2922f0f2e7261..61106a54694e6 100644
--- a/superset/db_engine_specs/presto.py
+++ b/superset/db_engine_specs/presto.py
@@ -439,7 +439,7 @@ def _filter_out_array_nested_cols(
         return filtered_cols, array_cols
 
     @classmethod
-    def select_star( # pylint: disable=too-many-arguments
+    def select_star(  # pylint: disable=too-many-arguments
         cls,
         database,
         table_name: str,

From 4fa870fcbd3105690f4cedb917e1aa1116a21f7a Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 15:00:59 -0700
Subject: [PATCH 08/11] Update db_engine_specs_test test cases related to Presto
 to support different method naming

---
 tests/db_engine_specs_test.py | 32 ++++----------------------------
 1 file changed, 4 insertions(+), 28 deletions(-)

diff --git a/tests/db_engine_specs_test.py b/tests/db_engine_specs_test.py
index 5975f954996cf..60ff348a7cc24 100644
--- a/tests/db_engine_specs_test.py
+++ b/tests/db_engine_specs_test.py
@@ -528,7 +528,7 @@ def test_presto_expand_row_data(self):
         }
         self.assertEqual(datum, expected_datum)
 
-    def test_split_array_columns_by_process_state(self):
+    def test_presto_split_ary_cols_by_proc_state(self):
         array_cols = ["array_column", "array_column.nested_array"]
         array_col_hierarchy = {
             "array_column": {
@@ -541,7 +541,7 @@ def test_split_array_columns_by_process_state(self):
             },
         }
         datum = {"array_column": [[[1], [2]]]}
-        actual_array_cols_to_process, actual_unprocessed_array_cols = PrestoEngineSpec._split_array_columns_by_process_state(  # noqa ignore: E50
+        actual_array_cols_to_process, actual_unprocessed_array_cols = PrestoEngineSpec._split_ary_cols_by_proc_state(  # noqa ignore: E50
             array_cols, array_col_hierarchy, datum
         )
         expected_array_cols_to_process = ["array_column"]
@@ -549,13 +549,13 @@ def test_split_array_columns_by_process_state(self):
         self.assertEqual(actual_array_cols_to_process, expected_array_cols_to_process)
         self.assertEqual(actual_unprocessed_array_cols, expected_unprocessed_array_cols)
 
-    def test_presto_convert_data_list_to_array_data_dict(self):
+    def test_presto_convert_data_lst_to_ary_dict(self):
         data = [
             {"array_column": [1, 2], "int_column": 3},
             {"array_column": [11, 22], "int_column": 33},
         ]
         array_columns_to_process = ["array_column"]
-        actual_array_data_dict = PrestoEngineSpec._convert_data_list_to_array_data_dict(
+        actual_array_data_dict = PrestoEngineSpec._convert_data_lst_to_ary_dict(
             data, array_columns_to_process
         )
         expected_array_data_dict = {
@@ -592,30 +592,6 @@ def test_presto_process_array_data(self):
         }
         self.assertEqual(actual_array_data, expected_array_data)
 
-    def test_presto_consolidate_array_data_into_data(self):
-        data = [
-            {"arr_col": [[1], [2]], "int_col": 3},
-            {"arr_col": [[11], [22]], "int_col": 33},
-        ]
-        array_data = {
-            0: [
-                {"arr_col": [[1], [2]], "arr_col.nested_row": 1},
-                {"arr_col": "", "arr_col.nested_row": 2, "int_col": ""},
-            ],
-            1: [
-                {"arr_col": [[11], [22]], "arr_col.nested_row": 11},
-                {"arr_col": "", "arr_col.nested_row": 22, "int_col": ""},
-            ],
-        }
-        PrestoEngineSpec._consolidate_array_data_into_data(data, array_data)
-        expected_data = [
-            {"arr_col": [[1], [2]], "arr_col.nested_row": 1, "int_col": 3},
-            {"arr_col": "", "arr_col.nested_row": 2, "int_col": ""},
-            {"arr_col": [[11], [22]], "arr_col.nested_row": 11, "int_col": 33},
-            {"arr_col": "", "arr_col.nested_row": 22, "int_col": ""},
-        ]
-        self.assertEqual(data, expected_data)
-
     def test_presto_remove_processed_array_columns(self):
         array_col_hierarchy = {
             "array_column": {

From 2120665197f2bb1c9184e574c72852c31b2fd289 Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 15:31:21 -0700
Subject: [PATCH 09/11] automated reformatting

---
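Note: this pass is consistent with running black over the touched files
(presumably black, given Superset's tooling): backslash continuations become
parenthesized expressions. One hazard with the rewrapped query in presto.py:
adjacent string literals are concatenated with no separator, so each
fragment must carry its own spacing or the generated SQL runs words
together. A minimal sketch:

    sql = (
        "SELECT table_name FROM information_schema.views "  # trailing space
        "WHERE table_schema=%(schema)s"
    )
    assert "views WHERE" in sql
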
 superset/db_engine_specs/base.py   |  4 +++-
 superset/db_engine_specs/db2.py    |  1 -
 superset/db_engine_specs/hive.py   |  4 +++-
 superset/db_engine_specs/presto.py | 21 ++++++++++-----------
 4 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/superset/db_engine_specs/base.py b/superset/db_engine_specs/base.py
index 94d9dffdaecf3..12911ad99f888 100644
--- a/superset/db_engine_specs/base.py
+++ b/superset/db_engine_specs/base.py
@@ -79,7 +79,9 @@ class TimeGrain(NamedTuple):  # pylint: disable=too-few-public-methods
 }
 
 
-class TimestampExpression(ColumnClause):  # pylint: disable=abstract-method,too-many-ancestors,too-few-public-methods
+class TimestampExpression(
+    ColumnClause
+):  # pylint: disable=abstract-method,too-many-ancestors,too-few-public-methods
     def __init__(self, expr: str, col: ColumnClause, **kwargs):
         """Sqlalchemy class that can be can be used to render native column elements
         respeting engine-specific quoting rules as part of a string-based expression.
diff --git a/superset/db_engine_specs/db2.py b/superset/db_engine_specs/db2.py
index 4ec21bc89637f..faab33f0658a8 100644
--- a/superset/db_engine_specs/db2.py
+++ b/superset/db_engine_specs/db2.py
@@ -1,4 +1,3 @@
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
diff --git a/superset/db_engine_specs/hive.py b/superset/db_engine_specs/hive.py
index 07c195086566e..a08edc22144f3 100644
--- a/superset/db_engine_specs/hive.py
+++ b/superset/db_engine_specs/hive.py
@@ -415,6 +415,8 @@ def get_configuration_for_impersonation(
         return configuration
 
     @staticmethod
-    def execute(cursor, query: str, async_: bool = False):  # pylint: disable=arguments-differ
+    def execute(
+        cursor, query: str, async_: bool = False
+    ):  # pylint: disable=arguments-differ
         kwargs = {"async": async_}
         cursor.execute(query, **kwargs)
diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py
index 61106a54694e6..5345adab9185d 100644
--- a/superset/db_engine_specs/presto.py
+++ b/superset/db_engine_specs/presto.py
@@ -156,8 +156,10 @@ def get_view_names(
             return []
 
         if schema:
-            sql = "SELECT table_name FROM information_schema.views" \
+            sql = (
+                "SELECT table_name FROM information_schema.views"
                 "WHERE table_schema=%(schema)s"
+            )
             params = {"schema": schema}
         else:
             sql = "SELECT table_name FROM information_schema.views"
@@ -793,10 +795,9 @@ def _process_array_data(  # pylint: disable=too-many-locals,too-many-branches
         # Determine what columns are ready to be processed. This is necessary for
         # array columns that contain rows with nested arrays. We first process
         # the outer arrays before processing inner arrays.
-        array_columns_to_process, unprocessed_array_columns = \
-            cls._split_ary_cols_by_proc_state(
-                array_columns, array_column_hierarchy, data[0]
-            )
+        array_columns_to_process, unprocessed_array_columns = cls._split_ary_cols_by_proc_state(
+            array_columns, array_column_hierarchy, data[0]
+        )
 
         # Pull out array data that is ready to be processed into a dictionary.
         all_array_data = cls._convert_data_lst_to_ary_dict(
@@ -1069,11 +1070,7 @@ def _extract_error_message(cls, e):
                 error_dict.get("errorLocation"),
                 error_dict.get("message"),
             )
-        if (
-            type(e).__name__ == "DatabaseError"
-            and hasattr(e, "args")
-            and e.args
-        ):
+        if type(e).__name__ == "DatabaseError" and hasattr(e, "args") and e.args:
             error_dict = e.args[0]
             return error_dict.get("message")
         return utils.error_msg_from_exception(e)
@@ -1157,7 +1154,9 @@ def where_latest_partition(  # pylint: disable=too-many-arguments
         return query
 
     @classmethod
-    def _latest_partition_from_df(cls, df) -> Optional[List[str]]:  # pylint: disable=invalid-name
+    def _latest_partition_from_df(
+        cls, df
+    ) -> Optional[List[str]]:  # pylint: disable=invalid-name
         if not df.empty:
             return df.to_records(index=False)[0].item()
         return None

From 84a4e5c90376d0a4c69bf382b27b03cb1cbf1038 Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 15:33:19 -0700
Subject: [PATCH 10/11] One more pylint disable for druid.py

---
 superset/db_engine_specs/druid.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/superset/db_engine_specs/druid.py b/superset/db_engine_specs/druid.py
index 273f19426e521..3610a5891a774 100644
--- a/superset/db_engine_specs/druid.py
+++ b/superset/db_engine_specs/druid.py
@@ -17,7 +17,7 @@
 from superset.db_engine_specs.base import BaseEngineSpec
 
 
-class DruidEngineSpec(BaseEngineSpec):
+class DruidEngineSpec(BaseEngineSpec):  # pylint: disable=abstract-method
     """Engine spec for Druid.io"""
 
     engine = "druid"

From 79f49655c24a76d790791a7f621a4ff2121966e9 Mon Sep 17 00:00:00 2001
From: Will Barrett <will@preset.io>
Date: Wed, 2 Oct 2019 16:56:17 -0700
Subject: [PATCH 11/11] Find the magic invocation that makes all the lint tools
 happy

---
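Note: the "magic invocation" is about pragma placement. A pylint disable
applies to the physical line it sits on, and for a multi-line def the
invalid-name message (triggered here by the two-character parameter df) is
reported on the first line of the statement, so the pragma only works on the
def line, not on the closing line where the reformatting pass had left it.
A self-contained sketch mirroring the final form:

    from typing import List, Optional

    def _latest_partition_from_df(  # pylint: disable=invalid-name
        df,
    ) -> Optional[List[str]]:
        return None
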
 superset/db_engine_specs/presto.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py
index 5345adab9185d..206e43ede6970 100644
--- a/superset/db_engine_specs/presto.py
+++ b/superset/db_engine_specs/presto.py
@@ -795,7 +795,7 @@ def _process_array_data(  # pylint: disable=too-many-locals,too-many-branches
         # Determine what columns are ready to be processed. This is necessary for
         # array columns that contain rows with nested arrays. We first process
         # the outer arrays before processing inner arrays.
-        array_columns_to_process, unprocessed_array_columns = cls._split_ary_cols_by_proc_state(
+        array_columns_to_process, unprocessed_array_columns = cls._split_ary_cols_by_proc_state(  # pylint: disable=line-too-long
             array_columns, array_column_hierarchy, data[0]
         )
 
@@ -1154,9 +1154,9 @@ def where_latest_partition(  # pylint: disable=too-many-arguments
         return query
 
     @classmethod
-    def _latest_partition_from_df(
+    def _latest_partition_from_df(  # pylint: disable=invalid-name
         cls, df
-    ) -> Optional[List[str]]:  # pylint: disable=invalid-name
+    ) -> Optional[List[str]]:
         if not df.empty:
             return df.to_records(index=False)[0].item()
         return None