Commit

Flake8 fixes - see apache#3302
rhunwicks committed Oct 3, 2017
1 parent 38d6c03 commit dd769ed
Showing 2 changed files with 14 additions and 18 deletions.
24 changes: 9 additions & 15 deletions contrib/connectors/pandas/models.py
@@ -98,13 +98,6 @@ def is_dttm(self):
     def is_string(self):
         return self.type and is_string_dtype(self.type)
 
-    num_types = (
-        'DOUBLE', 'FLOAT', 'INT', 'BIGINT',
-        'LONG', 'REAL', 'NUMERIC', 'DECIMAL'
-    )
-    date_types = ('DATE', 'TIME', 'DATETIME')
-    str_types = ('VARCHAR', 'STRING', 'CHAR')
-
     @property
     def data(self):
         attrs = (
@@ -366,8 +359,8 @@ def get_filter_query(self, filter):
 # Rely on Pandas partial string indexing for datetime fields,
 # see https://pandas.pydata.org/pandas-docs/stable/timeseries.html#partial-string-indexing # NOQA
 try:
-    if ((col_obj.is_string or col_obj.is_dttm)
-            and not isinstance(eq, list)):
+    if ((col_obj.is_string or col_obj.is_dttm) and
+            not isinstance(eq, list)):
         eq = "'{}'".format(eq)
 except AttributeError:
     # col_obj is None, probably because the col is a metric,
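
(The hunk above only re-wraps the conditional; the behaviour it relies on is pandas' partial string indexing, linked in the comment. A minimal sketch of that pandas feature for illustration only; it is not part of this commit and assumes a plain DatetimeIndex.)

    import pandas as pd

    # Partial string indexing: a DatetimeIndex can be sliced with a partial
    # date string, e.g. a year-month, instead of exact timestamps.
    idx = pd.date_range('2017-01-01', periods=90, freq='D')
    df = pd.DataFrame({'value': range(90)}, index=idx)

    january = df.loc['2017-01']   # all rows whose index falls in January 2017
    print(len(january))           # 31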
@@ -472,12 +465,13 @@ def process_dataframe(
 metric = metrics_dict[timeseries_limit_metric]
 assert isinstance(metric.source, basestring)
 aggregates = {metric.source: metric.expression}
-df = (df[df.set_index(groupby).index.isin(
-    df.groupby(groupby, sort=False)
-    .aggregate(aggregates)
-    .sort_values(metric.source,
-                 ascending=metric_order_asc)
-    .iloc[:timeseries_limit].index)])
+df = (df[df.set_index(groupby)
+         .index.isin(
+             df.groupby(groupby, sort=False)
+             .aggregate(aggregates)
+             .sort_values(metric.source,
+                          ascending=metric_order_asc)
+             .iloc[:timeseries_limit].index)])
 
 query_str += ('[df.set_index({groupby}).index.isin('
               'df.groupby({groupby}, sort=False)'
8 changes: 5 additions & 3 deletions contrib/tests/connector_tests.py
@@ -654,7 +654,8 @@ def test_groupby_value_percentage_metric(self):
 self.assertEqual(result.error_message, None)
 self.assertEqual(result.status, QueryStatus.SUCCESS)
 expected_df = (self.df.groupby(parameters['groupby'])
-               .apply(lambda x: sum(x['value'])/sum(x['value'] + x['value2']))
+               .apply(lambda x: sum(x['value']) /
+                      sum(x['value'] + x['value2']))
                .reset_index()
                .sort_values([0], ascending=False))
 expected_df.columns = parameters['groupby'] + parameters['metrics']
@@ -935,7 +936,8 @@ class SqlaConnectorTestCase(BaseConnectorTestCase):
 SqlMetric(metric_name='value_percentage', metric_type='custom',
           expression="SUM(value)/SUM(value + value2)"),
 SqlMetric(metric_name='category_percentage', metric_type='custom',
-          expression="SUM(CASE WHEN category='CategoryA' THEN 1 ELSE 0 END)/CAST(COUNT(*) AS REAL)"),
+          expression="SUM(CASE WHEN category='CategoryA' THEN 1 ELSE 0 END)/"
+                     "CAST(COUNT(*) AS REAL)"),
 ]
 
 def setUp(self):
@@ -993,7 +995,7 @@ def setUp(self):
     metrics=self.metrics)
 
 def calc_value_percentage(group):
-    return sum(group['value'])/sum(group['value'] + group['value2'])
+    return sum(group['value']) / sum(group['value'] + group['value2'])
 
 self.datasource.calc_value_percentage = calc_value_percentage
 
