diff --git a/RELEASING/Dockerfile.from_local_tarball b/RELEASING/Dockerfile.from_local_tarball
index ff7eb5653602d..4860db64287cc 100644
--- a/RELEASING/Dockerfile.from_local_tarball
+++ b/RELEASING/Dockerfile.from_local_tarball
@@ -24,7 +24,7 @@ ENV LANG=C.UTF-8 \
 
 RUN apt-get update -y
 
-# Install dependencies to fix `curl https support error` and `elaying package configuration warning`
+# Install dependencies to fix `curl https support error` and `delaying package configuration warning`
 RUN apt-get install -y apt-transport-https apt-utils
 
 # Install superset dependencies
diff --git a/RELEASING/Dockerfile.from_svn_tarball b/RELEASING/Dockerfile.from_svn_tarball
index b88481f40d10e..3deea5b8d3541 100644
--- a/RELEASING/Dockerfile.from_svn_tarball
+++ b/RELEASING/Dockerfile.from_svn_tarball
@@ -24,7 +24,7 @@ ENV LANG=C.UTF-8 \
 
 RUN apt-get update -y
 
-# Install dependencies to fix `curl https support error` and `elaying package configuration warning`
+# Install dependencies to fix `curl https support error` and `delaying package configuration warning`
 RUN apt-get install -y apt-transport-https apt-utils
 
 # Install superset dependencies
diff --git a/docs/static/resources/openapi.json b/docs/static/resources/openapi.json
index 8279811b53dc1..86060e5470a19 100644
--- a/docs/static/resources/openapi.json
+++ b/docs/static/resources/openapi.json
@@ -746,7 +746,7 @@
           "type": "array"
         },
         "metrics": {
-          "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metricswhich are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. When metrics is undefined or null, the query is executed without a groupby. However, when metrics is an array (length >= 0), a groupby clause is added to the query.",
+          "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metrics which are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. When metrics is undefined or null, the query is executed without a groupby. However, when metrics is an array (length >= 0), a groupby clause is added to the query.",
           "items": {},
           "nullable": true,
           "type": "array"
@@ -1309,7 +1309,7 @@
           "type": "boolean"
         },
         "metrics": {
-          "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metricswhich are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
+          "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metrics which are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
           "items": {},
           "nullable": true,
           "type": "array"
@@ -1968,7 +1968,7 @@
           "type": "string"
         },
         "query_context_generation": {
-          "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+          "description": "The query context generation represents whether the query_context is user generated or not so that it does not update user modified state.",
           "nullable": true,
           "type": "boolean"
         },
@@ -2075,7 +2075,7 @@
           "type": "string"
         },
         "query_context_generation": {
-          "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+          "description": "The query context generation represents whether the query_context is user generated or not so that it does not update user modified state.",
           "nullable": true,
           "type": "boolean"
         },
@@ -2760,7 +2760,7 @@
           "type": "string"
         },
         "query_context_generation": {
-          "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+          "description": "The query context generation represents whether the query_context is user generated or not so that it does not update user modified state.",
           "nullable": true,
           "type": "boolean"
         },
@@ -2867,7 +2867,7 @@
           "type": "string"
         },
         "query_context_generation": {
-          "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+          "description": "The query context generation represents whether the query_context is user generated or not so that it does not update user modified state.",
           "nullable": true,
           "type": "boolean"
         },
diff --git a/scripts/cancel_github_workflows.py b/scripts/cancel_github_workflows.py
index 720dc05cbef22..4d30d34adf405 100755
--- a/scripts/cancel_github_workflows.py
+++ b/scripts/cancel_github_workflows.py
@@ -143,7 +143,7 @@ def print_commit(commit: Dict[str, Any], branch: str) -> None:
     "--include-last/--skip-last",
     default=False,
     show_default=True,
-    help="Whether to also cancel the lastest run.",
+    help="Whether to also cancel the latest run.",
 )
 @click.option(
     "--include-running/--skip-running",
diff --git a/scripts/permissions_cleanup.py b/scripts/permissions_cleanup.py
index 99d192919c6c4..5ca75e394cccf 100644
--- a/scripts/permissions_cleanup.py
+++ b/scripts/permissions_cleanup.py
@@ -24,7 +24,7 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("# of permission view menues is: {}".format(len(pvms)))
+    print("# of permission view menus is: {}".format(len(pvms)))
     pvms_dict = defaultdict(list)
     for pvm in pvms:
         pvms_dict[(pvm.permission, pvm.view_menu)].append(pvm)
@@ -43,9 +43,9 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("Stage 1: # of permission view menues is: {}".format(len(pvms)))
+    print("Stage 1: # of permission view menus is: {}".format(len(pvms)))
 
-    # 2. Clean up None permissions or view menues
+    # 2. Clean up None permissions or view menus
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
@@ -57,15 +57,15 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("Stage 2: # of permission view menues is: {}".format(len(pvms)))
+    print("Stage 2: # of permission view menus is: {}".format(len(pvms)))
 
-    # 3. Delete empty permission view menues from roles
+    # 3. Delete empty permission view menus from roles
     roles = security_manager.get_session.query(security_manager.role_model).all()
     for role in roles:
         role.permissions = [p for p in role.permissions if p]
     security_manager.get_session.commit()
 
-    # 4. Delete empty roles from permission view menues
+    # 4. Delete empty roles from permission view menus
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
diff --git a/scripts/python_tests.sh b/scripts/python_tests.sh
index 6491a3f6f9d46..c3f27d17f78c4 100755
--- a/scripts/python_tests.sh
+++ b/scripts/python_tests.sh
@@ -19,7 +19,7 @@
 set -e
 
 # Temporary fix, probably related with https://bugs.launchpad.net/ubuntu/+source/opencv/+bug/1890170
-# MySQL was failling with:
+# MySQL was failing with:
 # from . import _mysql
 # ImportError: /lib/x86_64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block
 export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6
diff --git a/scripts/tests/run.sh b/scripts/tests/run.sh
index 24233010107dd..2c3b5bf359733 100755
--- a/scripts/tests/run.sh
+++ b/scripts/tests/run.sh
@@ -24,7 +24,7 @@ set -e
 #
 function reset_db() {
   echo --------------------
-  echo Reseting test DB
+  echo Resetting test DB
   echo --------------------
   docker-compose stop superset-tests-worker superset || true
   RESET_DB_CMD="psql \"postgresql://${DB_USER}:${DB_PASSWORD}@127.0.0.1:5432\" <<-EOF
diff --git a/superset-websocket/spec/index.test.ts b/superset-websocket/spec/index.test.ts
index 320f13b4451e9..ca575e9e8af56 100644
--- a/superset-websocket/spec/index.test.ts
+++ b/superset-websocket/spec/index.test.ts
@@ -98,7 +98,7 @@ describe('server', () => {
     expect(endMock).toHaveBeenLastCalledWith('OK');
   });
 
-  test('reponds with a 404 when not found', () => {
+  test('responds with a 404 when not found', () => {
     const endMock = jest.fn();
     const writeHeadMock = jest.fn();
 
diff --git a/superset-websocket/utils/client-ws-app/views/index.pug b/superset-websocket/utils/client-ws-app/views/index.pug
index 3b1efc7fbff98..2322bec5805cc 100644
--- a/superset-websocket/utils/client-ws-app/views/index.pug
+++ b/superset-websocket/utils/client-ws-app/views/index.pug
@@ -24,7 +24,7 @@ block content
 
   div Sockets connected:
     span#socket-count 0
-  div Messages recevied:
+  div Messages received:
     span#message-count 0
   div Last message received:
     code#message-debug
diff --git a/superset/utils/core.py b/superset/utils/core.py
index 0ab3a685a39c3..15c3554276879 100644
--- a/superset/utils/core.py
+++ b/superset/utils/core.py
@@ -1026,7 +1026,7 @@ def send_mime_email(
     smtp_password = config["SMTP_PASSWORD"]
     smtp_starttls = config["SMTP_STARTTLS"]
     smtp_ssl = config["SMTP_SSL"]
-    smpt_ssl_server_auth = config["SMTP_SSL_SERVER_AUTH"]
+    smtp_ssl_server_auth = config["SMTP_SSL_SERVER_AUTH"]
 
     if dryrun:
         logger.info("Dryrun enabled, email notification content is below:")
@@ -1035,7 +1035,7 @@ def send_mime_email(
 
     # Default ssl context is SERVER_AUTH using the default system
     # root CA certificates
-    ssl_context = ssl.create_default_context() if smpt_ssl_server_auth else None
+    ssl_context = ssl.create_default_context() if smtp_ssl_server_auth else None
     smtp = (
         smtplib.SMTP_SSL(smtp_host, smtp_port, context=ssl_context)
         if smtp_ssl
diff --git a/tests/integration_tests/charts/data/api_tests.py b/tests/integration_tests/charts/data/api_tests.py
index d83cb8286b529..164fb0ca6c720 100644
--- a/tests/integration_tests/charts/data/api_tests.py
+++ b/tests/integration_tests/charts/data/api_tests.py
@@ -451,7 +451,7 @@ def test_with_filter_suppose_to_return_empty_data__no_data_returned(self):
 
     def test_with_invalid_where_parameter__400(self):
         self.query_context_payload["queries"][0]["filters"] = []
-        # erroneus WHERE-clause
+        # erroneous WHERE-clause
         self.query_context_payload["queries"][0]["extras"]["where"] = "(gender abc def)"
 
         rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
diff --git a/tests/integration_tests/core_tests.py b/tests/integration_tests/core_tests.py
index 799ddacad4958..1b35d81f8394a 100644
--- a/tests/integration_tests/core_tests.py
+++ b/tests/integration_tests/core_tests.py
@@ -1619,7 +1619,7 @@ def test_dashboard_injected_exceptions(self, mock_db_connection_mutator):
         Handle injected exceptions from the db mutator
         """
 
-        # Assert we can handle a custom excetion at the mutator level
+        # Assert we can handle a custom exception at the mutator level
         exception = SupersetException("Error message")
         mock_db_connection_mutator.side_effect = exception
         dash = db.session.query(Dashboard).first()
diff --git a/tests/integration_tests/csv_upload_tests.py b/tests/integration_tests/csv_upload_tests.py
index 724a1776342f5..3e0200d18a26b 100644
--- a/tests/integration_tests/csv_upload_tests.py
+++ b/tests/integration_tests/csv_upload_tests.py
@@ -209,7 +209,7 @@ def mock_upload_to_s3(filename: str, upload_prefix: str, table: Table) -> str:
     container.exec_run(f"hdfs dfs -mkdir -p {dest_dir}")
     dest = os.path.join(dest_dir, os.path.basename(filename))
     container.exec_run(f"hdfs dfs -put {src} {dest}")
-    # hive external table expectes a directory for the location
+    # hive external table expects a directory for the location
     return dest_dir
 
diff --git a/tests/integration_tests/datasets/api_tests.py b/tests/integration_tests/datasets/api_tests.py
index ff8206354c0d5..95236af09041e 100644
--- a/tests/integration_tests/datasets/api_tests.py
+++ b/tests/integration_tests/datasets/api_tests.py
@@ -1810,7 +1810,7 @@ def test_export_dataset_gamma(self):
             "datasource_access", dataset.perm
         )
 
-        # add perissions to allow export + access to query this dataset
+        # add permissions to allow export + access to query this dataset
         gamma_role = security_manager.find_role("Gamma")
         security_manager.add_permission_role(gamma_role, perm1)
         security_manager.add_permission_role(gamma_role, perm2)
diff --git a/tests/integration_tests/datasets/commands_tests.py b/tests/integration_tests/datasets/commands_tests.py
index 9498c911f266d..5cc5c85beab37 100644
--- a/tests/integration_tests/datasets/commands_tests.py
+++ b/tests/integration_tests/datasets/commands_tests.py
@@ -72,7 +72,7 @@ def test_export_dataset_command(self, mock_g):
 
         metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"])
 
-        # sort columns for deterministc comparison
+        # sort columns for deterministic comparison
         metadata["columns"] = sorted(metadata["columns"], key=itemgetter("column_name"))
         metadata["metrics"] = sorted(metadata["metrics"], key=itemgetter("metric_name"))
diff --git a/tests/integration_tests/datasource_tests.py b/tests/integration_tests/datasource_tests.py
index 4969321a1c54b..52bd9ec244cc3 100644
--- a/tests/integration_tests/datasource_tests.py
+++ b/tests/integration_tests/datasource_tests.py
@@ -233,7 +233,7 @@ def test_external_metadata_for_malicious_virtual_table(self):
         resp = self.get_json_resp(url)
         self.assertEqual(resp["error"], "Only `SELECT` statements are allowed")
 
-    def test_external_metadata_for_mutistatement_virtual_table(self):
+    def test_external_metadata_for_multistatement_virtual_table(self):
         self.login(username="admin")
         table = SqlaTable(
             table_name="multistatement_sql_table",
diff --git a/tests/integration_tests/model_tests.py b/tests/integration_tests/model_tests.py
index f187eadfbb27a..da6c5e6a3c254 100644
--- a/tests/integration_tests/model_tests.py
+++ b/tests/integration_tests/model_tests.py
@@ -476,15 +476,15 @@ def test_query_with_expr_groupby_timeseries(self):
             # TODO(bkyryliuk): make it work for presto.
             return
 
-        def cannonicalize_df(df):
+        def canonicalize_df(df):
             ret = df.sort_values(by=list(df.columns.values), inplace=False)
             ret.reset_index(inplace=True, drop=True)
             return ret
 
         df1 = self.query_with_expr_helper(is_timeseries=True, inner_join=True)
-        name_list1 = cannonicalize_df(df1).name.values.tolist()
+        name_list1 = canonicalize_df(df1).name.values.tolist()
         df2 = self.query_with_expr_helper(is_timeseries=True, inner_join=False)
-        name_list2 = cannonicalize_df(df1).name.values.tolist()
+        name_list2 = canonicalize_df(df1).name.values.tolist()
 
         self.assertFalse(df2.empty)
         assert name_list2 == name_list1
diff --git a/tests/integration_tests/query_context_tests.py b/tests/integration_tests/query_context_tests.py
index 23bec876f72d2..5e5beae345b86 100644
--- a/tests/integration_tests/query_context_tests.py
+++ b/tests/integration_tests/query_context_tests.py
@@ -74,7 +74,7 @@ def test_schema_deserialization(self):
         for query_idx, query in enumerate(query_context.queries):
             payload_query = payload["queries"][query_idx]
 
-            # check basic properies
+            # check basic properties
             self.assertEqual(query.extras, payload_query["extras"])
             self.assertEqual(query.filter, payload_query["filters"])
             self.assertEqual(query.columns, payload_query["columns"])
diff --git a/tests/integration_tests/reports/api_tests.py b/tests/integration_tests/reports/api_tests.py
index a304f083159ab..22b9be9990b74 100644
--- a/tests/integration_tests/reports/api_tests.py
+++ b/tests/integration_tests/reports/api_tests.py
@@ -571,7 +571,7 @@ def test_get_list_report_schedule_filter_type(self):
     @pytest.mark.usefixtures("create_report_schedules")
     def test_get_related_report_schedule(self):
         """
-        ReportSchedule Api: Test get releated report schedule
+        ReportSchedule Api: Test get related report schedule
         """
         self.login(username="admin")
         related_columns = ["created_by", "chart", "dashboard", "database"]
diff --git a/tests/integration_tests/sqllab_tests.py b/tests/integration_tests/sqllab_tests.py
index a33a541a63245..19e397e8f6961 100644
--- a/tests/integration_tests/sqllab_tests.py
+++ b/tests/integration_tests/sqllab_tests.py
@@ -91,7 +91,7 @@ def test_sql_json(self):
         data = self.run_sql("SELECT * FROM birth_names LIMIT 10", "1")
         self.assertLess(0, len(data["data"]))
 
-        data = self.run_sql("SELECT * FROM unexistant_table", "2")
+        data = self.run_sql("SELECT * FROM nonexistent_table", "2")
         if backend() == "presto":
             assert (
                 data["errors"][0]["error_type"]
diff --git a/tests/unit_tests/charts/test_post_processing.py b/tests/unit_tests/charts/test_post_processing.py
index cfab4e3d74b8c..f63ee5d66af9c 100644
--- a/tests/unit_tests/charts/test_post_processing.py
+++ b/tests/unit_tests/charts/test_post_processing.py
@@ -64,7 +64,7 @@ def test_pivot_df_no_cols_no_rows_single_metric():
 """.strip()
     )
 
-    # tranpose_pivot and combine_metrics do nothing in this case
+    # transpose_pivot and combine_metrics do nothing in this case
     pivoted = pivot_df(
         df,
         rows=[],
@@ -169,7 +169,7 @@ def test_pivot_df_no_cols_no_rows_two_metrics():
 """.strip()
     )
 
-    # tranpose_pivot and combine_metrics do nothing in this case
+    # transpose_pivot and combine_metrics do nothing in this case
     pivoted = pivot_df(
         df,
         rows=[],
diff --git a/tests/unit_tests/db_engine_specs/test_snowflake.py b/tests/unit_tests/db_engine_specs/test_snowflake.py
index 3611c7214d2da..854d3f5f61c08 100644
--- a/tests/unit_tests/db_engine_specs/test_snowflake.py
+++ b/tests/unit_tests/db_engine_specs/test_snowflake.py
@@ -77,11 +77,11 @@ def test_extract_errors() -> None:
         )
     ]
 
-    msg = "syntax error line 1 at position 10 unexpected 'limmmited'."
+    msg = "syntax error line 1 at position 10 unexpected 'limited'."
     result = SnowflakeEngineSpec.extract_errors(Exception(msg))
     assert result == [
         SupersetError(
-            message='Please check your query for syntax errors at or near "limmmited". Then, try running your query again.',
+            message='Please check your query for syntax errors at or near "limited". Then, try running your query again.',
             error_type=SupersetErrorType.SYNTAX_ERROR,
             level=ErrorLevel.ERROR,
             extra={
diff --git a/tests/unit_tests/sql_parse_tests.py b/tests/unit_tests/sql_parse_tests.py
index 70e5d4d3b9c56..ba3da69aaefaf 100644
--- a/tests/unit_tests/sql_parse_tests.py
+++ b/tests/unit_tests/sql_parse_tests.py
@@ -675,7 +675,7 @@ def test_extract_tables_nested_select() -> None:
         """
 select (extractvalue(1,concat(0x7e,(select GROUP_CONCAT(COLUMN_NAME)
 from INFORMATION_SCHEMA.COLUMNS
-WHERE TABLE_NAME="bi_achivement_daily"),0x7e)));
+WHERE TABLE_NAME="bi_achievement_daily"),0x7e)));
 """
     ) == {Table("COLUMNS", "INFORMATION_SCHEMA")}