diff --git a/dask_ml/linear_model/glm.py b/dask_ml/linear_model/glm.py
index 54abc0685..31ef4542a 100644
--- a/dask_ml/linear_model/glm.py
+++ b/dask_ml/linear_model/glm.py
@@ -38,7 +38,7 @@
 C : float
     Regularization strength. Note that ``dask-glm`` solvers use
-    the parameterization :math:`\lambda = 1 / C`
+    the parameterization :math:`\\lambda = 1 / C`
 fit_intercept : bool, default True
     Specifies if a constant (a.k.a. bias or intercept) should be
diff --git a/dask_ml/model_selection/_search.py b/dask_ml/model_selection/_search.py
index e38ae384c..1f74052ae 100644
--- a/dask_ml/model_selection/_search.py
+++ b/dask_ml/model_selection/_search.py
@@ -1144,9 +1144,8 @@ def fit(self, X, y=None, groups=None, **fit_params):
         if self.multimetric_:
             if self.refit is not False and (
                 not isinstance(self.refit, str)
-                or
                 # This will work for both dict / list (tuple)
-                self.refit not in scorer
+                or self.refit not in scorer
             ):
                 raise ValueError(
                     "For multi-metric scoring, the parameter "
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 250766eb1..abb7290f1 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -21,9 +21,10 @@
 import shutil
 import subprocess
 
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
+import packaging.version
+
 import dask_sphinx_theme
+from dask_ml import __version__ as version
 
 # import sys
 # sys.path.insert(0, os.path.abspath('.'))
@@ -99,10 +100,8 @@
 # built documents.
 #
 # The short X.Y version.
-version = ""
-# The full version, including alpha/beta/rc tags.
-from dask_ml import __version__ as version
-import packaging.version
+# version = ""
+
 release = packaging.version.parse(version).base_version
 
 # The language for content autogenerated by Sphinx. Refer to documentation
diff --git a/setup.cfg b/setup.cfg
index eabbddcd3..3557a416a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,6 +22,8 @@ include_trailing_comma=True
 force_grid_wrap=0
 combine_as_imports=True
 line_length=88
+skip=
+    docs/source/conf.py
 
 [coverage:run]
 source=dask_ml
diff --git a/tests/compose/test_column_transformer.py b/tests/compose/test_column_transformer.py
index 218ff0992..bf88a74b4 100644
--- a/tests/compose/test_column_transformer.py
+++ b/tests/compose/test_column_transformer.py
@@ -14,13 +14,13 @@
 
 
 def test_column_transformer():
     a = sklearn.compose.make_column_transformer(
-        (["A"], sklearn.preprocessing.OneHotEncoder(sparse=False)),
-        (["B"], sklearn.preprocessing.StandardScaler()),
+        (sklearn.preprocessing.OneHotEncoder(sparse=False), ["A"]),
+        (sklearn.preprocessing.StandardScaler(), ["B"]),
     )
     b = dask_ml.compose.make_column_transformer(
-        (["A"], dask_ml.preprocessing.OneHotEncoder(sparse=False)),
-        (["B"], dask_ml.preprocessing.StandardScaler()),
+        (dask_ml.preprocessing.OneHotEncoder(sparse=False), ["A"]),
+        (dask_ml.preprocessing.StandardScaler(), ["B"]),
     )
 
     a.fit(df)
diff --git a/tests/test_incremental.py b/tests/test_incremental.py
index 072d16684..40fb2bdfc 100644
--- a/tests/test_incremental.py
+++ b/tests/test_incremental.py
@@ -139,7 +139,9 @@ def test_score(xy_classification):
     client = distributed.Client(n_workers=2)
     X, y = xy_classification
-    inc = Incremental(SGDClassifier(max_iter=1000, random_state=0), scoring="accuracy")
+    inc = Incremental(
+        SGDClassifier(max_iter=1000, random_state=0, tol=1e-3), scoring="accuracy"
+    )
 
     with client:
         inc.fit(X, y, classes=[0, 1])
diff --git a/tests/test_pca.py b/tests/test_pca.py
index 50aa0dc73..992920aa7 100644
--- a/tests/test_pca.py
+++ b/tests/test_pca.py
@@ -375,8 +375,8 @@ def test_pca_validation():
         assert_raises_regex(
             ValueError,
-            "n_components={}L? must be between "
-            "{}L? and min\(n_samples, n_features\)="
+            r"n_components={}L? must be between "
+            "{}L? and min\\(n_samples, n_features\\)="
             "{}L? with svd_solver='{}'".format(
                 n_components, lower_limit[solver], smallest_d, solver_reported
             ),
@@ -389,9 +389,8 @@ def test_pca_validation():
         assert_raises_regex(
             ValueError,
-            "n_components={}L? must be "
+            r"n_components={}L? must be "
             "strictly less than "
-            "min\(n_samples, n_features\)={}L?"
+            "min\\(n_samples, n_features\\)={}L?"
             " with svd_solver='arpack'".format(n_components, smallest_d),
             dd.PCA(n_components, svd_solver=solver).fit,
             data,
@@ -689,7 +689,12 @@ def test_pca_bad_solver():
 
 @pytest.mark.parametrize(
     "svd_solver",
-    ["full", pytest.mark.xfail(reason="svd_compressed promotes")("randomized")],
+    [
+        "full",
+        pytest.param(
+            "randomized", marks=pytest.mark.xfail(reason="svd_compressed promotes")
+        ),
+    ],
 )
 def test_pca_float_dtype_preservation(svd_solver):
     # Ensure that PCA does not upscale the dtype when input is float32
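
Note on the tests/test_pca.py parametrize hunk: calling a mark like
pytest.mark.xfail(...) directly on a parametrize value was deprecated in
pytest 3.x and removed in pytest 4.0; the patch switches to
pytest.param(value, marks=...). A minimal self-contained sketch of the same
pattern follows; the test name and assertion are hypothetical, for
illustration only, not the real dtype-preservation test:

import pytest


@pytest.mark.parametrize(
    "svd_solver",
    [
        "full",
        # pytest.param lets one parametrized case carry marks such as xfail.
        pytest.param(
            "randomized", marks=pytest.mark.xfail(reason="svd_compressed promotes")
        ),
    ],
)
def test_solver_name_is_known(svd_solver):
    # Hypothetical stand-in for the real assertions on PCA dtype handling.
    assert svd_solver in {"full", "randomized"}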