
TYP: disallow comment-based annotation syntax #29741

Merged · 8 commits · Nov 21, 2019
4 changes: 4 additions & 0 deletions ci/code_checks.sh
@@ -194,6 +194,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
invgrep -R --include="*.py" --include="*.pyx" -E 'class.*:\n\n( )+"""' .
RET=$(($RET + $?)) ; echo $MSG "DONE"

+MSG='Check for use of comment-based annotation syntax' ; echo $MSG
+invgrep -R --include="*.py" -P '# type: (?!ignore)' pandas
+RET=$(($RET + $?)) ; echo $MSG "DONE"

MSG='Check that no file in the repo contains trailing whitespaces' ; echo $MSG
set -o pipefail
if [[ "$AZURE" == "true" ]]; then
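The new check greps for any "# type:" comment that is not a "# type: ignore" suppression. A minimal sketch of what that pattern matches, using Python's re module for illustration (invgrep is a helper defined elsewhere in code_checks.sh; its wrapper behaviour is not shown in this diff):

    import re

    # Same pattern as the new check: "# type:" not followed by "ignore".
    pattern = re.compile(r"# type: (?!ignore)")

    lines = [
        'primes = []  # type: List[int]',  # flagged: comment-based annotation
        'x = compute()  # type: ignore',   # allowed: mypy suppression comment
        'primes: List[int] = []',          # allowed: PEP 526 annotation
    ]
    for line in lines:
        print(bool(pattern.search(line)), line)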
10 changes: 5 additions & 5 deletions doc/source/development/contributing.rst
@@ -804,27 +804,27 @@ Types imports should follow the ``from typing import ...`` convention. So rather

import typing

-primes = [] # type: typing.List[int]
+primes: typing.List[int] = []

You should write

.. code-block:: python

from typing import List, Optional, Union

-primes = [] # type: List[int]
+primes: List[int] = []

``Optional`` should be used where applicable, so instead of

.. code-block:: python

-maybe_primes = [] # type: List[Union[int, None]]
+maybe_primes: List[Union[int, None]] = []

You should write

.. code-block:: python

-maybe_primes = [] # type: List[Optional[int]]
+maybe_primes: List[Optional[int]] = []

In some cases in the code base classes may define class variables that shadow builtins. This causes an issue as described in `Mypy 1775 <https://github.com/python/mypy/issues/1775#issuecomment-310969854>`_. The defensive solution here is to create an unambiguous alias of the builtin and use that without your annotation. For example, if you come across a definition like

@@ -840,7 +840,7 @@ The appropriate way to annotate this would be as follows
str_type = str

class SomeClass2:
-str = None # type: str_type
+str: str_type = None

In some cases you may be tempted to use ``cast`` from the typing module when you know better than the analyzer. This occurs particularly when using custom inference functions. For example

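The hunk above ends mid-discussion of ``cast``. As a hedged illustration of the kind of usage the guide refers to (the function and types below are made up for the example, not taken from pandas):

    from typing import Union, cast

    def double_if_int(value: Union[int, str]) -> int:
        # The caller is assumed to have validated that value is an int here,
        # so we tell the type checker something it cannot infer on its own.
        return cast(int, value) * 2

    print(double_if_int(21))  # 42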
8 changes: 4 additions & 4 deletions pandas/_config/config.py
@@ -58,16 +58,16 @@
RegisteredOption = namedtuple("RegisteredOption", "key defval doc validator cb")

# holds deprecated option metdata
-_deprecated_options = {} # type: Dict[str, DeprecatedOption]
+_deprecated_options: Dict[str, DeprecatedOption] = {}

# holds registered option metdata
-_registered_options = {} # type: Dict[str, RegisteredOption]
+_registered_options: Dict[str, RegisteredOption] = {}

# holds the current values for registered options
-_global_config = {} # type: Dict[str, str]
+_global_config: Dict[str, str] = {}

# keys which have a special meaning
-_reserved_keys = ["all"] # type: List[str]
+_reserved_keys: List[str] = ["all"]


class OptionError(AttributeError, KeyError):
2 changes: 1 addition & 1 deletion pandas/_version.py
@@ -47,7 +47,7 @@ class NotThisMethod(Exception):
pass


-HANDLERS = {} # type: Dict[str, Dict[str, Callable]]
+HANDLERS: Dict[str, Dict[str, Callable]] = {}


def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator
24 changes: 12 additions & 12 deletions pandas/compat/numpy/function.py
@@ -106,7 +106,7 @@ def validate_argmax_with_skipna(skipna, args, kwargs):
return skipna


-ARGSORT_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[Union[int, str]]]
+ARGSORT_DEFAULTS: "OrderedDict[str, Optional[Union[int, str]]]" = OrderedDict()
ARGSORT_DEFAULTS["axis"] = -1
ARGSORT_DEFAULTS["kind"] = "quicksort"
ARGSORT_DEFAULTS["order"] = None
@@ -122,7 +122,7 @@ def validate_argmax_with_skipna(skipna, args, kwargs):

# two different signatures of argsort, this second validation
# for when the `kind` param is supported
-ARGSORT_DEFAULTS_KIND = OrderedDict() # type: OrderedDict[str, Optional[int]]
+ARGSORT_DEFAULTS_KIND: "OrderedDict[str, Optional[int]]" = OrderedDict()
ARGSORT_DEFAULTS_KIND["axis"] = -1
ARGSORT_DEFAULTS_KIND["order"] = None
validate_argsort_kind = CompatValidator(
@@ -169,14 +169,14 @@ def validate_clip_with_axis(axis, args, kwargs):
return axis


-COMPRESS_DEFAULTS = OrderedDict() # type: OrderedDict[str, Any]
+COMPRESS_DEFAULTS: "OrderedDict[str, Any]" = OrderedDict()
COMPRESS_DEFAULTS["axis"] = None
COMPRESS_DEFAULTS["out"] = None
validate_compress = CompatValidator(
COMPRESS_DEFAULTS, fname="compress", method="both", max_fname_arg_count=1
)

-CUM_FUNC_DEFAULTS = OrderedDict() # type: OrderedDict[str, Any]
+CUM_FUNC_DEFAULTS: "OrderedDict[str, Any]" = OrderedDict()
CUM_FUNC_DEFAULTS["dtype"] = None
CUM_FUNC_DEFAULTS["out"] = None
validate_cum_func = CompatValidator(
@@ -202,7 +202,7 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
return skipna


-ALLANY_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[bool]]
+ALLANY_DEFAULTS: "OrderedDict[str, Optional[bool]]" = OrderedDict()
ALLANY_DEFAULTS["dtype"] = None
ALLANY_DEFAULTS["out"] = None
ALLANY_DEFAULTS["keepdims"] = False
@@ -224,28 +224,28 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
)

-RESHAPE_DEFAULTS = dict(order="C") # type: Dict[str, str]
+RESHAPE_DEFAULTS: Dict[str, str] = dict(order="C")
validate_reshape = CompatValidator(
RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
)

-REPEAT_DEFAULTS = dict(axis=None) # type: Dict[str, Any]
+REPEAT_DEFAULTS: Dict[str, Any] = dict(axis=None)
validate_repeat = CompatValidator(
REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
)

-ROUND_DEFAULTS = dict(out=None) # type: Dict[str, Any]
+ROUND_DEFAULTS: Dict[str, Any] = dict(out=None)
validate_round = CompatValidator(
ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
)

-SORT_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[Union[int, str]]]
+SORT_DEFAULTS: "OrderedDict[str, Optional[Union[int, str]]]" = OrderedDict()
SORT_DEFAULTS["axis"] = -1
SORT_DEFAULTS["kind"] = "quicksort"
SORT_DEFAULTS["order"] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")

-STAT_FUNC_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[Any]]
+STAT_FUNC_DEFAULTS: "OrderedDict[str, Optional[Any]]" = OrderedDict()
STAT_FUNC_DEFAULTS["dtype"] = None
STAT_FUNC_DEFAULTS["out"] = None

@@ -273,13 +273,13 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name):
MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1
)

-STAT_DDOF_FUNC_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[bool]]
+STAT_DDOF_FUNC_DEFAULTS: "OrderedDict[str, Optional[bool]]" = OrderedDict()
STAT_DDOF_FUNC_DEFAULTS["dtype"] = None
STAT_DDOF_FUNC_DEFAULTS["out"] = None
STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs")

-TAKE_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[str]]
+TAKE_DEFAULTS: "OrderedDict[str, Optional[str]]" = OrderedDict()
TAKE_DEFAULTS["out"] = None
TAKE_DEFAULTS["mode"] = "raise"
validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
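A note on the new annotations in this file being written as string literals: a PEP 526 annotation on an assignment is evaluated at import time, and collections.OrderedDict was likely not subscriptable on all Python versions pandas supported at the time, so quoting the annotation defers evaluation to the type checker. A minimal sketch of the idea:

    from collections import OrderedDict
    from typing import Any

    # Quoted form: stored in __annotations__ as a plain string, never evaluated,
    # so OrderedDict does not need to support [] at runtime.
    COMPRESS_DEFAULTS: "OrderedDict[str, Any]" = OrderedDict()

    # Unquoted form: OrderedDict[str, Any] would be evaluated on import and can
    # raise TypeError on interpreters where OrderedDict is not subscriptable.
    # COMPRESS_DEFAULTS: OrderedDict[str, Any] = OrderedDict()

    print(__annotations__["COMPRESS_DEFAULTS"])  # prints the literal string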
4 changes: 2 additions & 2 deletions pandas/core/accessor.py
@@ -11,8 +11,8 @@


class DirNamesMixin:
-_accessors = set() # type: Set[str]
-_deprecations = frozenset() # type: FrozenSet[str]
+_accessors: Set[str] = set()
+_deprecations: FrozenSet[str] = frozenset()

def _dir_deletions(self):
"""
2 changes: 1 addition & 1 deletion pandas/core/algorithms.py
@@ -50,7 +50,7 @@
from pandas.core.construction import array, extract_array
from pandas.core.indexers import validate_indices

-_shared_docs = {} # type: Dict[str, str]
+_shared_docs: Dict[str, str] = {}


# --------------- #
3 changes: 2 additions & 1 deletion pandas/core/apply.py
@@ -34,8 +34,9 @@ def frame_apply(
""" construct and return a row or column based frame apply object """

axis = obj._get_axis_number(axis)
+klass: Type[FrameApply]
if axis == 0:
-klass = FrameRowApply # type: Type[FrameApply]
+klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply

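The change above shows the replacement idiom for a variable whose type cannot be inferred from a single assignment: declare the type on its own line, then assign a value in each branch. A minimal sketch of the pattern with made-up class names:

    from typing import Type

    class FrameApplyBase: ...
    class RowApply(FrameApplyBase): ...
    class ColumnApply(FrameApplyBase): ...

    def pick_apply_class(axis: int) -> Type[FrameApplyBase]:
        # A bare annotation declares the type for the checker without binding a value.
        klass: Type[FrameApplyBase]
        if axis == 0:
            klass = RowApply
        else:
            klass = ColumnApply
        return klass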
2 changes: 1 addition & 1 deletion pandas/core/arrays/base.py
@@ -29,7 +29,7 @@

_not_implemented_message = "{} does not implement {}."

-_extension_array_shared_docs = dict() # type: Dict[str, str]
+_extension_array_shared_docs: Dict[str, str] = dict()


def try_cast_to_ea(cls_or_instance, obj, dtype=None):
2 changes: 1 addition & 1 deletion pandas/core/arrays/datetimelike.py
@@ -51,7 +51,7 @@


class AttributesMixin:
-_data = None # type: np.ndarray
+_data: np.ndarray

@classmethod
def _simple_new(cls, values, **kwargs):
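One behavioural nuance of changes like the one above: the old form bound a real class attribute with the value None, while a bare annotation only records the name in __annotations__ and creates no attribute, so subclasses (or __init__) are expected to set it. A small sketch with illustrative class names:

    import numpy as np

    class OldStyle:
        _data = None  # the comment-style annotation left a real class attribute

    class NewStyle:
        _data: np.ndarray  # declared for the type checker only; no attribute created

    print(hasattr(OldStyle, "_data"))         # True (value is None)
    print(hasattr(NewStyle, "_data"))         # False
    print(NewStyle.__annotations__["_data"])  # <class 'numpy.ndarray'>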
2 changes: 1 addition & 1 deletion pandas/core/arrays/datetimes.py
@@ -320,7 +320,7 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps
# -----------------------------------------------------------------
# Constructors

-_dtype = None # type: Union[np.dtype, DatetimeTZDtype]
+_dtype: Union[np.dtype, DatetimeTZDtype]
_freq = None

def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
4 changes: 2 additions & 2 deletions pandas/core/arrays/integer.py
@@ -40,9 +40,9 @@ class _IntegerDtype(ExtensionDtype):
The attributes name & type are set when these subclasses are created.
"""

-name = None # type: str
+name: str
base = None
-type = None # type: Type
+type: Type
na_value = np.nan

def __repr__(self) -> str:
6 changes: 3 additions & 3 deletions pandas/core/arrays/period.py
@@ -161,7 +161,7 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
_scalar_type = Period

# Names others delegate to us
-_other_ops = [] # type: List[str]
+_other_ops: List[str] = []
_bool_ops = ["is_leap_year"]
_object_ops = ["start_time", "end_time", "freq"]
_field_ops = [
@@ -894,9 +894,9 @@ def period_array(

data = np.asarray(data)

+dtype: Optional[PeriodDtype]
if freq:
-# typed Optional here because the else block below assigns None
-dtype = PeriodDtype(freq) # type: Optional[PeriodDtype]
+dtype = PeriodDtype(freq)
else:
dtype = None

4 changes: 2 additions & 2 deletions pandas/core/arrays/timedeltas.py
@@ -161,8 +161,8 @@ class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps):
_scalar_type = Timedelta
__array_priority__ = 1000
# define my properties & methods for delegation
-_other_ops = [] # type: List[str]
-_bool_ops = [] # type: List[str]
+_other_ops: List[str] = []
+_bool_ops: List[str] = []
_object_ops = ["freq"]
_field_ops = ["days", "seconds", "microseconds", "nanoseconds"]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
8 changes: 4 additions & 4 deletions pandas/core/base.py
@@ -36,7 +36,7 @@
from pandas.core.arrays import ExtensionArray
import pandas.core.nanops as nanops

-_shared_docs = dict() # type: Dict[str, str]
+_shared_docs: Dict[str, str] = dict()
_indexops_doc_kwargs = dict(
klass="IndexOpsMixin",
inplace="",
@@ -603,7 +603,7 @@ def _is_builtin_func(self, arg):


class ShallowMixin:
-_attributes = [] # type: List[str]
+_attributes: List[str] = []

def _shallow_copy(self, obj=None, **kwargs):
"""
@@ -627,7 +627,7 @@ class IndexOpsMixin:

# ndarray compatibility
__array_priority__ = 1000
-_deprecations = frozenset(
+_deprecations: FrozenSet[str] = frozenset(
[
"tolist", # tolist is not deprecated, just suppressed in the __dir__
"base",
"flags",
"strides",
]
-) # type: FrozenSet[str]
+)

def transpose(self, *args, **kwargs):
"""
2 changes: 1 addition & 1 deletion pandas/core/computation/expr.py
@@ -378,7 +378,7 @@ class BaseExprVisitor(ast.NodeVisitor):
preparser : callable
"""

-const_type = Constant # type: Type[Term]
+const_type: Type[Term] = Constant
term_type = Term

binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
2 changes: 1 addition & 1 deletion pandas/core/dtypes/base.py
@@ -81,7 +81,7 @@ def __from_arrow__(
provided for registering virtual subclasses.
"""

-_metadata = () # type: Tuple[str, ...]
+_metadata: Tuple[str, ...] = ()

def __str__(self) -> str:
return self.name