Commit 4bd51ae

fix lint

eric-haibin-lin committed Jul 22, 2017
1 parent dda7893 commit 4bd51ae
Showing 4 changed files with 4 additions and 4 deletions.
2 changes: 1 addition & 1 deletion python/mxnet/kvstore.py
@@ -6,7 +6,7 @@
 import pickle
 from .ndarray import NDArray
 from .base import _LIB
-from .base import check_call, c_array, c_str, string_types, mx_uint, py_str, integer_types
+from .base import check_call, c_array, c_str, string_types, mx_uint, py_str
 from .base import NDArrayHandle, KVStoreHandle
 from . import optimizer as opt
 
1 change: 0 additions & 1 deletion python/mxnet/module/module.py
@@ -8,7 +8,6 @@
 import warnings
 
 from .. import context as ctx
-from .. import ndarray as nd
 from .. import optimizer as opt
 
 from .executor_group import DataParallelExecutorGroup
2 changes: 1 addition & 1 deletion python/mxnet/ndarray/op.py
@@ -7,7 +7,7 @@
 
 from ..ndarray_doc import _build_doc
 
-# Use different verison of SymbolBase
+# Use different version of SymbolBase
 # When possible, use cython to speedup part of computation.
 # pylint: disable=unused-import
 try:
3 changes: 2 additions & 1 deletion python/mxnet/optimizer.py
@@ -346,7 +346,8 @@ def create_state(self, index, weight):
"multi-precision doesn't supprot non-default weight yet"
weight_master_copy = array(weight, ctx=weight.context, dtype=numpy.float32)
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=numpy.float32, stype=weight.stype)
momentum = zeros(weight.shape, weight.context, dtype=numpy.float32,
stype=weight.stype)
return (momentum, weight_master_copy)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
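
For context, the reflowed call above sits in SGD.create_state's multi-precision path, which keeps a float32 master copy of a float16 weight plus a momentum buffer that matches the weight's storage type. A minimal usage sketch, assuming an MXNet build of this era where mx.optimizer.SGD accepts the multi_precision flag (shapes and hyperparameters are illustrative):

import mxnet as mx

# Float16 weight and gradient; the optimizer keeps its state in float32.
weight = mx.nd.ones((4, 4), dtype='float16')
grad = mx.nd.ones((4, 4), dtype='float16') * 0.1

opt = mx.optimizer.SGD(learning_rate=0.1, momentum=0.9, multi_precision=True)
state = opt.create_state(0, weight)   # (momentum, weight_master_copy), both float32
opt.update(0, weight, grad, state)    # update computed in float32, weight stays float16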
