
Commit 6879a50

Fixing bugs in documentation. Tests now include tensors of random shapes.
access2rohit committed Jul 26, 2018
1 parent 923f8a8 commit 6879a50
Showing 5 changed files with 84 additions and 32 deletions.
16 changes: 16 additions & 0 deletions python/mxnet/ndarray/ndarray.py
@@ -1302,6 +1302,22 @@ def flip(self, *args, **kwargs):
        """
        return op.flip(self, *args, **kwargs)

+    def depth_to_space(self, *args, **kwargs):
+        """Convenience fluent method for :py:func:`depth_to_space`.
+
+        The arguments are the same as for :py:func:`depth_to_space`, with
+        this array as data.
+        """
+        return op.depth_to_space(self, *args, **kwargs)
+
+    def space_to_depth(self, *args, **kwargs):
+        """Convenience fluent method for :py:func:`space_to_depth`.
+
+        The arguments are the same as for :py:func:`space_to_depth`, with
+        this array as data.
+        """
+        return op.space_to_depth(self, *args, **kwargs)
+
    def diag(self, k=0, **kwargs):
        """Convenience fluent method for :py:func:`diag`.
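As a usage note, the fluent methods added above simply forward to the operator namespace, so the new operators can be chained off an NDArray directly. A minimal sketch, assuming an MXNet build that contains this commit (shapes and values are illustrative):

```python
import mxnet as mx

# 4 channels = block_size**2 for block_size = 2, so the shape is valid.
x = mx.nd.arange(16).reshape((1, 4, 2, 2))

y = x.depth_to_space(2)             # fluent form; shape (1, 1, 4, 4)
y_ref = mx.nd.depth_to_space(x, 2)  # equivalent operator form

# space_to_depth is the documented inverse, so the round trip is the identity.
z = y.space_to_depth(2)             # shape (1, 4, 2, 2)
assert (z.asnumpy() == x.asnumpy()).all()
```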
16 changes: 16 additions & 0 deletions python/mxnet/symbol/symbol.py
@@ -2046,6 +2046,22 @@ def flip(self, *args, **kwargs):
        """
        return op.flip(self, *args, **kwargs)

+    def depth_to_space(self, *args, **kwargs):
+        """Convenience fluent method for :py:func:`depth_to_space`.
+
+        The arguments are the same as for :py:func:`depth_to_space`, with
+        this array as data.
+        """
+        return op.depth_to_space(self, *args, **kwargs)
+
+    def space_to_depth(self, *args, **kwargs):
+        """Convenience fluent method for :py:func:`space_to_depth`.
+
+        The arguments are the same as for :py:func:`space_to_depth`, with
+        this array as data.
+        """
+        return op.space_to_depth(self, *args, **kwargs)
+
    def diag(self, k=0, **kwargs):
        """Convenience fluent method for :py:func:`diag`.
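The symbol.py change mirrors the NDArray one for the symbolic API. A minimal sketch of the fluent symbolic form plus shape inference, again assuming a build with this commit (the sample shape is illustrative):

```python
import mxnet as mx

data = mx.sym.Variable('data')
out = data.depth_to_space(2)  # fluent form; same graph as mx.sym.depth_to_space(data, 2)

# Shape inference confirms the (N, C/block**2, H*block, W*block) output layout.
_, out_shapes, _ = out.infer_shape(data=(1, 4, 2, 3))
print(out_shapes[0])  # (1, 1, 4, 6)
```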
6 changes: 3 additions & 3 deletions src/operator/tensor/matrix_op-inl.h
@@ -2243,7 +2243,7 @@ MSHADOW_XINLINE void update_index(int index_position, int dim_size, int *idx,
}

/*!
- * \brief This function preforms the tensor transpose (0, 1, 2, 3, 4, 5) ->
+ * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
 * to the ith index of output tensor
 * \param i tensor index
@@ -2361,12 +2361,12 @@ inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
      << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
-  CHECK_EQ(in_shape[2]%block, 0)
+  CHECK_EQ(in_shape[2] % block, 0)
      << "Cannot perform Depth To Space operation on the specified tensor."
         " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
      << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
-  CHECK_EQ(in_shape[3]%block, 0)
+  CHECK_EQ(in_shape[3] % block, 0)
      << "Cannot perform Depth To Space operation on the specified tensor."
         " Dimension:3(2nd space dimension) should be a multiple of 'block' ";

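The CHECK conditions above encode the operator's shape contract: a 4D input whose two spatial dimensions are each a multiple of `block`. As a reading aid, here is the same contract as a small Python sketch (the helper name `space_to_depth_out_shape` is ours, not MXNet's; the `block > 0` guard is assumed, since the tests below expect `block = 0` to be rejected as well):

```python
def space_to_depth_out_shape(shape, block):
    # Mirrors the SpaceToDepthOpShape checks shown above.
    assert len(shape) == 4, "operation requires a 4D tensor"
    n, c, h, w = shape
    assert block > 0 and c > 0 and h > 0 and w > 0, "dimensions cannot be 0"
    assert h % block == 0, "dimension:2 must be a multiple of 'block'"
    assert w % block == 0, "dimension:3 must be a multiple of 'block'"
    return (n, c * block * block, h // block, w // block)

assert space_to_depth_out_shape((1, 1, 4, 6), 2) == (1, 4, 2, 3)
```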
29 changes: 17 additions & 12 deletions src/operator/tensor/matrix_op.cc
@@ -910,20 +910,22 @@ NNVM_REGISTER_OP(_backward_squeeze)
.set_attr<FCompute>("FCompute<cpu>", UnaryOp::IdentityCompute<cpu>);

NNVM_REGISTER_OP(depth_to_space)
-.describe(R"code(Similar to ONNX DepthToSpace operator:
+.describe(R"code(Rearranges (permutes) data from depth into blocks of spatial data.
+Similar to ONNX DepthToSpace operator:
https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace.
-Rearranges(permutes) data from depth into blocks of spatial data.
The output is a new tensor where the values from depth dimension are moved in spatial blocks
to height and width dimension. The reverse of this operation is ``space_to_depth``.
.. math::
-x \prime = reshape(x, [N, block_size, block_size, C / (block_size ^ 2), H * block_size, W * block_size]),
-x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2])
-y = reshape(x \prime \prime, [N, C / (block ^ 2), H * block_size, W * block_size]\)
+\begin{gather*}
+x \prime = reshape(x, [N, block\_size, block\_size, C / (block\_size ^ 2), H, W]) \\
+x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2]) \\
+y = reshape(x \prime \prime, [N, C / (block\_size ^ 2), H * block\_size, W * block\_size])
+\end{gather*}
where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width]
-and :math:`y` is the output tensor of layout :math:`[N, C / (block_size ^ 2), H * block_size, W * block_size]`
+and :math:`y` is the output tensor of layout :math:`[N, C / (block\_size ^ 2), H * block\_size, W * block\_size]`
Example::
@@ -960,20 +962,23 @@ Example::
.add_arguments(DepthToSpaceParam::__FIELDS__());

NNVM_REGISTER_OP(space_to_depth)
-.describe(R"code(Similar to ONNX SpaceToDepth operator:
+.describe(R"code(Rearranges (permutes) blocks of spatial data into depth.
+Similar to ONNX SpaceToDepth operator:
https://github.com/onnx/onnx/blob/master/docs/Operators.md#SpaceToDepth
-Rearranges(permutes) blocks of spatial data into depth.
The output is a new tensor where the values from height and width dimension are
moved to the depth dimension. The reverse of this operation is ``depth_to_space``.
.. math::
-x \prime = reshape(x, [N, C, H / block_size, block_size, W / block_size, block_size]),
-x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4])
-y = reshape(x \prime \prime, [N, C * (block ^ 2), H / block_size, W / block_size]\)
+\begin{gather*}
+x \prime = reshape(x, [N, C, H / block\_size, block\_size, W / block\_size, block\_size]) \\
+x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4]) \\
+y = reshape(x \prime \prime, [N, C * (block\_size ^ 2), H / block\_size, W / block\_size])
+\end{gather*}
where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width]
-and :math:`y` is the output tensor of layout :math:`[N, C * (block ^ 2), H / block, W / block]`
+and :math:`y` is the output tensor of layout :math:`[N, C * (block\_size ^ 2), H / block\_size, W / block\_size]`
Example::
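Both docstrings describe the same reshape → transpose → reshape pipeline, and the two transpose permutations are inverses of each other, which is why each operator undoes the other. A NumPy sketch of the math above (functionally the same construction the updated tests use as their reference `f`):

```python
import numpy as np

def depth_to_space(x, block):
    n, c, h, w = x.shape  # requires c % block**2 == 0
    tmp = x.reshape(n, block, block, c // block**2, h, w)
    tmp = tmp.transpose(0, 3, 4, 1, 5, 2)
    return tmp.reshape(n, c // block**2, h * block, w * block)

def space_to_depth(x, block):
    n, c, h, w = x.shape  # requires h % block == 0 and w % block == 0
    tmp = x.reshape(n, c, h // block, block, w // block, block)
    tmp = tmp.transpose(0, 3, 5, 1, 2, 4)
    return tmp.reshape(n, c * block**2, h // block, w // block)

x = np.arange(24).reshape(1, 4, 2, 3)
assert np.array_equal(space_to_depth(depth_to_space(x, 2), 2), x)
```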
49 changes: 32 additions & 17 deletions tests/python/unittest/test_operator.py
@@ -6687,15 +6687,20 @@ def f(x, blocksize):
        y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
        return y

-    shape_inp = (1,4,2,3)
-    block = 2
+    block = random.randint(2, 4)
+    rand_mul1 = random.randint(1, 4)
+    n = random.randint(1, 5)
+    c = block * block * rand_mul1
+    h = random.randint(1, 5)
+    w = random.randint(1, 5)
+    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.depth_to_space(data, block)
    assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)

-    shape_out = (1,1,4,6)
+    shape_out = (n, c // (block ** 2), h * block, w * block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.depth_to_space(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
@@ -6704,22 +6709,24 @@ def f(x, blocksize):
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])

    def test_invalid_depth_dim():
-        invalid_shape_inp = (1,3,2,3)
-        block = 2
+        invalid_shape_inp = (n, block - 1, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    def test_invalid_space_dim():
-        invalid_shape_inp = (1,4,2,3)
-        block = 2
+        invalid_shape_inp = (n, block ** 2, 0, block + 1)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    def test_invalid_block_size():
-        invalid_shape_inp = (1,0,2,3)
-        block = 2
+        block = 0
+        invalid_shape_inp = (n, c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

+    test_invalid_depth_dim()
+    test_invalid_space_dim()
+    test_invalid_block_size()

@with_seed()
def test_spacetodepth():
@@ -6730,15 +6737,21 @@ def f(x, blocksize):
        y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
        return y

-    shape_inp = (1,1,4,6)
-    block = 2
+    block = random.randint(2, 4)
+    rand_mul1 = random.randint(1, 4)
+    rand_mul2 = random.randint(1, 4)
+    n = random.randint(1, 5)
+    c = random.randint(1, 5)
+    h = block * rand_mul1
+    w = block * rand_mul2
+    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.space_to_depth(data, block)
    assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)

-    shape_out = (1,4,2,3)
+    shape_out = (n, c * (block ** 2), h // block, w // block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.space_to_depth(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
@@ -6747,22 +6760,24 @@ def f(x, blocksize):
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])

    def test_invalid_space_dim():
-        invalid_shape_inp = (1,1,2,3)
-        block = 2
+        invalid_shape_inp = (n, c, block - 1, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    def test_invalid_block_size():
-        invalid_shape_inp = (1,1,4,2)
        block = 0
+        invalid_shape_inp = (n, c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    def test_invalid_depth_dim():
-        invalid_shape_inp = (1,0,4,2)
-        block = 2
+        invalid_shape_inp = (n, 0, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

+    test_invalid_space_dim()
+    test_invalid_block_size()
+    test_invalid_depth_dim()

if __name__ == '__main__':
    import nose
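A detail worth noting about the randomized tests above: the sampled shapes are valid by construction rather than by rejection sampling, because the constrained dimension is always built as a multiple of the block size. A standalone sanity check of that invariant:

```python
import random

for _ in range(1000):
    block = random.randint(2, 4)
    # depth_to_space input: channel count is forced to a multiple of block**2.
    c = block * block * random.randint(1, 4)
    assert c % (block ** 2) == 0
    # space_to_depth input: spatial dims are forced to multiples of block.
    h = block * random.randint(1, 4)
    w = block * random.randint(1, 4)
    assert h % block == 0 and w % block == 0
```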
