Commit
Lint error resolved
ANSHUMAN TRIPATHY authored and committed Oct 14, 2020
1 parent 575617f commit b68b6b5
Showing 3 changed files with 35 additions and 19 deletions.
python/tvm/relay/frontend/tensorflow.py (6 changes: 5 additions & 1 deletion)
@@ -889,6 +889,7 @@ def _impl(inputs, attr, params, mod):
 
     return _impl
 
+
 def _sparse_tensor_dense_matmul():
     # Sparse utility from Numpy
     from scipy import sparse
@@ -906,7 +907,9 @@ def _impl(inputs, attr, params, mod):
         cols = [x[1] for x in indices_tensor]
 
         # Create Numpy sparse Tensor(CSR)
-        weight_sp = sparse.csr_matrix((values_tensor, (rows, cols)), shape=tuple(dense_shape_tensor.tolist()))
+        weight_sp = sparse.csr_matrix(
+            (values_tensor, (rows, cols)), shape=tuple(dense_shape_tensor.tolist())
+        )
         weight_sp = sparse.csr_matrix(weight_sp.transpose())
 
         weight_data = _expr.const(weight_sp.data, weight_sp.data.dtype)
@@ -924,6 +927,7 @@
 
     return _impl
 
+
 def _identity():
     def _impl(inputs, attr, params, mod):
         return inputs[0]
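Aside: the wrapped hunk above builds a SciPy CSR matrix from the COO-style indices/values carried by a TF SparseTensor, then stores the CSR-formatted transpose of that weight. A minimal standalone sketch of the same conversion, with illustrative data (the variable names mirror the diff; the values are made up):

import numpy as np
from scipy import sparse

# COO-style components, shaped like a TF SparseTensor's fields (illustrative)
indices_tensor = np.array([[0, 0], [1, 2]])  # one (row, col) pair per nonzero
values_tensor = np.array([4.0, 8.0], dtype="float32")
dense_shape_tensor = np.array([3, 5])

rows = [x[0] for x in indices_tensor]
cols = [x[1] for x in indices_tensor]

# Same construction as the diff: CSR from (data, (rows, cols)) ...
weight_sp = sparse.csr_matrix(
    (values_tensor, (rows, cols)), shape=tuple(dense_shape_tensor.tolist())
)
# ... then re-wrap the transpose so it stays CSR-formatted rather than CSC
weight_sp = sparse.csr_matrix(weight_sp.transpose())

print(weight_sp.shape)  # (5, 3)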
python/tvm/topi/cuda/sparse.py (10 changes: 5 additions & 5 deletions)
@@ -369,12 +369,12 @@ def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
     ):
         if len(inputs[1].data.asnumpy().shape) == 1:
             sparse_matrix = sp.csr_matrix(
-                (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
-            ).tobsr()
-        else :
+                (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
+            ).tobsr()
+        else:
             sparse_matrix = sp.bsr_matrix(
-                (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
-            )
+                (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
+            )
         warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
         sparse_matrix = pad_sparse_matrix(sparse_matrix, warp_size)
         return relay.nn._make.sparse_dense_padded(
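Aside: the branch being re-indented above dispatches on the rank of the data constant: a 1-D data array means inputs[1..3] are CSR components (data, indices, indptr), which are converted to block form via .tobsr(); higher-rank data is already block-sparse and goes straight to sp.bsr_matrix. A minimal sketch of that dispatch with hand-written CSR components (illustrative values, not taken from the commit; the TVM-specific warp-size padding step is not reproduced here):

import numpy as np
import scipy.sparse as sp

# Illustrative CSR components for a 3x3 matrix with two nonzeros
data = np.array([4.0, 8.0])
indices = np.array([0, 2])       # column index of each nonzero
indptr = np.array([0, 1, 2, 2])  # row i spans data[indptr[i]:indptr[i+1]]

if len(data.shape) == 1:
    # 1-D data: interpret as CSR, then convert to block form
    sparse_matrix = sp.csr_matrix((data, indices, indptr)).tobsr()
else:
    # Blocked data: construct BSR directly
    sparse_matrix = sp.bsr_matrix((data, indices, indptr))

print(sparse_matrix.blocksize)  # (1, 1) after the CSR -> BSR conversion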
tests/python/frontend/tensorflow/test_forward.py (38 changes: 25 additions & 13 deletions)
@@ -1749,6 +1749,7 @@ def test_forward_batch_matmul():
     _test_batch_matmul((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False)
     _test_batch_matmul((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True)
 
+
 #######################################################################
 # SparseTensorDenseMatMul
 # ----------------------------------
@@ -1757,23 +1758,30 @@ def test_forward_batch_matmul():
 def _test_sparse_dense_matmul(indices, values, A_shape, B_shape, dtype, flip=False):
     """ One iteration of sparse_dense_matmul """
 
-    #TODO: Support adjoint options too
+    # TODO: Support adjoint options too
     for adjoint_a in [False]:
         for adjoint_b in [False]:
             with tf.Graph().as_default():
-                A_sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[4., 8.], dense_shape=A_shape)
+                A_sp = tf.sparse.SparseTensor(
+                    indices=[[0, 0], [1, 2]], values=[4.0, 8.0], dense_shape=A_shape
+                )
                 B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
 
                 if flip:
-                    result = tf.sparse.sparse_dense_matmul(B, A_sp, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+                    result = tf.sparse.sparse_dense_matmul(
+                        B, A_sp, adjoint_a=adjoint_a, adjoint_b=adjoint_b
+                    )
                 else:
-                    result = tf.sparse.sparse_dense_matmul(A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+                    result = tf.sparse.sparse_dense_matmul(
+                        A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
+                    )
 
                 B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
 
-                #TODO: There is an issue in cuda scheduling for csr, work in progress
+                # TODO: There is an issue in cuda scheduling for csr, work in progress
                 compare_tf_with_tvm([B_np], [B.name], result.name, no_gpu=True)
 
+
 def test_forward_sparse_dense_matmul():
     """ sparse_dense_matmul op test"""
     ###################################################################
@@ -1786,14 +1794,18 @@ def test_forward_sparse_dense_matmul():
     # [0, 0, 2, 0]
     # [0, 0, 0, 0]]
     #
-    #------------------------------------------------------------------
-
-    #TODO: False case for flip need to be supported
-    #_test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 4], [4, 3], "float32")
-    _test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 5], [4, 3], "float32", True)
-    _test_sparse_dense_matmul([[0, 0], [1, 2]], [4., 8.], [3, 3], [3, 3], "float32", True)
-    _test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3., 6., 9.], [5, 5], [5, 5], "float32", True)
-    _test_sparse_dense_matmul([[0, 0], [1, 3], [4, 3]], [3., 6., 9.], [9, 5], [7, 9], "float32", True)
+    # ------------------------------------------------------------------
+
+    # TODO: False case for flip need to be supported
+    # _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], "float32")
+    _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 5], [4, 3], "float32", True)
+    _test_sparse_dense_matmul([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], "float32", True)
+    _test_sparse_dense_matmul(
+        [[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], "float32", True
+    )
+    _test_sparse_dense_matmul(
+        [[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], "float32", True
+    )
 
 
 #######################################################################
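Aside: in plain NumPy/SciPy terms, the new test asserts that multiplying through the sparse representation matches an ordinary dense matmul. The tests above use flip=True (dense operand on the left); the sketch below checks the same equivalence with the sparse operand on the left, which SciPy supports directly (illustrative values echoing the first test case; no TF or TVM involved):

import numpy as np
from scipy import sparse

indices = [[0, 0], [1, 2]]
values = [4.0, 8.0]
rows = [i for i, _ in indices]
cols = [j for _, j in indices]
A_sp = sparse.csr_matrix((values, (rows, cols)), shape=(3, 5))

B = np.random.uniform(high=5.0, size=(5, 4)).astype("float32")

sparse_result = A_sp @ B           # SciPy's sparse-aware matmul
dense_result = A_sp.toarray() @ B  # dense reference
np.testing.assert_allclose(sparse_result, dense_result, rtol=1e-5)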
