diff --git a/include/mxnet/c_api.h b/include/mxnet/c_api.h
index bd519fd7f417..608eec5591db 100644
--- a/include/mxnet/c_api.h
+++ b/include/mxnet/c_api.h
@@ -352,6 +352,17 @@ MXNET_DLL int MXNDArraySyncCopyFromCPU(NDArrayHandle handle,
 MXNET_DLL int MXNDArraySyncCopyToCPU(NDArrayHandle handle,
                                      void *data,
                                      size_t size);
+/*!
+ * \brief Copy src.data() to dst.data() if i = -1, else dst.aux_data(i) if i >= 0
+ * This function blocks. Do not use it in performance critical code.
+ * \param handle_dst handle of a dst ndarray whose data/aux_data has been allocated
+ * \param handle_src handle of a src ndarray which has default storage type
+ * \param i dst data blob indicator
+ */
+MXNET_DLL int MXNDArraySyncCopyFromNDArray(NDArrayHandle handle_dst,
+                                           const NDArrayHandle handle_src,
+                                           const int i);
+
 /*!
  * \brief Wait until all the pending writes with respect NDArray are finished.
  *        Always call this before read data out synchronizely.
@@ -458,12 +469,20 @@ MXNET_DLL int MXNDArrayGetAuxType(NDArrayHandle handle,
                                   mx_uint i,
                                   int *out_type);
 
-// Get the ith aux data blob wrapped in an NDArray
+/*!
+ * \brief Get a deep copy of the ith aux data blob
+ * in the form of an NDArray of default storage type.
+ * This function blocks. Do not use it in performance critical code.
+ */
 MXNET_DLL int MXNDArrayGetAuxNDArray(NDArrayHandle handle,
                                      mx_uint i,
                                      NDArrayHandle *out);
 
-// Get the data blob wrapped in an NDArray
+/*!
+ * \brief Get a deep copy of the data blob
+ * in the form of an NDArray of default storage type.
+ * This function blocks. Do not use it in performance critical code.
+ */
 MXNET_DLL int MXNDArrayGetDataNDArray(NDArrayHandle handle,
                                       NDArrayHandle *out);
 /*!
diff --git a/include/mxnet/ndarray.h b/include/mxnet/ndarray.h
index 662b45546cb4..df6faa1bc4eb 100644
--- a/include/mxnet/ndarray.h
+++ b/include/mxnet/ndarray.h
@@ -146,11 +146,9 @@ class NDArray {
   *        make sure the memory region is available through out the life of NDArray
   * \param data the memory content of static data
   * \param dev_id the device id this tensor sits at
-  * \param shared_var the same var handle shared with others.
-  *        It will not be deleted during destruction.
   */
-  NDArray(const TBlob &data, int dev_id, Engine::VarHandle shared_var = nullptr)
-      : ptr_(std::make_shared<Chunk>(data, dev_id, shared_var)), shape_(data.shape_),
+  NDArray(const TBlob &data, int dev_id)
+      : ptr_(std::make_shared<Chunk>(data, dev_id)), shape_(data.shape_),
         dtype_(data.type_flag_), entry_({nullptr, 0, 0}) {
 #if MKL_EXPERIMENTAL == 1
     Mkl_mem_ = std::make_shared<MKLMemHolder>();
@@ -166,8 +164,6 @@ class NDArray {
   * \param data the memory content of static data
   * \param aux_data the memory content of static aux data
   * \param dev_id the device id this tensor sits at
-  * \param shared_var the same var handle shared with others.
-  *        It will not be deleted during destruction.
   */
  NDArray(const NDArrayStorageType stype, const TShape &shape, const TBlob &data,
          const std::vector<TBlob> &aux_data, int dev_id)
@@ -423,6 +419,12 @@ class NDArray {
   * \param size the size of the source array, in sizeof(DType) not raw btyes.
   */
  void SyncCopyFromCPU(const void *data, size_t size) const;
+
+  /*!
+   * \brief Copy from src.data()/aux_data(i) to this->data()/aux_data(j)
+   */
+  void SyncCopyFromNDArray(const NDArray &src, int i = -1, int j = -1);
+
  /*!
   * \brief Do a synchronize copy to a continugous CPU memory region.
   *
@@ -448,19 +450,19 @@ class NDArray {
   * \return idx-th sub array NDArray
   */
  NDArray At(index_t idx) const;
-  // Wrap the tblob of aux data into an NDArray which shares the same variable with the
-  // current one.
-  inline const NDArray aux_ndarray(size_t i) const {
-    CHECK_NE(storage_type(), kDefaultStorage);
-    CHECK(i < ptr_->aux_shapes.size());
-    return NDArray(aux_data(i), ctx().dev_id, var());
-  }
-  // Wrap the tblob of data into an NDArray which shares the same variable with the
-  // current one.
-  inline const NDArray data_ndarray() const {
-    CHECK_NE(storage_type(), kDefaultStorage);
-    return NDArray(data(), ctx().dev_id, var());
-  }
+
+  /*!
+   * \brief Generate a deep copy of aux_data(i) returned as
+   *        a default storage type NDArray
+   */
+  NDArray aux_ndarray(size_t i) const;
+
+  /*!
+   * \brief Generate a deep copy of data() returned as a
+   *        default storage type NDArray
+   */
+  NDArray data_ndarray() const;
+
  /*!
   * \brief Create a NDArray that shares memory with current one
   *        The new array must have smaller memory size than the current array.
@@ -506,6 +508,23 @@ class NDArray {
     CHECK_EQ(storage_type(), kDefaultStorage);
     ptr_->CheckAndAlloc();
   }
+
+  /*!
+   * \brief Allocate the space if the allocation has been delayed
+   *        or the requested size is bigger than the available one.
+   *        This function can only be called by ndarray of default
+   *        storage type and effectively changes the ndarray's shape_.
+   * Note: This function is named this way to avoid an overload conflict
+   *       with CheckAndAlloc(const std::vector<TShape> &aux_shapes), since
+   *       TShape tmp = some_shape is equivalent to TShape tmp = {some_shape}.
+   */
+  void ReshapeAndAlloc(const TShape& shape) {
+    CHECK_EQ(storage_type(), kDefaultStorage);
+    CHECK(!is_none());
+    shape_ = shape;
+    ptr_->CheckAndAlloc(shape.Size() * mshadow::mshadow_sizeof(dtype_));
+  }
+
  /* !
   * \brief Alloc memory for non-default storage
   *        aux_shape is only known at run time
@@ -581,8 +600,6 @@ class NDArray {
     // The shape of aux data. The default value for the shape depends on the type of storage.
     // If aux_shapes[i].Size() is zero, aux data i is empty.
     std::vector<TShape> aux_shapes;
-    // \brief skip the deletion of var handle. Usually set when shared_var is present.
-    bool skip_delete_var = false;
 
     /*! \brief default cosntructor */
     Chunk() : static_data(true), delay_alloc(false) {}
@@ -598,17 +615,10 @@ class NDArray {
       if (!delay_alloc_) this->CheckAndAlloc();
     }
-    Chunk(const TBlob &data, int dev_id, Engine::VarHandle shared_var)
+    Chunk(const TBlob &data, int dev_id)
         : static_data(true), delay_alloc(false) {
       CHECK(storage_type == kDefaultStorage);
-      // init var
-      if (shared_var == nullptr) {
-        var = Engine::Get()->NewVariable();
-      } else {
-        skip_delete_var = true;
-        var = shared_var;
-      }
-      // init ctx
+      var = Engine::Get()->NewVariable();
       if (data.dev_mask() == cpu::kDevMask) {
         ctx = Context::CPU();
       } else {
@@ -633,6 +643,9 @@ class NDArray {
       // aux_handles always reflect the correct number of aux data
       for (size_t i = 0; i < aux_shapes.size(); i++) {
         CheckAndAllocAuxData(i, aux_shapes[i]);
+        // this line is needed in case aux_shapes[i].Size() = 0:
+        // aux_handles[i] will not be updated and would only keep its default value.
+        aux_handles[i].ctx = ctx;
       }
       if (!delay_alloc) {
         CheckAndAllocData(storage_shape, dtype);
@@ -677,6 +690,22 @@ class NDArray {
         delay_alloc = false;
       }
     }
+
+    /*! \brief Check and alloc memory for a dense ndarray */
+    // size is the number of bytes
+    void CheckAndAlloc(uint64_t dbytes) {
+      CHECK_EQ(kDefaultStorage, storage_type);
+      if (delay_alloc) {
+        shandle = Storage::Get()->Alloc(dbytes, shandle.ctx);
+        delay_alloc = false;
+      } else if (shandle.size < dbytes) {
+        // free storage if necessary and alloc again
+        if (shandle.size > 0) Storage::Get()->Free(shandle);
+        // init storage
+        shandle = Storage::Get()->Alloc(dbytes, shandle.ctx);
+      }
+    }
+
     inline void CheckAndAlloc(const TShape &shape, const std::vector<TShape> &aux_shapes,
                               int dtype) {
       // calculate size, perform allocation
@@ -740,7 +769,6 @@ class NDArray {
     }
     /*! \brief destructor */
     ~Chunk() {
-      if (skip_delete_var) return;
       bool skip_free = static_data || delay_alloc;
       Storage::Handle h = this->shandle;
       std::vector<Storage::Handle> aux_h = this->aux_handles;
diff --git a/python/mxnet/ndarray/sparse_ndarray.py b/python/mxnet/ndarray/sparse_ndarray.py
index d0f25322f8ce..614090ba963f 100644
--- a/python/mxnet/ndarray/sparse_ndarray.py
+++ b/python/mxnet/ndarray/sparse_ndarray.py
@@ -23,7 +23,7 @@
 from . import ndarray
 from .ndarray import _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
 from .ndarray import _STORAGE_TYPE_STR_TO_ID
-from .ndarray import NDArray, _storage_type, _zeros_ndarray
+from .ndarray import NDArray, _storage_type, _zeros_ndarray, array
 from . import cast_storage
 from . import slice as nd_slice
@@ -220,20 +220,19 @@ def _aux_type(self, i):
     @property
     def data(self):
-        """The values array of the SparseNDArray. This is a read-only view of the values array.
-        They reveal internal implementation details and should be used with care.
+        """Get a deep copy of the values array of the SparseNDArray.
 
         Returns
         -------
         NDArray
-            This SparseNDArray's values array.
+            A deep copy of the SparseNDArray's values array.
         """
         return self._data()
 
     @property
     def _num_aux(self):
-        ''' The number of aux data used to help store the sparse ndarray.
-        '''
+        """The number of aux data used to help store the sparse ndarray.
+        """
         return len(_STORAGE_AUX_TYPES[self.stype])
 
     @property
@@ -253,7 +252,6 @@ def _aux_types(self):
 
     def asnumpy(self):
         """Return a dense ``numpy.ndarray`` object with value copied from this array
-
         """
         return self.todense().asnumpy()
 
@@ -311,25 +309,27 @@ def copyto(self, other):
     def todense(self):
         return todense(self)
 
-    def _aux_data(self, i, writable=False):
-        """ Get an NDArray referencing the ith aux data array associated with the SparseNDArray.
+    def _aux_data(self, i):
+        """ Get a deep copy NDArray of the i-th aux data array associated with the SparseNDArray.
+        This function blocks. Do not use it in performance critical code.
         """
         self.wait_to_read()
         hdl = NDArrayHandle()
         check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
-        return NDArray(hdl, writable)
+        return NDArray(hdl)
 
-    def _data(self, writable=False):
-        """ Get an NDArray referencing the value array associated with the SparseNDArray.
+    def _data(self):
+        """ Get a deep copy NDArray of the value array associated with the SparseNDArray.
+        This function blocks. Do not use it in performance critical code.
         """
         self.wait_to_read()
         hdl = NDArrayHandle()
         check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
-        return NDArray(hdl, writable)
+        return NDArray(hdl)
 
 # pylint: disable=abstract-method
 class CSRNDArray(SparseNDArray):
-    """A CSRNDArray represents a NDArray as three separate arrays: `values`,
+    """A CSRNDArray represents a NDArray as three separate arrays: `data`,
     `indptr` and `indices`.
     It uses the standard CSR representation where the column indices for row i are
     stored in indices[indptr[i]:indptr[i+1]] and their corresponding values are stored
     in values[indptr[i]:indptr[i+1]].
@@ -351,8 +351,8 @@ def __reduce__(self):
 
     @property
     def indices(self):
-        """The indices array of the SparseNDArray. This is a read-only view of the indices array.
-        They reveal internal implementation details and should be used with care.
+        """The indices array of the SparseNDArray with `csr` storage type.
+        This generates a deep copy of the column indices of the current `csr` matrix.
 
         Returns
         -------
@@ -364,8 +364,7 @@ def indices(self):
     @property
     def indptr(self):
         """The indptr array of the SparseNDArray with `csr` storage type.
-        This is a read-only view of the indptr array.
-        They reveal internal implementation details and should be used with care.
+        This generates a deep copy of the `indptr` of the current `csr` matrix.
 
         Returns
         -------
@@ -405,8 +404,8 @@ def __reduce__(self):
 
     @property
     def indices(self):
-        """The indices array of the SparseNDArray. This is a read-only view of the indices array.
-        They reveal internal implementation details and should be used with care.
+        """The indices array of the SparseNDArray with `row_sparse` storage type.
+        This generates a deep copy of the row indices of the current row-sparse matrix.
 
         Returns
         -------
@@ -490,22 +489,27 @@ def csr(data, indptr, indices, shape, ctx=None, dtype=None, indptr_type=None, in
     assert(len(shape) == 2)
     result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
                                           [indptr_type, indices_type], aux_shapes))
-    # assign indptr, indices and data
-    data_ref = result._data(True)
-    indptr_ref = result._aux_data(0, True)
-    indices_ref = result._aux_data(1, True)
-    data_ref[:] = data
-    indptr_ref[:] = indptr
-    indices_ref[:] = indices
+    # TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
+    # if they are not for now. In the future, we should provide a c-api
+    # to accept np.ndarray types to copy from to result.data and aux_data
+    if not isinstance(data, NDArray):
+        data = array(data, ctx, dtype)
+    if not isinstance(indptr, NDArray):
+        indptr = array(indptr, ctx, indptr_type)
+    if not isinstance(indices, NDArray):
+        indices = array(indices, ctx, indices_type)
+    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
+    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
+    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
     return result
 
 
-def row_sparse(values, indices, shape, ctx=None, dtype=None, indices_type=None):
+def row_sparse(data, indices, shape, ctx=None, dtype=None, indices_type=None):
     """Creates a row sparse array with a set of tensor slices at given indices.
 
     Parameters
     ----------
-    values: array_like
+    data: array_like
        An object exposing the array interface, with shape [D0, D1, .. Dn], where D0 is
        the number of rows with non-zeros entries.
     indices: array_like
@@ -513,8 +517,8 @@ def row_sparse(values, indices, shape, ctx=None, dtype=None, indices_type=None):
     ctx : Context, optional
         Device context (default is the current default context).
     dtype : str or numpy.dtype, optional
-        The data type of the output array. The default dtype is ``values.dtype``
-        if `values` is an `NDArray`, `float32` otherwise.
+        The data type of the output array. The default dtype is ``data.dtype``
+        if `data` is an `NDArray`, `float32` otherwise.
     indices_type: str or numpy.dtype, optional
         The data type of the indices array. The default dtype is ``indices.dtype``
         if `indicies` is an `NDArray`, `int32` otherwise.
@@ -540,21 +544,26 @@ def row_sparse(values, indices, shape, ctx=None, dtype=None, indices_type=None):
     if ctx is None:
         ctx = Context.default_ctx
     # prepare src array and types
-    values, dtype = _prepare_src_array(values, dtype, mx_real_t)
+    data, dtype = _prepare_src_array(data, dtype, mx_real_t)
     indices, indices_type = _prepare_src_array(indices, indices_type,
                                                _STORAGE_AUX_TYPES[storage_type][0])
     # verify types
     assert('int64' in str(indices_type)), "expected int64 for indices"
     # verify shapes
-    assert(values.ndim == len(shape))
+    assert(data.ndim == len(shape))
     assert(indices.ndim == 1)
     result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
                                                 [indices_type], [indices.shape]))
-    # assign indices and values
-    values_ref = result._data(True)
-    indices_ref = result._aux_data(0, True)
-    values_ref[:] = values
-    indices_ref[:] = indices
+
+    # TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
+    # if they are not for now. In the future, we should provide a c-api
+    # to accept np.ndarray types to copy from to result.data and aux_data
+    if not isinstance(data, NDArray):
+        data = array(data, ctx, dtype)
+    if not isinstance(indices, NDArray):
+        indices = array(indices, ctx, indices_type)
+    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
+    check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
     return result
diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc
index f2472f93371e..1f7335a2928b 100644
--- a/src/c_api/c_api.cc
+++ b/src/c_api/c_api.cc
@@ -230,6 +230,23 @@ int MXNDArraySyncCopyToCPU(NDArrayHandle handle,
   API_END();
 }
 
+/*!
+ * \brief Copy src.data() to dst.data() if i = -1, else dst.aux_data(i) if i >= 0
+ * This function blocks. Do not use it in performance critical code.
+ * \param handle_dst handle of a dst ndarray whose data/aux_data has been allocated
+ * \param handle_src handle of a src ndarray which has default storage type
+ * \param i dst data blob indicator
+ */
+int MXNDArraySyncCopyFromNDArray(NDArrayHandle handle_dst,
+                                 const NDArrayHandle handle_src,
+                                 const int i) {
+  API_BEGIN();
+  NDArray* dst = static_cast<NDArray*>(handle_dst);
+  NDArray* src = static_cast<NDArray*>(handle_src);
+  dst->SyncCopyFromNDArray(*src, -1, i);
+  API_END();
+}
+
 int MXNDArrayWaitToRead(NDArrayHandle handle) {
   API_BEGIN();
   static_cast<NDArray*>(handle)->WaitToRead();
@@ -436,6 +453,11 @@ int MXNDArrayGetAuxType(NDArrayHandle handle,
   API_END();
 }
 
+/*!
+ * \brief Get a deep copy of the ith aux data blob
+ * in the form of an NDArray of default storage type.
+ * This function blocks. Do not use it in performance critical code.
+ */
 int MXNDArrayGetAuxNDArray(NDArrayHandle handle,
                            mx_uint i,
                            NDArrayHandle *out) {
@@ -445,6 +467,11 @@ int MXNDArrayGetAuxNDArray(NDArrayHandle handle,
   API_END();
 }
 
+/*!
+ * \brief Get a deep copy of the data blob
+ * in the form of an NDArray of default storage type.
+ * This function blocks. Do not use it in performance critical code.
+ */
 int MXNDArrayGetDataNDArray(NDArrayHandle handle,
                             NDArrayHandle *out) {
   API_BEGIN();
diff --git a/src/ndarray/ndarray.cc b/src/ndarray/ndarray.cc
index 80c1689dfba1..4a402def143d 100644
--- a/src/ndarray/ndarray.cc
+++ b/src/ndarray/ndarray.cc
@@ -106,6 +106,24 @@ NDArray NDArray::At(index_t idx) const {
   }
 }
 
+/*!
+ * \brief Return deep copy of the current ndarray's aux_data(i)
+ * as an NDArray of default storage type. This function blocks.
+ */
+NDArray NDArray::aux_ndarray(size_t i) const {
+  CHECK_NE(storage_type(), kDefaultStorage);
+  CHECK(i < ptr_->aux_shapes.size());
+  // create a delay_alloc default ndarray as output
+  NDArray ret(TShape(), ctx(), true, aux_type(i));
+  ret.SyncCopyFromNDArray(*this, i);
+  return ret;
+}
+
+NDArray NDArray::data_ndarray() const {
+  NDArray ret(TShape(), ctx(), true, dtype_);
+  ret.SyncCopyFromNDArray(*this);
+  return ret;
+}
 
 bool NDArray::fresh_out_grad() const {
   if (entry_.ag_node != nullptr) return entry_.ag_node->fresh_out_grad;
@@ -1027,6 +1045,101 @@ void NDArray::SyncCopyFromCPU(const void *data, size_t size) const {
   }
 }
 
+/*!
+ * \brief Copy src.data()/aux_data(i) to dst->data()/aux_data(j).
+ */
+void NDArray::SyncCopyFromNDArray(const NDArray& src, int i, int j) {
+  if (i >= 0) {
+    CHECK_NE(src.storage_type(), kDefaultStorage);
+  } else {
+    CHECK(!src.is_none()) << "src dense ndarray must have been initialized";
+  }
+  if (j >= 0) {
+    CHECK_NE(storage_type(), kDefaultStorage);
+  } else {
+    CHECK(!this->is_none()) << "dst dense ndarray must have been initialized";
+  }
+
+  if (src.var() == var()) {
+    // skip copying to itself
+    LOG(WARNING) << "SyncCopyFromNDArray does not support copying to self";
+    return;
+  }
+  const int src_dev_mask = src.ctx().dev_mask();
+  const int dst_dev_mask = ctx().dev_mask();
+  std::vector<Engine::VarHandle> const_vars;
+  const_vars.push_back(src.var());
+
+  // get or create a dst tblob for copying src to it
+  // if dst is a dense format and has not been allocated, allocate memory for it
+  // else if dst is not initialized, allocate corresponding data blob for it
+  auto get_dst_data = [&](const TShape& src_shape) {
+    if (this->storage_type() == kDefaultStorage) {
+      this->ReshapeAndAlloc(src_shape);
+    } else if (!this->storage_initialized()) {
+      if (j < 0) {
+        this->CheckAndAllocData(src_shape);
+      } else {
+        this->CheckAndAllocAuxData(j, src_shape);
+      }
+    }
+    TBlob dst_data = (j >= 0? this->aux_data(j) : this->data());
+    CHECK_LE(src_shape.Size(), dst_data.shape_.Size());
+    return dst_data;
+  };
+
+  if (src_dev_mask == cpu::kDevMask && dst_dev_mask == cpu::kDevMask) {
+    Engine::Get()->PushSync([&](RunContext rctx) {
+        const TBlob src_data = (i >= 0? src.aux_data(i) : src.data());
+        TBlob dst_data = get_dst_data(src_data.shape_);
+        ndarray::Copy<cpu, cpu>(src_data, &dst_data, src.ctx(), this->ctx(), rctx);
+      }, this->ctx(), const_vars, {this->var()},
+      FnProperty::kNormal, 0, PROFILER_MESSAGE("SyncCopyFromNDArrayCPU2CPU"));
+  } else {
+#if MXNET_USE_CUDA
+    if (src_dev_mask == cpu::kDevMask && dst_dev_mask == gpu::kDevMask) {
+      Engine::Get()->PushSync([&](RunContext rctx) {
+          const TBlob src_data = (i >= 0? src.aux_data(i) : src.data());
+          TBlob dst_data = get_dst_data(src_data.shape_);
+          ndarray::Copy<cpu, gpu>(src_data, &dst_data, src.ctx(), this->ctx(), rctx);
+          rctx.get_stream<gpu>()->Wait();
+        }, this->ctx(), const_vars, {this->var()},
+        FnProperty::kCopyToGPU, 0, PROFILER_MESSAGE("SyncCopyFromNDArrayCPU2GPU"));
+    } else if (src_dev_mask == gpu::kDevMask && dst_dev_mask == cpu::kDevMask) {
+      Engine::Get()->PushSync([&](RunContext rctx) {
+          const TBlob src_data = (i >= 0? src.aux_data(i) : src.data());
+          TBlob dst_data = get_dst_data(src_data.shape_);
+          ndarray::Copy<gpu, cpu>(src_data, &dst_data, src.ctx(), this->ctx(), rctx);
+          rctx.get_stream<gpu>()->Wait();
+        }, this->ctx(), const_vars, {this->var()},
+        FnProperty::kCopyFromGPU, 0, PROFILER_MESSAGE("SyncCopyFromNDArrayGPU2CPU"));
+    } else if (src_dev_mask == gpu::kDevMask && dst_dev_mask == gpu::kDevMask) {
+      Engine::Get()->PushSync([&](RunContext rctx) {
+          const TBlob src_data = (i >= 0? src.aux_data(i) : src.data());
+          TBlob dst_data = get_dst_data(src_data.shape_);
+          ndarray::Copy<gpu, gpu>(src_data, &dst_data, src.ctx(), this->ctx(), rctx);
+          rctx.get_stream<gpu>()->Wait();
+        }, this->ctx(), const_vars, {this->var()},
+        src.dtype() != this->dtype() ? FnProperty::kNormal : FnProperty::kCopyFromGPU,
+        0, PROFILER_MESSAGE("SyncCopyFromNDArrayGPU2GPU"));
+    } else {
+      LOG(FATAL) << "unknown device mask";
+    }
+#else
+    LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
+#endif
+  }
+  // The copy operation was pushed to the engine to execute.
+  // Need to wait here for it to complete.
+  // The reason for pushing the copy operation to the engine
+  // is that when copying data from a sparse tensor
+  // to the current one, that sparse ndarray's storage_shape/aux_shape
+  // may not be ready or may have changed, and we need to ensure
+  // thread safety for reading the correct shape info to allocate
+  // memory for the current ndarray.
+  WaitToRead();
+}
+
 void NDArray::SyncCopyToCPU(void *data, size_t size) const {
   TShape dshape = this->shape();
   CHECK_EQ(dshape.Size(), size)
diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 87957812240c..9cd041201e51 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -6,6 +6,7 @@
 from test_optimizer import *
 from test_random import *
 from test_sparse_operator import test_sparse_nd_zeros, test_sparse_dot
+from test_sparse_ndarray import test_create_csr, test_create_row_sparse
 import mxnet as mx
 import numpy as np
 from mxnet.test_utils import check_consistency, set_default_context
diff --git a/tests/python/unittest/test_sparse_ndarray.py b/tests/python/unittest/test_sparse_ndarray.py
index e88057f16ba4..0189b84288e0 100644
--- a/tests/python/unittest/test_sparse_ndarray.py
+++ b/tests/python/unittest/test_sparse_ndarray.py
@@ -378,6 +378,38 @@ def test_sparse_ndarray_save_load():
     os.remove(fname)
 
 
+def test_create_csr():
+    dim0 = 50
+    dim1 = 50
+    densities = [0, 0.01, 0.1, 0.2, 0.5]
+    for density in densities:
+        shape = rand_shape_2d(dim0, dim1)
+        matrix = rand_ndarray(shape, 'csr', density)
+        data = matrix.data
+        indptr = matrix.indptr
+        indices = matrix.indices
+        csr_created = mx.nd.csr(data=data, indptr=indptr, indices=indices, shape=shape)
+        assert csr_created.stype == 'csr'
+        assert same(csr_created.data.asnumpy(), data.asnumpy())
+        assert same(csr_created.indptr.asnumpy(), indptr.asnumpy())
+        assert same(csr_created.indices.asnumpy(), indices.asnumpy())
+
+
+def test_create_row_sparse():
+    dim0 = 50
+    dim1 = 50
+    densities = [0, 0.01, 0.1, 0.2, 0.5]
+    for density in densities:
+        shape = rand_shape_2d(dim0, dim1)
+        matrix = rand_ndarray(shape, 'row_sparse', density)
+        data = matrix.data
+        indices = matrix.indices
+        rsp_created = mx.nd.row_sparse(data=data, indices=indices, shape=shape)
+        assert rsp_created.stype == 'row_sparse'
+        assert same(rsp_created.data.asnumpy(), data.asnumpy())
+        assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
+
+
 if __name__ == '__main__':
     import nose
     nose.runmodule()
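
Usage sketch (not part of the diff): with this change, csr()/row_sparse() copy the user-provided arrays into the sparse NDArray through MXNDArraySyncCopyFromNDArray instead of writing through shared-variable views, and the data/indptr/indices accessors return deep copies. A minimal Python illustration mirroring the new unit tests above; the shapes and values here are made up for illustration:

    import mxnet as mx
    import numpy as np

    # build a 3x4 CSR matrix from numpy inputs; csr() copies them into the result
    data = np.array([1, 2, 3], dtype='float32')
    indptr = np.array([0, 1, 2, 3], dtype='int64')
    indices = np.array([0, 2, 3], dtype='int64')
    a = mx.nd.csr(data=data, indptr=indptr, indices=indices, shape=(3, 4))

    # the accessors are deep copies, so mutating one does not touch `a`
    vals = a.data
    vals[:] = 0
    assert a.data.asnumpy().sum() == 6  # original values unchanged

    # row_sparse: rows 0 and 2 of a 3x2 matrix
    rsp = mx.nd.row_sparse(data=np.array([[1, 2], [3, 4]], dtype='float32'),
                           indices=np.array([0, 2], dtype='int64'), shape=(3, 2))
    print(rsp.todense().asnumpy())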