Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added frexp() operator #609

Merged
merged 1 commit into from
Apr 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions docs_input/api/math/misc/frexp.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
.. _frexp_func:

frexp
=====

Return the normalized fraction and exponent part of a floating point number

.. doxygenfunction:: frexp(const OpA &a)

Examples
~~~~~~~~

.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
:language: cpp
:start-after: example-begin frexp-test-1
:end-before: example-end frexp-test-1
:dedent:

18 changes: 18 additions & 0 deletions docs_input/api/math/misc/frexpc.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
.. _frexpc_func:

frexpc
======

Return the normalized fraction and exponent part of a complex floating point number

.. doxygenfunction:: frexpc(const OpA &a)

Examples
~~~~~~~~

.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
:language: cpp
:start-after: example-begin frexpc-test-1
:end-before: example-end frexpc-test-1
:dedent:

4 changes: 2 additions & 2 deletions docs_input/api/random/random.rst
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ uses cuRAND on the device to generate random numbers from device code.
.. note::
randomGenerator_t has been deprecated after release 0.5.0. Please use the ``random()`` operator instead

.. doxygenfunction:: matx::random(ShapeType &&s, Distribution_t dist, uint64_t seed = 0, T alpha = 1, T beta = 0)
.. doxygenfunction:: matx::random(const index_t (&s)[RANK], Distribution_t dist, uint64_t seed = 0, T alpha = 1, T beta = 0)
.. doxygenfunction:: matx::random(ShapeType &&s, Distribution_t dist, uint64_t seed = 0, LowerType alpha = 1, LowerType beta = 0)
.. doxygenfunction:: matx::random(const index_t (&s)[RANK], Distribution_t dist, uint64_t seed = 0, LowerType alpha = 1, LowerType beta = 0)

Examples
~~~~~~~~
Expand Down
41 changes: 22 additions & 19 deletions include/matx/generators/random.h
Original file line number Diff line number Diff line change
Expand Up @@ -379,15 +379,16 @@ template <typename T, int RANK> class randomTensorView_t {
template <typename T, typename ShapeType>
class RandomOp : public BaseOp<RandomOp<T, ShapeType>> {
private:
using inner_t = typename inner_op_type_t<T>::type;
static constexpr int RANK = std::tuple_size<ShapeType>{};
Distribution_t dist_;
std::array<index_t, RANK> shape_;
std::array<index_t, RANK> strides_;
index_t total_size_;
curandStatePhilox4_32_10_t *states_;
uint64_t seed_;
T alpha_;
T beta_;
inner_t alpha_;
inner_t beta_;
bool init_ = false;
bool device_;

Expand All @@ -405,7 +406,7 @@ template <typename T, int RANK> class randomTensorView_t {
// Shapeless constructor to be allocated at run invocation
RandomOp() = delete;

inline RandomOp(ShapeType &&s, Distribution_t dist, uint64_t seed, T alpha, T beta) :
inline RandomOp(ShapeType &&s, Distribution_t dist, uint64_t seed, inner_t alpha, inner_t beta) :
dist_(dist), seed_(seed), alpha_(alpha), beta_(beta)
{
total_size_ = std::accumulate(s.begin(), s.end(), 1, std::multiplies<index_t>());
Expand Down Expand Up @@ -514,15 +515,15 @@ template <typename T, int RANK> class randomTensorView_t {
else if constexpr (std::is_same_v<T, double>) {
curandGenerateUniformDouble(gen_, &val, 1);
}
if constexpr (std::is_same_v<T, cuda::std::complex<float>>) {
else if constexpr (std::is_same_v<T, cuda::std::complex<float>>) {
float *tmp = reinterpret_cast<float *>(&val);
curandGenerateUniform(gen_, &val[0], 1);
curandGenerateUniform(gen_, &val[1], 1);
curandGenerateUniform(gen_, &tmp[0], 1);
curandGenerateUniform(gen_, &tmp[1], 1);
}
if constexpr (std::is_same_v<T, cuda::std::complex<double>>) {
else if constexpr (std::is_same_v<T, cuda::std::complex<double>>) {
double *tmp = reinterpret_cast<double *>(&val);
curandGenerateUniformDouble(gen_, &val[0], 1);
curandGenerateUniformDouble(gen_, &val[1], 1);
curandGenerateUniformDouble(gen_, &tmp[0], 1);
curandGenerateUniformDouble(gen_, &tmp[1], 1);
}

val = alpha_ * val + beta_;
Expand All @@ -534,15 +535,15 @@ template <typename T, int RANK> class randomTensorView_t {
else if constexpr (std::is_same_v<T, double>) {
curandGenerateNormalDouble(gen_, &val, 1, beta_, alpha_);
}
if constexpr (std::is_same_v<T, cuda::std::complex<float>>) {
else if constexpr (std::is_same_v<T, cuda::std::complex<float>>) {
float *tmp = reinterpret_cast<float *>(&val);
curandGenerateNormal(gen_, &val[0], 1, beta_, alpha_);
curandGenerateNormal(gen_, &val[1], 1, beta_, alpha_);
curandGenerateNormal(gen_, &tmp[0], 1, beta_, alpha_);
curandGenerateNormal(gen_, &tmp[1], 1, beta_, alpha_);
}
if constexpr (std::is_same_v<T, cuda::std::complex<double>>) {
else if constexpr (std::is_same_v<T, cuda::std::complex<double>>) {
double *tmp = reinterpret_cast<double *>(&val);
curandGenerateNormalDouble(gen_, &val[0], 1, beta_, alpha_);
curandGenerateNormalDouble(gen_, &val[1], 1, beta_, alpha_);
curandGenerateNormalDouble(gen_, &tmp[0], 1, beta_, alpha_);
curandGenerateNormalDouble(gen_, &tmp[1], 1, beta_, alpha_);
}
}
#endif
Expand All @@ -565,16 +566,17 @@ template <typename T, int RANK> class randomTensorView_t {
*
* @tparam ShapeType Shape type
* @tparam T Type of output
 * @tparam LowerType Either T or the inner type of T if T is complex
* @param s Shape of operator
* @param dist Distribution (either NORMAL or UNIFORM)
* @param seed Random number seed
* @param alpha Value to multiply by each number
* @param beta Value to add to each number
* @return Random number operator
*/
template <typename T, typename ShapeType,
template <typename T, typename ShapeType, typename LowerType = typename inner_op_type_t<T>::type,
std::enable_if_t<!std::is_array_v<remove_cvref_t<ShapeType>>, bool> = true>
inline auto random(ShapeType &&s, Distribution_t dist, uint64_t seed = 0, T alpha = 1, T beta = 0)
inline auto random(ShapeType &&s, Distribution_t dist, uint64_t seed = 0, LowerType alpha = 1, LowerType beta = 0)
{
using shape_strip_t = remove_cvref_t<ShapeType>;
return detail::RandomOp<T, shape_strip_t>(std::forward<shape_strip_t>(s), dist, seed, alpha, beta);
Expand All @@ -585,15 +587,16 @@ template <typename T, int RANK> class randomTensorView_t {
*
* @tparam RANK Rank of operator
* @tparam T Type of output
* @tparam LowerType Either T or the inner type of T if T is complex
* @param s Array of dimensions
* @param dist Distribution (either NORMAL or UNIFORM)
* @param seed Random number seed
* @param alpha Value to multiply by each number
* @param beta Value to add to each number
* @return Random number operator
*/
template <typename T, int RANK>
inline auto random(const index_t (&s)[RANK], Distribution_t dist, uint64_t seed = 0, T alpha = 1, T beta = 0)
template <typename T, int RANK, typename LowerType = typename inner_op_type_t<T>::type>
inline auto random(const index_t (&s)[RANK], Distribution_t dist, uint64_t seed = 0, LowerType alpha = 1, LowerType beta = 0)
{
auto sarray = detail::to_array(s);
return random<T, decltype(sarray)>(std::move(sarray), dist, seed, alpha, beta);
Expand Down
164 changes: 164 additions & 0 deletions include/matx/operators/frexp.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,164 @@
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
//    contributors may be used to endorse or promote products derived from
//    this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////

#pragma once


#include "matx/core/type_utils.h"
#include "matx/operators/base_operator.h"
#include "matx/transforms/solver.h"

namespace matx {

namespace detail {
template<typename OpA, int WHICH>
class FrexpOp : public BaseOp<FrexpOp<OpA, WHICH>>
{
  private:
    OpA a_;

  public:
    using matxop = bool;
    using scalar_type = typename OpA::scalar_type;

    __MATX_INLINE__ std::string str() const { return "frexp()"; }

    /**
     * @brief Element-wise frexp operator.
     *
     * Decomposes each element of `a` into a normalized fraction and an
     * integral power of two. The `WHICH` template parameter selects what
     * `operator()` returns:
     *   0 = fraction of the real part, 1 = exponent of the real part,
     *   2 = fraction of the imaginary part, 3 = exponent of the imaginary part.
     * WHICH values 2/3 are only valid for complex inputs.
     *
     * @param a Input operator (floating point or cuda complex element type)
     */
    __MATX_INLINE__ FrexpOp(const OpA &a) : a_(a) {
      static_assert(std::is_floating_point_v<scalar_type> ||
            is_cuda_complex_v<scalar_type>, "frexp() must take a floating point input");
      // Without this guard a real-typed input with WHICH >= 2 would leave
      // operator() with no return statement on any constexpr path (UB).
      static_assert(WHICH < 2 || is_cuda_complex_v<scalar_type>,
            "frexpc() requires a complex floating point input");
    };

    template <typename... Is>
    __MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto operator()(Is... indices) const
    {
      [[maybe_unused]] int rexp;
      if constexpr (is_cuda_complex_v<scalar_type>) {
        // Complex input: operate on the real or imaginary component,
        // dispatching to the float/double flavor of frexp.
        if constexpr (std::is_same_v<float, typename scalar_type::value_type>) {
          if constexpr (WHICH == 0) { // real fractional
            const auto frac = cuda::std::frexpf(a_(indices...).real(), &rexp);
            return frac;
          } else if constexpr (WHICH == 1) { // real exponent
            [[maybe_unused]] const auto frac = cuda::std::frexpf(a_(indices...).real(), &rexp);
            return rexp;
          } else if constexpr (WHICH == 2) { // imag fractional
            const auto frac = cuda::std::frexpf(a_(indices...).imag(), &rexp);
            return frac;
          } else if constexpr (WHICH == 3) { // imag exponent
            [[maybe_unused]] const auto frac = cuda::std::frexpf(a_(indices...).imag(), &rexp);
            return rexp;
          }
        }
        else {
          if constexpr (WHICH == 0) { // real fractional
            const auto frac = cuda::std::frexp(a_(indices...).real(), &rexp);
            return frac;
          } else if constexpr (WHICH == 1) { // real exponent
            [[maybe_unused]] const auto frac = cuda::std::frexp(a_(indices...).real(), &rexp);
            return rexp;
          } else if constexpr (WHICH == 2) { // imag fractional
            const auto frac = cuda::std::frexp(a_(indices...).imag(), &rexp);
            return frac;
          } else if constexpr (WHICH == 3) { // imag exponent
            [[maybe_unused]] const auto frac = cuda::std::frexp(a_(indices...).imag(), &rexp);
            return rexp;
          }
        }
      }
      else {
        // Real input: only WHICH 0 (fraction) and 1 (exponent) are valid;
        // the constructor's static_assert rules out WHICH >= 2 here.
        if constexpr (std::is_same_v<float, scalar_type>) {
          [[maybe_unused]] const float frac = cuda::std::frexpf(a_(indices...), &rexp);
          if constexpr (WHICH == 0) { // fractional
            return frac;
          } else if constexpr (WHICH == 1) { // exponent
            return rexp;
          }
        }
        else {
          [[maybe_unused]] const double frac = cuda::std::frexp(a_(indices...), &rexp);
          if constexpr (WHICH == 0) { // fractional
            return frac;
          } else if constexpr (WHICH == 1) { // exponent
            return rexp;
          }
        }
      }
    }

    // Rank of the output matches the input operator's rank
    static __MATX_INLINE__ constexpr __MATX_HOST__ __MATX_DEVICE__ int32_t Rank()
    {
      return OpA::Rank();
    }

    // Forward pre-execution setup to the wrapped operator, if it is a MatX op
    template <typename ShapeType, typename Executor>
    __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
    {
      if constexpr (is_matx_op<OpA>()) {
        a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
      }
    }

    // Forward post-execution teardown to the wrapped operator, if it is a MatX op
    template <typename ShapeType, typename Executor>
    __MATX_INLINE__ void PostRun(ShapeType &&shape, Executor &&ex) const noexcept
    {
      if constexpr (is_matx_op<OpA>()) {
        a_.PostRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
      }
    }

    // Output size in each dimension matches the input operator
    constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const
    {
      return a_.Size(dim);
    }

};
}

/**
 * @brief Return the normalized fraction and exponent of each element of `a`.
 *
 * Produces a pair of lazy operators: the first evaluates to the fractional
 * part and the second to the integer exponent, as computed by frexp().
 *
 * @param a Input operator with a floating point element type
 * @return std::tuple of (fraction operator, exponent operator)
 */
template<typename OpA>
__MATX_INLINE__ auto frexp(const OpA &a) {
  auto frac_op = detail::FrexpOp<OpA, 0>(a);
  auto exp_op  = detail::FrexpOp<OpA, 1>(a);
  return std::make_tuple(frac_op, exp_op);
}

/**
 * @brief Return the normalized fraction and exponent of each element of the
 * complex operator `a`, for both real and imaginary components.
 *
 * Produces four lazy operators: (real fraction, real exponent,
 * imaginary fraction, imaginary exponent), as computed by frexp().
 *
 * @param a Input operator with a complex floating point element type
 * @return std::tuple of the four component operators
 */
template<typename OpA>
__MATX_INLINE__ auto frexpc(const OpA &a) {
  // frexpc only makes sense for complex inputs; a real input would
  // instantiate FrexpOp with WHICH >= 2, whose operator() has no valid
  // return path. Fail early with a clear diagnostic instead.
  static_assert(is_cuda_complex_v<typename OpA::scalar_type>,
      "frexpc() requires a complex floating point input");
  return std::tuple{
          detail::FrexpOp<OpA, 0>(a),
          detail::FrexpOp<OpA, 1>(a),
          detail::FrexpOp<OpA, 2>(a),
          detail::FrexpOp<OpA, 3>(a)
  };
}

};

1 change: 1 addition & 0 deletions include/matx/operators/operators.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@
#include "matx/operators/fftshift.h"
#include "matx/operators/filter.h"
#include "matx/operators/flatten.h"
#include "matx/operators/frexp.h"
#include "matx/operators/hermitian.h"
#include "matx/operators/hist.h"
#include "matx/operators/if.h"
Expand Down
Loading