From 57c6d04cfbdfa86632cd8fa092a03f0bedcdcf93 Mon Sep 17 00:00:00 2001 From: ricardoV94 Date: Wed, 9 Oct 2024 13:07:51 +0200 Subject: [PATCH] Remove `_asarray` --- pytensor/misc/safe_asarray.py | 57 -------------------- pytensor/scalar/basic.py | 13 ++--- pytensor/sparse/basic.py | 23 ++++----- pytensor/sparse/rewriting.py | 4 +- pytensor/tensor/basic.py | 7 ++- pytensor/tensor/elemwise.py | 3 +- pytensor/tensor/extra_ops.py | 5 +- pytensor/tensor/math.py | 31 ++++++----- pytensor/tensor/random/op.py | 3 +- pytensor/tensor/rewriting/math.py | 9 ++-- pytensor/tensor/shape.py | 5 +- pytensor/tensor/sharedvar.py | 3 +- pytensor/tensor/sort.py | 3 +- pytensor/tensor/subtensor.py | 3 +- pytensor/tensor/type.py | 11 ++-- tests/compile/function/test_pfunc.py | 3 +- tests/compile/test_shared.py | 3 +- tests/misc/test_may_share_memory.py | 3 +- tests/scan/test_basic.py | 3 +- tests/sparse/test_basic.py | 5 +- tests/tensor/rewriting/test_elemwise.py | 9 ++-- tests/tensor/rewriting/test_math.py | 69 ++++++++++++------------- tests/tensor/test_basic.py | 27 +++++----- tests/tensor/test_blas.py | 11 ++-- tests/tensor/test_blas_c.py | 5 +- tests/tensor/test_casting.py | 3 +- tests/tensor/test_inplace.py | 5 +- tests/tensor/test_math.py | 15 +++--- tests/tensor/test_shape.py | 9 ++-- tests/tensor/utils.py | 3 +- 30 files changed, 135 insertions(+), 218 deletions(-) delete mode 100644 pytensor/misc/safe_asarray.py diff --git a/pytensor/misc/safe_asarray.py b/pytensor/misc/safe_asarray.py deleted file mode 100644 index 1793070264..0000000000 --- a/pytensor/misc/safe_asarray.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Helper function to safely convert an array to a new data type. -""" - -import numpy as np - -from pytensor.configdefaults import config - - -__docformat__ = "restructuredtext en" - - -def _asarray(a, dtype, order=None): - """Convert the input to a Numpy array. - - This function is almost identical to ``numpy.asarray``, but it should be - used instead of its numpy counterpart when a data type is provided in - order to perform type conversion if required. - The reason is that ``numpy.asarray`` may not actually update the array's - data type to the user-provided type. For more information see ticket - http://projects.scipy.org/numpy/ticket/870. - - In that case, we check that both dtype have the same string - description (byte order, basic type, and number of bytes), and - return a view with the desired dtype. - - This function's name starts with a '_' to indicate that it is meant to be - used internally. It is imported so as to be available directly through - _asarray - """ - if str(dtype) == "floatX": - dtype = config.floatX - dtype = np.dtype(dtype) # Convert into dtype object. - rval = np.asarray(a, order=order).astype(dtype) - # Note that dtype comparison must be done by comparing their `num` - # attribute. One cannot assume that two identical data types are pointers - # towards the same object (e.g. under Windows this appears not to be the - # case). - if rval.dtype.num != dtype.num: - # Type mismatch between the data type we asked for, and the one - # returned by numpy.asarray. - # If both types have the same string description (byte order, basic - # type, and number of bytes), then it is safe to return a view. - if dtype.str == rval.dtype.str: - # Silent fix. - return rval.view(dtype=dtype) - else: - # Unexpected mismatch: better know what is going on! 
- raise TypeError( - "numpy.array did not return the data type we " - f"asked for ({dtype} {dtype.str} #{dtype.num}), instead it returned type " - f"{rval.dtype} {rval.str} #{rval.dtype.num}: function " - "_asarray may need to be modified to handle this " - "data type." - ) - else: - return rval diff --git a/pytensor/scalar/basic.py b/pytensor/scalar/basic.py index 985cc1bc6a..8be61f61fa 100644 --- a/pytensor/scalar/basic.py +++ b/pytensor/scalar/basic.py @@ -32,7 +32,6 @@ from pytensor.graph.utils import MetaObject, MethodNotDefined from pytensor.link.c.op import COp from pytensor.link.c.type import CType -from pytensor.misc.safe_asarray import _asarray from pytensor.printing import pprint from pytensor.utils import ( apply_across_args, @@ -150,7 +149,7 @@ def __call__(self, x): and rval.dtype in ("float64", "float32") and rval.dtype != config.floatX ): - rval = _asarray(rval, dtype=config.floatX) + rval = np.asarray(rval, dtype=config.floatX) return rval # The following is the original code, corresponding to the 'custom' @@ -176,7 +175,7 @@ def __call__(self, x): and config.floatX in self.dtypes and config.floatX != "float64" ): - return _asarray(x, dtype=config.floatX) + return np.asarray(x, dtype=config.floatX) # Don't autocast to float16 unless config.floatX is float16 try_dtypes = [ @@ -184,7 +183,7 @@ def __call__(self, x): ] for dtype in try_dtypes: - x_ = _asarray(x, dtype=dtype) + x_ = np.asarray(x).astype(dtype=dtype) if np.all(x == x_): break # returns either an exact x_==x, or the last cast x_ @@ -245,7 +244,9 @@ def convert(x, dtype=None): if dtype is not None: # in this case, the semantics are that the caller is forcing the dtype - x_ = _asarray(x, dtype=dtype) + if dtype == "floatX": + dtype = config.floatX + x_ = np.asarray(x).astype(dtype) else: # In this case, this function should infer the dtype according to the # autocasting rules. See autocasting above. @@ -256,7 +257,7 @@ def convert(x, dtype=None): except OverflowError: # This is to imitate numpy behavior which tries to fit # bigger numbers into a uint64. 
- x_ = _asarray(x, dtype="uint64") + x_ = np.asarray(x, dtype="uint64") elif isinstance(x, builtins.float): x_ = autocast_float(x) elif isinstance(x, np.ndarray): diff --git a/pytensor/sparse/basic.py b/pytensor/sparse/basic.py index a1f7fd5b13..1a3ca4ffdf 100644 --- a/pytensor/sparse/basic.py +++ b/pytensor/sparse/basic.py @@ -24,7 +24,6 @@ from pytensor.graph.op import Op from pytensor.link.c.op import COp from pytensor.link.c.type import generic -from pytensor.misc.safe_asarray import _asarray from pytensor.sparse.type import SparseTensorType, _is_sparse from pytensor.sparse.utils import hash_from_sparse from pytensor.tensor import basic as ptb @@ -595,11 +594,11 @@ def perform(self, node, inputs, out): (csm,) = inputs out[0][0] = csm.data if str(csm.data.dtype) == "int32": - out[0][0] = _asarray(out[0][0], dtype="int32") + out[0][0] = np.asarray(out[0][0], dtype="int32") # backport - out[1][0] = _asarray(csm.indices, dtype="int32") - out[2][0] = _asarray(csm.indptr, dtype="int32") - out[3][0] = _asarray(csm.shape, dtype="int32") + out[1][0] = np.asarray(csm.indices, dtype="int32") + out[2][0] = np.asarray(csm.indptr, dtype="int32") + out[3][0] = np.asarray(csm.shape, dtype="int32") def grad(self, inputs, g): # g[1:] is all integers, so their Jacobian in this op @@ -698,17 +697,17 @@ def make_node(self, data, indices, indptr, shape): if not isinstance(indices, Variable): indices_ = np.asarray(indices) - indices_32 = _asarray(indices, dtype="int32") + indices_32 = np.asarray(indices, dtype="int32") assert (indices_ == indices_32).all() indices = indices_32 if not isinstance(indptr, Variable): indptr_ = np.asarray(indptr) - indptr_32 = _asarray(indptr, dtype="int32") + indptr_32 = np.asarray(indptr, dtype="int32") assert (indptr_ == indptr_32).all() indptr = indptr_32 if not isinstance(shape, Variable): shape_ = np.asarray(shape) - shape_32 = _asarray(shape, dtype="int32") + shape_32 = np.asarray(shape, dtype="int32") assert (shape_ == shape_32).all() shape = shape_32 @@ -1461,7 +1460,7 @@ def perform(self, node, inputs, outputs): (x, ind1, ind2) = inputs (out,) = outputs assert _is_sparse(x) - out[0] = _asarray(x[ind1, ind2], x.dtype) + out[0] = np.asarray(x[ind1, ind2], x.dtype) get_item_scalar = GetItemScalar() @@ -2142,7 +2141,7 @@ def perform(self, node, inputs, outputs): # The asarray is needed as in some case, this return a # numpy.matrixlib.defmatrix.matrix object and not an ndarray. - out[0] = _asarray(x + y, dtype=node.outputs[0].type.dtype) + out[0] = np.asarray(x + y, dtype=node.outputs[0].type.dtype) def grad(self, inputs, gout): (x, y) = inputs @@ -3497,7 +3496,7 @@ def perform(self, node, inputs, outputs): # The cast is needed as otherwise we hit the bug mentioned into # _asarray function documentation. 
-        out[0] = _asarray(variable, str(variable.dtype))
+        out[0] = np.asarray(variable, str(variable.dtype))
 
     def grad(self, inputs, gout):
         # a is sparse, b is dense, g_out is dense
@@ -4012,7 +4011,7 @@ def perform(self, node, inputs, out):
         if x_is_sparse and y_is_sparse:
             rval = rval.toarray()
 
-        out[0] = _asarray(rval, dtype=node.outputs[0].dtype)
+        out[0] = np.asarray(rval, dtype=node.outputs[0].dtype)
 
     def grad(self, inputs, gout):
         (x, y) = inputs
diff --git a/pytensor/sparse/rewriting.py b/pytensor/sparse/rewriting.py
index c972b16114..5463e6251c 100644
--- a/pytensor/sparse/rewriting.py
+++ b/pytensor/sparse/rewriting.py
@@ -1,3 +1,4 @@
+import numpy as np
 import scipy
 
 import pytensor
@@ -10,7 +11,6 @@
     node_rewriter,
 )
 from pytensor.link.c.op import COp, _NoPythonCOp
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.sparse import basic as sparse
 from pytensor.sparse.basic import (
     CSC,
@@ -283,7 +283,7 @@ def perform(self, node, inputs, outputs):
             (a_val, a_ind, a_ptr), (a_nrows, b.shape[0]), copy=False
         )
         # out[0] = a.dot(b)
-        out[0] = _asarray(a * b, dtype=node.outputs[0].type.dtype)
+        out[0] = np.asarray(a * b, dtype=node.outputs[0].type.dtype)
         assert _is_dense(out[0])  # scipy 0.7 automatically converts to dense
 
     def c_code(self, node, name, inputs, outputs, sub):
diff --git a/pytensor/tensor/basic.py b/pytensor/tensor/basic.py
index 8218accdf4..7d5236d04a 100644
--- a/pytensor/tensor/basic.py
+++ b/pytensor/tensor/basic.py
@@ -32,7 +32,6 @@
 from pytensor.graph.type import HasShape, Type
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import Printer, min_informative_str, pprint, set_precedence
 from pytensor.raise_op import CheckAndRaise, assert_op
 from pytensor.scalar import int32
@@ -512,7 +511,7 @@ def get_underlying_scalar_constant_value(
                     ret = v.owner.inputs[0].owner.inputs[idx]
                 ret = get_underlying_scalar_constant_value(ret, max_recur=max_recur)
                 # MakeVector can cast implicitly its input in some case.
-                return _asarray(ret, dtype=v.type.dtype)
+                return np.asarray(ret, dtype=v.type.dtype)
 
         # This is needed when we take the grad as the Shape op
         # are not already changed into MakeVector
@@ -1834,7 +1833,7 @@ def perform(self, node, inputs, out_):
         (out,) = out_
         # not calling pytensor._asarray as optimization
         if (out[0] is None) or (out[0].size != len(inputs)):
-            out[0] = _asarray(inputs, dtype=node.outputs[0].dtype)
+            out[0] = np.asarray(inputs, dtype=node.outputs[0].dtype)
         else:
             # assume that out has correct dtype. there is no cheap way to check
             out[0][...] = inputs
@@ -2537,7 +2536,7 @@ def perform(self, node, axis_and_tensors, out_):
                 f"Join axis {int(axis)} out of bounds [0, {int(ndim)})"
             )
 
-        out[0] = _asarray(
+        out[0] = np.asarray(
             np.concatenate(tens, axis=axis), dtype=node.outputs[0].type.dtype
         )
 
diff --git a/pytensor/tensor/elemwise.py b/pytensor/tensor/elemwise.py
index 53302c28c4..a51c2034af 100644
--- a/pytensor/tensor/elemwise.py
+++ b/pytensor/tensor/elemwise.py
@@ -17,7 +17,6 @@
 from pytensor.link.c.op import COp, ExternalCOp, OpenMPOp
 from pytensor.link.c.params_type import ParamsType
 from pytensor.misc.frozendict import frozendict
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import Printer, pprint
 from pytensor.scalar import get_scalar_type
 from pytensor.scalar.basic import bool as scalar_bool
@@ -1412,7 +1411,7 @@ def perform(self, node, inp, out):
 
         out = self.ufunc.reduce(input, axis=axis, dtype=acc_dtype)
 
-        output[0] = _asarray(out, dtype=out_dtype)
+        output[0] = np.asarray(out, dtype=out_dtype)
 
     def infer_shape(self, fgraph, node, shapes):
         (ishape,) = shapes
diff --git a/pytensor/tensor/extra_ops.py b/pytensor/tensor/extra_ops.py
index 6f181062de..9de2b3f938 100644
--- a/pytensor/tensor/extra_ops.py
+++ b/pytensor/tensor/extra_ops.py
@@ -17,7 +17,6 @@
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
 from pytensor.link.c.type import EnumList, Generic
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.raise_op import Assert
 from pytensor.scalar import int32 as int_t
 from pytensor.scalar import upcast
@@ -1307,7 +1306,7 @@ def perform(self, node, inp, out):
         res = np.unravel_index(indices, dims, order=self.order)
         assert len(res) == len(out)
         for i in range(len(out)):
-            ret = _asarray(res[i], node.outputs[0].dtype)
+            ret = np.asarray(res[i], node.outputs[0].dtype)
             if ret.base is not None:
                 # NumPy will return a view when it can.
                 # But we don't want that.
@@ -1382,7 +1381,7 @@ def infer_shape(self, fgraph, node, input_shapes):
     def perform(self, node, inp, out):
         multi_index, dims = inp[:-1], inp[-1]
         res = np.ravel_multi_index(multi_index, dims, mode=self.mode, order=self.order)
-        out[0][0] = _asarray(res, node.outputs[0].dtype)
+        out[0][0] = np.asarray(res, node.outputs[0].dtype)
 
 
 def ravel_multi_index(multi_index, dims, mode="raise", order="C"):
diff --git a/pytensor/tensor/math.py b/pytensor/tensor/math.py
index d1aa438216..57d0c0364b 100644
--- a/pytensor/tensor/math.py
+++ b/pytensor/tensor/math.py
@@ -14,7 +14,6 @@
 from pytensor.graph.replace import _vectorize_node
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import pprint
 from pytensor.raise_op import Assert
 from pytensor.scalar.basic import BinaryScalarOp
@@ -202,7 +201,7 @@ def perform(self, node, inp, outs):
         new_shape = (*kept_shape, np.prod(reduced_shape, dtype="int64"))
         reshaped_x = transposed_x.reshape(new_shape)
 
-        max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")
+        max_idx[0] = np.asarray(np.argmax(reshaped_x, axis=-1), dtype="int64")
 
     def c_code(self, node, name, inp, out, sub):
         (x,) = inp
@@ -730,32 +729,32 @@ def isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
     --------
     >>> import pytensor
     >>> import numpy as np
-    >>> a = _asarray([1e10, 1e-7], dtype="float64")
-    >>> b = _asarray([1.00001e10, 1e-8], dtype="float64")
+    >>> a = np.array([1e10, 1e-7], dtype="float64")
+    >>> b = np.array([1.00001e10, 1e-8], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, False])
-    >>> a = _asarray([1e10, 1e-8], dtype="float64")
-    >>> b = _asarray([1.00001e10, 1e-9], dtype="float64")
+    >>> a = np.array([1e10, 1e-8], dtype="float64")
+    >>> b = np.array([1.00001e10, 1e-9], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, True])
-    >>> a = _asarray([1e10, 1e-8], dtype="float64")
-    >>> b = _asarray([1.0001e10, 1e-9], dtype="float64")
+    >>> a = np.array([1e10, 1e-8], dtype="float64")
+    >>> b = np.array([1.0001e10, 1e-9], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([False, True])
-    >>> a = _asarray([1.0, np.nan], dtype="float64")
-    >>> b = _asarray([1.0, np.nan], dtype="float64")
+    >>> a = np.array([1.0, np.nan], dtype="float64")
+    >>> b = np.array([1.0, np.nan], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, False])
-    >>> a = _asarray([1.0, np.nan], dtype="float64")
-    >>> b = _asarray([1.0, np.nan], dtype="float64")
+    >>> a = np.array([1.0, np.nan], dtype="float64")
+    >>> b = np.array([1.0, np.nan], dtype="float64")
     >>> pytensor.tensor.isclose(a, b, equal_nan=True).eval()
     array([ True, True])
-    >>> a = _asarray([1.0, np.inf], dtype="float64")
-    >>> b = _asarray([1.0, -np.inf], dtype="float64")
+    >>> a = np.array([1.0, np.inf], dtype="float64")
+    >>> b = np.array([1.0, -np.inf], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, False])
-    >>> a = _asarray([1.0, np.inf], dtype="float64")
-    >>> b = _asarray([1.0, np.inf], dtype="float64")
+    >>> a = np.array([1.0, np.inf], dtype="float64")
+    >>> b = np.array([1.0, np.inf], dtype="float64")
     >>> pytensor.tensor.isclose(a, b).eval()
     array([ True, True])
 
diff --git a/pytensor/tensor/random/op.py b/pytensor/tensor/random/op.py
index ba400454cd..e43dfaa222 100644
--- a/pytensor/tensor/random/op.py
+++ b/pytensor/tensor/random/op.py
@@ -10,7 +10,6 @@
 from pytensor.graph.basic import Apply, Variable, equal_computations
 from pytensor.graph.op import Op
 from pytensor.graph.replace import _vectorize_node
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.scalar import ScalarVariable
 from pytensor.tensor.basic import (
     as_tensor_variable,
@@ -403,7 +402,7 @@ def perform(self, node, inputs, outputs):
         smpl_val = self.rng_fn(rng, *([*args, size]))
 
         if not isinstance(smpl_val, np.ndarray) or str(smpl_val.dtype) != self.dtype:
-            smpl_val = _asarray(smpl_val, dtype=self.dtype)
+            smpl_val = np.asarray(smpl_val, dtype=self.dtype)
 
         smpl_out[0] = smpl_val
 
diff --git a/pytensor/tensor/rewriting/math.py b/pytensor/tensor/rewriting/math.py
index 6568bcdf3e..2e30e1399b 100644
--- a/pytensor/tensor/rewriting/math.py
+++ b/pytensor/tensor/rewriting/math.py
@@ -19,7 +19,6 @@
     node_rewriter,
 )
 from pytensor.graph.rewriting.utils import get_clients_at_depth
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.raise_op import assert_op
 from pytensor.tensor.basic import (
     Alloc,
@@ -1205,7 +1204,7 @@ def mul_calculate(num, denum, aslist=False, out_type=None):
         out_dtype = ps.upcast(*[v.dtype for v in (num + denum)])
     else:
         out_dtype = out_type.dtype
-    one = _asarray(1, dtype=out_dtype)
+    one = np.asarray(1, dtype=out_dtype)
 
     v = reduce(np.multiply, num, one) / reduce(np.multiply, denum, one)
     if aslist:
@@ -1878,7 +1877,7 @@ def local_mul_zero(fgraph, node):
             # print 'MUL by value', value, node.inputs
             if value == 0:
                 # print '... returning zeros'
-                return [broadcast_arrays(_asarray(0, dtype=otype.dtype), *node.inputs)[0]]
+                return [broadcast_arrays(np.asarray(0, dtype=otype.dtype), *node.inputs)[0]]
 
 
 # TODO: Add this to the canonicalization to reduce redundancy.
@@ -2353,8 +2352,8 @@ def add_calculate(num, denum, aslist=False, out_type=None):
     if out_type is None:
         zero = 0.0
     else:
-        zero = _asarray(0, dtype=out_type.dtype)
-    # zero = 0.0 if out_type is None else _asarray(0,
+        zero = np.asarray(0, dtype=out_type.dtype)
+    # zero = 0.0 if out_type is None else np.asarray(0,
     # dtype=out_type.dtype)
     if out_type and out_type.dtype == "bool":
         if len(denum) == 0:
diff --git a/pytensor/tensor/shape.py b/pytensor/tensor/shape.py
index 4d14768a13..2193c11575 100644
--- a/pytensor/tensor/shape.py
+++ b/pytensor/tensor/shape.py
@@ -14,7 +14,6 @@
 from pytensor.graph.type import HasShape
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.scalar import int32
 from pytensor.tensor import _get_vector_length, as_tensor_variable, get_vector_length
 from pytensor.tensor import basic as ptb
@@ -81,7 +80,7 @@ def make_node(self, x):
     def perform(self, node, inp, out_):
         (x,) = inp
         (out,) = out_
-        out[0] = _asarray(np.shape(x), dtype="int64")
+        out[0] = np.asarray(np.shape(x), dtype="int64")
 
     def infer_shape(self, fgraph, node, in_shapes):
         return [[len(in_shapes[0])]]
@@ -258,7 +257,7 @@ def perform(self, node, inp, out_):
         (x,) = inp
         (out,) = out_
         if out[0] is None:
-            out[0] = _asarray(np.shape(x)[self.i], dtype="int64")
+            out[0] = np.asarray(np.shape(x)[self.i], dtype="int64")
         else:
             out[0][...] = np.shape(x)[self.i]
diff --git a/pytensor/tensor/sharedvar.py b/pytensor/tensor/sharedvar.py
index dad1751f9b..7ce15c2728 100644
--- a/pytensor/tensor/sharedvar.py
+++ b/pytensor/tensor/sharedvar.py
@@ -3,7 +3,6 @@
 import numpy as np
 
 from pytensor.compile import SharedVariable, shared_constructor
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.tensor import _get_vector_length
 from pytensor.tensor.type import TensorType
 from pytensor.tensor.variable import TensorVariable
@@ -128,7 +127,7 @@ def scalar_constructor(
         dtype = np.asarray(value).dtype
 
     dtype = str(dtype)
-    value = _asarray(value, dtype=dtype)
+    value = np.asarray(value, dtype=dtype)
     tensor_type = TensorType(dtype=str(value.dtype), shape=())
 
     # Do not pass the dtype to asarray because we want this to fail if
diff --git a/pytensor/tensor/sort.py b/pytensor/tensor/sort.py
index edcead0227..aae0e2b66e 100644
--- a/pytensor/tensor/sort.py
+++ b/pytensor/tensor/sort.py
@@ -5,7 +5,6 @@
 from pytensor.gradient import grad_undefined
 from pytensor.graph.basic import Apply
 from pytensor.graph.op import Op
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.tensor.basic import arange, as_tensor_variable, switch
 from pytensor.tensor.math import eq, ge
 from pytensor.tensor.type import TensorType
@@ -173,7 +172,7 @@ def make_node(self, input, axis=-1):
     def perform(self, node, inputs, output_storage):
         a, axis = inputs
         z = output_storage[0]
-        z[0] = _asarray(
+        z[0] = np.asarray(
             np.argsort(a, int(axis), self.kind),
             dtype=node.outputs[0].dtype,
         )
diff --git a/pytensor/tensor/subtensor.py b/pytensor/tensor/subtensor.py
index aa47a3415c..b0f4aaf9fc 100644
--- a/pytensor/tensor/subtensor.py
+++ b/pytensor/tensor/subtensor.py
@@ -18,7 +18,6 @@
 from pytensor.graph.utils import MethodNotDefined
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import Printer, pprint, set_precedence
 from pytensor.scalar.basic import ScalarConstant, ScalarVariable
 from pytensor.tensor import (
@@ -2093,7 +2092,7 @@ def perform(self, node, inp, out_):
         # if they don't, that should be an error (no array can have that
         # many elements on a 32-bit arch).
if i.dtype != np.intp: - i_ = _asarray(i, dtype=np.intp) + i_ = np.asarray(i, dtype=np.intp) if not np.can_cast(i.dtype, np.intp): # Check if there was actually an incorrect conversion if np.any(i != i_): diff --git a/pytensor/tensor/type.py b/pytensor/tensor/type.py index 3ba34a2903..5fdaba8fd8 100644 --- a/pytensor/tensor/type.py +++ b/pytensor/tensor/type.py @@ -12,7 +12,6 @@ from pytensor.graph.type import HasDataType, HasShape from pytensor.graph.utils import MetaType from pytensor.link.c.type import CType -from pytensor.misc.safe_asarray import _asarray from pytensor.utils import apply_across_args @@ -162,7 +161,7 @@ def filter(self, data, strict=False, allow_downcast=None) -> np.ndarray: pass elif isinstance(data, np.ndarray) and (data.dtype == self.numpy_dtype): if data.dtype.num != self.numpy_dtype.num: - data = _asarray(data, dtype=self.dtype) + data = np.asarray(data, dtype=self.dtype) # -- now fall through to ndim check elif strict: # If any of the two conditions above was not met, @@ -178,7 +177,7 @@ def filter(self, data, strict=False, allow_downcast=None) -> np.ndarray: else: if allow_downcast: # Convert to self.dtype, regardless of the type of data - data = _asarray(data, dtype=self.dtype) + data = np.asarray(data, dtype=self.dtype) # TODO: consider to pad shape with ones to make it consistent # with self.broadcastable... like vector->row type thing else: @@ -191,7 +190,7 @@ def filter(self, data, strict=False, allow_downcast=None) -> np.ndarray: # scalar array, see # http://projects.scipy.org/numpy/ticket/1611 # data = data.astype(self.dtype) - data = _asarray(data, dtype=self.dtype) + data = np.asarray(data, dtype=self.dtype) if up_dtype != self.dtype: err_msg = ( f"{self} cannot store a value of dtype {data.dtype} without " @@ -209,11 +208,11 @@ def filter(self, data, strict=False, allow_downcast=None) -> np.ndarray: ): # Special case where we allow downcasting of Python float # literals to floatX, even when floatX=='float32' - data = _asarray(data, self.dtype) + data = np.asarray(data, self.dtype) else: # data has to be converted. # Check that this conversion is lossless - converted_data = _asarray(data, self.dtype) + converted_data = np.asarray(data, self.dtype) # We use the `values_eq` static function from TensorType # to handle NaN values. 
if TensorType.values_eq( diff --git a/tests/compile/function/test_pfunc.py b/tests/compile/function/test_pfunc.py index b5cfaba5f0..0a9bda9846 100644 --- a/tests/compile/function/test_pfunc.py +++ b/tests/compile/function/test_pfunc.py @@ -9,7 +9,6 @@ from pytensor.compile.sharedvalue import shared from pytensor.configdefaults import config from pytensor.graph.utils import MissingInputError -from pytensor.misc.safe_asarray import _asarray from pytensor.tensor.math import sum as pt_sum from pytensor.tensor.type import ( bscalar, @@ -426,7 +425,7 @@ def test_givens(self): z = ivector() c = z * y - f = pfunc([y], (c + 7), givens={z: _asarray([4, 4, 4], dtype="int32")}) + f = pfunc([y], (c + 7), givens={z: np.asarray([4, 4, 4], dtype="int32")}) assert np.all(f([1, 1, 1]) == [11, 11, 11]) assert x.get_value() == 0 diff --git a/tests/compile/test_shared.py b/tests/compile/test_shared.py index dcc981b73f..fca3c6e2fc 100644 --- a/tests/compile/test_shared.py +++ b/tests/compile/test_shared.py @@ -5,7 +5,6 @@ from pytensor.compile.sharedvalue import SharedVariable, shared from pytensor.configdefaults import config from pytensor.link.c.type import generic -from pytensor.misc.safe_asarray import _asarray from pytensor.tensor.type import ( TensorType, bscalar, @@ -124,7 +123,7 @@ def test_use_numpy_strict_false(self): pass # check that an assignment of a perfect value results in no copying - uval = _asarray([5, 6, 7, 8], dtype="float64") + uval = np.asarray([5, 6, 7, 8], dtype="float64") u.set_value(uval, borrow=True) assert u.get_value(borrow=True) is uval diff --git a/tests/misc/test_may_share_memory.py b/tests/misc/test_may_share_memory.py index e9e74e11d6..9e80a3644a 100644 --- a/tests/misc/test_may_share_memory.py +++ b/tests/misc/test_may_share_memory.py @@ -13,7 +13,6 @@ scipy_imported = False from pytensor.misc.may_share_memory import may_share_memory -from pytensor.misc.safe_asarray import _asarray def may_share_memory_core(a, b): @@ -84,7 +83,7 @@ def test_may_share_memory_scipy(): b = scipy.sparse.csc_matrix(scipy.sparse.eye(4, 3)) def as_ar(a): - return _asarray(a, dtype="int32") + return np.asarray(a, dtype="int32") for a_, b_, rep in [ (a, a, True), diff --git a/tests/scan/test_basic.py b/tests/scan/test_basic.py index 880fcbd5fc..7bdf490b68 100644 --- a/tests/scan/test_basic.py +++ b/tests/scan/test_basic.py @@ -33,7 +33,6 @@ from pytensor.graph.op import Op from pytensor.graph.rewriting.basic import MergeOptimizer from pytensor.graph.utils import MissingInputError -from pytensor.misc.safe_asarray import _asarray from pytensor.raise_op import assert_op from pytensor.scan.basic import scan from pytensor.scan.op import Scan @@ -206,7 +205,7 @@ def scan_project_sum(*args, **kwargs): def asarrayX(value): - return _asarray(value, dtype=config.floatX) + return np.asarray(value, dtype=config.floatX) def clone_optimized_graph(f): diff --git a/tests/sparse/test_basic.py b/tests/sparse/test_basic.py index e4f2a69404..afae9b2187 100644 --- a/tests/sparse/test_basic.py +++ b/tests/sparse/test_basic.py @@ -14,7 +14,6 @@ from pytensor.gradient import GradientError from pytensor.graph.basic import Apply, Constant, applys_between from pytensor.graph.op import Op -from pytensor.misc.safe_asarray import _asarray from pytensor.sparse import ( CSC, CSM, @@ -259,7 +258,7 @@ def _rand(): # PyTensor don't like ulonglong type_num dtype = np.dtype(out_dtype) # Convert into dtype object. 
         if data[0].dtype.num != dtype.num and dtype.str == data[0].dtype.str:
-            data[0].data = _asarray(data[0].data, out_dtype)
+            data[0].data = np.asarray(data[0].data, out_dtype)
         assert data[0].dtype.num == dtype.num
     return (variable, data)
 
@@ -1913,7 +1912,7 @@ def test_may_share_memory():
     b = sp.sparse.csc_matrix(sp.sparse.eye(4, 3))
 
     def as_ar(a):
-        return _asarray(a, dtype="int32")
+        return np.asarray(a, dtype="int32")
 
     for a_, b_, rep in [
         (a, a, True),
diff --git a/tests/tensor/rewriting/test_elemwise.py b/tests/tensor/rewriting/test_elemwise.py
index 82cfa884af..f651b14e0a 100644
--- a/tests/tensor/rewriting/test_elemwise.py
+++ b/tests/tensor/rewriting/test_elemwise.py
@@ -16,7 +16,6 @@
 from pytensor.graph.rewriting.basic import check_stack_trace, out2in
 from pytensor.graph.rewriting.db import RewriteDatabaseQuery
 from pytensor.graph.rewriting.utils import rewrite_graph
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.raise_op import assert_op
 from pytensor.scalar.basic import Composite, float64
 from pytensor.tensor.basic import MakeVector
@@ -259,12 +258,12 @@ def my_init(dtype="float64", num=0):
     fxv = my_init("float32", 2)
     fyv = my_init("float32", 3)
     fzv = my_init("float32", 4)
-    fvv = _asarray(np.random.random(5), dtype="float32")
+    fvv = np.asarray(np.random.random(5), dtype="float32")
     fsv = np.asarray(np.random.random(), dtype="float32")
     dwv = my_init("float64", 5)
-    ixv = _asarray(my_init(num=60), dtype="int32")
-    iyv = _asarray(my_init(num=70), dtype="int32")
-    izv = _asarray(my_init(num=70), dtype="int32")
+    ixv = np.asarray(my_init(num=60), dtype="int32")
+    iyv = np.asarray(my_init(num=70), dtype="int32")
+    izv = np.asarray(my_init(num=70), dtype="int32")
     fwx = fw + fx
     ftanx = tan(fx)
diff --git a/tests/tensor/rewriting/test_math.py b/tests/tensor/rewriting/test_math.py
index 364e96a31c..019833a9d5 100644
--- a/tests/tensor/rewriting/test_math.py
+++ b/tests/tensor/rewriting/test_math.py
@@ -27,7 +27,6 @@
 )
 from pytensor.graph.rewriting.db import RewriteDatabaseQuery
 from pytensor.graph.rewriting.utils import is_same_graph, rewrite_graph
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import debugprint
 from pytensor.scalar import PolyGamma, Psi, TriGamma
 from pytensor.tensor import inplace
@@ -186,7 +185,7 @@ def inputs(xbc=(0, 0), ybc=(0, 0), zbc=(0, 0)):
 def test_add_canonizer_problem0():
     n_segments = 10
     label = lscalar("label")
-    segment_labels = label + _asarray([0] * n_segments, dtype="int64")
+    segment_labels = label + np.asarray([0] * n_segments, dtype="int64")
 
     r = segment_labels * 5
     f = function([label], r)
@@ -278,14 +277,14 @@ def test_elemwise_multiple_inputs_rewrites(self):
         dx, dy, dz = dmatrices("xyz")
         # fv = fvector('r').dimshuffle('x', 0)
         # dv = dvector('s').dimshuffle('x', 0)
-        fxv = _asarray(np.random.random(shp), dtype="float32")
-        fyv = _asarray(np.random.random(shp), dtype="float32")
-        fzv = _asarray(np.random.random(shp), dtype="float32")
-        # fvv = _asarray(np.random.random((shp[0]), dtype='float32').reshape(1, shp[0])
-        # dxv = _asarray(np.random.random((*shp), dtype='float64')
-        # dyv = _asarray(np.random.random((*shp), dtype='float64')
-        # dzv = _asarray(np.random.random((*shp), dtype='float64')
-        # dvv = _asarray(np.random.random((shp[0]), dtype='float64').reshape(1, shp[0])
+        fxv = np.asarray(np.random.random(shp), dtype="float32")
+        fyv = np.asarray(np.random.random(shp), dtype="float32")
+        fzv = np.asarray(np.random.random(shp), dtype="float32")
+        # fvv = np.asarray(np.random.random((shp[0]), dtype='float32').reshape(1, shp[0])
+        # dxv = np.asarray(np.random.random((*shp), dtype='float64')
+        # dyv = np.asarray(np.random.random((*shp), dtype='float64')
+        # dzv = np.asarray(np.random.random((*shp), dtype='float64')
+        # dvv = np.asarray(np.random.random((shp[0]), dtype='float64').reshape(1, shp[0])
         cases = [
             (fx + fy, (fx, fy), (fxv, fyv), 1, "float32"),
             (fx * fy, (fx, fy), (fxv, fyv), 1, "float32"),
@@ -409,14 +408,14 @@ def test_elemwise_multiple_inputs_rewrites_2(self):
         dx, dy, dz = dmatrices("xyz")
         fv = fvector("r").dimshuffle("x", 0)
         dv = dvector("s").dimshuffle("x", 0)
-        fxv = _asarray(np.random.random(shp), dtype="float32")
-        fyv = _asarray(np.random.random(shp), dtype="float32")
-        fzv = _asarray(np.random.random(shp), dtype="float32")
-        fvv = _asarray(np.random.random(shp[0]), dtype="float32").reshape(1, shp[0])
-        dxv = _asarray(np.random.random(shp), dtype="float64")
-        dyv = _asarray(np.random.random(shp), dtype="float64")
-        dzv = _asarray(np.random.random(shp), dtype="float64")
-        dvv = _asarray(np.random.random(shp[0]), dtype="float64").reshape(1, shp[0])
+        fxv = np.asarray(np.random.random(shp), dtype="float32")
+        fyv = np.asarray(np.random.random(shp), dtype="float32")
+        fzv = np.asarray(np.random.random(shp), dtype="float32")
+        fvv = np.asarray(np.random.random(shp[0]), dtype="float32").reshape(1, shp[0])
+        dxv = np.asarray(np.random.random(shp), dtype="float64")
+        dyv = np.asarray(np.random.random(shp), dtype="float64")
+        dzv = np.asarray(np.random.random(shp), dtype="float64")
+        dvv = np.asarray(np.random.random(shp[0]), dtype="float64").reshape(1, shp[0])
         cases = [
             (fx + fy, (fx, fy), (fxv, fyv), 1, "float32"),
             (fx * fy, (fx, fy), (fxv, fyv), 1, "float32"),
@@ -548,16 +547,16 @@ def test_mul_div_cases(self):
         dx, dy, dz, dw = dmatrices("xyzw")
         fv = fvector("r").dimshuffle("x", 0)
         dv = dvector("s").dimshuffle("x", 0)
-        fxv = _asarray(np.random.random(shp), dtype="float32")
-        fyv = _asarray(np.random.random(shp), dtype="float32")
-        fzv = _asarray(np.random.random(shp), dtype="float32")
-        fwv = _asarray(np.random.random(shp), dtype="float32")
-        fvv = _asarray(np.random.random(shp[0]), dtype="float32").reshape(1, shp[0])
-        dxv = _asarray(np.random.random(shp), dtype="float64")
-        dyv = _asarray(np.random.random(shp), dtype="float64")
-        dzv = _asarray(np.random.random(shp), dtype="float64")
-        dwv = _asarray(np.random.random(shp), dtype="float64")
-        dvv = _asarray(np.random.random(shp[0]), dtype="float64").reshape(1, shp[0])
+        fxv = np.asarray(np.random.random(shp), dtype="float32")
+        fyv = np.asarray(np.random.random(shp), dtype="float32")
+        fzv = np.asarray(np.random.random(shp), dtype="float32")
+        fwv = np.asarray(np.random.random(shp), dtype="float32")
+        fvv = np.asarray(np.random.random(shp[0]), dtype="float32").reshape(1, shp[0])
+        dxv = np.asarray(np.random.random(shp), dtype="float64")
+        dyv = np.asarray(np.random.random(shp), dtype="float64")
+        dzv = np.asarray(np.random.random(shp), dtype="float64")
+        dwv = np.asarray(np.random.random(shp), dtype="float64")
+        dvv = np.asarray(np.random.random(shp[0]), dtype="float64").reshape(1, shp[0])
 
         # We must be sure that the `AlgebraicCanonizer` is working, but that we don't have other
         # rewrites that could hide bugs in the `AlgebraicCanonizer` as `local_elemwise_fusion`
@@ -911,13 +910,13 @@ def test_multiple_case_that_fail(self):
         shp = (4, 4)
         fx, fy, fz = fmatrices("xyz")
         dx, dy, dz = dmatrices("xyz")
-        fxv = _asarray(np.random.random(shp), dtype="float32")
-        fyv = _asarray(np.random.random(shp), dtype="float32")
-        fzv = _asarray(np.random.random(shp), dtype="float32")
-        dxv = _asarray(np.random.random(shp), dtype="float32")
-        dyv = _asarray(np.random.random(shp), dtype="float32")
-        dzv = _asarray(np.random.random(shp), dtype="float32")
-        # fvv = _asarray(np.random.random((shp[0]), dtype='float32').reshape(1, shp[0])
+        fxv = np.asarray(np.random.random(shp), dtype="float32")
+        fyv = np.asarray(np.random.random(shp), dtype="float32")
+        fzv = np.asarray(np.random.random(shp), dtype="float32")
+        dxv = np.asarray(np.random.random(shp), dtype="float32")
+        dyv = np.asarray(np.random.random(shp), dtype="float32")
+        dzv = np.asarray(np.random.random(shp), dtype="float32")
+        # fvv = np.asarray(np.random.random((shp[0]), dtype='float32').reshape(1, shp[0])
 
         mode = get_default_mode()
diff --git a/tests/tensor/test_basic.py b/tests/tensor/test_basic.py
index c3db43dddd..323d401f42 100644
--- a/tests/tensor/test_basic.py
+++ b/tests/tensor/test_basic.py
@@ -18,7 +18,6 @@
 from pytensor.graph.basic import Apply, equal_computations
 from pytensor.graph.op import Op
 from pytensor.graph.replace import clone_replace
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.raise_op import Assert
 from pytensor.scalar import autocast_float, autocast_float_as
 from pytensor.tensor import NoneConst, vectorize
@@ -2264,8 +2263,8 @@ def test_flatten_ndim_default():
     a = dmatrix()
     c = flatten(a)
     f = inplace_func([a], c)
-    a_val = _asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")
-    c_val = _asarray([0, 1, 2, 3, 4, 5], dtype="float64")
+    a_val = np.asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")
+    c_val = np.asarray([0, 1, 2, 3, 4, 5], dtype="float64")
     assert np.all(f(a_val) == c_val)
     f = inplace_func([a], c)
     assert np.all(f(a_val) == c_val)
@@ -2277,8 +2276,8 @@ def test_flatten_scalar():
     a = dscalar()
     c = flatten(a)
     f = inplace_func([a], c)
-    a_val = _asarray(3.0, dtype="float64")
-    c_val = _asarray([3.0], dtype="float64")
+    a_val = np.asarray(3.0, dtype="float64")
+    c_val = np.asarray([3.0], dtype="float64")
     assert np.all(f(a_val) == c_val)
     f = inplace_func([a], c)
     assert np.all(f(a_val) == c_val)
@@ -2290,8 +2289,8 @@ def test_flatten_ndim1():
     a = dmatrix()
     c = flatten(a, 1)
     f = inplace_func([a], c)
-    a_val = _asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")
-    c_val = _asarray([0, 1, 2, 3, 4, 5], dtype="float64")
+    a_val = np.asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")
+    c_val = np.asarray([0, 1, 2, 3, 4, 5], dtype="float64")
     assert np.all(f(a_val) == c_val)
     f = inplace_func([a], c)
     assert np.all(f(a_val) == c_val)
@@ -2303,7 +2302,7 @@ def test_flatten_ndim2():
     a = dmatrix()
     c = flatten(a, 2)
     f = inplace_func([a], c)
-    a_val = _asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")
+    a_val = np.asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")
     assert np.all(f(a_val) == a_val)
     f = inplace_func([a], c)
     assert np.all(f(a_val) == a_val)
@@ -2316,8 +2315,8 @@ def test_flatten_ndim2_of_3():
     a = TensorType("float64", shape=(None, None, None))()
     c = flatten(a, 2)
     f = inplace_func([a], c)
-    a_val = _asarray([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype="float64")
-    c_val = _asarray([[0, 1, 2, 3], [4, 5, 6, 7]], dtype="float64")
+    a_val = np.asarray([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype="float64")
+    c_val = np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]], dtype="float64")
     assert np.all(f(a_val) == c_val)
     f = inplace_func([a], c)
     assert np.all(f(a_val) == c_val)
@@ -3240,8 +3239,8 @@ def test_autocast_custom():
         with autocast_float_as("float32"):
             assert (dvector() + 1.1).dtype == "float64"
             assert (fvector() + 1.1).dtype == "float32"
-            assert (fvector() + _asarray(1.1, dtype="float64")).dtype == "float64"
-            assert (fvector() + _asarray(1.1, dtype="float32")).dtype == "float32"
+            assert (fvector() + np.asarray(1.1, dtype="float64")).dtype == "float64"
+            assert (fvector() + np.asarray(1.1, dtype="float32")).dtype == "float32"
             assert (dvector() + 1).dtype == "float64"
             assert (fvector() + 1).dtype == "float32"
@@ -3251,8 +3250,8 @@ def test_autocast_custom():
             assert (dvector() + 1.1).dtype == "float64"
             assert (fvector() + 1.1).dtype == "float64"
             assert (fvector() + 1.0).dtype == "float64"
-            assert (fvector() + _asarray(1.1, dtype="float64")).dtype == "float64"
-            assert (fvector() + _asarray(1.1, dtype="float32")).dtype == "float32"
+            assert (fvector() + np.asarray(1.1, dtype="float64")).dtype == "float64"
+            assert (fvector() + np.asarray(1.1, dtype="float32")).dtype == "float32"
             assert (dvector() + 1).dtype == "float64"
             assert (fvector() + 1).dtype == "float32"
diff --git a/tests/tensor/test_blas.py b/tests/tensor/test_blas.py
index c2a9c95e18..3b6115a107 100644
--- a/tests/tensor/test_blas.py
+++ b/tests/tensor/test_blas.py
@@ -19,7 +19,6 @@
 from pytensor.graph.fg import FunctionGraph
 from pytensor.graph.rewriting.basic import in2out
 from pytensor.graph.utils import InconsistencyError
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.tensor import inplace
 from pytensor.tensor.basic import as_tensor_variable
 from pytensor.tensor.blas import (
@@ -309,7 +308,7 @@ def test_transposes(self):
         C = rng.random((4, 5))[:, :4]
 
         def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"):
-            z, a, x, y, b = (_asarray(p, dtype=dt) for p in (z, a, x, y, b))
+            z, a, x, y, b = (np.asarray(p, dtype=dt) for p in (z, a, x, y, b))
             # z_orig = z.copy()
             z_after = self._gemm(z, a, x, y, b)
@@ -368,7 +367,7 @@ def test_non_contiguous(self):
         C = rng.random((4, 4, 3))
 
         def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"):
-            z, a, x, y, b = (_asarray(p, dtype=dt) for p in (z, a, x, y, b))
+            z, a, x, y, b = (np.asarray(p, dtype=dt) for p in (z, a, x, y, b))
             z_orig = z.copy()
             z_after = np.zeros_like(z_orig)
             for i in range(3):
@@ -1495,8 +1494,8 @@ def test_gemv_broadcast(self):
     def test_gemv_dimensions(self):
         A = matrix("A")
         x, y = vectors("x", "y")
-        alpha = shared(_asarray(1.0, dtype=config.floatX), name="alpha")
-        beta = shared(_asarray(1.0, dtype=config.floatX), name="beta")
+        alpha = shared(np.asarray(1.0, dtype=config.floatX), name="alpha")
+        beta = shared(np.asarray(1.0, dtype=config.floatX), name="beta")
 
         z = beta * y + alpha * dot(A, x)
         f = function([A, x, y], z)
@@ -2092,7 +2091,7 @@ class TestBlasStrides:
     mode = mode.including("fast_run").excluding("gpu", "c_blas", "scipy_blas")
 
     def random(self, *shape, rng=None):
-        return _asarray(rng.random(shape), dtype=self.dtype)
+        return np.asarray(rng.random(shape), dtype=self.dtype)
 
     def cmp_dot22(self, b_shp, c_shp, rng):
         av = np.zeros((0, 0), dtype=self.dtype)
diff --git a/tests/tensor/test_blas_c.py b/tests/tensor/test_blas_c.py
index ee02bff71d..8298cae5ba 100644
--- a/tests/tensor/test_blas_c.py
+++ b/tests/tensor/test_blas_c.py
@@ -5,7 +5,6 @@
 import pytensor
 import pytensor.tensor as pt
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.tensor.basic import AllocEmpty
 from pytensor.tensor.blas import Ger
 from pytensor.tensor.blas_c import CGemv, CGer, check_force_gemv_init
@@ -249,8 +248,8 @@ def test_gemv1(self):
         self.t_gemv1((0, 0))
 
     def test_gemv_dimensions(self, dtype="float32"):
-        alpha = pytensor.shared(_asarray(1.0, dtype=dtype), name="alpha")
-        beta = pytensor.shared(_asarray(1.0, dtype=dtype), name="beta")
+        alpha = pytensor.shared(np.asarray(1.0, dtype=dtype), name="alpha")
+        beta = pytensor.shared(np.asarray(1.0, dtype=dtype), name="beta")
 
         z = beta * self.y + alpha * pt.dot(self.A, self.x)
         f = pytensor.function([self.A, self.x, self.y], z, mode=self.mode)
diff --git a/tests/tensor/test_casting.py b/tests/tensor/test_casting.py
index 4ddfd40ed8..6907988369 100644
--- a/tests/tensor/test_casting.py
+++ b/tests/tensor/test_casting.py
@@ -4,7 +4,6 @@
 import pytensor
 from pytensor import function
 from pytensor.compile.io import In
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.tensor.basic import (
     _convert_to_complex64,
     _convert_to_complex128,
@@ -36,7 +35,7 @@ def test_0(self, op_fn, type_fn):
         x = type_fn()
         f = function([x], op_fn(x))
 
-        xval = _asarray(np.random.random(10) * 10, dtype=type_fn.dtype)
+        xval = np.asarray(np.random.random(10) * 10, dtype=type_fn.dtype)
         yval = f(xval)
         assert str(yval.dtype) == op_fn.scalar_op.output_types_preference.spec[0].dtype
diff --git a/tests/tensor/test_inplace.py b/tests/tensor/test_inplace.py
index dc5a432eca..a31a26df07 100644
--- a/tests/tensor/test_inplace.py
+++ b/tests/tensor/test_inplace.py
@@ -2,7 +2,6 @@
 import pytest
 
 from pytensor import config
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.scalar.basic import round_half_away_from_zero_vec, upcast
 from pytensor.tensor.inplace import (
     abs_inplace,
@@ -456,8 +455,8 @@ def test_XOR_inplace():
 
     for dtype in dtype:
         x, y = vector(dtype=dtype), vector(dtype=dtype)
-        l = _asarray([0, 0, 1, 1], dtype=dtype)
-        r = _asarray([0, 1, 0, 1], dtype=dtype)
+        l = np.asarray([0, 0, 1, 1], dtype=dtype)
+        r = np.asarray([0, 1, 0, 1], dtype=dtype)
         ix = x
         ix = xor_inplace(ix, y)
         gn = inplace_func([x, y], ix)
diff --git a/tests/tensor/test_math.py b/tests/tensor/test_math.py
index d793834817..6cee6d9125 100644
--- a/tests/tensor/test_math.py
+++ b/tests/tensor/test_math.py
@@ -23,7 +23,6 @@
 from pytensor.graph.fg import FunctionGraph
 from pytensor.graph.replace import vectorize_node
 from pytensor.link.c.basic import DualLinker
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.printing import pprint
 from pytensor.raise_op import Assert
 from pytensor.tensor import blas, blas_c
@@ -1802,8 +1801,8 @@ def test_or(self):
         for dtype in self.dtype:
             x, y = vector(dtype=dtype), vector(dtype=dtype)
             fn = inplace_func([x, y], x | y)
-            l = _asarray([0, 0, 1, 1], dtype=dtype)
-            r = _asarray([0, 1, 0, 1], dtype=dtype)
+            l = np.asarray([0, 0, 1, 1], dtype=dtype)
+            r = np.asarray([0, 1, 0, 1], dtype=dtype)
             v = fn(l, r)
             assert np.all(v == operator.or_(l, r)), (l, r, v)
 
@@ -1811,8 +1810,8 @@ def test_XOR(self):
         for dtype in self.dtype:
             x, y = vector(dtype=dtype), vector(dtype=dtype)
             fn = inplace_func([x, y], x ^ y)
-            l = _asarray([0, 0, 1, 1], dtype=dtype)
-            r = _asarray([0, 1, 0, 1], dtype=dtype)
+            l = np.asarray([0, 0, 1, 1], dtype=dtype)
+            r = np.asarray([0, 1, 0, 1], dtype=dtype)
             v = fn(l, r)
             assert np.all(v == operator.xor(l, r)), (l, r, v)
 
@@ -1820,8 +1819,8 @@ def test_and(self):
         for dtype in self.dtype:
            x, y = vector(dtype=dtype), vector(dtype=dtype)
            fn = inplace_func([x, y], x & y)
-            l = _asarray([0, 0, 1, 1], dtype=dtype)
-            r = _asarray([0, 1, 0, 1], dtype=dtype)
+            l = np.asarray([0, 0, 1, 1], dtype=dtype)
+            r = np.asarray([0, 1, 0, 1], dtype=dtype)
             v = fn(l, r)
             assert np.all(v == operator.and_(l, r)), (l, r, v)
 
@@ -1836,7 +1835,7 @@ def test_inv(self):
             [0, 1, 0, 1],
             [-1, 2**16, 2**16 - 1],
         ]:
-            l = _asarray([0, 0, 1, 1], dtype=dtype)
+            l = np.asarray([0, 0, 1, 1], dtype=dtype)
             v = fn(l)
             assert np.all(v == ~l), (l, v)
diff --git a/tests/tensor/test_shape.py b/tests/tensor/test_shape.py
index e30ff312c1..2ffcb25fe5 100644
--- a/tests/tensor/test_shape.py
+++ b/tests/tensor/test_shape.py
@@ -10,7 +10,6 @@
 from pytensor.graph.basic import Variable, equal_computations
 from pytensor.graph.replace import clone_replace, vectorize_node
 from pytensor.graph.type import Type
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.scalar.basic import ScalarConstant
 from pytensor.tensor import as_tensor_variable, broadcast_to, get_vector_length, row
 from pytensor.tensor.basic import MakeVector, constant, stack
@@ -165,9 +164,9 @@ def test_basics(self):
         assert np.array_equal(a_val, a_val_copy)
 
         # test that it works with inplace operations
-        a_val = _asarray([0, 1, 2, 3, 4, 5], dtype="float64")
-        a_val_copy = _asarray([0, 1, 2, 3, 4, 5], dtype="float64")
-        b_val = _asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")
+        a_val = np.asarray([0, 1, 2, 3, 4, 5], dtype="float64")
+        a_val_copy = np.asarray([0, 1, 2, 3, 4, 5], dtype="float64")
+        b_val = np.asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")
 
         f_sub = self.function([a, b], c - b)
         assert np.array_equal(f_sub(a_val, b_val), np.zeros_like(b_val))
@@ -175,7 +174,7 @@ def test_basics(self):
 
         # verify gradient
         def just_vals(v):
-            return Reshape(2)(v, _asarray([2, 3], dtype="int32"))
+            return Reshape(2)(v, np.asarray([2, 3], dtype="int32"))
 
         utt.verify_grad(just_vals, [a_val], mode=self.mode)
diff --git a/tests/tensor/utils.py b/tests/tensor/utils.py
index 85c48a42dd..1ed3b55a89 100644
--- a/tests/tensor/utils.py
+++ b/tests/tensor/utils.py
@@ -12,7 +12,6 @@
 from pytensor.compile.mode import get_default_mode
 from pytensor.configdefaults import config
 from pytensor.graph.utils import MethodNotDefined
-from pytensor.misc.safe_asarray import _asarray
 from pytensor.tensor.type import (
     TensorType,
     complex_dtypes,
@@ -315,7 +314,7 @@ def _numpy_true_div(x, y):
     out = np.true_divide(x, y)
     # Use floatX as the result of int / int
     if x.dtype in discrete_dtypes and y.dtype in discrete_dtypes:
-        out = _asarray(out, dtype=config.floatX)
+        out = np.asarray(out, dtype=config.floatX)
    return out
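
Note: the replacement applied throughout this patch is mechanical: `_asarray(x, dtype=d)` becomes `np.asarray(x, dtype=d)`, and the one call site that accepted the `"floatX"` alias (`convert` in pytensor/scalar/basic.py) now resolves it explicitly before casting. A minimal sketch of the pattern, assuming a stand-in `floatX` variable in place of `pytensor.config.floatX` (the helper name below is hypothetical, not part of the patch):

    import numpy as np

    floatX = "float32"  # stand-in for pytensor.config.floatX

    def cast_like_patch(x, dtype):
        # Resolve the "floatX" alias first, as the patched `convert` does,
        # then let NumPy perform the cast. `ndarray.astype` returns an array
        # of exactly the requested dtype, so the dtype-num consistency check
        # that `_asarray` carried out is unnecessary on modern NumPy.
        if dtype == "floatX":
            dtype = floatX
        return np.asarray(x).astype(dtype)

    assert cast_like_patch([1, 2, 3], "floatX").dtype == np.dtype("float32")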