I am done

This commit is contained in:
2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

View File

@ -0,0 +1,34 @@
# mypy: ignore-errors
from . import fft, linalg, random
from ._dtypes import * # noqa: F403
from ._funcs import * # noqa: F403
from ._getlimits import finfo, iinfo
from ._ndarray import (
array,
asarray,
ascontiguousarray,
can_cast,
from_dlpack,
ndarray,
newaxis,
result_type,
)
from ._ufuncs import * # noqa: F403
from ._util import AxisError, UFuncTypeError
from math import pi, e # usort: skip
# NumPy-compatible namespace aliases.  ``all``/``any`` re-export whatever the
# star-imports above bound to these names (presumably the reductions from
# ``_funcs`` — verify against ``_funcs.__all__``); ``alltrue``/``sometrue``
# are NumPy's legacy spellings of the same functions.
all = all
alltrue = all
any = any
sometrue = any

# Scalar constants mirroring ``np.inf`` / ``np.nan``.
inf = float("inf")
nan = float("nan")

# NOTE(review): NumPy's ``False_``/``True_`` are ``np.bool_`` scalars; here
# they are plain python bools.
False_ = False
True_ = True

View File

@ -0,0 +1,85 @@
# mypy: ignore-errors
"""Export torch work functions for binary ufuncs, rename/tweak to match numpy.
This listing is further exported to public symbols in the `torch._numpy/_ufuncs.py` module.
"""
import torch
from torch import ( # noqa: F401
add,
arctan2,
bitwise_and,
bitwise_left_shift as left_shift,
bitwise_or,
bitwise_right_shift as right_shift,
bitwise_xor,
copysign,
divide,
eq as equal,
float_power,
floor_divide,
fmax,
fmin,
fmod,
gcd,
greater,
greater_equal,
heaviside,
hypot,
lcm,
ldexp,
less,
less_equal,
logaddexp,
logaddexp2,
logical_and,
logical_or,
logical_xor,
maximum,
minimum,
multiply,
nextafter,
not_equal,
pow as power,
remainder,
remainder as mod,
subtract,
true_divide,
)
from . import _dtypes_impl, _util
# work around torch limitations w.r.t. numpy
def matmul(x, y):
    """Matrix product of two tensors with NumPy-style dtype handling.

    Promotes ``x`` and ``y`` to their common result dtype first, and — where
    torch lacks a matmul kernel — runs the product in a wider "work dtype"
    and casts the result back.
    """
    # work around:
    # - RuntimeError: expected scalar type Int but found Double
    # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Bool'
    # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
    dtype = _dtypes_impl.result_type_impl(x, y)
    is_bool = dtype == torch.bool
    # the half workaround only applies when at least one operand is on CPU
    is_half = (x.dtype == torch.float16 or y.dtype == torch.float16) and (
        x.is_cpu or y.is_cpu
    )

    work_dtype = dtype
    if is_bool:
        work_dtype = torch.uint8
    if is_half:
        work_dtype = torch.float32

    x = _util.cast_if_needed(x, work_dtype)
    y = _util.cast_if_needed(y, work_dtype)

    result = torch.matmul(x, y)

    if work_dtype != dtype:
        # cast back to the NumPy-mandated result dtype
        result = result.to(dtype)

    return result
# a stub implementation of divmod, should be improved after
# https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch
def divmod(x, y):
    """Return the pair ``(x // y, x % y)`` (elementwise for tensors)."""
    quotient = x // y
    remainder = x % y
    return quotient, remainder

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,453 @@
# mypy: ignore-errors
""" Define analogs of numpy dtypes supported by pytorch.
Define the scalar types and supported dtypes and numpy <--> torch dtype mappings.
"""
import builtins
import torch
from . import _dtypes_impl
# ### Scalar types ###


class generic:
    """Root of the scalar-type hierarchy, mirroring ``np.generic``.

    "Instantiating" any scalar type actually produces a 0-D array of the
    corresponding dtype.
    """

    name = "generic"

    def __new__(cls, value):
        # NumPy scalars are modelled as 0-D arrays
        # so a call to np.float32(4) produces a 0-D array.
        from ._ndarray import asarray, ndarray

        # accept np.float64("inf") / np.float64("nan") style strings
        if isinstance(value, str) and value in ["inf", "nan"]:
            value = {"inf": torch.inf, "nan": torch.nan}[value]

        if isinstance(value, ndarray):
            return value.astype(cls)
        else:
            return asarray(value, dtype=cls)
##################
# abstract types #
##################

# Abstract categories mirroring NumPy's scalar-type hierarchy; they carry no
# torch dtype and exist only for `issubdtype`-style checks.


class number(generic):
    name = "number"


class integer(number):
    name = "integer"


class inexact(number):
    name = "inexact"


class signedinteger(integer):
    name = "signedinteger"


class unsignedinteger(integer):
    name = "unsignedinteger"


class floating(inexact):
    name = "floating"


class complexfloating(inexact):
    name = "complexfloating"


# Names of the abstract types; `issubdtype` resolves these strings (dtypes are
# serialized as strings in dynamo graphs).
_abstract_dtypes = [
    "generic",
    "number",
    "integer",
    "signedinteger",
    "unsignedinteger",
    "inexact",
    "floating",
    "complexfloating",
]
# ##### concrete types

# signed integers
# Each concrete type records its NumPy name, its single-character NumPy
# typecode, and the torch dtype it wraps.


class int8(signedinteger):
    name = "int8"
    typecode = "b"
    torch_dtype = torch.int8


class int16(signedinteger):
    name = "int16"
    typecode = "h"
    torch_dtype = torch.int16


class int32(signedinteger):
    name = "int32"
    typecode = "i"
    torch_dtype = torch.int32


class int64(signedinteger):
    name = "int64"
    typecode = "l"
    torch_dtype = torch.int64
# unsigned integers


class uint8(unsignedinteger):
    name = "uint8"
    typecode = "B"
    torch_dtype = torch.uint8


class uint16(unsignedinteger):
    name = "uint16"
    typecode = "H"
    torch_dtype = torch.uint16


# BUG FIX: uint32/uint64 previously subclassed `signedinteger`, so
# issubdtype(uint32, unsignedinteger) was False and
# issubdtype(uint32, signedinteger) was True — the opposite of NumPy's
# hierarchy (np.uint32 is an np.unsignedinteger).


class uint32(unsignedinteger):
    name = "uint32"
    typecode = "I"
    torch_dtype = torch.uint32


class uint64(unsignedinteger):
    name = "uint64"
    typecode = "L"
    torch_dtype = torch.uint64
# floating point


class float16(floating):
    name = "float16"
    typecode = "e"
    torch_dtype = torch.float16


class float32(floating):
    name = "float32"
    typecode = "f"
    torch_dtype = torch.float32


class float64(floating):
    name = "float64"
    typecode = "d"
    torch_dtype = torch.float64


# complex


class complex64(complexfloating):
    name = "complex64"
    typecode = "F"
    torch_dtype = torch.complex64


class complex128(complexfloating):
    name = "complex128"
    typecode = "D"
    torch_dtype = torch.complex128


# boolean


class bool_(generic):
    name = "bool_"
    typecode = "?"
    torch_dtype = torch.bool
# name aliases: NumPy's alternative spellings for the concrete types above
_name_aliases = {
    "intp": int64,
    "int_": int64,
    "intc": int32,
    "byte": int8,
    "short": int16,
    "longlong": int64,  # XXX: is this correct?
    "ulonglong": uint64,
    "ubyte": uint8,
    "half": float16,
    "single": float32,
    "double": float64,
    "float_": float64,
    "csingle": complex64,
    "singlecomplex": complex64,
    "cdouble": complex128,
    "cfloat": complex128,
    "complex_": complex128,
}
# We register float_ = float64 and so on; at module level ``vars()`` is the
# module namespace, so each alias becomes a module attribute.
for name, obj in _name_aliases.items():
    vars()[name] = obj
# Replicate this NumPy-defined way of grouping scalar types,
# cf tests/core/test_scalar_methods.py
sctypes = {
    "int": [int8, int16, int32, int64],
    "uint": [uint8, uint16, uint32, uint64],
    "float": [float16, float32, float64],
    "complex": [complex64, complex128],
    "others": [bool_],
}

# Support mappings/functions: look up a concrete scalar type by NumPy name,
# by NumPy typecode, and by torch dtype, respectively.
_names = {st.name: st for cat in sctypes for st in sctypes[cat]}
_typecodes = {st.typecode: st for cat in sctypes for st in sctypes[cat]}
_torch_dtypes = {st.torch_dtype: st for cat in sctypes for st in sctypes[cat]}
# Width-style shorthand aliases ("i8", "f4", ...) accepted by ``dtype(...)``.
_aliases = {
    "u1": uint8,
    "i1": int8,
    "i2": int16,
    "i4": int32,
    "i8": int64,
    "b": int8,  # XXX: srsly?
    "f2": float16,
    "f4": float32,
    "f8": float64,
    "c8": complex64,
    "c16": complex128,
    # numpy-specific trailing underscore
    "bool_": bool_,
}

# Python builtin scalar types (and their string names) map to the
# default-width dtypes.
_python_types = {
    int: int64,
    float: float64,
    complex: complex128,
    builtins.bool: bool_,
    # also allow stringified names of python types
    int.__name__: int64,
    float.__name__: float64,
    complex.__name__: complex128,
    builtins.bool.__name__: bool_,
}
def sctype_from_string(s):
    """Normalize a string value: a type 'name' or a typecode or a width alias."""
    # try each lookup table in the same priority order as NumPy-style names,
    # legacy aliases, typecodes, width aliases, and python builtin types
    for table in (_names, _name_aliases, _typecodes, _aliases, _python_types):
        if s in table:
            return table[s]
    raise TypeError(f"data type {s!r} not understood")
def sctype_from_torch_dtype(torch_dtype):
    """Map a ``torch.dtype`` to its wrapper scalar type (KeyError if unknown)."""
    return _torch_dtypes[torch_dtype]
# ### DTypes. ###


def dtype(arg):
    """Construct a DType from a dtype-like; None means the default float dtype."""
    if arg is None:
        arg = _dtypes_impl.default_dtypes().float_dtype
    return DType(arg)
class DType:
    """A NumPy-``dtype``-like wrapper around a single scalar type.

    Accepts torch dtypes, tensors, scalar types, other DTypes, objects with a
    ``.dtype`` attribute, and dtype-like strings.
    """

    def __init__(self, arg):
        # a pytorch object?
        if isinstance(arg, torch.dtype):
            sctype = _torch_dtypes[arg]
        elif isinstance(arg, torch.Tensor):
            sctype = _torch_dtypes[arg.dtype]
        # a scalar type?
        elif issubclass_(arg, generic):
            sctype = arg
        # a dtype already?
        elif isinstance(arg, DType):
            sctype = arg._scalar_type
        # a has a right attribute?
        elif hasattr(arg, "dtype"):
            sctype = arg.dtype._scalar_type
        else:
            sctype = sctype_from_string(arg)
        self._scalar_type = sctype

    @property
    def name(self):
        return self._scalar_type.name

    @property
    def type(self):
        # the wrapped scalar type, cf. np.dtype.type
        return self._scalar_type

    @property
    def kind(self):
        # https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html
        # the first letter of the scalar-type name ("i", "u", "f", "c", "b")
        # happens to match NumPy's kind codes for the types defined here
        return _torch_dtypes[self.torch_dtype].name[0]

    @property
    def typecode(self):
        return self._scalar_type.typecode

    def __eq__(self, other):
        if isinstance(other, DType):
            return self._scalar_type == other._scalar_type
        try:
            other_instance = DType(other)
        except TypeError:
            # not convertible to a dtype: compare unequal
            return False
        return self._scalar_type == other_instance._scalar_type

    @property
    def torch_dtype(self):
        return self._scalar_type.torch_dtype

    def __hash__(self):
        # hash by name, consistent with __eq__ on the scalar type
        return hash(self._scalar_type.name)

    def __repr__(self):
        return f'dtype("{self.name}")'

    __str__ = __repr__

    @property
    def itemsize(self):
        # element size in bytes: materialize a 0-D array and ask torch
        elem = self.type(1)
        return elem.tensor.element_size()

    def __getstate__(self):
        # pickle round-trips via the scalar type only
        return self._scalar_type

    def __setstate__(self, value):
        self._scalar_type = value
# NumPy's ``np.typecodes`` table, restricted to the dtypes supported here.
typecodes = {
    "All": "efdFDBbhil?",
    "AllFloat": "efdFD",
    "AllInteger": "Bbhil",
    "Integer": "bhil",
    "UnsignedInteger": "B",
    "Float": "efd",
    "Complex": "FD",
}
# ### Defaults and dtype discovery


def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"):
    """Set the (global) defaults for fp, complex, and int dtypes.

    The complex dtype is inferred from the float (fp) dtype. It has
    a width at least twice the width of the float dtype,
    i.e., it's complex128 for float64 and complex64 for float32.

    Parameters
    ----------
    fp_dtype
        Allowed values are "numpy", "pytorch" or dtype_like things which
        can be converted into a DType instance.
        Default is "numpy" (i.e. float64).
    int_dtype
        Allowed values are "numpy", "pytorch" or dtype_like things which
        can be converted into a DType instance.
        Default is "numpy" (i.e. int64).

    Returns
    -------
    The old default dtype state: a namedtuple with attributes ``float_dtype``,
    ``complex_dtype`` and ``int_dtype``. These attributes store *pytorch*
    dtypes.

    Notes
    -----
    This function has a side effect: it sets the global state with the
    provided dtypes.
    """
    # normalize dtype-like arguments to torch dtypes; the sentinel strings
    # "numpy" / "pytorch" pass through
    if fp_dtype not in ["numpy", "pytorch"]:
        fp_dtype = dtype(fp_dtype).torch_dtype
    if int_dtype not in ["numpy", "pytorch"]:
        int_dtype = dtype(int_dtype).torch_dtype

    if fp_dtype == "numpy":
        float_dtype = torch.float64
    elif fp_dtype == "pytorch":
        float_dtype = torch.float32
    else:
        float_dtype = fp_dtype

    # complex width is at least twice the float width
    complex_dtype = {
        torch.float64: torch.complex128,
        torch.float32: torch.complex64,
        torch.float16: torch.complex64,
    }[float_dtype]

    # both sentinels mean int64; anything else was normalized above
    if int_dtype in ["numpy", "pytorch"]:
        int_dtype = torch.int64

    new_defaults = _dtypes_impl.DefaultDTypes(
        float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype
    )

    # Snapshot the old state *before* installing the new one.
    # BUG FIX: previously this returned the ``default_dtypes`` function object
    # itself (missing call parentheses), not the documented namedtuple, so the
    # returned "state" could not be used to restore the defaults.
    old_defaults = _dtypes_impl.default_dtypes()
    _dtypes_impl._default_dtypes = new_defaults
    return old_defaults
def issubclass_(arg, klass):
    """``issubclass`` that returns False, instead of raising, for non-classes."""
    try:
        result = issubclass(arg, klass)
    except TypeError:
        # `arg` is not a class at all
        result = False
    return result
def issubdtype(arg1, arg2):
    """NumPy-like ``np.issubdtype``, also accepting stringified abstract types."""
    # cf https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numerictypes.py#L356-L420
    # We also accept strings even if NumPy doesn't as dtypes are serialized as their
    # string representation in dynamo's graph
    def str_to_abstract(t):
        # resolve e.g. "floating" to the abstract class defined in this module
        if isinstance(t, str) and t in _abstract_dtypes:
            return globals()[t]
        return t

    arg1 = str_to_abstract(arg1)
    arg2 = str_to_abstract(arg2)

    # anything that is not already a scalar type goes through the dtype machinery
    if not issubclass_(arg1, generic):
        arg1 = dtype(arg1).type
    if not issubclass_(arg2, generic):
        arg2 = dtype(arg2).type
    return issubclass(arg1, arg2)
# Public API: the dtype machinery plus every concrete type, alias and
# abstract-type name defined above.
__all__ = ["dtype", "DType", "typecodes", "issubdtype", "set_default_dtype", "sctypes"]
__all__ += list(_names.keys())  # noqa: PLE0605
__all__ += list(_name_aliases.keys())  # noqa: PLE0605
__all__ += _abstract_dtypes  # noqa: PLE0605

View File

@ -0,0 +1,217 @@
# mypy: ignore-errors
"""Dtypes/scalar type implementaions with torch dtypes.
Here `dtype` is always a torch.dtype, this module knows nothing about
scalar types, wrapper dtypes or anything like that. PyTorch only.
"""
from collections import namedtuple
import torch
# defaults : mimic NumPy, allow user control
DefaultDTypes = namedtuple(
"DefaultDTypes", ["float_dtype", "complex_dtype", "int_dtype"]
)
# a global state
# We set it the first time we call default_dtypes() to avoid importing
# torch._dynamo.config and create a circular reference
_default_dtypes = None
def default_dtypes():
    """Return the global default dtypes, lazily initialized from dynamo config."""
    global _default_dtypes
    if _default_dtypes is None:
        # deferred import: see the comment on _default_dtypes above
        import torch._dynamo.config as config

        _default_dtypes = DefaultDTypes(
            float_dtype=getattr(torch, config.numpy_default_float),
            complex_dtype=getattr(torch, config.numpy_default_complex),
            int_dtype=getattr(torch, config.numpy_default_int),
        )
    # sanity: the config strings must name real torch dtypes
    assert isinstance(_default_dtypes.float_dtype, torch.dtype)
    assert isinstance(_default_dtypes.complex_dtype, torch.dtype)
    assert isinstance(_default_dtypes.int_dtype, torch.dtype)
    return _default_dtypes
def get_default_dtype_for(dtype):
    """Default scalar type given sctype category."""
    # bool has no configurable default; keep it as-is
    if dtype == torch.bool:
        return dtype
    defaults = default_dtypes()
    if dtype.is_complex:
        return defaults.complex_dtype
    if dtype.is_floating_point:
        return defaults.float_dtype
    # else, it must be (some) integer
    return defaults.int_dtype
from . import _casting_dicts as _cd
def can_cast_impl(from_torch_dtype, to_torch_dtype, casting):
    """Table lookup of the casting rule; `casting` keys the precomputed dict."""
    return _cd._can_cast_dict[casting][from_torch_dtype][to_torch_dtype]
def result_type_impl(*tensors):
    """Fold the promotion table over the dtypes of *tensors*, left to right."""
    # NB: torch dtypes here
    promoted = tensors[0].dtype
    for tensor in tensors[1:]:
        promoted = _cd._result_type_dict[promoted][tensor.dtype]
    return promoted
def python_type_for_torch(dtyp):
    """Get a python scalar type a torch dtype"""
    if dtyp == torch.bool:
        return bool
    if dtyp.is_complex:
        return complex
    if dtyp.is_floating_point:
        return float
    # everything else (the integer dtypes) maps to a python int
    return int
# ### NEP 50 helpers ###

# Python scalar types that take part in NEP 50 "weak" promotion.
_SCALAR_TYPES = (int, bool, float, complex)

# The same, plus torch's symbolic scalar stand-ins (seen under dynamo tracing).
_SCALAR_AND_SYMBOLIC_TYPES = (
    *_SCALAR_TYPES,
    torch.SymInt,
    torch.SymFloat,
    torch.SymBool,
)

# Functions which require both operands as tensors, so python scalars get
# materialized even when NEP 50 would otherwise leave them "weak"
# (cf. nep50_to_tensors below).
_NEP50_FUNCS_TENSOR_ONLY = (
    "minimum",
    "maximum",
    "logaddexp",
    "logaddexp2",
    "lcm",
    "gcd",
    "hypot",
    "heaviside",
    "fmod",
    "fmin",
    "fmax",
    "copysign",
    "arctan2",
)
def is_scalar(x):
    """True for plain python scalars (int, bool, float, complex)."""
    return isinstance(x, _SCALAR_TYPES)


def is_scalar_or_symbolic(x):
    """Like is_scalar, but also accepts torch.Sym{Int,Float,Bool}."""
    return isinstance(x, _SCALAR_AND_SYMBOLIC_TYPES)
def _dtype_for_scalar(py_type):
return {
bool: torch.bool,
torch.SymBool: torch.bool,
int: torch.int64,
torch.SymInt: torch.int64,
float: torch.float64,
torch.SymFloat: torch.float64,
complex: torch.complex128,
}[py_type]
def _dtype_for_scalar_or_tensor(x):
return x.dtype if isinstance(x, torch.Tensor) else _dtype_for_scalar(type(x))
def is_float_or_fp_tensor(x):
return _dtype_for_scalar_or_tensor(x).is_floating_point
def is_complex_or_complex_tensor(x):
return _dtype_for_scalar_or_tensor(x).is_complex
def _category(dtype):
return {
torch.bool: 0,
torch.SymBool: 0,
# int
torch.uint8: 1,
torch.int8: 1,
torch.int16: 1,
torch.int32: 1,
torch.int64: 1,
torch.SymInt: 1,
# float
torch.float16: 2,
torch.float32: 2,
torch.float64: 2,
torch.SymFloat: 2,
# complex
torch.complex64: 3,
torch.complex128: 3,
}[dtype]
def nep50_to_tensors(x1, x2, handle_weaks, function_name):
    """If either of inputs is a python scalar, type-promote with NEP 50."""

    def to_tensor(scalar, dtype=None):
        # materialize a python scalar as a 0-D tensor; dtype=None means
        # "use the default dtype for the scalar's category"
        if dtype is None:
            dtype = _dtype_for_scalar(type(scalar))
            dtype = get_default_dtype_for(dtype)
        return torch.as_tensor(scalar, dtype=dtype)

    x1_is_weak = not isinstance(x1, torch.Tensor)
    x2_is_weak = not isinstance(x2, torch.Tensor)
    if not handle_weaks or (x1_is_weak and x2_is_weak):
        # no NEP 50 handling requested, or scalar <op> scalar: just tensorize
        x1 = to_tensor(x1) if x1_is_weak else x1
        x2 = to_tensor(x2) if x2_is_weak else x2
        return x1, x2

    # scalar <op> tensor: NEP 50
    assert x1_is_weak != x2_is_weak

    weak, not_weak = (x1, x2) if x1_is_weak else (x2, x1)

    # find the dtype for the weak's type
    weak_dtype = _dtype_for_scalar(type(weak))

    cat_weak = _category(weak_dtype)
    cat_not_weak = _category(not_weak.dtype)

    # the tensor's dtype wins unless the scalar's category is strictly higher;
    # dt=None falls back to the default dtype inside to_tensor
    dt = not_weak.dtype if cat_weak <= cat_not_weak else None

    # special-case complex + float32
    if weak_dtype.is_complex and not_weak.dtype == torch.float32:
        dt = torch.complex64

    # detect overflows: in PyTorch, uint8(-1) wraps around to 255,
    # while NEP50 mandates an exception.
    #
    # Note that we only check if each element of the binop overflows,
    # not the result. Consider, e.g. `uint8(100) + 200`. Operands are OK
    # in uint8, but the result overflows and wrap around 255.
    # Numpy emits a RuntimeWarning, PyTorch does not, and we do not either.
    if cat_weak == 1 and cat_not_weak == 1:
        # integers
        iinfo = torch.iinfo(not_weak.dtype)
        if not (iinfo.min <= weak <= iinfo.max):
            raise OverflowError(
                f"Python integer {weak} out of bounds for {not_weak.dtype}"
            )
    if weak_dtype != dt or function_name in _NEP50_FUNCS_TENSOR_ONLY:
        # finally, can make `weak` into a 0D tensor, if both parameters are required to be tensor.
        weak = to_tensor(weak, dt)

    return (weak, not_weak) if x1_is_weak else (not_weak, weak)

View File

@ -0,0 +1,76 @@
# mypy: ignore-errors
import inspect
import itertools
from . import _funcs_impl, _reductions_impl
from ._normalizations import normalizer
# _funcs_impl.py contains functions which mimic NumPy's eponymous equivalents,
# and consume/return PyTorch tensors/dtypes.
# They are also type annotated.
# Pull these functions from _funcs_impl and decorate them with @normalizer, which
# - Converts any input `np.ndarray`, `torch._numpy.ndarray`, list of lists, Python scalars, etc into a `torch.Tensor`.
# - Maps NumPy dtypes to PyTorch dtypes
# - If the input to the `axis` kwarg is an ndarray, it maps it into a tuple
# - Implements the semantics for the `out=` arg
# - Wraps back the outputs into `torch._numpy.ndarrays`
def _public_functions(mod):
def is_public_function(f):
return inspect.isfunction(f) and not f.__name__.startswith("_")
return inspect.getmembers(mod, is_public_function)
# We fill in __all__ in the loop below
__all__ = []

# decorate implementer functions with argument normalizers and export to the top namespace
for name, func in itertools.chain(
    _public_functions(_funcs_impl), _public_functions(_reductions_impl)
):
    if name in ["percentile", "quantile", "median"]:
        # passed promote_scalar_result=True (presumably unwraps 0-D results
        # to python scalars; see the normalizer in _normalizations)
        decorated = normalizer(func, promote_scalar_result=True)
    elif name == "einsum":
        # normalized manually
        decorated = func
    else:
        decorated = normalizer(func)

    decorated.__qualname__ = name
    decorated.__name__ = name
    # module-level vars() is the module namespace: export under the same name
    vars()[name] = decorated
    __all__.append(name)
"""
Vendored objects from numpy.lib.index_tricks
"""
class IndexExpression:
    """
    Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
    last revision: 1999-7-23

    Cosmetic changes by T. Oliphant 2001
    """

    def __init__(self, maketuple):
        # when True, wrap a non-tuple index into a 1-tuple
        self.maketuple = maketuple

    def __getitem__(self, item):
        if not self.maketuple or isinstance(item, tuple):
            return item
        return (item,)


# np.index_exp[...] always yields a tuple; np.s_[...] returns the index as-is
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
__all__ += ["index_exp", "s_"]

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,15 @@
# mypy: ignore-errors
import torch
from . import _dtypes
def finfo(dtyp):
    """Machine limits of a floating-point dtype-like, via ``torch.finfo``."""
    return torch.finfo(_dtypes.dtype(dtyp).torch_dtype)


def iinfo(dtyp):
    """Machine limits of an integer dtype-like, via ``torch.iinfo``."""
    return torch.iinfo(_dtypes.dtype(dtyp).torch_dtype)

View File

@ -0,0 +1,592 @@
# mypy: ignore-errors
from __future__ import annotations
import builtins
import math
import operator
from typing import Sequence
import torch
from . import _dtypes, _dtypes_impl, _funcs, _ufuncs, _util
from ._normalizations import (
ArrayLike,
normalize_array_like,
normalizer,
NotImplementedType,
)
# NumPy's ``np.newaxis`` is just ``None``.
newaxis = None

# ndarray.flags names modelled here (cf. numpy.ndarray.flags)
FLAGS = [
    "C_CONTIGUOUS",
    "F_CONTIGUOUS",
    "OWNDATA",
    "WRITEABLE",
    "ALIGNED",
    "WRITEBACKIFCOPY",
    "FNC",
    "FORC",
    "BEHAVED",
    "CARRAY",
    "FARRAY",
]

# short flag spellings accepted by Flags.__getitem__
SHORTHAND_TO_FLAGS = {
    "C": "C_CONTIGUOUS",
    "F": "F_CONTIGUOUS",
    "O": "OWNDATA",
    "W": "WRITEABLE",
    "A": "ALIGNED",
    "X": "WRITEBACKIFCOPY",
    "B": "BEHAVED",
    "CA": "CARRAY",
    "FA": "FARRAY",
}
class Flags:
    """Read-only stand-in for ``numpy.ndarray.flags``.

    Only the flags present in the supplied mapping have values; looking up a
    known-but-unsupplied flag raises NotImplementedError, and modifying any
    flag is not implemented.
    """

    def __init__(self, flag_to_value: dict):
        assert all(k in FLAGS for k in flag_to_value.keys())  # sanity check
        self._flag_to_value = flag_to_value

    def __getattr__(self, attr: str):
        # allow lowercase attribute access: flags.writeable -> flags["WRITEABLE"]
        if attr.islower() and attr.upper() in FLAGS:
            return self[attr.upper()]
        else:
            raise AttributeError(f"No flag attribute '{attr}'")

    def __getitem__(self, key):
        if key in SHORTHAND_TO_FLAGS.keys():
            key = SHORTHAND_TO_FLAGS[key]
        if key in FLAGS:
            try:
                return self._flag_to_value[key]
            except KeyError as e:
                # flag name is valid but its value was not computed/supplied
                raise NotImplementedError(f"{key=}") from e
        else:
            raise KeyError(f"No flag key '{key}'")

    def __setattr__(self, attr, value):
        # route lowercase flag-name attributes through __setitem__;
        # everything else (e.g. _flag_to_value in __init__) is a normal attribute
        if attr.islower() and attr.upper() in FLAGS:
            self[attr.upper()] = value
        else:
            super().__setattr__(attr, value)

    def __setitem__(self, key, value):
        if key in FLAGS or key in SHORTHAND_TO_FLAGS.keys():
            raise NotImplementedError("Modifying flags is not implemented")
        else:
            raise KeyError(f"No flag key '{key}'")
def create_method(fn, name=None):
    """Wrap *fn* as a method-style callable whose qualname lives on ndarray."""
    if name is None:
        name = fn.__name__

    def method(*args, **kwargs):
        return fn(*args, **kwargs)

    method.__name__ = name
    method.__qualname__ = f"ndarray.{name}"
    return method
# Map ndarray.name_method -> np.name_func
# If name_func == None, it means that name_method == name_func
methods = {
"clip": None,
"nonzero": None,
"repeat": None,
"round": None,
"squeeze": None,
"swapaxes": None,
"ravel": None,
# linalg
"diagonal": None,
"dot": None,
"trace": None,
# sorting
"argsort": None,
"searchsorted": None,
# reductions
"argmax": None,
"argmin": None,
"any": None,
"all": None,
"max": None,
"min": None,
"ptp": None,
"sum": None,
"prod": None,
"mean": None,
"var": None,
"std": None,
# scans
"cumsum": None,
"cumprod": None,
# advanced indexing
"take": None,
"choose": None,
}
dunder = {
"abs": "absolute",
"invert": None,
"pos": "positive",
"neg": "negative",
"gt": "greater",
"lt": "less",
"ge": "greater_equal",
"le": "less_equal",
}
# dunder methods with right-looking and in-place variants
ri_dunder = {
"add": None,
"sub": "subtract",
"mul": "multiply",
"truediv": "divide",
"floordiv": "floor_divide",
"pow": "power",
"mod": "remainder",
"and": "bitwise_and",
"or": "bitwise_or",
"xor": "bitwise_xor",
"lshift": "left_shift",
"rshift": "right_shift",
"matmul": None,
}
def _upcast_int_indices(index):
if isinstance(index, torch.Tensor):
if index.dtype in (torch.int8, torch.int16, torch.int32, torch.uint8):
return index.to(torch.int64)
elif isinstance(index, tuple):
return tuple(_upcast_int_indices(i) for i in index)
return index
# Used to indicate that a parameter is unspecified (as opposed to explicitly
# `None`)
class _Unspecified:
    # sentinel class; the shared singleton lives in ``_Unspecified.unspecified``
    pass


_Unspecified.unspecified = _Unspecified()
###############################################################
#                        ndarray class                        #
###############################################################


class ndarray:
    """A NumPy-like ndarray backed by a single ``torch.Tensor``.

    The wrapped tensor is exposed as the ``.tensor`` attribute; properties
    and methods delegate to it.  Most NumPy methods and operator dunders are
    generated from the ``methods`` / ``dunder`` / ``ri_dunder`` tables above.
    """

    def __init__(self, t=None):
        if t is None:
            self.tensor = torch.Tensor()
        elif isinstance(t, torch.Tensor):
            self.tensor = t
        else:
            raise ValueError(
                "ndarray constructor is not recommended; prefer"
                "either array(...) or zeros/empty(...)"
            )

    # Register NumPy functions as methods
    for method, name in methods.items():
        fn = getattr(_funcs, name or method)
        vars()[method] = create_method(fn, method)

    # Regular methods but coming from ufuncs
    conj = create_method(_ufuncs.conjugate, "conj")
    conjugate = create_method(_ufuncs.conjugate)

    for method, name in dunder.items():
        fn = getattr(_ufuncs, name or method)
        method = f"__{method}__"
        vars()[method] = create_method(fn, method)

    for method, name in ri_dunder.items():
        fn = getattr(_ufuncs, name or method)
        plain = f"__{method}__"
        vars()[plain] = create_method(fn, plain)
        rvar = f"__r{method}__"
        # fn is bound via a default argument to avoid the late-binding-closure trap
        vars()[rvar] = create_method(lambda self, other, fn=fn: fn(other, self), rvar)
        ivar = f"__i{method}__"
        vars()[ivar] = create_method(
            lambda self, other, fn=fn: fn(self, other, out=self), ivar
        )

    # There's no __idivmod__
    __divmod__ = create_method(_ufuncs.divmod, "__divmod__")
    __rdivmod__ = create_method(
        lambda self, other: _ufuncs.divmod(other, self), "__rdivmod__"
    )

    # prevent loop variables leaking into the ndarray class namespace
    del ivar, rvar, name, plain, fn, method

    @property
    def shape(self):
        return tuple(self.tensor.shape)

    @property
    def size(self):
        return self.tensor.numel()

    @property
    def ndim(self):
        return self.tensor.ndim

    @property
    def dtype(self):
        return _dtypes.dtype(self.tensor.dtype)

    @property
    def strides(self):
        # torch strides are in elements; NumPy strides are in bytes
        elsize = self.tensor.element_size()
        return tuple(stride * elsize for stride in self.tensor.stride())

    @property
    def itemsize(self):
        return self.tensor.element_size()

    @property
    def flags(self):
        # Note contiguous in torch is assumed C-style
        return Flags(
            {
                "C_CONTIGUOUS": self.tensor.is_contiguous(),
                "F_CONTIGUOUS": self.T.tensor.is_contiguous(),
                "OWNDATA": self.tensor._base is None,
                "WRITEABLE": True,  # pytorch does not have readonly tensors
            }
        )

    @property
    def data(self):
        # raw data pointer, loosely analogous to np.ndarray.data
        return self.tensor.data_ptr()

    @property
    def nbytes(self):
        return self.tensor.storage().nbytes()

    @property
    def T(self):
        return self.transpose()

    @property
    def real(self):
        return _funcs.real(self)

    @real.setter
    def real(self, value):
        self.tensor.real = asarray(value).tensor

    @property
    def imag(self):
        return _funcs.imag(self)

    @imag.setter
    def imag(self, value):
        self.tensor.imag = asarray(value).tensor

    # ctors
    def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True):
        """Cast to `dtype`; only the default values of the other args are supported."""
        if order != "K":
            raise NotImplementedError(f"astype(..., order={order} is not implemented.")
        if casting != "unsafe":
            raise NotImplementedError(
                f"astype(..., casting={casting} is not implemented."
            )
        if not subok:
            raise NotImplementedError(f"astype(..., subok={subok} is not implemented.")
        if not copy:
            raise NotImplementedError(f"astype(..., copy={copy} is not implemented.")
        torch_dtype = _dtypes.dtype(dtype).torch_dtype
        t = self.tensor.to(torch_dtype)
        return ndarray(t)

    @normalizer
    def copy(self: ArrayLike, order: NotImplementedType = "C"):
        return self.clone()

    @normalizer
    def flatten(self: ArrayLike, order: NotImplementedType = "C"):
        return torch.flatten(self)

    def resize(self, *new_shape, refcheck=False):
        # NB: differs from np.resize: fills with zeros instead of making repeated copies of input.
        if refcheck:
            raise NotImplementedError(
                f"resize(..., refcheck={refcheck} is not implemented."
            )
        if new_shape in [(), (None,)]:
            return

        # support both x.resize((2, 2)) and x.resize(2, 2)
        if len(new_shape) == 1:
            new_shape = new_shape[0]
        if isinstance(new_shape, int):
            new_shape = (new_shape,)

        if builtins.any(x < 0 for x in new_shape):
            raise ValueError("all elements of `new_shape` must be non-negative")

        new_numel, old_numel = math.prod(new_shape), self.tensor.numel()

        self.tensor.resize_(new_shape)

        if new_numel >= old_numel:
            # zero-fill new elements
            assert self.tensor.is_contiguous()
            b = self.tensor.flatten()  # does not copy
            b[old_numel:].zero_()

    def view(self, dtype=_Unspecified.unspecified, type=_Unspecified.unspecified):
        """Reinterpret the underlying buffer as `dtype`, without copying."""
        if dtype is _Unspecified.unspecified:
            dtype = self.dtype
        if type is not _Unspecified.unspecified:
            raise NotImplementedError(f"view(..., type={type} is not implemented.")
        torch_dtype = _dtypes.dtype(dtype).torch_dtype
        tview = self.tensor.view(torch_dtype)
        return ndarray(tview)

    @normalizer
    def fill(self, value: ArrayLike):
        # Both Pytorch and NumPy accept 0D arrays/tensors and scalars, and
        # error out on D > 0 arrays
        self.tensor.fill_(value)

    def tolist(self):
        return self.tensor.tolist()

    def __iter__(self):
        # iterate along the first axis, wrapping each slice
        return (ndarray(x) for x in self.tensor.__iter__())

    def __str__(self):
        return (
            str(self.tensor)
            .replace("tensor", "torch.ndarray")
            .replace("dtype=torch.", "dtype=")
        )

    __repr__ = create_method(__str__)

    def __eq__(self, other):
        try:
            return _ufuncs.equal(self, other)
        except (RuntimeError, TypeError):
            # Failed to convert other to array: definitely not equal.
            falsy = torch.full(self.shape, fill_value=False, dtype=bool)
            return asarray(falsy)

    def __ne__(self, other):
        return ~(self == other)

    def __index__(self):
        try:
            return operator.index(self.tensor.item())
        except Exception as exc:
            raise TypeError(
                "only integer scalar arrays can be converted to a scalar index"
            ) from exc

    def __bool__(self):
        return bool(self.tensor)

    def __int__(self):
        return int(self.tensor)

    def __float__(self):
        return float(self.tensor)

    def __complex__(self):
        return complex(self.tensor)

    def is_integer(self):
        # True when the wrapped value is a single element equal to an int;
        # False otherwise (including non-scalars, where .item() raises)
        try:
            v = self.tensor.item()
            result = int(v) == v
        except Exception:
            result = False
        return result

    def __len__(self):
        return self.tensor.shape[0]

    def __contains__(self, x):
        return self.tensor.__contains__(x)

    def transpose(self, *axes):
        # np.transpose(arr, axis=None) but arr.transpose(*axes)
        return _funcs.transpose(self, axes)

    def reshape(self, *shape, order="C"):
        # arr.reshape(shape) and arr.reshape(*shape)
        return _funcs.reshape(self, shape, order=order)

    def sort(self, axis=-1, kind=None, order=None):
        # ndarray.sort works in-place
        _funcs.copyto(self, _funcs.sort(self, axis, kind, order))

    def item(self, *args):
        # Mimic NumPy's implementation with three special cases (no arguments,
        # a flat index and a multi-index):
        # https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/methods.c#L702
        if args == ():
            return self.tensor.item()
        elif len(args) == 1:
            # int argument
            return self.ravel()[args[0]]
        else:
            return self.__getitem__(args)

    def __getitem__(self, index):
        tensor = self.tensor

        def neg_step(i, s):
            # torch does not support negative slice steps: emulate them by
            # flipping dimension i and rewriting the slice bounds
            if not (isinstance(s, slice) and s.step is not None and s.step < 0):
                return s

            nonlocal tensor
            tensor = torch.flip(tensor, (i,))

            # Account for the fact that a slice includes the start but not the end
            assert isinstance(s.start, int) or s.start is None
            assert isinstance(s.stop, int) or s.stop is None
            start = s.stop + 1 if s.stop else None
            stop = s.start + 1 if s.start else None

            return slice(start, stop, -s.step)

        if isinstance(index, Sequence):
            index = type(index)(neg_step(i, s) for i, s in enumerate(index))
        else:
            index = neg_step(0, index)
        index = _util.ndarrays_to_tensors(index)
        index = _upcast_int_indices(index)
        return ndarray(tensor.__getitem__(index))

    def __setitem__(self, index, value):
        index = _util.ndarrays_to_tensors(index)
        index = _upcast_int_indices(index)

        if not _dtypes_impl.is_scalar(value):
            # tensorize non-scalar values and match the destination dtype
            value = normalize_array_like(value)
            value = _util.cast_if_needed(value, self.tensor.dtype)

        return self.tensor.__setitem__(index, value)

    take = _funcs.take
    put = _funcs.put

    def __dlpack__(self, *, stream=None):
        return self.tensor.__dlpack__(stream=stream)

    def __dlpack_device__(self):
        return self.tensor.__dlpack_device__()
def _tolist(obj):
    """Recursively convert tensors into lists."""
    converted = []
    for item in obj:
        if isinstance(item, (list, tuple)):
            item = _tolist(item)
        if isinstance(item, ndarray):
            converted.append(item.tensor.tolist())
        else:
            converted.append(item)
    return converted
# This is ideally the only place which talks to ndarray directly.
# The rest goes through asarray (preferred) or array.
def array(obj, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=None):
    """NumPy-like ``np.array``: coerce `obj` into a tensor-backed ndarray."""
    if subok is not False:
        raise NotImplementedError("'subok' parameter is not supported.")
    if like is not None:
        raise NotImplementedError("'like' parameter is not supported.")
    if order != "K":
        raise NotImplementedError

    # a happy path: return the input unchanged when no conversion is needed
    if (
        isinstance(obj, ndarray)
        and copy is False
        and dtype is None
        and ndmin <= obj.ndim
    ):
        return obj

    if isinstance(obj, (list, tuple)):
        # FIXME and they have the same dtype, device, etc
        if obj and all(isinstance(x, torch.Tensor) for x in obj):
            # list of arrays: *under torch.Dynamo* these are FakeTensors
            obj = torch.stack(obj)
        else:
            # XXX: remove tolist
            # lists of ndarrays: [1, [2, 3], ndarray(4)] convert to lists of lists
            obj = _tolist(obj)

    # is obj an ndarray already?
    if isinstance(obj, ndarray):
        obj = obj.tensor

    # is a specific dtype requested?
    torch_dtype = None
    if dtype is not None:
        torch_dtype = _dtypes.dtype(dtype).torch_dtype

    tensor = _util._coerce_to_tensor(obj, torch_dtype, copy, ndmin)
    return ndarray(tensor)
def asarray(a, dtype=None, order="K", *, like=None):
    """Like `array`, but avoids copying when `a` is already a matching ndarray."""
    return array(a, dtype=dtype, order=order, like=like, copy=False, ndmin=0)


def ascontiguousarray(a, dtype=None, *, like=None):
    """Return an ndarray whose tensor is C-contiguous, copying only if needed."""
    arr = asarray(a, dtype=dtype, like=like)
    if not arr.tensor.is_contiguous():
        arr.tensor = arr.tensor.contiguous()
    return arr


def from_dlpack(x, /):
    """Wrap a DLPack-capable object as an ndarray via ``torch.from_dlpack``."""
    t = torch.from_dlpack(x)
    return ndarray(t)
def _extract_dtype(entry):
    # accept either a dtype-like or an array-like (taking its dtype)
    try:
        dty = _dtypes.dtype(entry)
    except Exception:
        dty = asarray(entry).dtype
    return dty


def can_cast(from_, to, casting="safe"):
    """NumPy-like ``np.can_cast`` over dtype-likes or array-likes."""
    from_ = _extract_dtype(from_)
    to_ = _extract_dtype(to)

    return _dtypes_impl.can_cast_impl(from_.torch_dtype, to_.torch_dtype, casting)


def result_type(*arrays_and_dtypes):
    """NumPy-like ``np.result_type`` over a mix of array-likes and dtype-likes."""
    tensors = []
    for entry in arrays_and_dtypes:
        try:
            t = asarray(entry).tensor
        except (RuntimeError, ValueError, TypeError):
            # not array-like: treat as dtype-like and probe with a 1-element tensor
            dty = _dtypes.dtype(entry)
            t = torch.empty(1, dtype=dty.torch_dtype)
        tensors.append(t)

    torch_dtype = _dtypes_impl.result_type_impl(*tensors)
    return _dtypes.dtype(torch_dtype)

View File

@ -0,0 +1,259 @@
# mypy: ignore-errors
""" "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on.
"""
from __future__ import annotations
import functools
import inspect
import operator
import typing
import torch
from . import _dtypes, _dtypes_impl, _util
# Annotation markers. These are TypeVars only so that they are legal in
# annotations; `normalizer` below dispatches on their *string* names to pick
# the matching normalization function from the `normalizers` dict.
ArrayLike = typing.TypeVar("ArrayLike")
Scalar = typing.Union[int, float, complex, bool]
ArrayLikeOrScalar = typing.Union[ArrayLike, Scalar]
DTypeLike = typing.TypeVar("DTypeLike")
AxisLike = typing.TypeVar("AxisLike")
NDArray = typing.TypeVar("NDArray")
CastingModes = typing.TypeVar("CastingModes")
KeepDims = typing.TypeVar("KeepDims")
# OutArray is to annotate the out= array argument.
#
# This one is special is several respects:
# First, It needs to be an NDArray, and we need to preserve the `result is out`
# semantics. Therefore, we cannot just extract the Tensor from the out array.
# So we never pass the out array to implementer functions and handle it in the
# `normalizer` below.
# Second, the out= argument can be either keyword or positional argument, and
# as a positional arg, it can be anywhere in the signature.
# To handle all this, we define a special `OutArray` annotation and dispatch on it.
#
OutArray = typing.TypeVar("OutArray")
# typing.NotImplementedType only exists on Python >= 3.10; synthesize it otherwise.
try:
    from typing import NotImplementedType
except ImportError:
    NotImplementedType = typing.TypeVar("NotImplementedType")
def normalize_array_like(x, parm=None):
    """Coerce an array-like argument into a bare torch.Tensor."""
    from ._ndarray import asarray

    return asarray(x).tensor
def normalize_array_like_or_scalar(x, parm=None):
    """Pass scalars/symbolics through untouched; coerce everything else to a tensor."""
    if _dtypes_impl.is_scalar_or_symbolic(x):
        return x
    return normalize_array_like(x, parm)
def normalize_optional_array_like_or_scalar(x, parm=None):
    """None passes through; otherwise behave like normalize_array_like_or_scalar."""
    return None if x is None else normalize_array_like_or_scalar(x, parm)
def normalize_optional_array_like(x, parm=None):
    # This explicit normalizer is needed because otherwise normalize_array_like
    # does not run for a parameter annotated as Optional[ArrayLike]
    if x is None:
        return None
    return normalize_array_like(x, parm)
def normalize_seq_array_like(x, parm=None):
    """Normalize each element of a sequence of array-likes to a tensor."""
    return tuple(map(normalize_array_like, x))
def normalize_dtype(dtype, parm=None):
    # cf _decorators.dtype_to_torch
    if dtype is None:
        return None
    return _dtypes.dtype(dtype).torch_dtype
def normalize_not_implemented(arg, parm):
    """Reject any value other than the parameter's declared default."""
    if arg == parm.default:
        return None
    raise NotImplementedError(f"'{parm.name}' parameter is not supported.")
def normalize_axis_like(arg, parm=None):
    """Allow a 0-d ndarray to stand in for an integer axis specifier."""
    from ._ndarray import ndarray

    return operator.index(arg) if isinstance(arg, ndarray) else arg
def normalize_ndarray(arg, parm=None):
    """Check `arg` is an ndarray (or None) and unwrap it to its backing tensor."""
    if arg is None:
        return None

    from ._ndarray import ndarray

    if isinstance(arg, ndarray):
        return arg.tensor
    raise TypeError(f"'{parm.name}' must be an array")
def normalize_outarray(arg, parm=None):
    """Like normalize_ndarray but keep the wrapper ndarray (needed for out= handling)."""
    if arg is None:
        return None

    from ._ndarray import ndarray

    # Dynamo can pass torch tensors as out arguments,
    # wrap it in an ndarray before processing
    if isinstance(arg, torch.Tensor):
        arg = ndarray(arg)
    if isinstance(arg, ndarray):
        return arg
    raise TypeError(f"'{parm.name}' must be an array")
def normalize_casting(arg, parm=None):
    """Validate a numpy casting-mode string and return it unchanged."""
    valid_modes = ("no", "equiv", "safe", "same_kind", "unsafe")
    if arg in valid_modes:
        return arg
    raise ValueError(
        f"casting must be one of 'no', 'equiv', 'safe', 'same_kind', or 'unsafe' (got '{arg}')"
    )
# Registry mapping a parameter's *string* annotation to its normalization
# function (annotations are strings here because of
# `from __future__ import annotations` at the top of this module).
normalizers = {
    "ArrayLike": normalize_array_like,
    "ArrayLikeOrScalar": normalize_array_like_or_scalar,
    "Optional[ArrayLike]": normalize_optional_array_like,
    "Sequence[ArrayLike]": normalize_seq_array_like,
    "Optional[ArrayLikeOrScalar]": normalize_optional_array_like_or_scalar,
    "Optional[NDArray]": normalize_ndarray,
    "Optional[OutArray]": normalize_outarray,
    "NDArray": normalize_ndarray,
    "Optional[DTypeLike]": normalize_dtype,
    "AxisLike": normalize_axis_like,
    "NotImplementedType": normalize_not_implemented,
    "Optional[CastingModes]": normalize_casting,
}
def maybe_normalize(arg, parm):
    """Normalize arg if a normalizer is registered for its annotation; else pass through."""
    normalizer = normalizers.get(parm.annotation)
    if normalizer is None:
        return arg
    return normalizer(arg, parm)
# ### Return value helpers ###
def maybe_copy_to(out, result, promote_scalar_result=False):
    # NB: here out is either an ndarray or None
    if out is None:
        return result
    if isinstance(result, torch.Tensor):
        if result.shape != out.shape:
            # a 1-element result may still fit into a 0-d out array
            can_fit = result.numel() == 1 and out.ndim == 0
            if not (promote_scalar_result and can_fit):
                raise ValueError(
                    f"Bad size of the out array: out.shape = {out.shape}"
                    f" while result.shape = {result.shape}."
                )
            result = result.squeeze()
        out.tensor.copy_(result)
        return out
    if isinstance(result, (tuple, list)):
        # multiple outputs: copy elementwise, preserving the container type
        return type(result)(
            maybe_copy_to(o, r, promote_scalar_result) for o, r in zip(out, result)
        )
    raise AssertionError  # We should never hit this path
def wrap_tensors(result):
    """Recursively wrap tensors (possibly inside tuples/lists) into ndarrays."""
    from ._ndarray import ndarray

    if isinstance(result, torch.Tensor):
        return ndarray(result)
    if isinstance(result, (tuple, list)):
        return type(result)(wrap_tensors(item) for item in result)
    return result
def array_or_scalar(values, py_type=float, return_scalar=False):
    """Return either a python scalar extracted from `values` or a wrapping ndarray."""
    if not return_scalar:
        from ._ndarray import ndarray

        return ndarray(values)
    return py_type(values.item())
# ### The main decorator to normalize arguments / postprocess the output ###
def normalizer(_func=None, *, promote_scalar_result=False):
    """The main decorator of this module.

    Normalizes the decorated function's arguments according to their
    annotations (via `maybe_normalize`), calls the implementation, then
    post-processes the result: applies keepdims, copies into an `out=` array
    when given, and wraps bare tensors into ndarrays.

    Usable both as `@normalizer` and `@normalizer(promote_scalar_result=...)`.
    """

    def normalizer_inner(func):
        # The signature of `func` never changes, and `inspect.signature` is not
        # cheap -- compute it once per decorated function, not on every call.
        sig = inspect.signature(func)
        params = sig.parameters
        first_param = next(iter(params.values()))

        @functools.wraps(func)
        def wrapped(*args, **kwds):
            # NumPy's API does not have positional args before variadic positional args
            if first_param.kind == inspect.Parameter.VAR_POSITIONAL:
                args = [maybe_normalize(arg, first_param) for arg in args]
            else:
                # NB: extra unknown arguments: pass through, will raise in func(*args) below
                args = (
                    tuple(
                        maybe_normalize(arg, parm)
                        for arg, parm in zip(args, params.values())
                    )
                    + args[len(params.values()) :]
                )

            kwds = {
                name: maybe_normalize(arg, params[name]) if name in params else arg
                for name, arg in kwds.items()
            }

            result = func(*args, **kwds)

            # keepdims
            bound_args = None
            if "keepdims" in params and params["keepdims"].annotation == "KeepDims":
                # keepdims can be in any position so we need sig.bind
                bound_args = sig.bind(*args, **kwds).arguments
                if bound_args.get("keepdims", False):
                    # In this case the first arg is the initial tensor and
                    # the second arg is (optionally) the axis
                    tensor = args[0]
                    axis = bound_args.get("axis")
                    result = _util.apply_keepdims(result, axis, tensor.ndim)

            # out
            if "out" in params:
                # out can be in any position so we need sig.bind
                if bound_args is None:
                    bound_args = sig.bind(*args, **kwds).arguments
                out = bound_args.get("out")
                result = maybe_copy_to(out, result, promote_scalar_result)
            result = wrap_tensors(result)

            return result

        return wrapped

    if _func is None:
        return normalizer_inner
    else:
        return normalizer_inner(_func)

View File

@ -0,0 +1,459 @@
# mypy: ignore-errors
""" Implementation of reduction operations, to be wrapped into arrays, dtypes etc
in the 'public' layer.
Anything here only deals with torch objects, e.g. "dtype" is a torch.dtype instance etc
"""
from __future__ import annotations
import functools
from typing import Optional, TYPE_CHECKING
import torch
from . import _dtypes_impl, _util
if TYPE_CHECKING:
from ._normalizations import (
ArrayLike,
AxisLike,
DTypeLike,
KeepDims,
NotImplementedType,
OutArray,
)
def _deco_axis_expand(func):
    """
    Generically handle axis arguments in reductions.
    axis is *always* the 2nd arg in the function so no need to have a look at its signature
    """

    @functools.wraps(func)
    def wrapped(a, axis=None, *args, **kwds):
        if axis is not None:
            axis = _util.normalize_axis_tuple(axis, a.ndim)
            if axis == ():
                # Empty axis tuple: insert a length-one axis and reduce along it.
                # (We cannot simply return a.clone(), as that would sidestep the
                # checks inside the function.)
                a = a.reshape(_util.expand_shape(a.shape, axis=0))
                axis = (0,)
        return func(a, axis, *args, **kwds)

    return wrapped
def _atleast_float(dtype, other_dtype):
"""Return a dtype that is real or complex floating-point.
For inputs that are boolean or integer dtypes, this returns the default
float dtype; inputs that are complex get converted to the default complex
dtype; real floating-point dtypes (`float*`) get passed through unchanged
"""
if dtype is None:
dtype = other_dtype
if not (dtype.is_floating_point or dtype.is_complex):
return _dtypes_impl.default_dtypes().float_dtype
return dtype
@_deco_axis_expand
def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims: KeepDims = False):
    """Count the nonzero elements, optionally along `axis`."""
    return torch.count_nonzero(a, dim=axis)
@_deco_axis_expand
def argmax(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    *,
    keepdims: KeepDims = False,
):
    """Index of a maximum along `axis` (flattened index when axis is None)."""
    if a.is_complex():
        raise NotImplementedError(f"argmax with dtype={a.dtype}.")
    axis = _util.allow_only_single_axis(axis)
    if a.dtype == torch.bool:
        # RuntimeError: "argmax_cpu" not implemented for 'Bool'
        a = a.to(torch.uint8)
    return torch.argmax(a, dim=axis)
@_deco_axis_expand
def argmin(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    *,
    keepdims: KeepDims = False,
):
    """Index of a minimum along `axis` (flattened index when axis is None)."""
    if a.is_complex():
        raise NotImplementedError(f"argmin with dtype={a.dtype}.")
    axis = _util.allow_only_single_axis(axis)
    if a.dtype == torch.bool:
        # RuntimeError: "argmin_cpu" not implemented for 'Bool'
        a = a.to(torch.uint8)
    return torch.argmin(a, dim=axis)
@_deco_axis_expand
def any(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """Whether any element evaluates to True, optionally per axis."""
    axis = _util.allow_only_single_axis(axis)
    if axis is None:
        return torch.any(a)
    return torch.any(a, dim=axis)
@_deco_axis_expand
def all(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """Whether all elements evaluate to True, optionally per axis."""
    axis = _util.allow_only_single_axis(axis)
    if axis is None:
        return torch.all(a)
    return torch.all(a, dim=axis)
@_deco_axis_expand
def amax(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    """Maximum along `axis`; complex inputs are not supported."""
    if not a.is_complex():
        return a.amax(axis)
    raise NotImplementedError(f"amax with dtype={a.dtype}")


max = amax
@_deco_axis_expand
def amin(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    """Minimum along `axis`; complex inputs are not supported."""
    if not a.is_complex():
        return a.amin(axis)
    raise NotImplementedError(f"amin with dtype={a.dtype}")


min = amin
@_deco_axis_expand
def ptp(
    a: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
):
    """Peak-to-peak range: amax - amin along `axis`."""
    highest = a.amax(axis)
    lowest = a.amin(axis)
    return highest - lowest
@_deco_axis_expand
def sum(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    """Sum along `axis`; a bool accumulator dtype is promoted to the default int."""
    assert dtype is None or isinstance(dtype, torch.dtype)

    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype

    if axis is None:
        return a.sum(dtype=dtype)
    return a.sum(dim=axis, dtype=dtype)
@_deco_axis_expand
def prod(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    initial: NotImplementedType = None,
    where: NotImplementedType = None,
):
    """Product along `axis`; a bool accumulator dtype is promoted to the default int."""
    axis = _util.allow_only_single_axis(axis)

    if dtype == torch.bool:
        dtype = _dtypes_impl.default_dtypes().int_dtype

    if axis is None:
        return a.prod(dtype=dtype)
    return a.prod(dim=axis, dtype=dtype)


product = prod
@_deco_axis_expand
def mean(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """Arithmetic mean along `axis`, accumulated in a floating-point dtype."""
    work_dtype = _atleast_float(dtype, a.dtype)
    if axis is None:
        return a.mean(dtype=work_dtype)
    return a.mean(dim=axis, dtype=work_dtype)
@_deco_axis_expand
def std(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    ddof=0,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """Standard deviation along `axis` with `ddof` delta degrees of freedom."""
    requested_dtype = dtype
    work_dtype = _atleast_float(dtype, a.dtype)
    work = _util.cast_if_needed(a, work_dtype)
    # cast back to the explicitly requested dtype, if any
    return _util.cast_if_needed(work.std(dim=axis, correction=ddof), requested_dtype)
@_deco_axis_expand
def var(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
    ddof=0,
    keepdims: KeepDims = False,
    *,
    where: NotImplementedType = None,
):
    """Variance along `axis` with `ddof` delta degrees of freedom."""
    requested_dtype = dtype
    work_dtype = _atleast_float(dtype, a.dtype)
    work = _util.cast_if_needed(a, work_dtype)
    # cast back to the explicitly requested dtype, if any
    return _util.cast_if_needed(work.var(dim=axis, correction=ddof), requested_dtype)
# cumsum / cumprod are almost reductions:
# 1. no keepdims
# 2. axis=None flattens
def cumsum(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
):
    """Cumulative sum; axis=None operates on the flattened array."""
    if dtype == torch.bool:
        # booleans accumulate as integers, as in numpy
        dtype = _dtypes_impl.default_dtypes().int_dtype
    elif dtype is None:
        dtype = a.dtype

    (a,), axis = _util.axis_none_flatten(a, axis=axis)
    axis = _util.normalize_axis_index(axis, a.ndim)

    return a.cumsum(axis=axis, dtype=dtype)
def cumprod(
    a: ArrayLike,
    axis: AxisLike = None,
    dtype: Optional[DTypeLike] = None,
    out: Optional[OutArray] = None,
):
    """Cumulative product; axis=None operates on the flattened array."""
    if dtype == torch.bool:
        # booleans accumulate as integers, as in numpy
        dtype = _dtypes_impl.default_dtypes().int_dtype
    elif dtype is None:
        dtype = a.dtype

    (a,), axis = _util.axis_none_flatten(a, axis=axis)
    axis = _util.normalize_axis_index(axis, a.ndim)

    return a.cumprod(axis=axis, dtype=dtype)


cumproduct = cumprod
def average(
    a: ArrayLike,
    axis=None,
    weights: ArrayLike = None,
    returned=False,
    *,
    keepdims=False,
):
    """Weighted average along `axis`; with returned=True, also return the weight sum."""
    if weights is None:
        result = mean(a, axis=axis)
        # the "weight sum" of an unweighted mean is just the element count per slot
        wsum = torch.as_tensor(a.numel() / result.numel(), dtype=result.dtype)
    else:
        if not a.dtype.is_floating_point:
            a = a.double()

        # axis & weights
        if a.shape != weights.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights differ."
                )
            if weights.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ."
                )
            if weights.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis."
                )

            # setup weight to broadcast along axis
            weights = torch.broadcast_to(weights, (a.ndim - 1) * (1,) + weights.shape)
            weights = weights.swapaxes(-1, axis)

        # do the work
        result_dtype = _dtypes_impl.result_type_impl(a, weights)
        numerator = sum(a * weights, axis, dtype=result_dtype)
        wsum = sum(weights, axis, dtype=result_dtype)
        result = numerator / wsum

    # We process keepdims manually because the decorator does not deal with variadic returns
    if keepdims:
        result = _util.apply_keepdims(result, axis, a.ndim)

    if returned:
        if wsum.shape != result.shape:
            wsum = torch.broadcast_to(wsum, result.shape).clone()
        return result, wsum
    else:
        return result
# Not using deco_axis_expand as it assumes that axis is the second arg
def quantile(
    a: ArrayLike,
    q: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    method="linear",
    keepdims: KeepDims = False,
    *,
    interpolation: NotImplementedType = None,
):
    """Compute the q-th quantile(s) of `a` along `axis`, with q in [0, 1]."""
    if overwrite_input:
        # raise NotImplementedError("overwrite_input in quantile not implemented.")
        # NumPy documents that `overwrite_input` MAY modify inputs:
        # https://numpy.org/doc/stable/reference/generated/numpy.percentile.html#numpy-percentile
        # Here we choose to work out-of-place because why not.
        pass

    if not a.dtype.is_floating_point:
        # non-float inputs are computed in the default float dtype
        dtype = _dtypes_impl.default_dtypes().float_dtype
        a = a.to(dtype)

    # edge case: torch.quantile only supports float32 and float64
    if a.dtype == torch.float16:
        a = a.to(torch.float32)

    if axis is None:
        a = a.flatten()
        q = q.flatten()
        axis = (0,)
    else:
        axis = _util.normalize_axis_tuple(axis, a.ndim)

    # FIXME(Mario) Doesn't np.quantile accept a tuple?
    # torch.quantile does accept a number. If we don't want to implement the tuple behaviour
    # (it's deffo low prio) change `normalize_axis_tuple` into a normalize_axis index above.
    axis = _util.allow_only_single_axis(axis)

    q = _util.cast_if_needed(q, a.dtype)

    return torch.quantile(a, q, axis=axis, interpolation=method)
def percentile(
    a: ArrayLike,
    q: ArrayLike,
    axis: AxisLike = None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    method="linear",
    keepdims: KeepDims = False,
    *,
    interpolation: NotImplementedType = None,
):
    """Percentile: the quantile at q / 100."""
    # np.percentile(float_tensor, 30) : q.dtype is int64 => q / 100.0 is float32
    if _dtypes_impl.python_type_for_torch(q.dtype) == int:
        q = q.to(_dtypes_impl.default_dtypes().float_dtype)
    return quantile(
        a,
        q / 100.0,
        axis=axis,
        overwrite_input=overwrite_input,
        method=method,
        keepdims=keepdims,
        interpolation=interpolation,
    )
def median(
    a: ArrayLike,
    axis=None,
    out: Optional[OutArray] = None,
    overwrite_input=False,
    keepdims: KeepDims = False,
):
    """Median: the 0.5 quantile along `axis`."""
    half = torch.as_tensor(0.5)
    return quantile(
        a,
        half,
        axis=axis,
        overwrite_input=overwrite_input,
        out=out,
        keepdims=keepdims,
    )

View File

@ -0,0 +1,334 @@
# mypy: ignore-errors
from __future__ import annotations
from typing import Optional
import torch
from . import _binary_ufuncs_impl, _dtypes_impl, _unary_ufuncs_impl, _util
from ._normalizations import (
ArrayLike,
ArrayLikeOrScalar,
CastingModes,
DTypeLike,
normalizer,
NotImplementedType,
OutArray,
)
def _ufunc_postprocess(result, out, casting):
    """Cast and broadcast `result` to match an `out=` array, when one was given."""
    if out is None:
        return result
    result = _util.typecast_tensor(result, out.dtype.torch_dtype, casting)
    return torch.broadcast_to(result, out.shape)
# ############# Binary ufuncs ######################
# All public names from the implementation module become binary ufuncs here,
# except the ones with special signatures/handling (matmul, divmod, ldexp).
_binary = [
    name
    for name in dir(_binary_ufuncs_impl)
    if not name.startswith("_") and name not in ["torch", "matmul", "divmod", "ldexp"]
]


# Binary ufuncs for which tensor-scalar operands follow NEP 50 weak-scalar
# promotion (see `nep50_to_tensors` in `deco_binary_ufunc` below).
NEP50_FUNCS = (
    "add",
    "subtract",
    "multiply",
    "floor_divide",
    "true_divide",
    "divide",
    "remainder",
    "bitwise_and",
    "bitwise_or",
    "bitwise_xor",
    "bitwise_left_shift",
    "bitwise_right_shift",
    "hypot",
    "arctan2",
    "logaddexp",
    "logaddexp2",
    "heaviside",
    "copysign",
    "fmax",
    "minimum",
    "fmin",
    "maximum",
    "fmod",
    "gcd",
    "lcm",
    "pow",
)
def deco_binary_ufunc(torch_func):
    """Common infra for binary ufuncs.
    Normalize arguments, sort out type casting, broadcasting and delegate to
    the pytorch functions for the actual work.
    """

    @normalizer
    def wrapped(
        x1: ArrayLikeOrScalar,
        x2: ArrayLikeOrScalar,
        /,
        out: Optional[OutArray] = None,
        *,
        where: NotImplementedType = True,
        casting: Optional[CastingModes] = "same_kind",
        order: NotImplementedType = "K",
        dtype: Optional[DTypeLike] = None,
        subok: NotImplementedType = False,
        signature: NotImplementedType = None,
        extobj: NotImplementedType = None,
    ):
        if dtype is not None:
            # explicit dtype= : cast both operands to it up front
            def cast(x, dtype):
                if isinstance(x, torch.Tensor):
                    return _util.typecast_tensor(x, dtype, casting)
                else:
                    return torch.as_tensor(x, dtype=dtype)

            x1 = cast(x1, dtype)
            x2 = cast(x2, dtype)
        elif isinstance(x1, torch.Tensor) and isinstance(x2, torch.Tensor):
            # two tensors: apply the usual promotion rules
            dtype = _dtypes_impl.result_type_impl(x1, x2)
            x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)
        else:
            # tensor/scalar mix: NEP 50 weak-scalar promotion for listed funcs
            x1, x2 = _dtypes_impl.nep50_to_tensors(
                x1, x2, torch_func.__name__ in NEP50_FUNCS, torch_func.__name__
            )

        result = torch_func(x1, x2)

        return _ufunc_postprocess(result, out, casting)

    # expose the ufunc under its numpy-facing name
    wrapped.__qualname__ = torch_func.__name__
    wrapped.__name__ = torch_func.__name__

    return wrapped
# matmul's signature is _slightly_ different from other ufuncs:
# - no where=...
# - additional axis=..., axes=...
# - no NEP50 scalars in or out
@normalizer
def matmul(
    x1: ArrayLike,
    x2: ArrayLike,
    /,
    out: Optional[OutArray] = None,
    *,
    casting: Optional[CastingModes] = "same_kind",
    order: NotImplementedType = "K",
    dtype: Optional[DTypeLike] = None,
    subok: NotImplementedType = False,
    signature: NotImplementedType = None,
    extobj: NotImplementedType = None,
    axes: NotImplementedType = None,
    axis: NotImplementedType = None,
):
    """np.matmul analog: supports out= and dtype=, but not axes=/axis=/where=."""
    target_dtype = _dtypes_impl.result_type_impl(x1, x2) if dtype is None else dtype
    x1, x2 = _util.typecast_tensors((x1, x2), target_dtype, casting)
    product = _binary_ufuncs_impl.matmul(x1, x2)
    return _ufunc_postprocess(product, out, casting)
# ldexp casting is special : the dtype of the result == dtype of the 1st arg
@normalizer
def ldexp(
    x1: ArrayLikeOrScalar,
    x2: ArrayLikeOrScalar,
    /,
    out: Optional[OutArray] = None,
    *,
    where: NotImplementedType = True,
    casting: Optional[CastingModes] = "same_kind",
    order: NotImplementedType = "K",
    dtype: Optional[DTypeLike] = None,
    subok: NotImplementedType = False,
    signature: NotImplementedType = None,
    extobj: NotImplementedType = None,
):
    """ldexp's casting is special: the result dtype follows the first argument."""
    if dtype is not None:
        # explicit dtype= applies to x1 only, since x1 determines the result dtype
        if isinstance(x1, torch.Tensor):
            x1 = _util.typecast_tensor(x1, dtype, casting)
        else:
            x1 = torch.as_tensor(x1, dtype=dtype)
    else:
        if not isinstance(x1, torch.Tensor):
            x1 = torch.as_tensor(x1)
            x1 = _util.cast_int_to_float(x1)

    x2 = torch.as_tensor(x2)
    # the second arg must be integer
    if _dtypes_impl._category(x2.dtype) != 1:
        raise ValueError("ldexp 2nd arg must be integer")

    result = _binary_ufuncs_impl.ldexp(x1, x2)

    if x1.dtype == torch.float16:
        # torch.ldexp(f16, int) -> f32, undo it
        result = result.to(torch.float16)

    return _ufunc_postprocess(result, out, casting)
# nin=2, nout=2
@normalizer
def divmod(
    x1: ArrayLike,
    x2: ArrayLike,
    out1: Optional[OutArray] = None,
    out2: Optional[OutArray] = None,
    /,
    out: tuple[Optional[OutArray], Optional[OutArray]] = (None, None),
    *,
    where: NotImplementedType = True,
    casting: Optional[CastingModes] = "same_kind",
    order: NotImplementedType = "K",
    dtype: Optional[DTypeLike] = None,
    subok: NotImplementedType = False,
    signature: NotImplementedType = None,
    extobj: NotImplementedType = None,
):
    """Elementwise divmod: (quotient, remainder), delegating to the torch-level impl."""
    # make sure we either have no out arrays at all, or there is either
    # out1, out2, or out=tuple, but not both
    num_outs = sum(x is not None for x in [out1, out2])
    if num_outs == 1:
        raise ValueError("both out1 and out2 need to be provided")
    elif num_outs == 2:
        o1, o2 = out
        if o1 is not None or o2 is not None:
            raise TypeError(
                "cannot specify 'out' as both a positional and keyword argument"
            )
    else:
        out1, out2 = out

    if dtype is None:
        dtype = _dtypes_impl.result_type_impl(x1, x2)
    x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)

    quot, rem = _binary_ufuncs_impl.divmod(x1, x2)

    # each output gets its own out= array postprocessing
    quot = _ufunc_postprocess(quot, out1, casting)
    rem = _ufunc_postprocess(rem, out2, casting)
    return quot, rem
#
# Attach ufuncs to this module, for a further export to the public namespace in __init__.py
#
# Wrap each torch-level binary function and attach it to this module under
# its numpy-facing name.
for name in _binary:
    ufunc = getattr(_binary_ufuncs_impl, name)
    vars()[name] = deco_binary_ufunc(ufunc)


def modf(x, /, *args, **kwds):
    # np.modf returns (fractional, integral) parts; divmod(x, 1) gives
    # (quotient, remainder), hence the swapped return order.
    quot, rem = divmod(x, 1, *args, **kwds)
    return rem, quot


_binary = _binary + ["divmod", "modf", "matmul", "ldexp"]
# ############# Unary ufuncs ######################
# All public names from the unary implementation module become ufuncs here.
_unary = [
    name
    for name in dir(_unary_ufuncs_impl)
    if not name.startswith("_") and name != "torch"
]

# these are ufunc(int) -> float: their integer inputs get cast to the
# default float dtype before delegating (see `deco_unary_ufunc`)
_fp_unary = [
    "arccos",
    "arccosh",
    "arcsin",
    "arcsinh",
    "arctan",
    "arctanh",
    "cbrt",
    "cos",
    "cosh",
    "deg2rad",
    "degrees",
    "exp",
    "exp2",
    "expm1",
    "log",
    "log10",
    "log1p",
    "log2",
    "rad2deg",
    "radians",
    "reciprocal",
    "sin",
    "sinh",
    "sqrt",
    "square",
    "tan",
    "tanh",
    "trunc",
]
def deco_unary_ufunc(torch_func):
    """Common infra for unary ufuncs.
    Normalize arguments, sort out type casting, broadcasting and delegate to
    the pytorch functions for the actual work.
    """

    @normalizer
    def wrapped(
        x: ArrayLike,
        /,
        out: Optional[OutArray] = None,
        *,
        where=True,
        casting: Optional[CastingModes] = "same_kind",
        order="K",
        dtype: Optional[DTypeLike] = None,
        subok: NotImplementedType = False,
        signature=None,
        extobj=None,
    ):
        if dtype is not None:
            x = _util.typecast_tensor(x, dtype, casting)

        if torch_func.__name__ in _fp_unary:
            # ufuncs which map int -> float: cast integer inputs up front
            x = _util.cast_int_to_float(x)

        result = torch_func(x)
        result = _ufunc_postprocess(result, out, casting)
        return result

    # expose the ufunc under its numpy-facing name
    wrapped.__qualname__ = torch_func.__name__
    wrapped.__name__ = torch_func.__name__

    return wrapped
#
# Attach ufuncs to this module, for a further export to the public namespace in __init__.py
#
# Wrap each torch-level unary function and attach it to this module under
# its numpy-facing name.
for name in _unary:
    ufunc = getattr(_unary_ufuncs_impl, name)
    vars()[name] = deco_unary_ufunc(ufunc)


__all__ = _binary + _unary  # noqa: PLE0605

View File

@ -0,0 +1,72 @@
# mypy: ignore-errors
"""Export torch work functions for unary ufuncs, rename/tweak to match numpy.
This listing is further exported to public symbols in the `_numpy/_ufuncs.py` module.
"""
import torch
from torch import ( # noqa: F401
absolute as fabs,
arccos,
arccosh,
arcsin,
arcsinh,
arctan,
arctanh,
bitwise_not,
bitwise_not as invert,
ceil,
conj_physical as conjugate,
cos,
cosh,
deg2rad,
deg2rad as radians,
exp,
exp2,
expm1,
floor,
isfinite,
isinf,
isnan,
log,
log10,
log1p,
log2,
logical_not,
negative,
rad2deg,
rad2deg as degrees,
reciprocal,
round as fix,
round as rint,
sign,
signbit,
sin,
sinh,
sqrt,
square,
tan,
tanh,
trunc,
)
# special cases: torch does not export these names
def cbrt(x):
    """Real cube root, like np.cbrt: defined for negative inputs as well.

    A plain ``torch.pow(x, 1/3)`` yields NaN for negative x (fractional power
    of a negative base), whereas np.cbrt(-8) == -2.  Take the root of |x| and
    reattach the sign instead.
    """
    return torch.sign(x) * torch.pow(torch.abs(x), 1 / 3)
def positive(x):
    """np.positive analog: unary plus (delegates to torch.positive)."""
    return torch.positive(x)
def absolute(x):
    """np.absolute analog; torch.absolute is not implemented for bools,
    where abs is the identity anyway."""
    return x if x.dtype == torch.bool else torch.absolute(x)
# numpy-style aliases for the functions above.
# TODO set __name__ and __qualname__ so reprs/errors show the aliased names
abs = absolute
conj = conjugate

View File

@ -0,0 +1,261 @@
# mypy: ignore-errors
"""Assorted utilities, which do not need anything other then torch and stdlib.
"""
import operator
import torch
from . import _dtypes_impl
# https://github.com/numpy/numpy/blob/v1.23.0/numpy/distutils/misc_util.py#L497-L504
def is_sequence(seq):
    """True for sized, non-string objects: len() works and it is not a str."""
    if isinstance(seq, str):
        return False
    try:
        len(seq)
        return True
    except Exception:
        return False
class AxisError(ValueError, IndexError):
    """Axis out of bounds; subclasses both ValueError and IndexError, like numpy's."""
    pass
class UFuncTypeError(TypeError, RuntimeError):
    """Ufunc casting/type failure; subclasses both TypeError and RuntimeError."""
    pass
def cast_if_needed(tensor, dtype):
    # NB: no casting if dtype=None; also a no-op when dtypes already match
    if dtype is None or tensor.dtype == dtype:
        return tensor
    return tensor.to(dtype)
def cast_int_to_float(x):
    # cast integers and bools to the default float dtype; floats pass through
    if _dtypes_impl._category(x.dtype) >= 2:
        return x
    return x.to(_dtypes_impl.default_dtypes().float_dtype)
# a replica of the version in ./numpy/numpy/core/src/multiarray/common.h
def normalize_axis_index(ax, ndim, argname=None):
    """Map a possibly-negative axis into [0, ndim); raise AxisError when out of range."""
    if -ndim <= ax < ndim:
        return ax + ndim if ax < 0 else ax
    raise AxisError(f"axis {ax} is out of bounds for array of dimension {ndim}")
# from https://github.com/numpy/numpy/blob/main/numpy/core/numeric.py#L1378
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
    """
    Normalizes an axis argument into a tuple of non-negative integer axes.

    This handles shorthands such as ``1`` and converts them to ``(1,)``,
    as well as performing the handling of negative indices covered by
    `normalize_axis_index`. By default, specifying an axis twice is an error.

    Parameters
    ----------
    axis : int, iterable of int
        The un-normalized index or indices of the axis.
    ndim : int
        The number of dimensions of the array that `axis` should be normalized
        against.
    argname : str, optional
        A prefix to put before the error message, typically the name of the
        argument.
    allow_duplicate : bool, optional
        If False, the default, disallow an axis from being specified twice.

    Returns
    -------
    normalized_axes : tuple of int
        The normalized axis index, such that `0 <= normalized_axis < ndim`
    """
    # Optimization to speed-up the most common cases: a bare int becomes a 1-list.
    if type(axis) not in (tuple, list):
        try:
            axis = [operator.index(axis)]
        except TypeError:
            pass
    # Going via an iterator directly is slower than via list comprehension.
    normalized = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
    if not allow_duplicate and len(set(map(int, normalized))) != len(normalized):
        if argname:
            raise ValueError(f"repeated axis in `{argname}` argument")
        raise ValueError("repeated axis")
    return normalized
def allow_only_single_axis(axis):
    """Unpack a length-1 axis tuple to a scalar; None passes through."""
    if axis is None:
        return None
    if len(axis) == 1:
        return axis[0]
    raise NotImplementedError("does not handle tuple axis")
def expand_shape(arr_shape, axis):
    # taken from numpy 1.23.x, expand_dims function
    if type(axis) not in (list, tuple):
        axis = (axis,)
    out_ndim = len(axis) + len(arr_shape)
    axis = normalize_axis_tuple(axis, out_ndim)
    remaining_dims = iter(arr_shape)
    return [1 if pos in axis else next(remaining_dims) for pos in range(out_ndim)]
def apply_keepdims(tensor, axis, ndim):
    """Re-insert reduced dimensions as size-1 axes (for keepdims=True results)."""
    if axis is None:
        # the reduction collapsed everything to a scalar: blow it back up to ndim axes
        return tensor.expand((1,) * ndim).contiguous()
    return tensor.reshape(expand_shape(tensor.shape, axis))
def axis_none_flatten(*tensors, axis=None):
    """Flatten the arrays if axis is None."""
    if axis is not None:
        return tensors, axis
    return tuple(t.flatten() for t in tensors), 0
def typecast_tensor(t, target_dtype, casting):
    """Dtype-cast tensor `t` to `target_dtype` under the given casting rule.

    Parameters
    ----------
    t : torch.Tensor
        The tensor to cast
    target_dtype : torch dtype object
        The array dtype to cast all tensors to
    casting : str
        The casting mode, see `np.can_cast`

    Returns
    -------
    `torch.Tensor` of the `target_dtype` dtype

    Raises
    ------
    TypeError
        if the argument cannot be cast according to the `casting` rule
    """
    if _dtypes_impl.can_cast_impl(t.dtype, target_dtype, casting=casting):
        return cast_if_needed(t, target_dtype)
    raise TypeError(
        f"Cannot cast array data from {t.dtype} to"
        f" {target_dtype} according to the rule '{casting}'"
    )
def typecast_tensors(tensors, target_dtype, casting):
    """Apply `typecast_tensor` to each tensor, returning a tuple."""
    casted = [typecast_tensor(t, target_dtype, casting) for t in tensors]
    return tuple(casted)
def _try_convert_to_tensor(obj):
try:
tensor = torch.as_tensor(obj)
except Exception as e:
mesg = f"failed to convert {obj} to ndarray. \nInternal error is: {str(e)}."
raise NotImplementedError(mesg) # noqa: B904
return tensor
def _coerce_to_tensor(obj, dtype=None, copy=False, ndmin=0):
    """The core logic of the array(...) function.

    Parameters
    ----------
    obj : tensor_like
        The thing to coerce
    dtype : torch.dtype object or None
        Coerce to this torch dtype
    copy : bool
        Copy or not
    ndmin : int
        The results as least this many dimensions

    Returns
    -------
    tensor : torch.Tensor
        a tensor object with requested dtype, ndim and copy semantics.

    Notes
    -----
    This is almost a "tensor_like" coersion function. Does not handle wrapper
    ndarrays (those should be handled in the ndarray-aware layer prior to
    invoking this function).
    """
    if isinstance(obj, torch.Tensor):
        tensor = obj
    else:
        # tensor.dtype is the pytorch default, typically float32. If obj's elements
        # are not exactly representable in float32, we've lost precision:
        # >>> torch.as_tensor(1e12).item() - 1e12
        # -4096.0
        # Temporarily swap the global default dtype so the conversion preserves
        # precision; the try/finally guarantees the global state is restored.
        default_dtype = torch.get_default_dtype()
        torch.set_default_dtype(_dtypes_impl.get_default_dtype_for(torch.float32))
        try:
            tensor = _try_convert_to_tensor(obj)
        finally:
            torch.set_default_dtype(default_dtype)

    # type cast if requested
    tensor = cast_if_needed(tensor, dtype)

    # adjust ndim if needed: prepend length-1 dimensions up to ndmin
    ndim_extra = ndmin - tensor.ndim
    if ndim_extra > 0:
        tensor = tensor.view((1,) * ndim_extra + tensor.shape)

    # copy if requested
    if copy:
        tensor = tensor.clone()

    return tensor
def ndarrays_to_tensors(*inputs):
    """Convert all ndarrays from `inputs` to tensors. (other things are intact)

    A single ndarray becomes its tensor; a tuple is converted recursively;
    anything else passes through unchanged.
    """
    from ._ndarray import ndarray

    if len(inputs) == 0:
        # Bug fix: this used to `return ValueError()` -- i.e. hand back an
        # exception *instance* -- instead of raising.
        raise ValueError("ndarrays_to_tensors expects at least one input")
    elif len(inputs) == 1:
        input_ = inputs[0]
        if isinstance(input_, ndarray):
            return input_.tensor
        elif isinstance(input_, tuple):
            # recurse into tuples, element by element
            result = []
            for sub_input in input_:
                sub_result = ndarrays_to_tensors(sub_input)
                result.append(sub_result)
            return tuple(result)
        else:
            return input_
    else:
        assert isinstance(inputs, tuple)  # sanity check
        # multiple arguments: treat them as one tuple
        return ndarrays_to_tensors(inputs)

View File

@ -0,0 +1,130 @@
# mypy: ignore-errors
from __future__ import annotations
import functools
import torch
from . import _dtypes_impl, _util
from ._normalizations import ArrayLike, normalizer
def upcast(func):
    """NumPy fft casts inputs to 64 bit and *returns 64-bit results*."""

    @functools.wraps(func)
    def wrapped(tensor, *args, **kwds):
        if tensor.is_complex():
            target_dtype = _dtypes_impl.default_dtypes().complex_dtype
        else:
            target_dtype = _dtypes_impl.default_dtypes().float_dtype
        return func(_util.cast_if_needed(tensor, target_dtype), *args, **kwds)

    return wrapped
@normalizer
@upcast
def fft(a: ArrayLike, n=None, axis=-1, norm=None):
    """One-dimensional discrete Fourier transform, cf. numpy.fft.fft."""
    return torch.fft.fft(a, n, dim=axis, norm=norm)


@normalizer
@upcast
def ifft(a: ArrayLike, n=None, axis=-1, norm=None):
    """Inverse of `fft`, cf. numpy.fft.ifft."""
    return torch.fft.ifft(a, n, dim=axis, norm=norm)


@normalizer
@upcast
def rfft(a: ArrayLike, n=None, axis=-1, norm=None):
    """Real-input FFT, cf. numpy.fft.rfft."""
    return torch.fft.rfft(a, n, dim=axis, norm=norm)


@normalizer
@upcast
def irfft(a: ArrayLike, n=None, axis=-1, norm=None):
    """Inverse of `rfft`, cf. numpy.fft.irfft."""
    return torch.fft.irfft(a, n, dim=axis, norm=norm)


@normalizer
@upcast
def fftn(a: ArrayLike, s=None, axes=None, norm=None):
    """N-dimensional FFT, cf. numpy.fft.fftn."""
    return torch.fft.fftn(a, s, dim=axes, norm=norm)


@normalizer
@upcast
def ifftn(a: ArrayLike, s=None, axes=None, norm=None):
    """Inverse of `fftn`, cf. numpy.fft.ifftn."""
    return torch.fft.ifftn(a, s, dim=axes, norm=norm)


@normalizer
@upcast
def rfftn(a: ArrayLike, s=None, axes=None, norm=None):
    """N-dimensional real-input FFT, cf. numpy.fft.rfftn."""
    return torch.fft.rfftn(a, s, dim=axes, norm=norm)


@normalizer
@upcast
def irfftn(a: ArrayLike, s=None, axes=None, norm=None):
    """Inverse of `rfftn`, cf. numpy.fft.irfftn."""
    return torch.fft.irfftn(a, s, dim=axes, norm=norm)


@normalizer
@upcast
def fft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
    """Two-dimensional FFT over the last two axes by default, cf. numpy.fft.fft2."""
    return torch.fft.fft2(a, s, dim=axes, norm=norm)


@normalizer
@upcast
def ifft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
    """Inverse of `fft2`, cf. numpy.fft.ifft2."""
    return torch.fft.ifft2(a, s, dim=axes, norm=norm)


@normalizer
@upcast
def rfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
    """Two-dimensional real-input FFT, cf. numpy.fft.rfft2."""
    return torch.fft.rfft2(a, s, dim=axes, norm=norm)


@normalizer
@upcast
def irfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
    """Inverse of `rfft2`, cf. numpy.fft.irfft2."""
    return torch.fft.irfft2(a, s, dim=axes, norm=norm)


@normalizer
@upcast
def hfft(a: ArrayLike, n=None, axis=-1, norm=None):
    """FFT of a signal with Hermitian symmetry, cf. numpy.fft.hfft."""
    return torch.fft.hfft(a, n, dim=axis, norm=norm)


@normalizer
@upcast
def ihfft(a: ArrayLike, n=None, axis=-1, norm=None):
    """Inverse of `hfft`, cf. numpy.fft.ihfft."""
    return torch.fft.ihfft(a, n, dim=axis, norm=norm)


# NB: the helpers below are not decorated with `upcast`.


@normalizer
def fftfreq(n, d=1.0):
    """Sample frequencies for an `fft` output of length `n`, spacing `d`."""
    return torch.fft.fftfreq(n, d)


@normalizer
def rfftfreq(n, d=1.0):
    """Sample frequencies for an `rfft` output of length `n`, spacing `d`."""
    return torch.fft.rfftfreq(n, d)


@normalizer
def fftshift(x: ArrayLike, axes=None):
    """Shift the zero-frequency component to the center, cf. numpy.fft.fftshift."""
    return torch.fft.fftshift(x, axes)


@normalizer
def ifftshift(x: ArrayLike, axes=None):
    """Inverse of `fftshift`, cf. numpy.fft.ifftshift."""
    return torch.fft.ifftshift(x, axes)

View File

@ -0,0 +1,239 @@
# mypy: ignore-errors
from __future__ import annotations
import functools
import math
from typing import Sequence
import torch
from . import _dtypes_impl, _util
from ._normalizations import ArrayLike, KeepDims, normalizer
class LinAlgError(Exception):
    """Raised when a linear-algebra routine fails; numpy.linalg.LinAlgError analogue."""
def _atleast_float_1(a):
    """Return `a` unchanged if floating/complex, else cast to the default float dtype."""
    if a.dtype.is_floating_point or a.dtype.is_complex:
        return a
    return a.to(_dtypes_impl.default_dtypes().float_dtype)
def _atleast_float_2(a, b):
    """Promote `a` and `b` to their common dtype, at least the default float dtype."""
    common = _dtypes_impl.result_type_impl(a, b)
    if not (common.is_floating_point or common.is_complex):
        common = _dtypes_impl.default_dtypes().float_dtype
    return _util.cast_if_needed(a, common), _util.cast_if_needed(b, common)
def linalg_errors(func):
    """Decorator: re-raise torch's ``_LinAlgError`` as this module's LinAlgError."""

    @functools.wraps(func)
    def guarded(*args, **kwds):
        try:
            result = func(*args, **kwds)
        except torch._C._LinAlgError as e:
            raise LinAlgError(*e.args)  # noqa: B904
        return result

    return guarded
# ### Matrix and vector products ###


@normalizer
@linalg_errors
def matrix_power(a: ArrayLike, n):
    """Raise a square matrix to the integer power `n`, cf. numpy.linalg.matrix_power."""
    a = _atleast_float_1(a)
    return torch.linalg.matrix_power(a, n)


@normalizer
@linalg_errors
def multi_dot(inputs: Sequence[ArrayLike], *, out=None):
    """Chained matrix product of `inputs`, cf. numpy.linalg.multi_dot."""
    # NB: `out=` is accepted for NumPy API compatibility but not forwarded.
    return torch.linalg.multi_dot(inputs)


# ### Solving equations and inverting matrices ###


@normalizer
@linalg_errors
def solve(a: ArrayLike, b: ArrayLike):
    """Solve the linear system ``a @ x == b``, cf. numpy.linalg.solve."""
    a, b = _atleast_float_2(a, b)
    return torch.linalg.solve(a, b)


@normalizer
@linalg_errors
def lstsq(a: ArrayLike, b: ArrayLike, rcond=None):
    """Least-squares solution to ``a @ x = b``, cf. numpy.linalg.lstsq."""
    a, b = _atleast_float_2(a, b)
    # NumPy is using gelsd: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/umath_linalg.cpp#L3991
    # on CUDA, only `gels` is available though, so use it instead
    driver = "gels" if a.is_cuda or b.is_cuda else "gelsd"
    return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)


@normalizer
@linalg_errors
def inv(a: ArrayLike):
    """Multiplicative inverse of a square matrix, cf. numpy.linalg.inv."""
    a = _atleast_float_1(a)
    result = torch.linalg.inv(a)
    return result


@normalizer
@linalg_errors
def pinv(a: ArrayLike, rcond=1e-15, hermitian=False):
    """Moore-Penrose pseudo-inverse, cf. numpy.linalg.pinv."""
    # NumPy's `rcond` maps onto torch's relative tolerance `rtol`.
    a = _atleast_float_1(a)
    return torch.linalg.pinv(a, rtol=rcond, hermitian=hermitian)


@normalizer
@linalg_errors
def tensorsolve(a: ArrayLike, b: ArrayLike, axes=None):
    """Solve the tensor equation ``a x = b`` for x, cf. numpy.linalg.tensorsolve."""
    a, b = _atleast_float_2(a, b)
    return torch.linalg.tensorsolve(a, b, dims=axes)


@normalizer
@linalg_errors
def tensorinv(a: ArrayLike, ind=2):
    """'Inverse' of an N-dimensional array, cf. numpy.linalg.tensorinv."""
    a = _atleast_float_1(a)
    return torch.linalg.tensorinv(a, ind=ind)
# ### Norms and other numbers ###


@normalizer
@linalg_errors
def det(a: ArrayLike):
    """Matrix determinant, cf. numpy.linalg.det."""
    a = _atleast_float_1(a)
    return torch.linalg.det(a)


@normalizer
@linalg_errors
def slogdet(a: ArrayLike):
    """Sign and log-absolute-value of the determinant, cf. numpy.linalg.slogdet."""
    a = _atleast_float_1(a)
    return torch.linalg.slogdet(a)


@normalizer
@linalg_errors
def cond(x: ArrayLike, p=None):
    """Condition number of a matrix in norm `p`, cf. numpy.linalg.cond."""
    x = _atleast_float_1(x)

    # check if empty
    # cf: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
    if x.numel() == 0 and math.prod(x.shape[-2:]) == 0:
        raise LinAlgError("cond is not defined on empty arrays")

    result = torch.linalg.cond(x, p=p)

    # Convert nans to infs (numpy does it in a data-dependent way, depending on
    # whether the input array has nans or not)
    # XXX: NumPy does this: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
    return torch.where(torch.isnan(result), float("inf"), result)


@normalizer
@linalg_errors
def matrix_rank(a: ArrayLike, tol=None, hermitian=False):
    """Rank of a matrix via its singular values, cf. numpy.linalg.matrix_rank."""
    a = _atleast_float_1(a)

    if a.ndim < 2:
        # 0-d/1-d input: rank is 1 iff any element is nonzero
        return int((a != 0).any())

    if tol is None:
        # follow https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885
        atol = 0
        rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps
    else:
        atol, rtol = tol, 0
    return torch.linalg.matrix_rank(a, atol=atol, rtol=rtol, hermitian=hermitian)


@normalizer
@linalg_errors
def norm(x: ArrayLike, ord=None, axis=None, keepdims: KeepDims = False):
    """Matrix or vector norm, cf. numpy.linalg.norm.

    NOTE(review): `keepdims` is not forwarded to torch here; it appears to be
    consumed by the `KeepDims` annotation in the `@normalizer` machinery --
    confirm against `_normalizations`.
    """
    x = _atleast_float_1(x)
    return torch.linalg.norm(x, ord=ord, dim=axis)
# ### Decompositions ###


@normalizer
@linalg_errors
def cholesky(a: ArrayLike):
    """Cholesky factorization, cf. numpy.linalg.cholesky."""
    a = _atleast_float_1(a)
    return torch.linalg.cholesky(a)


@normalizer
@linalg_errors
def qr(a: ArrayLike, mode="reduced"):
    """QR decomposition, cf. numpy.linalg.qr."""
    a = _atleast_float_1(a)
    result = torch.linalg.qr(a, mode=mode)
    if mode == "r":
        # match NumPy: return only the R factor, not the (Q, R) named tuple
        result = result.R
    return result


@normalizer
@linalg_errors
def svd(a: ArrayLike, full_matrices=True, compute_uv=True, hermitian=False):
    """Singular value decomposition, cf. numpy.linalg.svd."""
    a = _atleast_float_1(a)
    if not compute_uv:
        # singular values only
        return torch.linalg.svdvals(a)

    # NB: ignore the hermitian= argument (no pytorch equivalent)
    result = torch.linalg.svd(a, full_matrices=full_matrices)
    return result
# ### Eigenvalues and eigenvectors ###


@normalizer
@linalg_errors
def eig(a: ArrayLike):
    """Eigenvalues and right eigenvectors of a general matrix, cf. numpy.linalg.eig."""
    a = _atleast_float_1(a)
    w, vt = torch.linalg.eig(a)

    # For real input whose eigenvalues all have zero imaginary part, cast the
    # complex results back to real (mirrors NumPy's return-dtype behavior).
    if not a.is_complex() and w.is_complex() and (w.imag == 0).all():
        w = w.real
        vt = vt.real
    return w, vt


@normalizer
@linalg_errors
def eigh(a: ArrayLike, UPLO="L"):
    """Eigen-decomposition of a Hermitian/symmetric matrix, cf. numpy.linalg.eigh."""
    a = _atleast_float_1(a)
    return torch.linalg.eigh(a, UPLO=UPLO)


@normalizer
@linalg_errors
def eigvals(a: ArrayLike):
    """Eigenvalues only, cf. numpy.linalg.eigvals."""
    a = _atleast_float_1(a)
    result = torch.linalg.eigvals(a)

    # Same real-cast convention as in `eig` above.
    if not a.is_complex() and result.is_complex() and (result.imag == 0).all():
        result = result.real
    return result


@normalizer
@linalg_errors
def eigvalsh(a: ArrayLike, UPLO="L"):
    """Eigenvalues of a Hermitian/symmetric matrix, cf. numpy.linalg.eigvalsh."""
    a = _atleast_float_1(a)
    return torch.linalg.eigvalsh(a, UPLO=UPLO)

View File

@ -0,0 +1,191 @@
# mypy: ignore-errors
"""Wrapper to mimic (parts of) np.random API surface.
NumPy has strict guarantees on reproducibility etc; here we don't give any.
Q: default dtype is float64 in numpy
"""
from __future__ import annotations
import functools
from math import sqrt
from typing import Optional
import torch
from . import _dtypes_impl, _util
from ._normalizations import array_or_scalar, ArrayLike, normalizer
__all__ = [
"seed",
"random_sample",
"sample",
"random",
"rand",
"randn",
"normal",
"choice",
"randint",
"shuffle",
"uniform",
]
def use_numpy_random():
# local import to avoid ref cycles
import torch._dynamo.config as config
return config.use_numpy_random_stream
def deco_stream(func):
    """Dispatch `func` to torch or to the same-named `numpy.random` function.

    When `use_numpy_random()` is falsy, the wrapped torch-based implementation
    runs unchanged.  Otherwise the same-named function from `numpy.random` is
    called instead: ndarray arguments are unwrapped to numpy arrays first, and
    a `numpy.ndarray` result is re-wrapped into our ndarray.
    """

    @functools.wraps(func)
    def inner(*args, **kwds):
        if not use_numpy_random():
            return func(*args, **kwds)

        import numpy

        from ._ndarray import ndarray

        def unwrap(obj):
            # numpy funcs accept numpy ndarrays, unwrap
            return obj.tensor.numpy() if isinstance(obj, ndarray) else obj

        np_func = getattr(numpy.random, func.__name__)
        value = np_func(
            *(unwrap(arg) for arg in args),
            **{key: unwrap(val) for key, val in kwds.items()},
        )

        # `value` can be either numpy.ndarray or python scalar (or None)
        if isinstance(value, numpy.ndarray):
            value = ndarray(torch.as_tensor(value))
        return value

    return inner
@deco_stream
def seed(seed=None):
    """Seed torch's global RNG; a no-op when `seed` is None."""
    if seed is None:
        return
    torch.random.manual_seed(seed)
@deco_stream
def random_sample(size=None):
    """Uniform samples from ``[0, 1)`` in the default float dtype."""
    if size is None:
        size = ()
    dtype = _dtypes_impl.default_dtypes().float_dtype
    values = torch.empty(size, dtype=dtype).uniform_()
    # size == () yields a python scalar, matching NumPy's no-size behavior
    return array_or_scalar(values, return_scalar=size == ())


def rand(*size):
    """NumPy-style ``rand(d0, d1, ...)``: uniform samples with shape `size`."""
    if size == ():
        size = None
    return random_sample(size)


# NumPy exposes the same sampler under several aliases.
sample = random_sample
random = random_sample
@deco_stream
def uniform(low=0.0, high=1.0, size=None):
    """Uniform samples from ``[low, high)`` in the default float dtype."""
    if size is None:
        size = ()
    dtype = _dtypes_impl.default_dtypes().float_dtype
    values = torch.empty(size, dtype=dtype).uniform_(low, high)
    return array_or_scalar(values, return_scalar=size == ())


@deco_stream
def randn(*size):
    """Standard-normal samples with shape `size` (scalar when called with no args)."""
    dtype = _dtypes_impl.default_dtypes().float_dtype
    values = torch.randn(size, dtype=dtype)
    return array_or_scalar(values, return_scalar=size == ())


@deco_stream
def normal(loc=0.0, scale=1.0, size=None):
    """Normal samples with mean `loc` and std-dev `scale` in the default float dtype."""
    if size is None:
        size = ()
    dtype = _dtypes_impl.default_dtypes().float_dtype
    values = torch.empty(size, dtype=dtype).normal_(loc, scale)
    return array_or_scalar(values, return_scalar=size == ())
@deco_stream
def shuffle(x):
    """Shuffle a tensor or ndarray `x` in place along its first dimension."""
    # no @normalizer because we do not cast e.g. lists to tensors
    from ._ndarray import ndarray

    if isinstance(x, torch.Tensor):
        tensor = x
    elif isinstance(x, ndarray):
        tensor = x.tensor
    else:
        raise NotImplementedError("We do not random.shuffle lists in-place")

    perm = torch.randperm(tensor.shape[0])
    xp = tensor[perm]
    # copy back through the original storage so the caller's `x` is mutated
    tensor.copy_(xp)
@deco_stream
def randint(low, high=None, size=None):
    """Random integers from ``[low, high)``; with one argument, from ``[0, low)``."""
    if size is None:
        size = ()
    if not isinstance(size, (tuple, list)):
        size = (size,)
    if high is None:
        # single-argument form: sample from [0, low)
        low, high = 0, low
    values = torch.randint(low, high, size=size)
    return array_or_scalar(values, int, return_scalar=size == ())
@deco_stream
@normalizer
def choice(a: ArrayLike, size=None, replace=True, p: Optional[ArrayLike] = None):
    """Draw `size` samples from `a` (or from ``arange(a)`` for a one-element `a`).

    `p`, when given, holds per-element probabilities; defaults to uniform.
    """
    # https://stackoverflow.com/questions/59461811/random-choice-with-pytorch
    if a.numel() == 1:
        a = torch.arange(a)
    # TODO: check a.dtype is integer -- cf np.random.choice(3.4) which raises

    # number of draws
    if size is None:
        num_el = 1
    elif _util.is_sequence(size):
        num_el = 1
        for el in size:
            num_el *= el
    else:
        num_el = size

    # prepare the probabilities
    if p is None:
        p = torch.ones_like(a) / a.shape[0]

    # cf https://github.com/numpy/numpy/blob/main/numpy/random/mtrand.pyx#L973
    atol = sqrt(torch.finfo(p.dtype).eps)
    if abs(p.sum() - 1.0) > atol:
        raise ValueError("probabilities do not sum to 1.")

    # actually sample
    indices = torch.multinomial(p, num_el, replacement=replace)

    if _util.is_sequence(size):
        indices = indices.reshape(size)

    samples = a[indices]

    return samples

View File

@ -0,0 +1,20 @@
# mypy: ignore-errors
from .utils import (
_gen_alignment_data,
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
assert_equal,
assert_raises_regex,
assert_warns,
HAS_REFCOUNT,
IS_WASM,
suppress_warnings,
)
# from .testing import assert_allclose # FIXME

File diff suppressed because it is too large Load Diff