I am done

2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

View File

@ -0,0 +1,356 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
__all__ = [
# Constants
"ONNX_ML",
"IR_VERSION",
"IR_VERSION_2017_10_10",
"IR_VERSION_2017_10_30",
"IR_VERSION_2017_11_3",
"IR_VERSION_2019_1_22",
"IR_VERSION_2019_3_18",
"IR_VERSION_2019_9_19",
"IR_VERSION_2020_5_8",
"IR_VERSION_2021_7_30",
"IR_VERSION_2023_5_5",
"EXPERIMENTAL",
"STABLE",
# Modules
"checker",
"compose",
"defs",
"gen_proto",
"helper",
"hub",
"mapping",
"numpy_helper",
"parser",
"printer",
"shape_inference",
"utils",
"version_converter",
# Proto classes
"AttributeProto",
"FunctionProto",
"GraphProto",
"MapProto",
"ModelProto",
"NodeProto",
"OperatorProto",
"OperatorSetIdProto",
"OperatorSetProto",
"OperatorStatus",
"OptionalProto",
"SequenceProto",
"SparseTensorProto",
"StringStringEntryProto",
"TensorAnnotation",
"TensorProto",
"TensorShapeProto",
"TrainingInfoProto",
"TypeProto",
"ValueInfoProto",
"Version",
# Utility functions
"convert_model_to_external_data",
"load_external_data_for_model",
"load_model_from_string",
"load_model",
"load_tensor_from_string",
"load_tensor",
"save_model",
"save_tensor",
"write_external_data_tensors",
]
# isort:skip_file
import os
import typing
from typing import IO, Literal, Union
from onnx import serialization
from onnx.onnx_cpp2py_export import ONNX_ML
from onnx.external_data_helper import (
load_external_data_for_model,
write_external_data_tensors,
convert_model_to_external_data,
)
from onnx.onnx_pb import (
AttributeProto,
EXPERIMENTAL,
FunctionProto,
GraphProto,
IR_VERSION,
IR_VERSION_2017_10_10,
IR_VERSION_2017_10_30,
IR_VERSION_2017_11_3,
IR_VERSION_2019_1_22,
IR_VERSION_2019_3_18,
IR_VERSION_2019_9_19,
IR_VERSION_2020_5_8,
IR_VERSION_2021_7_30,
IR_VERSION_2023_5_5,
ModelProto,
NodeProto,
OperatorSetIdProto,
OperatorStatus,
STABLE,
SparseTensorProto,
StringStringEntryProto,
TensorAnnotation,
TensorProto,
TensorShapeProto,
TrainingInfoProto,
TypeProto,
ValueInfoProto,
Version,
)
from onnx.onnx_operators_pb import OperatorProto, OperatorSetProto
from onnx.onnx_data_pb import MapProto, OptionalProto, SequenceProto
from onnx.version import version as __version__
# Import common subpackages so they're available when you 'import onnx'
from onnx import (
checker,
compose,
defs,
gen_proto,
helper,
hub,
mapping,
numpy_helper,
parser,
printer,
shape_inference,
utils,
version_converter,
)
# Supported model formats that can be loaded from and saved to
# The literals are formats with built-in support. Users may also register
# their own formats, so plain str is allowed as well.
_SupportedFormat = Union[
Literal["protobuf", "textproto", "onnxtxt", "json"], str # noqa: PYI051
]
# Default serialization format
_DEFAULT_FORMAT = "protobuf"
def _load_bytes(f: IO[bytes] | str | os.PathLike) -> bytes:
if hasattr(f, "read") and callable(typing.cast(IO[bytes], f).read):
content = typing.cast(IO[bytes], f).read()
else:
f = typing.cast(Union[str, os.PathLike], f)
with open(f, "rb") as readable:
content = readable.read()
return content
def _save_bytes(content: bytes, f: IO[bytes] | str | os.PathLike) -> None:
if hasattr(f, "write") and callable(typing.cast(IO[bytes], f).write):
typing.cast(IO[bytes], f).write(content)
else:
f = typing.cast(Union[str, os.PathLike], f)
with open(f, "wb") as writable:
writable.write(content)
def _get_file_path(f: IO[bytes] | str | os.PathLike | None) -> str | None:
if isinstance(f, (str, os.PathLike)):
return os.path.abspath(f)
if hasattr(f, "name"):
assert f is not None
return os.path.abspath(f.name)
return None
def _get_serializer(
fmt: _SupportedFormat | None, f: str | os.PathLike | IO[bytes] | None = None
) -> serialization.ProtoSerializer:
"""Get the serializer for the given path and format from the serialization registry."""
# Use fmt if it is specified
if fmt is not None:
return serialization.registry.get(fmt)
if (file_path := _get_file_path(f)) is not None:
_, ext = os.path.splitext(file_path)
fmt = serialization.registry.get_format_from_file_extension(ext)
# Failed to resolve format if fmt is None. Use protobuf as default
fmt = fmt or _DEFAULT_FORMAT
assert fmt is not None
return serialization.registry.get(fmt)
def load_model(
f: IO[bytes] | str | os.PathLike,
format: _SupportedFormat | None = None, # noqa: A002
load_external_data: bool = True,
) -> ModelProto:
"""Loads a serialized ModelProto into memory.
Args:
f: can be a file-like object (has "read" function) or a string/PathLike containing a file name
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
load_external_data: Whether to load the external data.
Set to True if the data is in the same directory as the model.
If not, users need to call :func:`load_external_data_for_model`
with the directory to load the external data from.
Returns:
Loaded in-memory ModelProto.
"""
model = _get_serializer(format, f).deserialize_proto(_load_bytes(f), ModelProto())
if load_external_data:
model_filepath = _get_file_path(f)
if model_filepath:
base_dir = os.path.dirname(model_filepath)
load_external_data_for_model(model, base_dir)
return model
def load_tensor(
f: IO[bytes] | str | os.PathLike,
format: _SupportedFormat | None = None, # noqa: A002
) -> TensorProto:
"""Loads a serialized TensorProto into memory.
Args:
f: can be a file-like object (has "read" function) or a string/PathLike containing a file name
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
Returns:
Loaded in-memory TensorProto.
"""
return _get_serializer(format, f).deserialize_proto(_load_bytes(f), TensorProto())
def load_model_from_string(
s: bytes | str,
format: _SupportedFormat = _DEFAULT_FORMAT, # noqa: A002
) -> ModelProto:
"""Loads a binary string (bytes) that contains serialized ModelProto.
Args:
s: a string, which contains serialized ModelProto
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
Returns:
Loaded in-memory ModelProto.
"""
return _get_serializer(format).deserialize_proto(s, ModelProto())
def load_tensor_from_string(
s: bytes,
format: _SupportedFormat = _DEFAULT_FORMAT, # noqa: A002
) -> TensorProto:
"""Loads a binary string (bytes) that contains serialized TensorProto.
Args:
s: a string, which contains serialized TensorProto
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
Returns:
Loaded in-memory TensorProto.
"""
return _get_serializer(format).deserialize_proto(s, TensorProto())
def save_model(
proto: ModelProto | bytes,
f: IO[bytes] | str | os.PathLike,
format: _SupportedFormat | None = None, # noqa: A002
*,
save_as_external_data: bool = False,
all_tensors_to_one_file: bool = True,
location: str | None = None,
size_threshold: int = 1024,
convert_attribute: bool = False,
) -> None:
"""Saves the ModelProto to the specified path and optionally, serialize tensors with raw data as external data before saving.
Args:
proto: should be an in-memory ModelProto
f: can be a file-like object (has "write" function) or a string containing
a file name or a pathlike object
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
save_as_external_data: If true, save tensors to external file(s).
all_tensors_to_one_file: Effective only if save_as_external_data is True.
If true, save all tensors to one external file specified by location.
If false, save each tensor to a file named with the tensor name.
location: Effective only if save_as_external_data is True.
Specifies the external file that all tensors are saved to.
The path is relative to the model path.
If not specified, the model name is used.
size_threshold: Effective only if save_as_external_data is True.
Threshold for the size of data. A tensor's data is converted to external data
only when its size is >= size_threshold. To convert every tensor with raw data to external data, set size_threshold=0.
convert_attribute: Effective only if save_as_external_data is True.
If true, convert all tensors to external data.
If false, convert only non-attribute tensors to external data.
"""
if isinstance(proto, bytes):
proto = _get_serializer(_DEFAULT_FORMAT).deserialize_proto(proto, ModelProto())
if save_as_external_data:
convert_model_to_external_data(
proto, all_tensors_to_one_file, location, size_threshold, convert_attribute
)
model_filepath = _get_file_path(f)
if model_filepath is not None:
basepath = os.path.dirname(model_filepath)
proto = write_external_data_tensors(proto, basepath)
serialized = _get_serializer(format, model_filepath).serialize_proto(proto)
_save_bytes(serialized, f)
def save_tensor(
proto: TensorProto,
f: IO[bytes] | str | os.PathLike,
format: _SupportedFormat | None = None, # noqa: A002
) -> None:
"""Saves the TensorProto to the specified path.
Args:
proto: should be an in-memory TensorProto
f: can be a file-like object (has "write" function) or a string
containing a file name or a pathlike object.
format: The serialization format. When it is not specified, it is inferred
from the file extension when ``f`` is a path. If not specified _and_
``f`` is not a path, 'protobuf' is used. The encoding is assumed to
be "utf-8" when the format is a text format.
"""
serialized = _get_serializer(format, f).serialize_proto(proto)
_save_bytes(serialized, f)
# For backward compatibility
load = load_model
load_from_string = load_model_from_string
save = save_model
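# Minimal usage sketch for the helpers defined above (assumes a protobuf model
# file named "model.onnx" exists in the working directory; the file names here
# are illustrative only).
if __name__ == "__main__":
    _model = load_model("model.onnx")        # format inferred from the file extension
    _raw = _model.SerializeToString()        # serialize the ModelProto to bytes
    _copy = load_model_from_string(_raw)     # parse the bytes back into a ModelProto
    save_model(_copy, "model_copy.onnx")     # save using the default "protobuf" format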

View File

@ -0,0 +1,63 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
"""This module defines custom dtypes not supported by numpy.
Functions :func:`onnx.numpy_helper.from_array`
and :func:`onnx.numpy_helper.to_array` use them
to convert arrays to and from these types.
Class :class:`onnx.reference.ReferenceEvaluator` also uses them.
To create such an array, for a unit test for example, it is convenient to write
something like the following:
.. exec_code::
import numpy as np
from onnx import TensorProto
from onnx.reference.ops.op_cast import Cast_19 as Cast
tensor_bfloat16 = Cast.eval(np.array([0, 1], dtype=np.float32), to=TensorProto.BFLOAT16)
The numpy representation dtypes used below are meant for internal use. They may change in the
future based on the industry standardization of these numpy types.
"""
from __future__ import annotations
import numpy as np
import onnx
#: Defines a bfloat16 as a uint16.
bfloat16 = np.dtype((np.uint16, {"bfloat16": (np.uint16, 0)}))
#: Defines the float8 e4m3fn type; see :ref:`onnx-detail-float8` for technical details.
float8e4m3fn = np.dtype((np.uint8, {"e4m3fn": (np.uint8, 0)}))
#: Defines the float8 e4m3fnuz type; see :ref:`onnx-detail-float8` for technical details.
float8e4m3fnuz = np.dtype((np.uint8, {"e4m3fnuz": (np.uint8, 0)}))
#: Defines the float8 e5m2 type; see :ref:`onnx-detail-float8` for technical details.
float8e5m2 = np.dtype((np.uint8, {"e5m2": (np.uint8, 0)}))
#: Defines the float8 e5m2fnuz type; see :ref:`onnx-detail-float8` for technical details.
float8e5m2fnuz = np.dtype((np.uint8, {"e5m2fnuz": (np.uint8, 0)}))
#: Defines uint4; see :ref:`onnx-detail-int4` for technical details.
#: Note that each integer is stored in a full byte and therefore takes twice
#: as much space as its ONNX size.
uint4 = np.dtype((np.uint8, {"uint4": (np.uint8, 0)}))
#: Defines int4; see :ref:`onnx-detail-int4` for technical details.
#: Note that each integer is stored in a full byte and therefore takes twice
#: as much space as its ONNX size.
int4 = np.dtype((np.int8, {"int4": (np.int8, 0)}))
mapping_name_to_data_type = {
"bfloat16": onnx.TensorProto.BFLOAT16,
"e4m3fn": onnx.TensorProto.FLOAT8E4M3FN,
"e4m3fnuz": onnx.TensorProto.FLOAT8E4M3FNUZ,
"e5m2": onnx.TensorProto.FLOAT8E5M2,
"e5m2fnuz": onnx.TensorProto.FLOAT8E5M2FNUZ,
"int4": onnx.TensorProto.INT4,
"uint4": onnx.TensorProto.UINT4,
}
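# A small sketch of how these dtypes tie back to TensorProto element types
# (assumes only numpy and the names defined above; the array content is arbitrary).
if __name__ == "__main__":
    raw = np.zeros(4, dtype=bfloat16)       # bfloat16 values stored as uint16
    field_name = raw.dtype.descr[0][0]      # -> "bfloat16"
    assert mapping_name_to_data_type[field_name] == onnx.TensorProto.BFLOAT16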

View File

@ -0,0 +1,3 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0

View File

@ -0,0 +1,139 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from collections import namedtuple
from typing import Any, NewType, Sequence
import numpy
import onnx.checker
import onnx.onnx_cpp2py_export.checker as c_checker
from onnx import IR_VERSION, ModelProto, NodeProto
class DeviceType:
"""Describes device type."""
_Type = NewType("_Type", int)
CPU: _Type = _Type(0)
CUDA: _Type = _Type(1)
class Device:
"""Describes device type and device id
syntax: device_type:device_id(optional)
example: 'CPU', 'CUDA', 'CUDA:1'
"""
def __init__(self, device: str) -> None:
options = device.split(":")
self.type = getattr(DeviceType, options[0])
self.device_id = 0
if len(options) > 1:
self.device_id = int(options[1])
def namedtupledict(
typename: str, field_names: Sequence[str], *args: Any, **kwargs: Any
) -> type[tuple[Any, ...]]:
field_names_map = {n: i for i, n in enumerate(field_names)}
# Some output names are invalid Python identifiers, e.g. "0"
kwargs.setdefault("rename", True)
data = namedtuple(typename, field_names, *args, **kwargs) # type: ignore # noqa: PYI024
def getitem(self: Any, key: Any) -> Any:
if isinstance(key, str):
key = field_names_map[key]
return super(type(self), self).__getitem__(key) # type: ignore
data.__getitem__ = getitem # type: ignore[assignment]
return data
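# For example, namedtupledict("Outputs", ["y", "0"]) returns a tuple type whose
# instances support out[0], out.y, out["y"], and out["0"]; the field "0" is
# renamed internally (rename=True) because it is not a valid Python identifier.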
class BackendRep:
"""BackendRep is the handle that a Backend returns after preparing to execute
a model repeatedly. Users will then pass inputs to the run function of
BackendRep to retrieve the corresponding results.
"""
def run(self, inputs: Any, **kwargs: Any) -> tuple[Any, ...]: # noqa: ARG002
"""Abstract function."""
return (None,)
class Backend:
"""Backend is the entity that will take an ONNX model with inputs,
perform a computation, and then return the output.
For one-off execution, users can use run_node and run_model to obtain results quickly.
For repeated execution, users should use prepare, in which the Backend
does all of the preparation work for executing the model repeatedly
(e.g., loading initializers), and returns a BackendRep handle.
"""
@classmethod
def is_compatible(
cls, model: ModelProto, device: str = "CPU", **kwargs: Any # noqa: ARG003
) -> bool:
# Return whether the model is compatible with the backend.
return True
@classmethod
def prepare(
cls, model: ModelProto, device: str = "CPU", **kwargs: Any # noqa: ARG003
) -> BackendRep | None:
# TODO Remove Optional from return type
onnx.checker.check_model(model)
return None
@classmethod
def run_model(
cls, model: ModelProto, inputs: Any, device: str = "CPU", **kwargs: Any
) -> tuple[Any, ...]:
backend = cls.prepare(model, device, **kwargs)
assert backend is not None
return backend.run(inputs)
@classmethod
def run_node(
cls,
node: NodeProto,
inputs: Any, # noqa: ARG003
device: str = "CPU", # noqa: ARG003
outputs_info: ( # noqa: ARG003
Sequence[tuple[numpy.dtype, tuple[int, ...]]] | None
) = None,
**kwargs: dict[str, Any],
) -> tuple[Any, ...] | None:
"""Simple run one operator and return the results.
Args:
node: The node proto.
inputs: Inputs to the node.
device: The device to run on.
outputs_info: a list of tuples, which contains the element type and
shape of each output. First element of the tuple is the dtype, and
the second element is the shape. More use cases can be found in
https://github.com/onnx/onnx/blob/main/onnx/backend/test/runner/__init__.py
kwargs: Other keyword arguments.
"""
# TODO Remove Optional from return type
if "opset_version" in kwargs:
special_context = c_checker.CheckerContext()
special_context.ir_version = IR_VERSION
special_context.opset_imports = {"": kwargs["opset_version"]} # type: ignore
onnx.checker.check_node(node, special_context)
else:
onnx.checker.check_node(node)
return None
@classmethod
def supports_device(cls, device: str) -> bool: # noqa: ARG003
"""Checks whether the backend is compiled with particular device support.
In particular it's used in the testing suite.
"""
return True
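# A minimal sketch of how a backend plugs into this interface (the names below
# are hypothetical; a real backend would execute the model instead of echoing
# its inputs back).
if __name__ == "__main__":
    class _EchoRep(BackendRep):
        def run(self, inputs: Any, **kwargs: Any) -> tuple[Any, ...]:
            return tuple(inputs)                      # a real backend would compute outputs here

    class _EchoBackend(Backend):
        @classmethod
        def prepare(cls, model: ModelProto, device: str = "CPU", **kwargs: Any) -> _EchoRep:
            super().prepare(model, device, **kwargs)  # base class runs the model checker
            return _EchoRep()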

View File

@ -0,0 +1,3 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0

View File

@ -0,0 +1,27 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import importlib
import inspect
import pkgutil
import sys
from types import ModuleType
def collect_sample_implementations() -> dict[str, str]:
dict_: dict[str, str] = {}
_recursive_scan(sys.modules[__name__], dict_)
return dict_
def _recursive_scan(package: ModuleType, dict_: dict[str, str]) -> None:
pkg_dir = package.__path__ # type: ignore
module_location = package.__name__
for _module_loader, name, ispkg in pkgutil.iter_modules(pkg_dir): # type: ignore
module_name = f"{module_location}.{name}" # Module/package
module = importlib.import_module(module_name)
dict_[name] = inspect.getsource(module)
if ispkg:
_recursive_scan(module, dict_)

View File

@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
def abs(input: np.ndarray) -> np.ndarray: # noqa: A001
return np.abs(input) # type: ignore[no-any-return]

View File

@ -0,0 +1,8 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
__all__ = ["BackendTest"]
# for backward compatibility
from onnx.backend.test.runner import Runner as BackendTest

View File

@ -0,0 +1,14 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import sys
from onnx.backend.test.case.base import Snippets
from onnx.backend.test.case.utils import import_recursive
def collect_snippets() -> dict[str, list[tuple[str, str]]]:
import_recursive(sys.modules[__name__])
return Snippets

View File

@ -0,0 +1,47 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import inspect
from collections import defaultdict
from textwrap import dedent
from typing import Any, ClassVar
import numpy as np
def process_snippet(op_name: str, name: str, export: Any) -> tuple[str, str]:
snippet_name = name[len("export_") :] or op_name.lower()
source_code = dedent(inspect.getsource(export))
# remove the function signature line
lines = source_code.splitlines()
assert lines[0] == "@staticmethod"
assert lines[1].startswith("def export")
return snippet_name, dedent("\n".join(lines[2:]))
Snippets: dict[str, list[tuple[str, str]]] = defaultdict(list)
class _Exporter(type):
exports: ClassVar[dict[str, list[tuple[str, str]]]] = defaultdict(list)
def __init__(
cls, name: str, bases: tuple[type[Any], ...], dct: dict[str, Any]
) -> None:
for k, v in dct.items():
if k.startswith("export"):
if not isinstance(v, staticmethod):
raise ValueError("Only staticmethods may be named export.*")
export = getattr(cls, k)
Snippets[name].append(process_snippet(name, k, export))
# export functions should call expect and so populate
# TestCases
np.random.seed(seed=0)
export()
super().__init__(name, bases, dct)
class Base(metaclass=_Exporter):
pass
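# A minimal sketch of the registration mechanism (the class below is hypothetical):
# any staticmethod whose name starts with "export" is executed at class-definition
# time and its source code is recorded in Snippets under the class name.
if __name__ == "__main__":
    class _Identity(Base):
        @staticmethod
        def export() -> None:
            x = np.array([1.0, 2.0])
            assert np.array_equal(np.copy(x), x)

    assert "_Identity" in Snippets   # the source of export() was captured above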

View File

@ -0,0 +1,78 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import sys
from typing import Sequence
import numpy as np
from onnx import ModelProto
from onnx.backend.test.case.test_case import TestCase
from onnx.backend.test.case.utils import import_recursive
_SimpleModelTestCases = []
def expect(
model: ModelProto,
inputs: Sequence[np.ndarray],
outputs: Sequence[np.ndarray],
name: str | None = None,
) -> None:
name = name or model.graph.name
_SimpleModelTestCases.append(
TestCase(
name=name,
model_name=model.graph.name,
url=None,
model_dir=None,
model=model,
data_sets=[(inputs, outputs)],
kind="simple",
rtol=1e-3,
atol=1e-7,
)
)
# BASE_URL = "https://download.onnxruntime.ai/onnx/models"
BASE_URL = "onnx/backend/test/data/light/light_%s.onnx"
def collect_testcases() -> list[TestCase]:
"""Collect model test cases defined in python/numpy code."""
real_model_testcases = []
model_tests = [
("test_bvlc_alexnet", "bvlc_alexnet", 1e-3, 1e-7),
("test_densenet121", "densenet121", 2e-3, 1e-7),
("test_inception_v1", "inception_v1", 1e-3, 1e-7),
("test_inception_v2", "inception_v2", 1e-3, 1e-7),
("test_resnet50", "resnet50", 1e-3, 1e-7),
("test_shufflenet", "shufflenet", 1e-3, 1e-7),
("test_squeezenet", "squeezenet", 1e-3, 1e-7),
("test_vgg19", "vgg19", 1e-3, 1e-7),
("test_zfnet512", "zfnet512", 1e-3, 1e-7),
]
for test_name, model_name, rtol, atol in model_tests:
url = BASE_URL % model_name
real_model_testcases.append(
TestCase(
name=test_name,
model_name=model_name,
url=url,
model_dir=None,
model=None,
data_sets=None,
kind="real",
rtol=rtol,
atol=atol,
)
)
import_recursive(sys.modules[__name__])
return real_model_testcases + _SimpleModelTestCases

View File

@ -0,0 +1,89 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from typing import Sequence
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class ExpandDynamicShape(Base):
@staticmethod
def export() -> None:
def make_graph(
node: onnx.helper.NodeProto,
input_shape: Sequence[int],
shape_shape: Sequence[int],
output_shape: Sequence[int],
) -> onnx.helper.GraphProto:
graph = onnx.helper.make_graph(
nodes=[node],
name="Expand",
inputs=[
onnx.helper.make_tensor_value_info(
"X", onnx.TensorProto.FLOAT, input_shape
),
onnx.helper.make_tensor_value_info(
"shape", onnx.TensorProto.INT64, shape_shape
),
],
outputs=[
onnx.helper.make_tensor_value_info(
"Y", onnx.TensorProto.FLOAT, output_shape
)
],
)
return graph
node = onnx.helper.make_node("Expand", ["X", "shape"], ["Y"], name="test")
input_shape = [1, 3, 1]
x = np.ones(input_shape, dtype=np.float32)
# 1st testcase
shape = np.array([3, 1], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model1")
# 2nd testcase
shape = np.array([1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model2")
# 3rd testcase
shape = np.array([3, 1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model3")
# 4th testcase
shape = np.array([3, 3, 1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model4")

View File

@ -0,0 +1,110 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN, ONNX_DOMAIN
class Gradient(Base):
@staticmethod
def export_gradient_scalar_add() -> None:
add_node = onnx.helper.make_node("Add", ["a", "b"], ["c"], name="my_add")
gradient_node = onnx.helper.make_node(
"Gradient",
["a", "b"],
["dc_da", "dc_db"],
name="my_gradient",
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
xs=["a", "b"],
y="c",
)
a = np.array(1.0).astype(np.float32)
b = np.array(2.0).astype(np.float32)
c = a + b
# dc / da = d(a+b) / da = 1
dc_da = np.array(1).astype(np.float32)
# dc / db = d(a+b) / db = 1
dc_db = np.array(1).astype(np.float32)
graph = onnx.helper.make_graph(
nodes=[add_node, gradient_node],
name="GradientOfAdd",
inputs=[
onnx.helper.make_tensor_value_info("a", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("b", onnx.TensorProto.FLOAT, []),
],
outputs=[
onnx.helper.make_tensor_value_info("c", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("dc_da", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("dc_db", onnx.TensorProto.FLOAT, []),
],
)
opsets = [
onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),
onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1),
]
model = onnx.helper.make_model_gen_version(
graph, producer_name="backend-test", opset_imports=opsets
)
expect(
model, inputs=[a, b], outputs=[c, dc_da, dc_db], name="test_gradient_of_add"
)
@staticmethod
def export_gradient_scalar_add_and_mul() -> None:
add_node = onnx.helper.make_node("Add", ["a", "b"], ["c"], name="my_add")
mul_node = onnx.helper.make_node("Mul", ["c", "a"], ["d"], name="my_mul")
gradient_node = onnx.helper.make_node(
"Gradient",
["a", "b"],
["dd_da", "dd_db"],
name="my_gradient",
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
xs=["a", "b"],
y="d",
)
a = np.array(1.0).astype(np.float32)
b = np.array(2.0).astype(np.float32)
c = a + b
# d = a * c = a * (a + b)
d = a * c
# dd / da = d(a*a+a*b) / da = 2 * a + b
dd_da = (2 * a + b).astype(np.float32)
# dd / db = d(a*a+a*b) / db = a
dd_db = a
graph = onnx.helper.make_graph(
nodes=[add_node, mul_node, gradient_node],
name="GradientOfTwoOperators",
inputs=[
onnx.helper.make_tensor_value_info("a", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("b", onnx.TensorProto.FLOAT, []),
],
outputs=[
onnx.helper.make_tensor_value_info("d", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("dd_da", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("dd_db", onnx.TensorProto.FLOAT, []),
],
)
opsets = [
onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),
onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1),
]
model = onnx.helper.make_model_gen_version(
graph, producer_name="backend-test", opset_imports=opsets
)
expect(
model,
inputs=[a, b],
outputs=[d, dd_da, dd_db],
name="test_gradient_of_add_and_mul",
)

View File

@ -0,0 +1,457 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import typing
import numpy as np
import onnx
from onnx import TensorProto
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
def SequenceEmptyImpl() -> list[np.ndarray | None]:
return []
def SequenceConstructImpl(*tensors: np.ndarray) -> list[np.ndarray]:
return list(tensors)
def SequenceInsertImpl(
sequence: list[np.ndarray], tensor: np.ndarray, position: int | None = None
) -> list[np.ndarray]:
if position is None:
position = len(sequence)
sequence.insert(position, tensor)
return sequence
def SequenceAtImpl(sequence: list[np.ndarray], position: int) -> np.ndarray:
return sequence[position]
def SequenceEraseImpl(
sequence: list[np.ndarray], position: int | None = None
) -> list[np.ndarray | None]:
if position is None:
position = -1
del sequence[position]
return sequence
def SequenceLengthImpl(sequence: list[np.ndarray]) -> np.int64:
return np.int64(len(sequence))
def SplitToSequenceImpl(
tensor: np.ndarray,
split: int | list[int] | None = None,
axis: int = 0,
keepdims: int = 1,
) -> list[np.ndarray]:
dim_size = tensor.shape[axis]
if split is None:
split = 1
split_indices = [
i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size
]
if not keepdims:
results = np.array_split(tensor, split_indices, axis)
return [np.squeeze(res, axis) for res in results]
if np.isscalar(split):
split_indices = [
i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size
] # type: ignore
else:
split_indices = np.cumsum(split) + 1
return np.array_split(tensor, split_indices, axis) # type: ignore
def ConcatFromSequenceImpl(
sequence: list[np.ndarray], axis: int, new_axis: int | None = 0
) -> np.ndarray:
if not new_axis:
return np.concatenate(sequence, axis)
return np.stack(sequence, axis)
class Sequence(Base):
@staticmethod
def export() -> None:
def make_graph(
nodes: list[onnx.helper.NodeProto],
input_shapes: list[typing.Sequence[str | int] | None],
output_shapes: list[typing.Sequence[str | int] | None],
input_names: list[str],
output_names: list[str],
input_types: list[TensorProto.DataType],
output_types: list[TensorProto.DataType],
initializers: list[TensorProto] | None = None,
) -> onnx.helper.GraphProto:
graph = onnx.helper.make_graph(
nodes=nodes,
name="Sequence",
inputs=[
onnx.helper.make_tensor_value_info(name, input_type, input_shape)
for name, input_type, input_shape in zip(
input_names, input_types, input_shapes
)
],
outputs=[
onnx.helper.make_tensor_value_info(name, output_type, output_shape)
for name, output_type, output_shape in zip(
output_names, output_types, output_shapes
)
],
initializer=initializers,
)
return graph
# 1st testcase - insert and at.
# 1. SequenceEmpty: -> []
# 2. SequenceInsert(x): -> [x]
# 3. SequenceInsert(y): -> [x, y]
# 4. SequenceInsert(z, 1): -> [x, z, y]
# 5. SequenceAt(2): -> y
seq_empty_node = onnx.helper.make_node("SequenceEmpty", [], ["Seq_empty"])
seq_insert_node = onnx.helper.make_node(
"SequenceInsert", ["Seq_empty", "X"], ["Seq_1"]
)
seq_insert_node2 = onnx.helper.make_node(
"SequenceInsert", ["Seq_1", "Y"], ["Seq_2"]
)
seq_insert_node3 = onnx.helper.make_node(
"SequenceInsert", ["Seq_2", "Z", "pos"], ["Seq_3"]
)
seq_at_node = onnx.helper.make_node("SequenceAt", ["Seq_3", "pos_at"], ["out"])
x_shape = [2, 3, 4]
y_shape = [1, 3, 4]
z_shape = [3, 3, 4]
out_shape = [None, 3, 4]
x = np.ones(x_shape, dtype=np.float32)
y = np.zeros(y_shape, dtype=np.float32)
z = np.ones(z_shape, dtype=np.float32) * 2
pos_val = 1
pos_at_val = 2
out = SequenceEmptyImpl()
out = SequenceInsertImpl(out, x)
out = SequenceInsertImpl(out, y)
out = SequenceInsertImpl(out, z, pos_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, y)
pos = onnx.helper.make_tensor("pos", TensorProto.INT64, (), (pos_val,))
pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
graph = make_graph(
[
seq_empty_node,
seq_insert_node,
seq_insert_node2,
seq_insert_node3,
seq_at_node,
],
[x_shape, y_shape, z_shape, [], []], # type: ignore
[out_shape], # type: ignore
["X", "Y", "Z", "pos", "pos_at"],
["out"],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2, # type: ignore
[onnx.TensorProto.FLOAT],
[pos, pos_at],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model1")
# 2nd testcase - erase and at.
# 1. SequenceConstruct(x, y, z): -> [x, y, z]
# 2. SequenceErase(1): -> [x, z]
# 3. SequenceAt(1): -> z
seq_construct_node = onnx.helper.make_node(
"SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
)
seq_erase_node = onnx.helper.make_node(
"SequenceErase", ["seq_1", "pos_erase"], ["seq_2"]
)
seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_2", "pos_at"], ["out"])
tensor_shape = [2, 3, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
pos_erase_val = 1
pos_at_val = 1
out = SequenceConstructImpl(x, y, z)
out = SequenceEraseImpl(out, pos_erase_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, z)
pos_erase = onnx.helper.make_tensor(
"pos_erase", TensorProto.INT64, (), (pos_erase_val,)
)
pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
graph = make_graph(
[seq_construct_node, seq_erase_node, seq_at_node],
[tensor_shape, tensor_shape, tensor_shape, [], []], # type: ignore
[tensor_shape], # type: ignore
["X", "Y", "Z", "pos_erase", "pos_at"],
["out"],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2, # type: ignore
[onnx.TensorProto.FLOAT],
[pos_erase, pos_at],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model2")
# 3rd testcase - erase, insert and at, with negative index value.
# 1. SequenceConstruct(x, y, z): -> [x, y, z]
# 2. SequenceErase(-3): -> [y, z]
# 3. SequenceInsert(x, -1): -> [y, x, z]
# 4. SequenceAt(-1): -> z
seq_construct_node = onnx.helper.make_node(
"SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
)
seq_erase_node = onnx.helper.make_node(
"SequenceErase", ["seq_1", "pos_erase"], ["seq_2"]
)
seq_insert_node = onnx.helper.make_node(
"SequenceInsert", ["seq_2", "X", "pos_insert"], ["seq_3"]
)
seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_3", "pos_at"], ["out"])
tensor_shape = [2, 3, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
pos_erase_val = -3
pos_insert_val = -1
pos_at_val = -1
out = SequenceConstructImpl(x, y, z)
out = SequenceEraseImpl(out, pos_erase_val)
out = SequenceInsertImpl(out, x, pos_insert_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, z)
pos_erase = onnx.helper.make_tensor(
"pos_erase", TensorProto.INT64, (), (pos_erase_val,)
)
pos_insert = onnx.helper.make_tensor(
"pos_insert", TensorProto.INT64, (), (pos_insert_val,)
)
pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
graph = make_graph(
[seq_construct_node, seq_erase_node, seq_insert_node, seq_at_node],
[tensor_shape, tensor_shape, tensor_shape, [], [], []], # type: ignore
[tensor_shape], # type: ignore
["X", "Y", "Z", "pos_erase", "pos_insert", "pos_at"],
["out"],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 3, # type: ignore
[onnx.TensorProto.FLOAT],
[pos_erase, pos_insert, pos_at],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model3")
# 4th testcase - concat
seq_construct_node = onnx.helper.make_node(
"SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
)
seq_concat_node = onnx.helper.make_node(
"ConcatFromSequence", ["seq_1"], ["out"], axis=1
)
tensor_shape = [2, 3, 4]
concat_out_shape = [2, None, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
out = SequenceConstructImpl(x, y, z)
concat_out = ConcatFromSequenceImpl(out, 1)
graph = make_graph(
[seq_construct_node, seq_concat_node],
[tensor_shape] * 3, # type: ignore
[concat_out_shape], # type: ignore
["X", "Y", "Z"],
["out"],
[onnx.TensorProto.FLOAT] * 3, # type: ignore
[onnx.TensorProto.FLOAT],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(
model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model4"
)
# 5th testcase - concat with new_axis = 1
seq_construct_node = onnx.helper.make_node(
"SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
)
seq_concat_node = onnx.helper.make_node(
"ConcatFromSequence", ["seq_1"], ["out"], axis=-1, new_axis=1
)
tensor_shape = [2, 3, 4]
concat_out_shape = [2, 3, 4, 3]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
out = SequenceConstructImpl(x, y, z)
concat_out = ConcatFromSequenceImpl(out, -1, 1)
graph = make_graph(
[seq_construct_node, seq_concat_node],
[tensor_shape] * 3, # type: ignore
[concat_out_shape], # type: ignore
["X", "Y", "Z"],
["out"],
[onnx.TensorProto.FLOAT] * 3, # type: ignore
[onnx.TensorProto.FLOAT],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(
model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model5"
)
# 6th testcase - split and len
seq_split_node = onnx.helper.make_node(
"SplitToSequence", ["X"], ["seq_1"], axis=-1
)
seq_len_node = onnx.helper.make_node("SequenceLength", ["seq_1"], ["len"])
tensor_shape = [2, 3, 4]
len_shape = [] # type: ignore
x = np.ones(tensor_shape, dtype=np.float32)
out = SplitToSequenceImpl(x, axis=-1)
out = SequenceLengthImpl(out)
assert np.array_equal(out, np.int64(4))
graph = onnx.helper.make_graph(
nodes=[seq_split_node, seq_len_node],
name="Sequence",
inputs=[
onnx.helper.make_tensor_value_info(
"X", onnx.TensorProto.FLOAT, tensor_shape
)
],
outputs=[
onnx.helper.make_tensor_value_info(
"len", onnx.TensorProto.INT64, len_shape
)
],
) # type: ignore
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x], outputs=[out], name="test_sequence_model6")
# 7th testcase - split with keepdims=0, and SequenceAt
seq_split_node = onnx.helper.make_node(
"SplitToSequence", ["X"], ["seq_1"], axis=0, keepdims=0
)
seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_1", "pos_at"], ["out"])
tensor_shape = [2, 3, 4]
out_shape = [3, 4]
x = np.random.rand(*tensor_shape)
pos_at_val = 1
out = SplitToSequenceImpl(x, axis=0, keepdims=0)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, x[pos_at_val])
pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
graph = make_graph(
[seq_split_node, seq_at_node],
[tensor_shape, []], # type: ignore
[out_shape], # type: ignore
["X", "pos_at"],
["out"],
[onnx.TensorProto.DOUBLE, onnx.TensorProto.INT64],
[onnx.TensorProto.DOUBLE],
[pos_at],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x], outputs=[out], name="test_sequence_model7")
# 8th testcase - split zero length
seq_split_node = onnx.helper.make_node(
"SplitToSequence", ["X", "Splits"], ["seq_1"]
)
seq_len_node = onnx.helper.make_node("SequenceLength", ["seq_1"], ["len"])
tensor_shape = ["n"] # type: ignore
splits_shape = [3] # type: ignore
x = np.array([]).astype(np.float32)
splits = np.array([0, 0, 0]).astype(np.int64)
out_len = np.int64(3)
graph = onnx.helper.make_graph(
nodes=[seq_split_node, seq_len_node],
name="Sequence",
inputs=[
onnx.helper.make_tensor_value_info(
"X", onnx.TensorProto.FLOAT, tensor_shape
), # type: ignore
onnx.helper.make_tensor_value_info(
"Splits", onnx.TensorProto.INT64, splits_shape
),
], # type: ignore
outputs=[
onnx.helper.make_tensor_value_info(
"len", onnx.TensorProto.INT64, len_shape
)
],
) # type: ignore
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(
model, inputs=[x, splits], outputs=[out_len], name="test_sequence_model8"
)

View File

@ -0,0 +1,42 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class ShrinkTest(Base):
@staticmethod
def export() -> None:
node = onnx.helper.make_node(
"Shrink",
["x"],
["y"],
lambd=1.5,
bias=1.5,
)
graph = onnx.helper.make_graph(
nodes=[node],
name="Shrink",
inputs=[
onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [5])
],
outputs=[
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [5])
],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)
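# Shrink with lambd=1.5 and bias=1.5: y = x + bias if x < -lambd, y = x - bias
# if x > lambd, and 0 otherwise; e.g. -2.0 -> -0.5, -1.0 -> 0.0, 2.0 -> 0.5.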
y = np.array([-0.5, 0.0, 0.0, 0.0, 0.5], dtype=np.float32)
expect(model, inputs=[x], outputs=[y], name="test_shrink")

View File

@ -0,0 +1,36 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class SingleSign(Base):
@staticmethod
def export() -> None:
node = onnx.helper.make_node("Sign", ["x"], ["y"], name="test")
x = np.array([-1.0, 4.5, -4.5, 3.1, 0.0, 2.4, -5.5]).astype(np.float32)
y = np.array([-1.0, 1.0, -1.0, 1.0, 0.0, 1.0, -1.0]).astype(np.float32)
graph = onnx.helper.make_graph(
nodes=[node],
name="SingleSign",
inputs=[
onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [7])
],
outputs=[
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [7])
],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x], outputs=[y], name="test_sign_model")

View File

@ -0,0 +1,36 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class SingleRelu(Base):
@staticmethod
def export() -> None:
node = onnx.helper.make_node("Relu", ["x"], ["y"], name="test")
graph = onnx.helper.make_graph(
nodes=[node],
name="SingleRelu",
inputs=[
onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1, 2])
],
outputs=[
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1, 2])
],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
x = np.random.randn(1, 2).astype(np.float32)
y = np.maximum(x, 0)
expect(model, inputs=[x], outputs=[y], name="test_single_relu_model")

View File

@ -0,0 +1,203 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from typing import Sequence
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class NormalizeStrings(Base):
@staticmethod
def export() -> None:
def make_graph(
node: onnx.helper.NodeProto,
input_shape: Sequence[int],
output_shape: Sequence[int],
) -> onnx.helper.GraphProto:
graph = onnx.helper.make_graph(
nodes=[node],
name="StringNormalizer",
inputs=[
onnx.helper.make_tensor_value_info(
"x", onnx.TensorProto.STRING, input_shape
)
],
outputs=[
onnx.helper.make_tensor_value_info(
"y", onnx.TensorProto.STRING, output_shape
)
],
)
return graph
# 1st model_monday_casesensintive_nochangecase
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
is_case_sensitive=1,
stopwords=stopwords,
)
x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
y = np.array(["tuesday", "wednesday", "thursday"]).astype(object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_casesensintive_nochangecase",
)
# 2nd model_nostopwords_nochangecase
node = onnx.helper.make_node(
"StringNormalizer", inputs=["x"], outputs=["y"], is_case_sensitive=1
)
x = np.array(["monday", "tuesday"]).astype(object)
y = x
graph = make_graph(node, [2], [2])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_nostopwords_nochangecase",
)
# 3rd model_monday_casesensintive_lower
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
case_change_action="LOWER",
is_case_sensitive=1,
stopwords=stopwords,
)
x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
y = np.array(["tuesday", "wednesday", "thursday"]).astype(object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_casesensintive_lower",
)
# 4 model_monday_casesensintive_upper
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
case_change_action="UPPER",
is_case_sensitive=1,
stopwords=stopwords,
)
x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
y = np.array(["TUESDAY", "WEDNESDAY", "THURSDAY"]).astype(object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_casesensintive_upper",
)
# 5 monday_insensintive_upper_twodim
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
case_change_action="UPPER",
stopwords=stopwords,
)
input_shape = [1, 6]
output_shape = [1, 4]
x = (
np.array(
["Monday", "tuesday", "wednesday", "Monday", "tuesday", "wednesday"]
)
.astype(object)
.reshape(input_shape)
)
y = (
np.array(["TUESDAY", "WEDNESDAY", "TUESDAY", "WEDNESDAY"])
.astype(object)
.reshape(output_shape)
)
graph = make_graph(node, input_shape, output_shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_insensintive_upper_twodim",
)
# 6 monday_empty_output
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
case_change_action="UPPER",
is_case_sensitive=0,
stopwords=stopwords,
)
x = np.array(["monday", "monday"]).astype(object)
y = np.array([""]).astype(object)
graph = make_graph(node, [2], [1])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_empty_output",
)

View File

@ -0,0 +1,428 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import subprocess
import sys
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Sequence
import numpy as np
import onnx
from onnx.backend.test.case.test_case import TestCase
from onnx.backend.test.case.utils import import_recursive
from onnx.onnx_pb import (
AttributeProto,
FunctionProto,
GraphProto,
ModelProto,
NodeProto,
TensorProto,
TypeProto,
)
_NodeTestCases = []
_TargetOpType = None
_DiffOpTypes = None
_existing_names: dict[str, onnx.NodeProto] = {}
def _rename_edges_helper(
internal_node: NodeProto,
rename_helper: Callable[[str], str],
attribute_map: dict[str, AttributeProto],
prefix: str,
) -> NodeProto:
new_node = NodeProto()
new_node.CopyFrom(internal_node)
new_node.ClearField("input")
new_node.ClearField("output")
new_node.ClearField("attribute")
for internal_name in internal_node.input:
new_node.input.append(rename_helper(internal_name))
for internal_name in internal_node.output:
new_node.output.append(rename_helper(internal_name))
for attr in internal_node.attribute:
if attr.HasField("ref_attr_name"):
if attr.ref_attr_name in attribute_map:
new_attr = AttributeProto()
new_attr.CopyFrom(attribute_map[attr.ref_attr_name]) # type: ignore
new_attr.name = attr.name
new_node.attribute.extend([new_attr])
else:
new_attr = AttributeProto()
new_attr.CopyFrom(attr)
if attr.type == AttributeProto.GRAPH:
new_graph = new_attr.g
sg_rename = {}
for in_desc in new_graph.input:
sg_rename[in_desc.name] = in_desc.name = prefix + in_desc.name
for out_desc in new_graph.output:
sg_rename[out_desc.name] = out_desc.name = prefix + out_desc.name
for init_desc in new_graph.initializer:
sg_rename[init_desc.name] = init_desc.name = prefix + init_desc.name
for sparse_init_desc in new_graph.sparse_initializer:
sg_rename[sparse_init_desc.values.name] = (
sparse_init_desc.values.name
) = (prefix + sparse_init_desc.values.name)
for sparse_init_desc in new_graph.sparse_initializer:
sg_rename[sparse_init_desc.indices.name] = (
sparse_init_desc.indices.name
) = (prefix + sparse_init_desc.indices.name)
def subgraph_rename_helper(name: str) -> Any:
if name in sg_rename: # noqa: B023
return sg_rename[name] # noqa: B023
return rename_helper(name)
new_nodes = [
_rename_edges_helper(
node_desc, subgraph_rename_helper, attribute_map, prefix
)
for node_desc in new_graph.node
]
new_graph.ClearField("node")
new_graph.node.extend(new_nodes)
new_node.attribute.extend([new_attr])
return new_node
# FIXME(TMVector): Any reason we can't get rid of this and use the C++ helper directly?
def function_expand_helper(
node: NodeProto, function_proto: FunctionProto, op_prefix: str
) -> list[NodeProto]:
io_names_map = {}
attribute_map = {a.name: a for a in node.attribute}
for idx in range(len(function_proto.input)):
io_names_map[function_proto.input[idx]] = (
node.input[idx] if idx in range(len(node.input)) else ""
)
for idx in range(len(function_proto.output)):
# Even if the node has been created with optional outputs missing, we
# can't assume that the function body handles this correctly, such as in
# the case that output is also an intermediate value.
# So we only add a name mapping if the output is present. An internal
# name will be generated if the missing output is used, the same as any
# other internal tensor.
if idx in range(len(node.output)) and node.output[idx] != "":
io_names_map[function_proto.output[idx]] = node.output[idx]
def rename_helper(internal_name: str) -> Any:
if internal_name in io_names_map:
return io_names_map[internal_name]
elif internal_name == "":
return ""
return op_prefix + internal_name
new_node_list = [
_rename_edges_helper(internal_node, rename_helper, attribute_map, op_prefix)
for internal_node in function_proto.node
]
return new_node_list
def function_testcase_helper(
node: NodeProto, input_types: list[TypeProto], name: str
) -> tuple[list[tuple[list[NodeProto], Any]], int]:
test_op = node.op_type
op_prefix = test_op + "_" + name + "_expanded_function_"
schema = onnx.defs.get_schema(test_op, domain=node.domain)
# An op schema may have several function bodies, one per opset version.
# These opset versions include the op's since_version, plus any other opset version
# for which the op needs to be defined separately.
function_protos = []
for opset_version in schema.function_opset_versions: # type: ignore
function_proto_str = schema.get_function_with_opset_version(opset_version) # type: ignore
function_proto = FunctionProto()
function_proto.ParseFromString(function_proto_str)
function_protos.append(function_proto)
for opset_version in schema.context_dependent_function_opset_versions: # type: ignore
function_proto_str = schema.get_context_dependent_function_with_opset_version( # type: ignore
opset_version,
node.SerializeToString(),
[t.SerializeToString() for t in input_types],
)
function_proto = FunctionProto()
function_proto.ParseFromString(function_proto_str)
function_protos.append(function_proto)
expanded_tests = []
for function_proto in function_protos:
for attr in schema.attributes:
if attr in [a.name for a in node.attribute]:
continue
if schema.attributes[attr].default_value:
node.attribute.extend([schema.attributes[attr].default_value])
# function_proto.attributes
node_list = function_expand_helper(node, function_proto, op_prefix)
expanded_tests.append((node_list, function_proto.opset_import))
return expanded_tests, schema.since_version
def _extract_value_info(
input: list[Any] | np.ndarray | None,
name: str,
type_proto: TypeProto | None = None,
) -> onnx.ValueInfoProto:
if type_proto is None:
if input is None:
raise NotImplementedError(
"_extract_value_info: both input and type_proto arguments cannot be None."
)
elif isinstance(input, list):
elem_type = onnx.helper.np_dtype_to_tensor_dtype(input[0].dtype)
shape = None
tensor_type_proto = onnx.helper.make_tensor_type_proto(elem_type, shape)
type_proto = onnx.helper.make_sequence_type_proto(tensor_type_proto)
elif isinstance(input, TensorProto):
elem_type = input.data_type
shape = tuple(input.dims)
type_proto = onnx.helper.make_tensor_type_proto(elem_type, shape)
else:
elem_type = onnx.helper.np_dtype_to_tensor_dtype(input.dtype)
shape = input.shape
type_proto = onnx.helper.make_tensor_type_proto(elem_type, shape)
return onnx.helper.make_value_info(name, type_proto)
def _make_test_model_gen_version(graph: GraphProto, **kwargs: Any) -> ModelProto:
(
latest_onnx_version,
latest_ml_version,
latest_training_version,
) = onnx.helper.VERSION_TABLE[-1][
2:5
] # type: ignore
if "opset_imports" in kwargs:
for opset in kwargs["opset_imports"]:
# If the test model uses an unreleased opset version (latest_version+1),
# directly use make_model to create a model with the latest ir version
if (
(
(opset.domain in {"", "ai.onnx"})
and opset.version == latest_onnx_version + 1
)
or (
opset.domain == "ai.onnx.ml"
and opset.version == latest_ml_version + 1
)
or (
(
opset.domain
in {"ai.onnx.training version", "ai.onnx.preview.training"}
)
and opset.version == latest_training_version + 1
)
):
return onnx.helper.make_model(graph, **kwargs)
# Otherwise, find and use the corresponding ir version according to given opset version
return onnx.helper.make_model_gen_version(graph, **kwargs)
# In the case of ops with optional inputs and outputs, node_op.input and node_op.output indicate
# which inputs/outputs are present and which are omitted. However, the parameter inputs
# and outputs of this function include values only for inputs/outputs that are present.
# E.g., for an op with 3 inputs, if the second parameter is optional and we wish to omit it,
# node_op.inputs would look like ["Param1", "", "Param3"], while inputs would look like
# [input-1-value, input-3-value]
# Instead of creating models with the latest opset version, this function now generates
# models for the op's since_version by default, so every model keeps the same opset
# version across opset changes. In addition, users can specify "use_max_opset_version"
# to generate models for the latest opset version supported before the targeted opset version.
def expect(
node_op: onnx.NodeProto,
inputs: Sequence[np.ndarray | TensorProto],
outputs: Sequence[np.ndarray | TensorProto],
name: str,
**kwargs: Any,
) -> None:
# skip if the node_op's op_type is not the same as the given one
if _TargetOpType and node_op.op_type != _TargetOpType:
return
if _DiffOpTypes is not None and node_op.op_type.lower() not in _DiffOpTypes:
return
if name in _existing_names:
raise ValueError(
f"Name {name!r} is already using by one test case for node type {node_op.op_type!r}."
)
_existing_names[name] = node_op
# in case node_op is modified
node = deepcopy(node_op)
present_inputs = [x for x in node.input if (x != "")]
present_outputs = [x for x in node.output if (x != "")]
input_type_protos = [None] * len(inputs)
if "input_type_protos" in kwargs:
input_type_protos = kwargs["input_type_protos"]
del kwargs["input_type_protos"]
output_type_protos = [None] * len(outputs)
if "output_type_protos" in kwargs:
output_type_protos = kwargs["output_type_protos"]
del kwargs["output_type_protos"]
inputs_vi = [
_extract_value_info(arr, arr_name, input_type)
for arr, arr_name, input_type in zip(inputs, present_inputs, input_type_protos)
]
outputs_vi = [
_extract_value_info(arr, arr_name, output_type)
for arr, arr_name, output_type in zip(
outputs, present_outputs, output_type_protos
)
]
graph = onnx.helper.make_graph(
nodes=[node], name=name, inputs=inputs_vi, outputs=outputs_vi
)
kwargs["producer_name"] = "backend-test"
if "opset_imports" not in kwargs:
# To make sure the model will be produced with the same opset_version after opset changes
# By default, it uses since_version as opset_version for produced models
produce_opset_version = onnx.defs.get_schema(
node.op_type, domain=node.domain
).since_version
kwargs["opset_imports"] = [
onnx.helper.make_operatorsetid(node.domain, produce_opset_version)
]
model = _make_test_model_gen_version(graph, **kwargs)
_NodeTestCases.append(
TestCase(
name=name,
model_name=name,
url=None,
model_dir=None,
model=model,
data_sets=[(inputs, outputs)],
kind="node",
rtol=1e-3,
atol=1e-7,
)
)
# Create list of types for node.input, filling a default TypeProto for missing inputs:
# E.g. merge(["x", "", "y"], [x-value-info, y-value-info]) will return [x-type, default-type, y-type]
def merge(
node_inputs: list[str], present_value_info: list[onnx.ValueInfoProto]
) -> list[TypeProto]:
if node_inputs:
if node_inputs[0] != "":
return [
present_value_info[0].type,
*merge(node_inputs[1:], present_value_info[1:]),
]
else:
return [TypeProto(), *merge(node_inputs[1:], present_value_info)]
return []
merged_types = merge(list(node.input), inputs_vi)
(
expanded_tests,
since_version,
) = function_testcase_helper(node, merged_types, name)
for expanded_function_nodes, func_opset_import in expanded_tests:
kwargs["producer_name"] = "backend-test"
# TODO: if kwargs["opset_imports"] already exists, only generate test case for the opset version.
# replace opset versions with what are specified in function proto
if "opset_imports" not in kwargs:
kwargs["opset_imports"] = func_opset_import
else:
for opset_import in func_opset_import:
matches = [
opset
for opset in kwargs["opset_imports"]
if opset.domain == opset_import.domain
]
if matches:
matches[0].version = opset_import.version
else:
kwargs["opset_imports"].append(opset_import)
onnx_ai_opset_version = ""
if "opset_imports" in kwargs:
onnx_ai_opset_imports = [
oi for oi in kwargs["opset_imports"] if oi.domain in ("", "ai.onnx")
]
if len(onnx_ai_opset_imports) == 1:
onnx_ai_opset_version = onnx_ai_opset_imports[0].version
function_test_name = name + "_expanded"
if onnx_ai_opset_version and onnx_ai_opset_version != since_version:
function_test_name += f"_ver{onnx_ai_opset_version}"
graph = onnx.helper.make_graph(
nodes=expanded_function_nodes,
name=function_test_name,
inputs=inputs_vi,
outputs=outputs_vi,
)
model = _make_test_model_gen_version(graph, **kwargs)
_NodeTestCases.append(
TestCase(
name=function_test_name,
model_name=function_test_name,
url=None,
model_dir=None,
model=model,
data_sets=[(inputs, outputs)],
kind="node",
rtol=1e-3,
atol=1e-7,
)
)
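# A typical node test module calls expect() as in the following sketch (the tensor
# values and the test name are hypothetical; real cases live under
# onnx/backend/test/case/node/):
#
#     node = onnx.helper.make_node("Abs", inputs=["x"], outputs=["y"])
#     x = np.array([-1.0, 2.0], dtype=np.float32)
#     expect(node, inputs=[x], outputs=[np.abs(x)], name="test_abs_example")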
def collect_testcases(op_type: str) -> list[TestCase]:
"""Collect node test cases"""
# only keep those tests related to this operator
global _TargetOpType # noqa: PLW0603
_TargetOpType = op_type
import_recursive(sys.modules[__name__])
return _NodeTestCases
def collect_diff_testcases() -> list[TestCase]:
"""Collect node test cases which are different from the main branch"""
global _DiffOpTypes # noqa: PLW0603
_DiffOpTypes = get_diff_op_types()
import_recursive(sys.modules[__name__])
return _NodeTestCases
def get_diff_op_types():
cwd_path = Path.cwd()
# git fetch first for git diff on GitHub Action
subprocess.run(
["git", "fetch", "origin", "main:main"],
cwd=cwd_path,
capture_output=True,
check=True,
)
# obtain list of added or modified files in this PR
obtain_diff = subprocess.Popen(
["git", "diff", "--name-only", "--diff-filter=AM", "origin/main", "HEAD"],
cwd=cwd_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdoutput, _ = obtain_diff.communicate()
diff_list = stdoutput.split()
changed_op_types = []
for file in diff_list:
file_name = file.decode("utf-8")
if file_name.startswith("onnx/backend/test/case/node/") and file_name.endswith(
".py"
):
changed_op_types.append(file_name.split("/")[-1].replace(".py", ""))
return changed_op_types

Some files were not shown because too many files have changed in this diff.