I am done

This commit is contained in:
2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

View File

@ -0,0 +1,6 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
# This file needs to be here to enable mypy type checking for this folder.
# It doesn't make this a python module for installation purposes as a __init__.pyi would.

View File

@ -0,0 +1,232 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import io
import os
import pathlib
import tempfile
import unittest
import google.protobuf.message
import google.protobuf.text_format
import parameterized
import onnx
from onnx import serialization
def _simple_model() -> onnx.ModelProto:
    """Build a minimal ModelProto with just enough fields for the IO tests."""
    proto = onnx.ModelProto()
    proto.ir_version = onnx.IR_VERSION
    proto.producer_name = "onnx-test"
    proto.graph.name = "test"
    return proto
def _simple_tensor() -> onnx.TensorProto:
    """Build a small 2x3x4 float tensor with deterministic values."""
    values = [i + 0.5 for i in range(24)]
    return onnx.helper.make_tensor(
        name="test-tensor",
        data_type=onnx.TensorProto.FLOAT,
        dims=(2, 3, 4),
        vals=values,
    )
@parameterized.parameterized_class(
    [
        {"format": "protobuf"},
        {"format": "textproto"},
        {"format": "json"},
        {"format": "onnxtxt"},
    ]
)
class TestIO(unittest.TestCase):
    """Round-trip a ModelProto through every registered serialization format."""

    format: str

    def test_load_model_when_input_is_bytes(self) -> None:
        expected = _simple_model()
        serialized = serialization.registry.get(self.format).serialize_proto(expected)
        reloaded = onnx.load_model_from_string(serialized, format=self.format)
        self.assertEqual(expected, reloaded)

    def test_save_and_load_model_when_input_has_read_function(self) -> None:
        expected = _simple_model()
        # A bytes payload handed to `save_model` is always interpreted as a
        # serialized *binary* protobuf, regardless of `format`; the `format`
        # argument only controls how the destination file is written.
        serialized = serialization.registry.get("protobuf").serialize_proto(expected)
        buffer = io.BytesIO()
        onnx.save_model(serialized, buffer, format=self.format)
        reloaded = onnx.load_model(io.BytesIO(buffer.getvalue()), format=self.format)
        self.assertEqual(expected, reloaded)

    def test_save_and_load_model_when_input_is_file_name(self) -> None:
        expected = _simple_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, "model.onnx")
            onnx.save_model(expected, path, format=self.format)
            self.assertEqual(expected, onnx.load_model(path, format=self.format))

    def test_save_and_load_model_when_input_is_pathlike(self) -> None:
        expected = _simple_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = pathlib.Path(temp_dir, "model.onnx")
            onnx.save_model(expected, path, format=self.format)
            self.assertEqual(expected, onnx.load_model(path, format=self.format))
@parameterized.parameterized_class(
    [
        {"format": "protobuf"},
        {"format": "textproto"},
        {"format": "json"},
        # The onnxtxt format does not support saving/loading tensors yet
    ]
)
class TestIOTensor(unittest.TestCase):
    """Round-trip a TensorProto through the formats that support tensors."""

    format: str

    def test_load_tensor_when_input_is_bytes(self) -> None:
        expected = _simple_tensor()
        serialized = serialization.registry.get(self.format).serialize_proto(expected)
        reloaded = onnx.load_tensor_from_string(serialized, format=self.format)
        self.assertEqual(expected, reloaded)

    def test_save_and_load_tensor_when_input_has_read_function(self) -> None:
        # Destination/source are file-like objects rather than paths.
        expected = _simple_tensor()
        buffer = io.BytesIO()
        onnx.save_tensor(expected, buffer, format=self.format)
        reloaded = onnx.load_tensor(io.BytesIO(buffer.getvalue()), format=self.format)
        self.assertEqual(expected, reloaded)

    def test_save_and_load_tensor_when_input_is_file_name(self) -> None:
        # Destination/source are plain string paths.
        expected = _simple_tensor()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, "model.onnx")
            onnx.save_tensor(expected, path, format=self.format)
            self.assertEqual(expected, onnx.load_tensor(path, format=self.format))

    def test_save_and_load_tensor_when_input_is_pathlike(self) -> None:
        # Destination/source are os.PathLike objects.
        expected = _simple_tensor()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = pathlib.Path(temp_dir, "model.onnx")
            onnx.save_tensor(expected, path, format=self.format)
            self.assertEqual(expected, onnx.load_tensor(path, format=self.format))
class TestSaveAndLoadFileExtensions(unittest.TestCase):
    """Format selection: file extension vs. the explicit `format` argument."""

    def test_save_model_picks_correct_format_from_extension(self) -> None:
        expected = _simple_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, "model.textproto")
            # Without an explicit format, the .textproto extension decides.
            onnx.save_model(expected, path)
            self.assertEqual(expected, onnx.load_model(path, format="textproto"))

    def test_load_model_picks_correct_format_from_extension(self) -> None:
        expected = _simple_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, "model.textproto")
            onnx.save_model(expected, path, format="textproto")
            # Without an explicit format, the .textproto extension decides.
            self.assertEqual(expected, onnx.load_model(path))

    def test_save_model_uses_format_when_it_is_specified(self) -> None:
        expected = _simple_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, "model.textproto")
            # An explicit `format` wins over the file extension.
            onnx.save_model(expected, path, format="protobuf")
            self.assertEqual(expected, onnx.load_model(path, format="protobuf"))
            # Reading it back as textproto (inferred from the extension) must fail.
            with self.assertRaises(google.protobuf.text_format.ParseError):
                onnx.load_model(path)

    def test_load_model_uses_format_when_it_is_specified(self) -> None:
        expected = _simple_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, "model.protobuf")
            onnx.save_model(expected, path)
            # An explicit `format` wins over the extension, so parsing the
            # binary payload as textproto must fail.
            with self.assertRaises(google.protobuf.text_format.ParseError):
                onnx.load_model(path, format="textproto")
            self.assertEqual(expected, onnx.load_model(path, format="protobuf"))

    def test_load_and_save_model_to_path_without_specifying_extension_succeeds(
        self,
    ) -> None:
        expected = _simple_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, "model")  # no extension on purpose
            onnx.save_model(expected, path, format="textproto")
            # With neither extension nor format, load_model assumes protobuf
            # and must fail to decode the textproto payload.
            with self.assertRaises(google.protobuf.message.DecodeError):
                onnx.load_model(path)
            self.assertEqual(expected, onnx.load_model(path, format="textproto"))

    def test_load_and_save_model_without_specifying_extension_or_format_defaults_to_protobuf(
        self,
    ) -> None:
        expected = _simple_model()
        with tempfile.TemporaryDirectory() as temp_dir:
            path = os.path.join(temp_dir, "model")  # no extension on purpose
            onnx.save_model(expected, path)
            # The file was written as protobuf, so textproto parsing must fail.
            with self.assertRaises(google.protobuf.text_format.ParseError):
                onnx.load_model(path, format="textproto")
            self.assertEqual(expected, onnx.load_model(path))
            self.assertEqual(expected, onnx.load_model(path, format="protobuf"))
class TestBasicFunctions(unittest.TestCase):
    """Smoke tests for the top-level onnx package surface."""

    def test_protos_exist(self) -> None:
        # The generated proto classes must be re-exported from the package.
        for proto_class in (
            onnx.AttributeProto,
            onnx.NodeProto,
            onnx.GraphProto,
            onnx.ModelProto,
        ):
            self.assertIsNotNone(proto_class)

    def test_version_exists(self) -> None:
        model = onnx.ModelProto()
        # A freshly constructed model carries no ir_version.
        self.assertFalse(model.HasField("ir_version"))
        # Stamp the running ONNX IR version and round-trip through
        # serialization so we know the field actually survives.
        model.ir_version = onnx.IR_VERSION
        payload = model.SerializeToString()
        model.ParseFromString(payload)
        self.assertTrue(model.HasField("ir_version"))
        self.assertEqual(model.ir_version, onnx.IR_VERSION)
if __name__ == "__main__":
unittest.main()

File diff suppressed because it is too large. (Load Diff)

View File

@ -0,0 +1,959 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
from typing import Callable, Sequence
import numpy as np
from onnx import (
FunctionProto,
GraphProto,
ModelProto,
NodeProto,
SparseTensorProto,
TensorProto,
ValueInfoProto,
checker,
compose,
helper,
parser,
version_converter,
)
def _load_model(m_def: str) -> ModelProto:
    """Parse a textual model definition and validate it with the checker."""
    model = parser.parse_model(m_def)
    checker.check_model(model)
    return model
def _prefixed(prefix: str, s: str) -> str:
"""Prefixes a string (if not empty)"""
return prefix + s if len(s) > 0 else s
def _get_shape(value_info: ValueInfoProto) -> list[int]:
"""Returns a list of integers representing the shape of the provided ValueInfoProto"""
return [
value_info.type.tensor_type.shape.dim[d].dim_value
for d in range(len(value_info.type.tensor_type.shape.dim))
]
def _make_sparse_tensor(name: str) -> SparseTensorProto:
    """Build a 3x3 sparse float tensor with three entries at linear indices.

    The values tensor is named ``<name>_values`` and the indices tensor
    ``<name>_idx`` — the overlapping-name tests depend on these suffixes.
    """
    shape = [3, 3]
    indices = [2, 3, 5]
    values = [1.7, 0.4, 0.9]
    values_tensor = helper.make_tensor(
        name=name + "_values",
        data_type=TensorProto.FLOAT,
        dims=[len(values)],
        vals=np.array(values).astype(np.float32),
        raw=False,
    )
    indices_tensor = helper.make_tensor(
        name=name + "_idx",
        data_type=TensorProto.INT64,
        dims=[len(indices)],
        vals=np.array(indices).astype(np.int64),
        raw=False,
    )
    return helper.make_sparse_tensor(values_tensor, indices_tensor, shape)
M1_DEF = """
<
ir_version: 7,
opset_import: [ "": 10, "com.microsoft": 1]
>
agraph (float[N, M] A0, float[N, M] A1, float[N, M] _A) => (float[N, M] B00, float[N, M] B10, float[N, M] B20)
{
B00 = Add(A0, A1)
B10 = Sub(A0, A1)
B20 = Mul(A0, A1)
}
"""
M2_DEF = """
<
ir_version: 7,
opset_import: [ "": 10, "com.microsoft": 1]
>
agraph (float[N, M] B01, float[N, M] B11, float[N, M] B21) => (float[N, M] D0)
{
C0 = Add(B01, B11)
C1 = Sub(B11, B21)
D0 = Mul(C0, C1)
}
"""
class TestComposeFunctions(unittest.TestCase):
def _test_merge_models(
self,
m1def: str,
m2def: str,
io_map: list[tuple[str, str]],
check_expectations: Callable[[GraphProto, GraphProto, GraphProto], None],
inputs: list[str] | None = None,
outputs: list[str] | None = None,
prefix1: str | None = None,
prefix2: str | None = None,
) -> None:
m1, m2 = _load_model(m1def), _load_model(m2def)
g3 = compose.merge_graphs(
m1.graph,
m2.graph,
io_map=io_map,
inputs=inputs,
outputs=outputs,
prefix1=prefix1,
prefix2=prefix2,
)
checker.check_graph(g3)
check_expectations(m1.graph, m2.graph, g3)
m3 = compose.merge_models(
m1,
m2,
io_map=io_map,
inputs=inputs,
outputs=outputs,
prefix1=prefix1,
prefix2=prefix2,
)
checker.check_model(m3)
check_expectations(m1.graph, m2.graph, m3.graph)
def test_case_connect_all_no_name_collision(self) -> None:
"""Tests a simple scenario where two models without overlapping names are merged by
connecting all the outputs in the first models to all the inputs in the second model
"""
def check_expectations(g1: GraphProto, g2: GraphProto, g3: GraphProto) -> None:
self.assertEqual(g3.input, g1.input)
self.assertEqual(g3.output, g2.output)
self.assertEqual(
["Add", "Sub", "Mul", "Add", "Sub", "Mul"],
[item.op_type for item in g3.node],
)
io_map = [("B00", "B01"), ("B10", "B11"), ("B20", "B21")]
self._test_merge_models(M1_DEF, M2_DEF, io_map, check_expectations)
def test_case_connect_same_output_twice(self) -> None:
"""Tests a scenario where we merge two models by connecting a single output in the first model
to all the inputs in the second
"""
def check_expectations(g1: GraphProto, g2: GraphProto, g3: GraphProto) -> None:
del g2 # Unused
self.assertEqual(g3.input, g1.input)
self.assertEqual(["B10", "B20", "D0"], [elem.name for elem in g3.output])
self.assertEqual(
["Add", "Sub", "Mul", "Add", "Sub", "Mul"],
[item.op_type for item in g3.node],
)
io_map = [("B00", "B01"), ("B00", "B11"), ("B00", "B21")]
self._test_merge_models(M1_DEF, M2_DEF, io_map, check_expectations)
def test_case_connect_same_output_drop_outputs(self) -> None:
"""Tests a scenario where we merge two models by connecting a single output in the first model
to all the inputs in the second, while dropping the rest of the outputs in the first model
"""
def check_expectations(g1: GraphProto, g2: GraphProto, g3: GraphProto) -> None:
del g2 # Unused
self.assertEqual(g3.input, g1.input)
self.assertEqual(["D0"], [elem.name for elem in g3.output])
self.assertEqual(
["Add", "Add", "Sub", "Mul"], [item.op_type for item in g3.node]
)
io_map = [("B00", "B01"), ("B00", "B11"), ("B00", "B21")]
outputs = ["D0"]
self._test_merge_models(
M1_DEF, M2_DEF, io_map, check_expectations, outputs=outputs
)
def test_case_connect_same_input_output_name(self) -> None:
"""Tests a scenario where we merge two models, where the inputs/outputs connected
are named exactly the same
"""
m1_def = """
<
ir_version: 7,
opset_import: [ "": 10]
>
agraph (float[N, M] A) => (float[N, M] B)
{
B = Add(A, A)
}
"""
m2_def = """
<
ir_version: 7,
opset_import: [ "": 10]
>
agraph (float[N, M] B) => (float[N, M] C)
{
C = Add(B, B)
}
"""
io_map = [("B", "B")]
def check_expectations(g1: GraphProto, g2: GraphProto, g3: GraphProto) -> None:
del g1, g2 # Unused
self.assertEqual(["A"], [elem.name for elem in g3.input])
self.assertEqual(["C"], [elem.name for elem in g3.output])
self._test_merge_models(m1_def, m2_def, io_map, check_expectations)
def test_case_drop_inputs_outputs(self) -> None:
"""Tests a scenario where we merge two models, not including some of the inputs/outputs"""
m1_def = """
<
ir_version: 7,
opset_import: [ "": 10]
>
agraph (float[N] A0, float[N] B0) => (float[N] A1, float[N] B1)
{
A1 = Add(A0, A0)
B1 = Sub(B0, B0)
}
"""
m2_def = """
<
ir_version: 7,
opset_import: [ "": 10]
>
agraph (float[N] A2, float[N] B2) => (float[N] A3, float[N] B3)
{
A3 = Add(A2, A2)
B3 = Sub(B2, B2)
}
"""
io_map = [("A1", "B2")]
def check_expectations(g1: GraphProto, g2: GraphProto, g3: GraphProto) -> None:
del g1, g2 # Unused
self.assertEqual(["A0"], [elem.name for elem in g3.input])
self.assertEqual(["B3"], [elem.name for elem in g3.output])
self.assertEqual(["Add", "Sub"], [elem.op_type for elem in g3.node])
inputs = ["A0"]
outputs = ["B3"]
self._test_merge_models(
m1_def, m2_def, io_map, check_expectations, inputs=inputs, outputs=outputs
)
def test_case_name_collision_prefix(self) -> None:
"""Tests a scenario where we merge two models that have name collisions, but they
are avoided by prefixing the models model.
"""
m1_def = """
<
ir_version: 7,
opset_import: [ "": 10]
>
agraph (float[N] A, float[N] B) => (float[N] C)
{
C = Add(A, B)
}
"""
io_map = [("C", "A")]
def check_expectations(g1: GraphProto, g2: GraphProto, g3: GraphProto) -> None:
del g1, g2 # Unused
self.assertEqual(["m1/A", "m1/B", "m2/B"], [elem.name for elem in g3.input])
self.assertEqual(["m2/C"], [elem.name for elem in g3.output])
self.assertEqual(["Add", "Add"], [elem.op_type for elem in g3.node])
self._test_merge_models(
m1_def, m1_def, io_map, check_expectations, prefix1="m1/", prefix2="m2/"
)
def test_case_connect_partially_no_name_collision(self) -> None:
"""Tests a scenario where two models without overlapping names are merged by
connecting some outputs from the first model to some inputs in the second.
The remaining inputs/outputs should be present in the combined model
"""
def check_expectations(g1: GraphProto, g2: GraphProto, g4: GraphProto) -> None:
del g1, g2 # Unused
# B20 <-> B21 not connected. They should still be present
# in the inputs and outputs of the combined graph
self.assertEqual(
["A0", "A1", "_A", "B21"], [elem.name for elem in g4.input]
)
self.assertEqual(["B20", "D0"], [elem.name for elem in g4.output])
io_map = [("B00", "B01"), ("B10", "B11")]
self._test_merge_models(M1_DEF, M2_DEF, io_map, check_expectations)
def test_merge_models_with_metadata_props(self) -> None:
m1 = _load_model(M1_DEF)
helper.set_model_props(m1, {"p1": "v1", "p2": "v2"})
m2 = _load_model(M2_DEF)
helper.set_model_props(m2, {"p3": "v3", "p4": "v4"})
io_map = [("B00", "B01")]
m3 = compose.merge_models(m1, m2, io_map=io_map)
assert len(m3.metadata_props) == 4
# Overlap, but same value
helper.set_model_props(m2, {"p1": "v1", "p4": "v4"})
m3 = compose.merge_models(m1, m2, io_map=io_map)
assert len(m3.metadata_props) == 3
# Same keys but not same value. Error
helper.set_model_props(m2, {"p1": "v5", "p4": "v4"})
self.assertRaises(ValueError, compose.merge_models, m1, m2, io_map=io_map)
def test_error_wrong_input_output_name(self) -> None:
"""Tests that providing a non existing output/input name in the io_map argument produces an error."""
m1, m2 = _load_model(M1_DEF), _load_model(M2_DEF)
self.assertRaises(
ValueError,
compose.merge_models,
m1,
m2,
io_map=[("wrong_outname", "B01"), ("B10", "B11"), ("B20", "B21")],
)
# Wrong output name
self.assertRaises(
ValueError,
compose.merge_models,
m1,
m2,
io_map=[("B00", "wrong_input"), ("B10", "B11"), ("B20", "B21")],
)
def test_error_ir_version_mismatch(self) -> None:
m1 = _load_model(
"""
<
ir_version: 7,
opset_import: [ "": 13]
>
agraph (float[N, M] X0) => (float[N, M] Y0)
{
Y0 = Add(X0, X0)
}
"""
)
m2 = _load_model(
"""
<
ir_version: 6,
opset_import: [ "": 13]
>
agraph (float[N, M] X1) => (float[N, M] Y1)
{
Y1 = Add(X1, X1)
}
"""
)
# Wrong IR version name
self.assertRaises(
ValueError, compose.merge_models, m1, m2, io_map=[("Y0", "X1")]
)
def test_error_opset_import_mismatch(self) -> None:
"""Tests that providing models with different operator set imported produces an error."""
m1, m2 = _load_model(M1_DEF), _load_model(M2_DEF)
m1 = helper.make_model(
m1.graph, producer_name="test", opset_imports=[helper.make_opsetid("", 10)]
)
m2 = helper.make_model(
m2.graph, producer_name="test", opset_imports=[helper.make_opsetid("", 15)]
)
io_map = [("B00", "B01"), ("B10", "B11"), ("B20", "B21")]
self.assertRaises(ValueError, compose.merge_models, m1, m2, io_map)
# Converting to the same Operator set version, should work
m1 = version_converter.convert_version(m1, 15)
m3 = compose.merge_models(m1, m2, io_map=io_map)
checker.check_model(m3)
# FIXME: This function should be removed, as tests should not contain a copy of the tested logic.
def _test_add_prefix(
self,
rename_nodes: bool = False,
rename_edges: bool = False,
rename_inputs: bool = False,
rename_outputs: bool = False,
rename_initializers: bool = False,
rename_value_infos: bool = False,
inplace: bool = False,
) -> None:
m1 = _load_model(M1_DEF)
prefix = "pre/"
if inplace:
m2 = ModelProto()
m2.CopyFrom(m1)
compose.add_prefix(
m2,
prefix,
rename_nodes=rename_nodes,
rename_edges=rename_edges,
rename_inputs=rename_inputs,
rename_outputs=rename_outputs,
rename_initializers=rename_initializers,
rename_value_infos=rename_value_infos,
inplace=True,
)
else:
m2 = compose.add_prefix(
m1,
prefix,
rename_nodes=rename_nodes,
rename_edges=rename_edges,
rename_inputs=rename_inputs,
rename_outputs=rename_outputs,
rename_initializers=rename_initializers,
rename_value_infos=rename_value_infos,
)
g_in = m1.graph
g_out = m2.graph
if (
rename_edges
or rename_inputs
or rename_outputs
or rename_initializers
or rename_value_infos
):
name_mapping = {}
# Rename inputs/outputs/edges. Propagate name changes from and to edges
if rename_edges:
for n in g_in.node:
for e in n.input:
name_mapping[e] = _prefixed(prefix, e)
for e in n.output:
name_mapping[e] = _prefixed(prefix, e)
if rename_inputs:
for elem in g_in.input:
name_mapping[elem.name] = _prefixed(prefix, elem.name)
if rename_outputs:
for elem in g_in.output:
name_mapping[elem.name] = _prefixed(prefix, elem.name)
if rename_initializers:
for init in g_in.initializer:
name_mapping[init.name] = _prefixed(prefix, init.name)
for sparse_init in g_in.sparse_initializer:
name_mapping[sparse_init.values.name] = _prefixed(
prefix, sparse_init.values.name
)
name_mapping[sparse_init.indices.name] = _prefixed(
prefix, sparse_init.indices.name
)
if rename_value_infos:
for value_info in g_in.output:
name_mapping[value_info.name] = _prefixed(prefix, value_info.name)
for n1, n0 in zip(g_out.node, g_in.node):
for e1, e0 in zip(n1.input, n0.input):
self.assertEqual(name_mapping.get(e0, e0), e1)
for e1, e0 in zip(n1.output, n0.output):
self.assertEqual(name_mapping.get(e0, e0), e1)
for i1, i0 in zip(g_out.input, g_in.input):
self.assertEqual(name_mapping.get(i0.name, i0.name), i1.name)
for o1, o0 in zip(g_out.output, g_in.output):
self.assertEqual(name_mapping.get(o0.name, o0.name), o1.name)
for init1, init0 in zip(g_out.initializer, g_in.initializer):
self.assertEqual(name_mapping.get(init0.name, init0.name), init1.name)
for sparse_init1, sparse_init0 in zip(
g_out.sparse_initializer, g_in.sparse_initializer
):
self.assertEqual(
name_mapping.get(
sparse_init0.values.name, sparse_init0.values.name
),
sparse_init1.values.name,
)
self.assertEqual(
name_mapping.get(
sparse_init0.indices.name, sparse_init0.indices.name
),
sparse_init1.indices.name,
)
for vi1, vi0 in zip(g_out.value_info, g_in.value_info):
self.assertEqual(name_mapping.get(vi0.name, vi0.name), vi1.name)
if rename_nodes:
for n1, n0 in zip(g_out.node, g_in.node):
self.assertEqual(_prefixed(prefix, n0.name), n1.name)
def test_add_prefix_nodes(self) -> None:
"""Tests renaming nodes only"""
self._test_add_prefix(rename_nodes=True)
def test_add_prefix_edges(self) -> None:
"""Tests prefixing nodes edges. This will also rename inputs/outputs, since the names are shared"""
self._test_add_prefix(rename_edges=True)
def test_add_prefix_inputs(self) -> None:
"""Tests prefixing graph inputs only. Relevant node edges should be renamed as well"""
self._test_add_prefix(rename_inputs=True)
def test_add_prefix_outputs(self) -> None:
"""Tests prefixing graph outputs only. Relevant node edges should be renamed as well"""
self._test_add_prefix(rename_outputs=True)
def test_add_prefix_attribute_subgraph(self) -> None:
"""Tests prefixing attribute's subgraph. Relevant subgraph should be renamed as well"""
C = helper.make_tensor_value_info("C", TensorProto.BOOL, [1])
X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, 1])
Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, 1])
Z = helper.make_tensor_value_info("Z", TensorProto.FLOAT, [None, 1])
Out = helper.make_tensor_value_info("Out", TensorProto.FLOAT, [None, 1])
XY = helper.make_node("Mul", inputs=["X", "Y"], outputs=["XY"])
add = helper.make_node("Add", inputs=["XY", "Z"], outputs=["Out"])
sub = helper.make_node("Sub", inputs=["XY", "Z"], outputs=["Out"])
cond = helper.make_node(
"If",
inputs=["C"],
outputs=["Out"],
then_branch=helper.make_graph(
nodes=[add], name="then", inputs=[], outputs=[Out]
),
else_branch=helper.make_graph(
nodes=[sub], name="else", inputs=[], outputs=[Out]
),
)
graph = helper.make_graph(
nodes=[XY, cond], name="graph", inputs=[C, X, Y, Z], outputs=[Out]
)
prefix = "prefix."
prefixed_graph = compose.add_prefix_graph(graph, prefix)
checker.check_graph(prefixed_graph)
for n1, n0 in zip(prefixed_graph.node, graph.node):
self.assertEqual(_prefixed(prefix, n0.name), n1.name)
for attribute1, attribute0 in zip(n1.attribute, n0.attribute):
if attribute1.g:
for subgraph_n1, subgraph_n0 in zip(
attribute1.g.node, attribute0.g.node
):
for input_n1, input_n0 in zip(
subgraph_n1.input, subgraph_n0.input
):
self.assertEqual(_prefixed(prefix, input_n0), input_n1)
for output_n1, output_n0 in zip(
subgraph_n1.output, subgraph_n0.output
):
self.assertEqual(_prefixed(prefix, output_n0), output_n1)
def test_add_prefix_all(self) -> None:
"""Tests prefixing all names in the graph"""
self._test_add_prefix(True, True, True, True, True, True)
def test_add_prefix_inplace(self) -> None:
"""Tests prefixing inplace"""
self._test_add_prefix(inplace=True)
def test_expand_out_dim(self) -> None:
"""Tests expanding output dimensions. The resulting graph should have the same output names,
but with one more dimension at the specified index.
"""
m1 = _load_model(M1_DEF)
def _check_model(m1: ModelProto, m2: ModelProto, dim_idx: int) -> None:
for out_g2, out_g1 in zip(m2.graph.output, m1.graph.output):
self.assertEqual(out_g2.name, out_g1.name)
self.assertEqual(
out_g2.type.tensor_type.elem_type, out_g1.type.tensor_type.elem_type
)
expected_out_shape = _get_shape(out_g1)
expected_out_shape.insert(dim_idx, 1)
self.assertEqual(_get_shape(out_g2), expected_out_shape)
for dim_idx in [0, 2, -1, -3]:
m2 = compose.expand_out_dim(m1, dim_idx)
_check_model(m1, m2, dim_idx)
# Test inplace
m2 = ModelProto()
m2.CopyFrom(m1)
dim_idx = 0
compose.expand_out_dim(m2, dim_idx, inplace=True)
_check_model(m1, m2, dim_idx)
def _test_overlapping_names(
self,
inputs0: Sequence[str] = ("i0", "i1"),
inputs1: Sequence[str] = ("i2", "i3"),
outputs0: Sequence[str] = ("o0", "o1"),
outputs1: Sequence[str] = ("o2", "o3"),
value_info0: Sequence[str] = ("v0", "v1"),
value_info1: Sequence[str] = ("v2", "v3"),
initializer0: Sequence[str] = ("init0", "init1"),
initializer1: Sequence[str] = ("init2", "init3"),
sparse_initializer0: Sequence[str] = ("sparse_init0", "sparse_init1"),
sparse_initializer1: Sequence[str] = ("sparse_init2", "sparse_init3"),
) -> None:
n0 = [
helper.make_node("Identity", inputs=[inputs0[i]], outputs=[outputs0[i]])
for i in range(len(inputs0))
]
i0 = [
helper.make_tensor_value_info(inputs0[i], TensorProto.FLOAT, [])
for i in range(len(inputs0))
]
o0 = [
helper.make_tensor_value_info(outputs0[i], TensorProto.FLOAT, [])
for i in range(len(outputs0))
]
vi0 = [
helper.make_tensor_value_info(value_info0[i], TensorProto.FLOAT, [])
for i in range(len(value_info0))
]
init0 = [
helper.make_tensor(
name=initializer0[i], data_type=TensorProto.INT64, dims=(), vals=[1]
)
for i in range(len(initializer0))
]
sparse_init0 = [
_make_sparse_tensor(sparse_initializer0[i])
for i in range(len(sparse_initializer0))
]
n1 = [
helper.make_node("Identity", inputs=[inputs1[i]], outputs=[outputs1[i]])
for i in range(len(inputs1))
]
i1 = [
helper.make_tensor_value_info(inputs1[i], TensorProto.FLOAT, [])
for i in range(len(inputs1))
]
o1 = [
helper.make_tensor_value_info(outputs1[i], TensorProto.FLOAT, [])
for i in range(len(outputs1))
]
vi1 = [
helper.make_tensor_value_info(value_info1[i], TensorProto.FLOAT, [])
for i in range(len(value_info1))
]
init1 = [
helper.make_tensor(
name=initializer1[i], data_type=TensorProto.INT64, dims=(), vals=[1]
)
for i in range(len(initializer1))
]
sparse_init1 = [
_make_sparse_tensor(sparse_initializer1[i])
for i in range(len(sparse_initializer1))
]
ops = [helper.make_opsetid("", 10)]
m0 = helper.make_model(
helper.make_graph(
nodes=n0,
name="g0",
inputs=i0,
outputs=o0,
value_info=vi0,
initializer=init0,
sparse_initializer=sparse_init0,
),
producer_name="test",
opset_imports=ops,
)
m1 = helper.make_model(
helper.make_graph(
nodes=n1,
name="g1",
inputs=i1,
outputs=o1,
value_info=vi1,
initializer=init1,
sparse_initializer=sparse_init1,
),
producer_name="test",
opset_imports=ops,
)
overlap = compose.check_overlapping_names(m0.graph, m1.graph)
i = 0
overlapping_inputs = list(set(inputs0) & set(inputs1))
overlapping_outputs = list(set(outputs0) & set(outputs1))
overlapping_edges = list(set(overlapping_inputs + overlapping_outputs))
if overlapping_edges:
self.assertEqual(overlap[i], ("edge", overlapping_edges))
i += 1
overlapping_vis = list(set(value_info0) & set(value_info1))
if overlapping_vis:
self.assertEqual(overlap[i], ("value_info", overlapping_vis))
i += 1
overlapping_init = list(set(initializer0) & set(initializer1))
if overlapping_init:
self.assertEqual(overlap[i], ("initializer", overlapping_init))
i += 1
overlapping_sparse_init = list(
set(sparse_initializer0) & set(sparse_initializer1)
)
if overlapping_sparse_init:
expected_overlap = []
for overlapping_name in overlapping_sparse_init:
expected_overlap.append(overlapping_name + "_values")
expected_overlap.append(overlapping_name + "_idx")
self.assertEqual(overlap[i], ("sparse_initializer", expected_overlap))
i += 1
m0_new = compose.add_prefix(m0, prefix="g0/")
overlap = compose.check_overlapping_names(m0_new.graph, m1.graph)
self.assertEqual(0, len(overlap))
def test_overlapping_input_names(self) -> None:
"""Tests error checking when the name of the inputs overlaps"""
self._test_overlapping_names(inputs0=["i0", "i1"], inputs1=["i1", "i2"])
def test_overlapping_output_names(self) -> None:
"""Tests error checking when the name of the output overlaps"""
self._test_overlapping_names(outputs0=["o0", "o1"], outputs1=["o1", "o2"])
def test_overlapping_value_info_names(self) -> None:
"""Tests error checking when the name of value_info entries overlaps"""
self._test_overlapping_names(
value_info0=["vi0", "vi1"], value_info1=["vi1", "vi2"]
)
def test_overlapping_initializer_names(self) -> None:
"""Tests error checking when the name of initializer entries overlaps"""
self._test_overlapping_names(
initializer0=["init0", "init1"], initializer1=["init1", "init2"]
)
def test_overlapping_sparse_initializer_names(self) -> None:
"""Tests error checking when the name of sparse_initializer entries overlaps"""
self._test_overlapping_names(
sparse_initializer0=["sparse_init0", "sparse_init1"],
sparse_initializer1=["sparse_init1", "sparse_init2"],
)
def test_overlapping_function_names(self) -> None:
"""Tests error checking when the name of local function entries overlaps"""
ops = [helper.make_opsetid("", 10), helper.make_opsetid("local", 10)]
def _make_function(
domain: str,
fname: str,
inputs: list[str],
outputs: list[str],
nodes: list[NodeProto],
) -> FunctionProto:
f = FunctionProto()
f.domain = domain
f.name = fname
f.input.extend(inputs)
f.output.extend(outputs)
f.node.extend(nodes)
f.opset_import.extend(ops)
return f
ops = [helper.make_opsetid("", 10), helper.make_opsetid("local", 10)]
g = GraphProto()
g.input.extend(
[
helper.make_tensor_value_info("x0", TensorProto.FLOAT, []),
helper.make_tensor_value_info("x1", TensorProto.FLOAT, []),
]
)
g.output.extend(
[
helper.make_tensor_value_info("y", TensorProto.FLOAT, []),
]
)
g.node.extend(
[helper.make_node("f1", domain="local", inputs=["x0", "x1"], outputs=["y"])]
)
g1 = GraphProto()
g1.CopyFrom(g)
g1.name = "g1"
m1 = helper.make_model(g1, producer_name="test", opset_imports=ops)
m1.functions.extend(
[
_make_function(
"local",
"f1",
["x0", "x1"],
["y"],
[helper.make_node("Add", inputs=["x0", "x1"], outputs=["y"])],
)
]
)
checker.check_model(m1)
g2 = GraphProto()
g2.CopyFrom(g)
g2.name = "g2"
m2 = helper.make_model(g2, producer_name="test", opset_imports=ops)
m2.functions.extend(
[
_make_function(
"local",
"f1",
["x0", "x1"],
["y"],
[helper.make_node("Mul", inputs=["x0", "x1"], outputs=["y"])],
)
]
)
checker.check_model(m2)
m = compose.merge_models(
m1, m2, io_map=[("y", "x0"), ("y", "x1")], prefix1="m1/", prefix2="m2/"
)
checker.check_model(m)
nodes = [n.op_type for n in m.graph.node]
self.assertEqual(["m1/f1", "m2/f1"], nodes)
functions = [f.name for f in m.functions]
self.assertEqual(["m1/f1", "m2/f1"], functions)
g3 = GraphProto()
g3.CopyFrom(g)
g3.name = "g3"
g3.node[0].op_type = "f2"
m3 = helper.make_model(g3, producer_name="test", opset_imports=ops)
m3.functions.extend(
[
_make_function(
"local",
"f1",
["x0", "x1"],
["y"],
[
helper.make_node("Add", inputs=["x0", "x1"], outputs=["y0"]),
helper.make_node("Mul", inputs=["x0", "x1"], outputs=["y1"]),
helper.make_node("Add", inputs=["y0", "y1"], outputs=["y"]),
],
),
_make_function(
"local",
"f2",
["x0", "x1"],
["y"],
[
helper.make_node(
"f1", domain="local", inputs=["x0", "x1"], outputs=["y0"]
),
helper.make_node("Mul", inputs=["x0", "x1"], outputs=["y1"]),
helper.make_node("Add", inputs=["y0", "y1"], outputs=["y"]),
],
),
]
)
checker.check_model(m3)
m = compose.merge_models(
m1, m3, io_map=[("y", "x0"), ("y", "x1")], prefix1="m1/", prefix2="m3/"
)
checker.check_model(m)
nodes = [n.op_type for n in m.graph.node]
self.assertEqual(["m1/f1", "m3/f2"], nodes)
functions = [f.name for f in m.functions]
self.assertEqual(["m1/f1", "m3/f1", "m3/f2"], functions)
self.assertEqual(["Add"], [n.op_type for n in m.functions[0].node])
self.assertEqual(
["Add", "Mul", "Add"], [n.op_type for n in m.functions[1].node]
)
self.assertEqual(
["m3/f1", "Mul", "Add"], [n.op_type for n in m.functions[2].node]
)
def test_merge_drop_unnecessary_initializers_and_value_info(self) -> None:
    """Tests automatic removal of initializers when merging graphs.

    When merge_models connects an output of the first model to an input of
    the second, the second model's matching graph input disappears, so any
    initializer / sparse initializer / value_info bound to that input name
    must be dropped from the merged graph.
    """
    ops = [helper.make_opsetid("", 10)]
    # Base graph: a single Identity node, y = Identity(x).
    g = GraphProto()
    g.input.extend([helper.make_tensor_value_info("x", TensorProto.FLOAT, [])])
    g.output.extend([helper.make_tensor_value_info("y", TensorProto.FLOAT, [])])
    g.node.extend([helper.make_node("Identity", inputs=["x"], outputs=["y"])])
    # m1: plain copy of the base graph.
    g1 = GraphProto()
    g1.CopyFrom(g)
    g1.name = "g1"
    m1 = helper.make_model(g1, producer_name="test", opset_imports=ops)
    checker.check_model(m1)
    # m2: same graph, with a (dense) initializer for input 'x'.
    g2 = GraphProto()
    g2.CopyFrom(g)
    g2.name = "g2"
    g2.initializer.extend(
        [
            helper.make_tensor(
                name="x", data_type=TensorProto.FLOAT, dims=(), vals=[0]
            )
        ]
    )
    m2 = helper.make_model(g2, producer_name="test", opset_imports=ops)
    checker.check_model(m2)
    # m3: same graph, with a sparse initializer for input 'x'.
    g3 = GraphProto()
    g3.CopyFrom(g)
    g3.name = "g3"
    g3.sparse_initializer.extend([_make_sparse_tensor("x")])
    m3 = helper.make_model(g3, producer_name="test", opset_imports=ops)
    checker.check_model(m3)
    # m4: same graph, with value_info for input 'x'.
    g4 = GraphProto()
    g4.CopyFrom(g)
    g4.name = "g4"  # fixed: was "g3" (copy-paste typo)
    g4.value_info.extend(
        [helper.make_tensor_value_info("x", TensorProto.FLOAT, [])]
    )
    m4 = helper.make_model(g4, producer_name="test", opset_imports=ops)
    checker.check_model(m4)
    # Initializer 'x' from m2 is removed, because after merging there is
    # no longer a graph input with that name.
    out_m1 = compose.merge_models(m1, m2, prefix1="m1/", io_map=[("y", "x")])
    self.assertEqual(0, len(out_m1.graph.initializer))
    # Sparse initializer 'x' from m3 is removed for the same reason.
    # Fixed: assert on sparse_initializer (the original checked the dense
    # initializer list, which is trivially empty for m3).
    out_m2 = compose.merge_models(m1, m3, prefix1="m1/", io_map=[("y", "x")])
    self.assertEqual(0, len(out_m2.graph.sparse_initializer))
    # Value info 'x' from m4 is removed for the same reason.
    out_m3 = compose.merge_models(m1, m4, prefix1="m1/", io_map=[("y", "x")])
    self.assertEqual(0, len(out_m3.graph.value_info))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,60 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <list>
#include <utility>
#include "gtest/gtest.h"
#include "onnx/common/path.h"
#ifdef _WIN32
// Only test clean_relative_path and normalize_separator on non-Windows
// because Windows has its own implementation for them from std::filesystem::path.
#else
using namespace ONNX_NAMESPACE;
namespace ONNX_NAMESPACE {
namespace Test {
// Exercises clean_relative_path, which lexically normalizes a relative path:
// collapses doubled slashes, drops "." elements, resolves ".." against the
// preceding element, and strips trailing slashes. Leading ".." elements that
// cannot be resolved are kept.
TEST(PathTest, CleanRelativePathTest) {
// Already normal.
EXPECT_EQ(clean_relative_path("abc"), "abc");
EXPECT_EQ(clean_relative_path("abc/def"), "abc/def");
EXPECT_EQ(clean_relative_path("a/b/c"), "a/b/c");
EXPECT_EQ(clean_relative_path("."), ".");
EXPECT_EQ(clean_relative_path(".."), "..");
EXPECT_EQ(clean_relative_path("../.."), "../..");
EXPECT_EQ(clean_relative_path("../../abc"), "../../abc");
// Remove trailing slash
EXPECT_EQ(clean_relative_path("abc/"), "abc");
EXPECT_EQ(clean_relative_path("abc/def/"), "abc/def");
EXPECT_EQ(clean_relative_path("a/b/c/"), "a/b/c");
EXPECT_EQ(clean_relative_path("./"), ".");
EXPECT_EQ(clean_relative_path("../"), "..");
EXPECT_EQ(clean_relative_path("../../"), "../..");
// Remove doubled slash
EXPECT_EQ(clean_relative_path("abc//def//ghi"), "abc/def/ghi");
EXPECT_EQ(clean_relative_path("abc///"), "abc");
EXPECT_EQ(clean_relative_path("abc//"), "abc");
// Remove . elements
EXPECT_EQ(clean_relative_path("abc/./def"), "abc/def");
EXPECT_EQ(clean_relative_path("./abc/def"), "abc/def");
EXPECT_EQ(clean_relative_path("abc/."), "abc");
// Remove .. elements
EXPECT_EQ(clean_relative_path("abc/def/ghi/../jkl"), "abc/def/jkl");
EXPECT_EQ(clean_relative_path("abc/def/../ghi/../jkl"), "abc/jkl");
EXPECT_EQ(clean_relative_path("abc/def/.."), "abc");
EXPECT_EQ(clean_relative_path("abc/def/../.."), ".");
EXPECT_EQ(clean_relative_path("abc/def/../../.."), "..");
EXPECT_EQ(clean_relative_path("abc/def/../../../ghi/jkl/../../../mno"), "../../mno");
EXPECT_EQ(clean_relative_path("../abc"), "../abc");
// Combinations
EXPECT_EQ(clean_relative_path("abc/./../def"), "def");
EXPECT_EQ(clean_relative_path("abc//./../def"), "def");
EXPECT_EQ(clean_relative_path("abc/../../././../def"), "../../def");
}
} // namespace Test
} // namespace ONNX_NAMESPACE
#endif

View File

@ -0,0 +1,401 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
#include "onnx/checker.h"
#include "onnx/defs/parser.h"
#include "onnx/defs/schema.h"
#include "onnx/defs/shape_inference.h"
#include "onnx/onnx_pb.h"
#include "onnx/shape_inference/implementation.h"
using namespace ONNX_NAMESPACE::shape_inference;
namespace ONNX_NAMESPACE {
namespace Test {
inline bool CompareShape(
const TensorShapeProto& inferredShape,
const TensorShapeProto& expectedShape,
bool checkSameParam = false) {
EXPECT_TRUE(inferredShape.dim_size() == expectedShape.dim_size())
<< "Dim size for inferred and expected shape is different.";
for (int i = 0; i < inferredShape.dim_size(); i++) {
EXPECT_TRUE(
(inferredShape.dim(i).has_dim_value() == expectedShape.dim(i).has_dim_value()) &&
(inferredShape.dim(i).has_dim_param() == expectedShape.dim(i).has_dim_param()))
<< "Inferred and expected dim values are different.";
EXPECT_TRUE(
inferredShape.dim(i).has_dim_value() ? inferredShape.dim(i).dim_value() == expectedShape.dim(i).dim_value()
: checkSameParam ? inferredShape.dim(i).dim_param() == expectedShape.dim(i).dim_param()
: true)
<< "Inferred and expected dims are different.";
}
return true;
}
// Parses `graphCode` (ONNX text format), runs each node's registered
// data-propagation function under opset `domainVersion`, and returns the
// propagated (partial) shape data for the graph's single output.
// Parse/schema failures are reported through gtest EXPECT_* assertions.
TensorShapeProto RunDataPropagation(const char* graphCode, int domainVersion = 15) {
// Parses the graph from graphCode
GraphProto graph;
OnnxParser parser(graphCode);
auto status = parser.Parse(graph);
EXPECT_TRUE(status.IsOK()) << status.ErrorMessage();
EXPECT_TRUE(parser.EndOfInput()) << "Extra unparsed input unexpected.";
// Constructs name to TypeProto map from value_info, input, output
std::unordered_map<std::string, TypeProto*> valueTypesByName;
for (auto& vi : *graph.mutable_value_info()) {
if (vi.has_type()) {
valueTypesByName[vi.name()] = vi.mutable_type();
}
}
for (auto& vi : *graph.mutable_input()) {
if (vi.has_type()) {
valueTypesByName[vi.name()] = vi.mutable_type();
}
}
for (auto& vi : *graph.mutable_output()) {
if (vi.has_type()) {
valueTypesByName[vi.name()] = vi.mutable_type();
}
}
// Constructs name to TensorProto map from initializer
std::unordered_map<std::string, const TensorProto*> inputDataByName;
for (const auto& tp : graph.initializer()) {
inputDataByName[tp.name()] = &tp;
}
// Collects data from constant nodes
// (a single-output Constant node's "value" tensor counts as known data)
for (const auto& n : graph.node()) {
if (n.op_type() != "Constant" || n.output().size() != 1) {
continue;
}
for (const auto& attr : n.attribute()) {
if (attr.name() == "value") {
if (attr.type() == AttributeProto::TENSOR && attr.has_t()) {
inputDataByName[n.output(0)] = &attr.t();
}
}
}
}
// Runs data propagation on each node
std::unordered_map<std::string, TensorShapeProto> generatedShapeDataByName;
auto* schemaRegistry = OpSchemaRegistry::Instance();
TensorShapeProto inferredShape;
for (auto n : graph.node()) {
// No need to run data propagation on Constant
if (n.op_type() == "Constant") {
continue;
}
// NOTE(review): schema is dereferenced without a null check; all ops used
// by these tests are registered, so the lookup is expected to succeed.
DataPropagationContextImpl dataPropagationCtx(n, valueTypesByName, inputDataByName, generatedShapeDataByName);
const auto schema = schemaRegistry->GetSchema(n.op_type(), domainVersion, n.domain());
EXPECT_TRUE(schema->has_data_propagation_function());
schema->GetDataPropagationFunction()(dataPropagationCtx);
}
// Assuming the graph being tested only has 1 output.
// If this ever changes then fixes are required here.
const auto inputShapeDataIter = generatedShapeDataByName.find(graph.output(0).name());
EXPECT_TRUE(inputShapeDataIter != generatedShapeDataByName.cend());
inferredShape.CopyFrom(inputShapeDataIter->second);
// Returns the partial shape data for output
return inferredShape;
}
TEST(DataPropagationImplTest, ShapeTest) {
  // Shape of a statically-shaped input propagates as the constant {7,4,1}.
  const char* graph_code = R"ONNX(
agraph (int32[7,4,1] x) => (int32[3] y)
{
xs = Shape(x)
y = Cast<to = 7>(xs)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {7, 4, 1};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, SymbolicShapeTest) {
  // The symbolic batch dim N must survive propagation as a dim_param.
  const char* graph_code = R"ONNX(
agraph (int32[N,3,256,256] x) => (int32[4] y)
{
xs = Shape(x)
y = Cast<to = 7>(xs)
}
)ONNX";
  TensorShapeProto want;
  want.mutable_dim()->Add()->set_dim_param("N");
  const int64_t dims[] = {3, 256, 256};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  // checkSameParam=true: the dim_param name must match too.
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want, true));
}
TEST(DataPropagationImplTest, CastTest) {
  // Cast passes the propagated shape data {2,5} through unchanged.
  const char* graph_code = R"ONNX(
agraph (int32[2,5] x) => (int32[2] y)
{
xs = Shape(x)
y = Cast<to = 7>(xs)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {2, 5};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, SqueezeTest) {
  // Squeeze on the 1-D shape tensor keeps the propagated values {2,5}.
  const char* graph_code = R"ONNX(
agraph (int32[2,5] x) => (int32[2] z)
{
xs = Shape(x)
y = Squeeze(xs)
z = Cast<to = 7>(y)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {2, 5};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, UnsqueezeTest) {
  // Unsqueeze on the shape tensor still propagates the values {2,5}.
  const char* graph_code = R"ONNX(
agraph (int32[2,5] x) => (int32[1,2] w)
{
xs = Shape(x)
axis = Constant<value = int64[1] {1}>()
z = Unsqueeze(xs, axis)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {2, 5};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, SizeTest) {
  // Size of the 3-element initializer propagates as the scalar value 3.
  const char* graph_code = R"ONNX(
agraph (int64[1] x) => (int32[1] w)
<int64[3] init = {2,3,5}>
{
z = Size(init)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  want.mutable_dim()->Add()->set_dim_value(3);
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, AddTest) {
  // Add({2,4,5}, {2,4,5}) over propagated shape data yields {4,8,10}.
  const char* graph_code = R"ONNX(
agraph (int32[2,4,5] x, int32[2,4,5] y) => (int32[3] w)
{
xs = Shape(x)
ys = Shape(y)
z = Add(xs, ys)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {4, 8, 10};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, AddSymbolicShapeTest) {
  // Add({2,4,5}, {2,4,M}) = {4,8,?}: the last element involves a symbolic
  // dim, so it is not computable and stays unset (no value, no param).
  const char* graph_code = R"ONNX(
agraph (int32[2,4,5] x, int32[2,4,M] y) => (int32[3] w)
{
xs = Shape(x)
ys = Shape(y)
z = Add(xs, ys)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {4, 8};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  want.mutable_dim()->Add();  // unknown dim: neither value nor param set
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, SubTest) {
  // Sub({10,11,6}, broadcast {5}) over shape data yields {5,6,1}.
  const char* graph_code = R"ONNX(
agraph (int32[10,11,6] x, int32[5] y) => (int32[3] w)
{
xs = Shape(x)
ys = Shape(y)
z = Sub(xs, ys)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {5, 6, 1};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, MulTest) {
  // Mul(broadcast {2}, {5,1,7}) over shape data yields {10,2,14}.
  const char* graph_code = R"ONNX(
agraph (int32[2] x, int32[5,1,7] y) => (int32[3] w)
{
xs = Shape(x)
ys = Shape(y)
z = Mul(xs, ys)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {10, 2, 14};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, ConcatTest) {
  // Concat of the two shape tensors yields the concatenation {1,2,3,4}.
  const char* graph_code = R"ONNX(
agraph (int32[1,2] x, int32[3,4] y) => (int32[4] w)
{
xs = Shape(x)
ys = Shape(y)
z = Concat<axis = 0>(xs, ys)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {1, 2, 3, 4};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, GatherTest) {
  // Gathering indices {0,3,5} from the shape {1,2,3,4,5,6} yields {1,4,6}.
  const char* graph_code = R"ONNX(
agraph (int32[1,2,3,4,5,6] x) => (int32[3] w)
{
xs = Shape(x)
indices = Constant<value = int64[3] {0,3,5}>()
z = Gather<axis = 0>(xs, indices)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {1, 4, 6};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, GatherNegativeIndicesTest) {
  // Negative indices count from the end: {-2,-1} on {1,2,3,4,5,6} -> {5,6}.
  const char* graph_code = R"ONNX(
agraph (int32[1,2,3,4,5,6] x) => (int32[2] w)
{
xs = Shape(x)
indices = Constant<value = int64[2] {-2,-1}>()
z = Gather<axis = 0>(xs, indices)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {5, 6};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, SliceTest) {
  // Slice [1:7:3] of the shape {1,...,8} selects elements {2,5}.
  const char* graph_code = R"ONNX(
agraph (int32[1,2,3,4,5,6,7,8] x) => (int32[2] w)
{
xs = Shape(x)
starts = Constant<value = int64[1] {1}>()
ends = Constant<value = int64[1] {7}>()
axes = Constant<value = int64[1] {0}>()
steps = Constant<value = int64[1] {3}>()
z = Slice(xs, starts, ends, axes, steps)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {2, 5};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, SliceDefaultAxesAndStepTest) {
  // With axes and steps omitted (defaults), Slice [2:5] selects {3,4,5}.
  const char* graph_code = R"ONNX(
agraph (int32[1,2,3,4,5,6,7,8] x) => (int32[3] w)
{
xs = Shape(x)
starts = Constant<value = int64[1] {2}>()
ends = Constant<value = int64[1] {5}>()
z = Slice(xs, starts, ends)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {3, 4, 5};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
TEST(DataPropagationImplTest, SliceNegativeStartEndStepTest) {
  // Slice [-3:-7:-2] walks backwards from the 6th element, selecting {6,4}.
  const char* graph_code = R"ONNX(
agraph (int32[1,2,3,4,5,6,7,8] x) => (int32[3] w)
{
xs = Shape(x)
starts = Constant<value = int64[1] {-3}>()
ends = Constant<value = int64[1] {-7}>()
axes = Constant<value = int64[1] {0}>()
steps = Constant<value = int64[1] {-2}>()
z = Slice(xs, starts, ends, axes, steps)
w = Cast<to = 7>(z)
}
)ONNX";
  TensorShapeProto want;
  const int64_t dims[] = {6, 4};
  for (const int64_t d : dims) {
    want.mutable_dim()->Add()->set_dim_value(d);
  }
  EXPECT_TRUE(CompareShape(RunDataPropagation(graph_code), want));
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,279 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
#include "onnx/checker.h"
#include "onnx/common/constants.h"
#include "onnx/defs/function.h"
#include "onnx/defs/schema.h"
using namespace ONNX_NAMESPACE::checker;
#pragma warning(push)
#pragma warning(disable : 4530)
namespace ONNX_NAMESPACE {
namespace Test {
// Utilities. TODO: Turn them into reusable ONNX utilities for use by
// Builds a scalar TensorProto holding `value` converted to `elem_type`.
// Only FLOAT and DOUBLE are supported; other types trip the assert.
TensorProto ToTensor(double value, TensorProto_DataType elem_type) {
  TensorProto result;
  result.set_data_type(elem_type);
  switch (elem_type) {
    case TensorProto_DataType::TensorProto_DataType_FLOAT:
      result.add_float_data(static_cast<float>(value));
      break;
    case TensorProto_DataType::TensorProto_DataType_DOUBLE:
      result.add_double_data(value);
      break;
    default:
      // FLOAT16 (and everything else) is not needed by these tests.
      assert(false);
  }
  return result;
}
// Appends one NodeProto to `functionProto` for every NodeDef in `node_defs`,
// copying op type, inputs, outputs, and attributes.
void BuildNodes(FunctionProto& functionProto, const std::vector<FunctionBodyHelper::NodeDef>& node_defs) {
  for (const FunctionBodyHelper::NodeDef& def : node_defs) {
    auto* node = functionProto.add_node();
    node->set_op_type(def.op_type);
    for (const auto& input_name : def.inputs) {
      node->add_input(input_name);
    }
    for (const auto& output_name : def.outputs) {
      node->add_output(output_name);
    }
    for (const auto& attribute : def.attributes) {
      *(node->add_attribute()) = attribute.proto;
    }
  }
}
// Fills `functionProto` with the given node definitions and finalizes it via
// schema.BuildFunction (which copies signature/attribute metadata).
// Always returns true, matching the function-body-builder callback contract.
bool BuildFunctionProto(
    FunctionProto& functionProto,
    const OpSchema& schema,
    const std::vector<FunctionBodyHelper::NodeDef>& node_defs) {
BuildNodes(functionProto, node_defs);
schema.BuildFunction(functionProto);
return true;
}
// A monomorphic context-dependent function test-case.
static bool
BuildFloatFunctionBody(const FunctionBodyBuildContext& ctx, const OpSchema& schema, FunctionProto& functionProto) {
// Create a scalar-tensor constant 2.0 of float type:
auto two_as_tensor = ToTensor(2.0, TensorProto_DataType::TensorProto_DataType_FLOAT);
std::vector<FunctionBodyHelper::NodeDef> body{// nodes: {outputs, op, inputs, attributes}
{{"Two"}, "Constant", {}, {{"value", two_as_tensor}}},
{{"Y"}, "Mul", {"X", "Two"}}};
return BuildFunctionProto(functionProto, schema, body);
}
// Registers the float-only "CustomFuncFloat" op (opset 12) whose body is
// produced on demand by BuildFloatFunctionBody.
void RegisterCustomFuncFloatSchema() {
ONNX_NAMESPACE::OpSchema schema;
schema.SetName("CustomFuncFloat")
.SetDomain(ONNX_DOMAIN)
.SinceVersion(12)
.SetDoc("This operator returns an output tensor that is twice the input tensor.")
.Input(0, "X", "Input tensor", "T", OpSchema::Single)
.Output(0, "Y", "Output tensor", "T", OpSchema::Single)
.TypeConstraint("T", {"tensor(float)"}, "Type of the input and output values")
.SetContextDependentFunctionBodyBuilder(BuildFloatFunctionBody);
// Registration happens as a side effect of constructing this object.
ONNX_NAMESPACE::OpSchemaRegistry::OpSchemaRegisterOnce unused(schema);
(void)unused;
}
// Test for Context dependant function without type context
// Test for Context dependant function without type context:
// the op has no static function body, only a context-dependent builder,
// and the built body must pass the function checker.
TEST(FunctionAPITest, ContextDependentFunctionTest) {
RegisterCustomFuncFloatSchema();
const auto* schema = OpSchemaRegistry::Schema("CustomFuncFloat", 12, ONNX_DOMAIN);
EXPECT_TRUE(schema);
EXPECT_FALSE(schema->HasFunction());
EXPECT_TRUE(schema->HasContextDependentFunction());
// Build the function body for a concrete call node (no input types given).
NodeProto nodeProto;
nodeProto.set_op_type("CustomFuncFloat");
nodeProto.add_input("X");
nodeProto.add_output("Y");
FunctionBodyBuildContextImpl ctx(nodeProto);
FunctionProto fnProto;
EXPECT_TRUE(schema->BuildContextDependentFunction(ctx, fnProto));
// Expected body: Constant + Mul.
EXPECT_EQ(fnProto.node_size(), 2);
// The generated body must pass the ONNX function checker.
LexicalScopeContext lexicalScope;
CheckerContext checkerCtx;
std::unordered_map<std::string, int> opset_imports({{ONNX_DOMAIN, 12}});
checkerCtx.set_opset_imports(opset_imports);
checkerCtx.set_ir_version(7);
check_function(fnProto, checkerCtx, lexicalScope);
}
// A polymorphic context-dependent function test-case.
static bool
BuildFunctionBody(const FunctionBodyBuildContext& ctx, const OpSchema& schema, FunctionProto& functionProto) {
// Create a scalar-tensor constant 2.0 of input-type:
auto* tp = ctx.getInputType(0);
if ((tp == nullptr) || (!tp->has_tensor_type()))
return false;
auto elem_type = (TensorProto_DataType)tp->tensor_type().elem_type();
auto two_as_tensor = ToTensor(2.0, elem_type);
std::vector<FunctionBodyHelper::NodeDef> body{// nodes: {outputs, op, inputs, attributes}
{{"Two"}, "Constant", {}, {{"value", two_as_tensor}}},
{{"Y"}, "Mul", {"X", "Two"}}};
return BuildFunctionProto(functionProto, schema, body);
}
// Registers the polymorphic "CustomFunction" op (opset 12, float or double)
// whose body is produced on demand by BuildFunctionBody.
void RegisterCustomFunctionSchema() {
ONNX_NAMESPACE::OpSchema schema;
schema.SetName("CustomFunction")
.SetDomain(ONNX_DOMAIN)
.SinceVersion(12)
.SetDoc("This operator returns an output tensor that is twice the input tensor.")
.Input(0, "X", "Input tensor", "T", OpSchema::Single)
.Output(0, "Y", "Output tensor", "T", OpSchema::Single)
.TypeConstraint("T", {"tensor(float)", "tensor(double)"}, "Type of the input and output values")
.SetContextDependentFunctionBodyBuilder(BuildFunctionBody);
// Registration happens as a side effect of constructing this object.
ONNX_NAMESPACE::OpSchemaRegistry::OpSchemaRegisterOnce unused(schema);
(void)unused;
}
TEST(FunctionAPITest, VersionedFunctionBodyTest) {
  // This test illustrates issues of ONNX function ops.
  // It is over simplified in that only one primary op (Sub) is used in function body.
  // ONNX opset 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
  // MySub: 2 9 // MySub function op is created at opset 2.
  // // Its semantic is updated at opset 7
  // Body Ideal: 2 6 7 9 13 14 16 // Ideally function body shall be provided
  // // each time there is any version bump of
  // // used primary ops. It will be more
  // // frequent
  // // if more primary ops are used.
  // Body Real: 2 9 16 // In real life, we seldom add function body
  // // due to primary op update
  // Sub: 1 6 7 13 14 // Version bumps of Sub
  // Model: y y y y n n n y y y y n n n y y y // Model can(y)/cannot(n) used
  // with opset import version.
  // MySub@2: body registered for opset 2 only.
  ONNX_NAMESPACE::OpSchema schema_ver2;
  schema_ver2.SetName("MySub")
      .SetDomain(ONNX_DOMAIN)
      .SinceVersion(2)
      .SetDoc("Z = Sub (X, Y)")
      .Input(0, "X", "Input tensor X", "T", OpSchema::Single)
      .Input(1, "Y", "Input tensor Y", "T", OpSchema::Single)
      .Output(0, "Z", "Output tensor Z", "T", OpSchema::Single)
      .TypeConstraint("T", {"tensor(float)", "tensor(double)"}, "Type of the input and output values")
      .FunctionBody(
          R"ONNX(
{
Z = Sub (X, Y)
}
)ONNX",
          2);
  // MySub@9: bodies registered for opsets 9 and 16.
  ONNX_NAMESPACE::OpSchema schema_ver9;
  schema_ver9.SetName("MySub")
      .SetDomain(ONNX_DOMAIN)
      .SinceVersion(9)
      .SetDoc("Z = Sub (X, Y)")
      .Input(0, "X", "Input tensor X", "T", OpSchema::Single)
      .Input(1, "Y", "Input tensor Y", "T", OpSchema::Single)
      .Output(0, "Z", "Output tensor Z", "T", OpSchema::Single)
      .TypeConstraint("T", {"tensor(float)", "tensor(double)"}, "Type of the input and output values")
      .FunctionBody(
          R"ONNX(
{
Z = Sub (X, Y)
}
)ONNX",
          9)
      .FunctionBody(
          R"ONNX(
{
Z = Sub (X, Y)
}
)ONNX",
          16);
  ONNX_NAMESPACE::OpSchemaRegistry::OpSchemaRegisterOnce unused2(schema_ver2);
  (void)unused2;
  ONNX_NAMESPACE::OpSchemaRegistry::OpSchemaRegisterOnce unused9(schema_ver9);
  (void)unused9;
  const auto* schema2 = OpSchemaRegistry::Schema("MySub", 2, ONNX_DOMAIN);
  EXPECT_TRUE(schema2);
  for (int model_opset_import = 2; model_opset_import < 9; model_opset_import++) {
    try {
      bool validate = true;
      const FunctionProto* function = schema2->GetFunction(model_opset_import, validate);
      if (model_opset_import >= 6) { // function body should be updated at opset 6 where Sub is updated
        ASSERT_TRUE(function == nullptr);
      } else {
        ASSERT_TRUE(function);
      }
    } catch (const std::runtime_error& err) { // fixed: catch by const reference, not by value
      (void)err;
      ASSERT_TRUE(model_opset_import == 6 || model_opset_import == 7 || model_opset_import == 8);
    }
  }
  const auto* schema9 = OpSchemaRegistry::Schema("MySub", 9, ONNX_DOMAIN);
  EXPECT_TRUE(schema9);
  for (int model_opset_import = 9; model_opset_import < 10; model_opset_import++) {
    try {
      const FunctionProto* function = schema9->GetFunction(model_opset_import);
      ASSERT_TRUE(function);
    } catch (const std::runtime_error& err) { // fixed: catch by const reference, not by value
      (void)err;
      // NOTE(review): this branch is unreachable for the current loop range
      // [9, 10); the 13/14/15 values suggest the loop bound was meant to be
      // larger. Kept as-is to preserve behavior.
      ASSERT_TRUE(model_opset_import == 13 || model_opset_import == 14 || model_opset_import == 15);
    }
  }
}
// Polymorphic context-dependent function: the builder is given the input's
// type (float here), and the generated body must pass the function checker.
TEST(FunctionAPITest, TypeContextTest) {
RegisterCustomFunctionSchema();
const auto* schema = OpSchemaRegistry::Schema("CustomFunction", 12, ONNX_DOMAIN);
EXPECT_TRUE(schema);
EXPECT_FALSE(schema->HasFunction());
EXPECT_TRUE(schema->HasContextDependentFunction());
// Build the body for a call node whose input X is typed tensor(float).
NodeProto nodeProto;
nodeProto.set_op_type("CustomFunction");
nodeProto.add_input("X");
nodeProto.add_output("Y");
TypeProto floatTypeProto;
floatTypeProto.mutable_tensor_type()->set_elem_type(TensorProto_DataType::TensorProto_DataType_FLOAT);
FunctionBodyBuildContextImpl ctx(nodeProto, {floatTypeProto});
FunctionProto fnProto;
EXPECT_TRUE(schema->BuildContextDependentFunction(ctx, fnProto));
// Expected body: Constant + Mul.
EXPECT_EQ(fnProto.node_size(), 2);
// The generated body must pass the ONNX function checker.
LexicalScopeContext lexicalScope;
CheckerContext checkerCtx;
std::unordered_map<std::string, int> opset_imports({{ONNX_DOMAIN, 12}});
checkerCtx.set_opset_imports(opset_imports);
checkerCtx.set_ir_version(7);
check_function(fnProto, checkerCtx, lexicalScope);
}
} // namespace Test
} // namespace ONNX_NAMESPACE
#pragma warning(pop)

View File

@ -0,0 +1,49 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
#include "onnx/common/constants.h"
#include "onnx/defs/schema.h"
namespace ONNX_NAMESPACE {
namespace Test {
TEST(FunctionAPITest, GetFunctionOpWithVersion) {
  // MeanVarianceNormalization is a function op as of opset 9; its function
  // body must be registered and retrievable.
  const auto* mvn_schema = OpSchemaRegistry::Schema("MeanVarianceNormalization", 9, "");
  EXPECT_TRUE(mvn_schema);
  EXPECT_TRUE(mvn_schema->HasFunction());
  const auto* body = mvn_schema->GetFunction();
  EXPECT_EQ(body->name(), "MeanVarianceNormalization");
}
TEST(FunctionAPITest, GetMeanVarianceNormalizationFunctionWithVersion) {
  // The MVN function body must be retrievable at each of these opset
  // versions (each scope in the original test is one loop iteration).
  const int versions[] = {13, 17, 18};
  for (const int version : versions) {
    const auto* mvn_schema = OpSchemaRegistry::Schema("MeanVarianceNormalization", version, "");
    EXPECT_TRUE(mvn_schema);
    EXPECT_TRUE(mvn_schema->HasFunction());
    const auto* body = mvn_schema->GetFunction();
    EXPECT_EQ(body->name(), "MeanVarianceNormalization");
  }
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,558 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include <set>
#include "gtest/gtest.h"
#include "onnx/checker.h"
#include "onnx/common/constants.h"
#include "onnx/defs/parser.h"
#include "onnx/defs/printer.h"
#include "onnx/defs/schema.h"
#include "onnx/onnx-operators_pb.h"
#include "onnx/onnx_pb.h"
#include "onnx/shape_inference/implementation.h"
namespace ONNX_NAMESPACE {
namespace Test {
using namespace checker;
using TENSOR_TYPES_MAP = std::unordered_map<std::string, std::vector<std::string>>;
void GetFunctionProtoOpsetImport(
const OpSchema& op,
const FunctionProto* function_proto,
std::unordered_map<std::string, int>& op_set) {
if (function_proto->opset_import_size() > 0) {
for (const auto& opset_import : function_proto->opset_import()) {
op_set.insert({opset_import.domain(), opset_import.version()});
}
} else {
op_set.insert({op.domain(), op.since_version()});
}
}
// Partially type-checks `function_proto`, the body of `function_op`: for
// every node in the body, verifies that any actual input bound to a formal
// parameter of the function op has a type accepted by the called op's
// schema. Output types are not checked (that would need type inference).
// Increments `counter` after the body has been checked.
void VerifyTypeConstraint(const OpSchema& function_op, const FunctionProto* function_proto, int& counter) {
  // This is a simple partial type-checker for a function-body.
  // TODO: Revisit to make the type-checker more complete.
  // Map from formal parameter name to the set of type strings allowed for it.
  TENSOR_TYPES_MAP tc_map;
  for (const auto& input : function_op.inputs()) {
    std::string name = input.GetName();
    auto& tvec = tc_map[name];
    for (const auto& t : input.GetTypes()) {
      tvec.emplace_back(*t);
    }
  }
  for (const auto& output : function_op.outputs()) {
    std::string name = output.GetName();
    auto& tvec = tc_map[name];
    for (const auto& t : output.GetTypes()) {
      tvec.emplace_back(*t);
    }
  }
  // Opset imports in effect for the function body.
  std::unordered_map<std::string, int> op_set;
  GetFunctionProtoOpsetImport(function_op, function_proto, op_set);
  for (auto& node : function_proto->node()) {
    std::string op_type = node.op_type();
    std::unordered_map<std::string, int>::const_iterator it = op_set.find(node.domain());
    if (it == op_set.end()) {
      fail_check(
          "Op " + op_type + " of domain " + node.domain() + " used in " + function_op.Name() +
          " function body does not have an opset import."); // fixed grammar in message
    }
    int opset_version = it->second;
    const OpSchema* schema = OpSchemaRegistry::Schema(op_type, opset_version, node.domain());
    if (schema == nullptr) {
      // Guard: the lookup can fail for an unregistered op/version, and
      // dereferencing a null schema below would crash the test binary.
      fail_check(
          "Op " + op_type + " of domain " + node.domain() + " used in " + function_op.Name() +
          " function body has no schema registered for opset version " + std::to_string(opset_version) + ".");
    }
    // Check that the types of actual inputs, if known, are legal as per schema
    // of called op:
    auto num_formal_inputs = static_cast<size_t>(schema->inputs().size());
    auto num_actual_inputs = static_cast<size_t>(node.input_size());
    for (size_t i = 0; i < num_actual_inputs; ++i) {
      auto actual_param_name = node.input(static_cast<int>(i));
      auto iter = tc_map.find(actual_param_name);
      if (iter != tc_map.end()) {
        // if i >= num_formal_inputs, it is a variadic parameter corresponding
        // to the last formal parameter.
        auto formal_i = std::min(i, num_formal_inputs - 1);
        const auto& types = schema->inputs().at(formal_i).GetTypes();
        std::unordered_set<std::string> allowed_types;
        for (auto& s : types) {
          allowed_types.insert(*s);
        }
        for (auto& actual_type : iter->second) {
          if (allowed_types.find(actual_type) == allowed_types.end()) {
            fail_check(
                "Input type " + actual_type + " of parameter " + actual_param_name + " of function " +
                function_op.Name() + " is not allowed by operator " + op_type);
          }
        }
      }
    }
    // No simple check exists for outputs: we need to integrate type inference
    // to identify the possible output types and verify that they are included
    // in the function-schema.
  }
  ++counter;
}
// Testing the function-definitions provided for function-ops in ONNX schema registry.
// We type-check the function-definition for all possible input-typings, as permitted
// by the op-schema. Since the type-checking is dependent on attribute-values, we specify
// the attribute-values for which we want to do the testing down below.
// The set of attribute-values (for testing a function) is represented using a vector.
using AttributeValues = std::vector<AttributeProto>;
// FunctionOpAttributeMap: Used to implement a map from OpSchema to a set of AttributeValues
// (implemented as a vector). The testing will be done for each attribute-values specified.
// FunctionOpAttributeMap: Used to implement a map from OpSchema to a set of
// AttributeValues (implemented as a vector). The testing will be done for
// each attribute-values entry registered for the schema.
struct FunctionOpAttributeMap {
// Keyed by "domain:opname:version"; each entry is a list of test cases,
// each test case being a list of parsed AttributeProtos.
std::unordered_map<std::string, std::vector<AttributeValues>> map;
// Builds the lookup key for a (domain, opname, opset_version) triple.
std::string key(std::string domain, std::string opname, int opset_version) const {
return domain + ":" + opname + ":" + std::to_string(opset_version);
}
// Registers one test case: each string in `attributes` is parsed (ONNX text
// syntax) into an AttributeProto for the given default-domain op/version.
void addTestCase(const std::string& opname, int opset_version, std::initializer_list<const char*> attributes) {
auto& schema_test_cases = map[key("", opname, opset_version)];
schema_test_cases.push_back(AttributeValues());
auto& test_case = schema_test_cases.back();
for (auto attr_text : attributes) {
test_case.push_back(AttributeProto());
OnnxParser::Parse(test_case.back(), attr_text);
}
}
// Registers the attribute-value test cases used by the suite.
FunctionOpAttributeMap() {
addTestCase("Elu", 6, {"alpha = 1.0"});
addTestCase("LeakyRelu", 16, {"alpha = 0.1"});
addTestCase("HardSigmoid", 6, {"alpha = 0.2", "beta=0.5"});
addTestCase("Selu", 6, {"alpha = 1.6", "gamma=1.05"});
addTestCase("ReduceL1", 18, {}); // Use default-value for attributes
addTestCase("ReduceL1", 18, {"keepdims = 0"});
addTestCase("ReduceL1", 18, {"noop_with_empty_axes = 1"});
addTestCase("ReduceL2", 18, {});
addTestCase("ReduceL2", 18, {"noop_with_empty_axes = 1", "keepdims = 0"});
addTestCase("ReduceSumSquare", 18, {});
addTestCase("ReduceLogSumExp", 18, {});
addTestCase("ThresholdedRelu", 10, {"alpha = 0.9"});
addTestCase("HannWindow", 17, {"output_datatype = 1", "periodic = 1"});
addTestCase("HammingWindow", 17, {"output_datatype = 1", "periodic = 1"});
addTestCase("BlackmanWindow", 17, {"output_datatype = 1", "periodic = 1"});
addTestCase("MeanValueNormalization", 13, {});
addTestCase("AffineGrid", 20, {"align_corners = 0"});
addTestCase("AffineGrid", 20, {"align_corners = 1"});
// The following test-cases fails, correctly so: Some clarification/changes required
// to handle unsigned integers or similar issues:
// addTestCase("Shrink", 9, {"bias = 0.0", "lambd = 0.5"});
// addTestCase("ReduceLogSum", 18, {});
// addTestCase("Range", 11, {});
// The following test-case fails because the checker doesn't support handling of
// default-values of attributes of function-ops
// addTestCase("ThresholdedRelu", 10, {});
}
// Returns the registered test cases for `schema`. If none exist and the
// schema has no attributes, a single empty (no-attribute) case is created.
const std::vector<AttributeValues>& getTestCases(const OpSchema& schema) {
auto key_value = key(schema.domain(), schema.Name(), schema.SinceVersion());
auto it = map.find(key_value);
if (it != map.end())
return it->second;
if (schema.attributes().size() == 0) {
// Test with no-attributes
map[key_value].push_back(std::vector<AttributeProto>());
}
return map[key_value];
}
// Meyers-singleton accessor for the process-wide instance.
static FunctionOpAttributeMap& instance() {
static FunctionOpAttributeMap _instance;
return _instance;
}
};
struct FunctionTypeChecker {
const OpSchema& schema;
const FunctionProto& function_proto;
const std::vector<AttributeValues>* attribute_cases;
FunctionTypeChecker(const OpSchema& op_schema, const FunctionProto& proto)
: schema(op_schema), function_proto(proto) {
attribute_cases = &FunctionOpAttributeMap::instance().getTestCases(op_schema);
}
// Binds each type-variable in schema to a type-value
std::unordered_map<std::string, DataType> typeVarBindings;
std::vector<std::string> errors;
void recordError(const std::string& error, AttributeValues attrs) {
std::ostringstream ostr;
ostr << "Type checking failed for instantiation " << schema.Name() << ":" << schema.SinceVersion() << " {";
for (auto& pair : typeVarBindings) {
ostr << pair.first << " = " << *pair.second << ", ";
}
for (auto& attr : attrs) {
ostr << attr << ", ";
}
ostr << "}\n" << error << "\n";
errors.push_back(ostr.str());
}
void recordSuccess(AttributeValues attrs) {
std::cout << "Type checking succeeded for instantiation " << schema.Name() << ":" << schema.SinceVersion() << " {";
for (auto& pair : typeVarBindings) {
std::cout << pair.first << " = " << *pair.second << ", ";
}
for (auto& attr : attrs) {
std::cout << attr << ", ";
}
std::cout << "}\n";
}
// forTypeVar: This is used to iterate through all possible bindings of type-values
// to all type-variables used in the op schema, and invoke the type-checker for
// each possible instantiation.
void forTypeVar(int i) {
auto& typeConstraintVector = schema.typeConstraintParams();
if (i < typeConstraintVector.size()) {
std::string typeVar = typeConstraintVector[i].type_param_str;
auto& values = schema.typeConstraintMap().at(typeVar).first;
for (auto typeValue : values) {
typeVarBindings[typeVar] = typeValue;
// Now, process remaining type-variables
forTypeVar(i + 1);
}
} else {
// Generated a complete instantiation of type-values to all type-variables.
// Now, check for this instantiation.
typeCheckBinding();
}
}
// typeCheckBinding: Type-check the function-body for the current type-instantiation
void typeCheckBinding() {
std::vector<TypeProto> input_types;
for (const auto& input : schema.inputs()) {
DataType datatype = (1 == input.GetTypes().size())
?
// Select the single possible type
(*(input.GetTypes().begin()))
:
// Select the type bound to the type-var in current instantiation
typeVarBindings[input.GetTypeStr()];
input_types.push_back(Utils::DataTypeUtils::ToTypeProto(datatype));
}
for (auto& attribute_vals : *attribute_cases) {
ONNX_TRY {
auto output_types = shape_inference::InferFunctionOutputTypes(function_proto, input_types, attribute_vals);
}
ONNX_CATCH(ONNX_NAMESPACE::InferenceError & e) {
ONNX_HANDLE_EXCEPTION(([&]() { recordError(e.what(), attribute_vals); }));
}
}
}
std::string checkAll() {
if (attribute_cases->size() > 0)
forTypeVar(0);
std::string all_errors = "";
for (const std::string& error : errors)
all_errors += error;
return all_errors;
}
};
void VerifyFunction(const OpSchema& op, const FunctionProto* function_proto, int& counter) {
// Verify function proto is valid
if (!function_proto) {
fail_check("Cannot get function body for op '", op.Name(), "'");
}
CheckerContext ctx;
std::unordered_map<std::string, int> op_set;
GetFunctionProtoOpsetImport(op, function_proto, op_set);
auto version_range = OpSchemaRegistry::DomainToVersionRange::Instance().Map().at(op.domain());
if (op.since_version() > version_range.second || op.since_version() < version_range.first) {
fail_check("Invalid function version in function op '", op.Name(), "'");
}
ctx.set_opset_imports(op_set);
ctx.set_is_main_graph(false);
LexicalScopeContext lex_ctx;
ONNX_TRY {
check_function(*function_proto, ctx, lex_ctx);
}
ONNX_CATCH(ValidationError & ex) {
ONNX_HANDLE_EXCEPTION([&]() { fail_check(ex.what()); });
}
// Verify function op has compatible Type constraints defined in
// op and function body.
VerifyTypeConstraint(op, function_proto, counter);
FunctionTypeChecker type_checker(op, *function_proto);
auto type_errors = type_checker.checkAll();
auto success = (type_errors == "");
ASSERT_TRUE(success) << type_errors;
}
// Verify registered ops with function body has compatible
// definition on TypeConstraints between ops and function body
TEST(FunctionVerification, VerifyFunctionOps) {
  const std::vector<OpSchema> schemas = OpSchemaRegistry::get_all_schemas();
  int function_counter = 0, verified_counter = 0;
  // Iterate by const-reference: the original `const auto s` copied every
  // OpSchema on each iteration.
  for (const auto& s : schemas) {
    if (!s.HasFunction())
      continue;
    // Skip test for functions with known errors that need to be fixed:
    // Range currently permits int16 parameters, but the operator Sub, called
    // from the body of Range does not yet support int16 parameter.
    if (s.Name() == "Range")
      continue;
    ONNX_TRY {
      ++function_counter;
      std::vector<int> function_versions = s.function_opset_versions();
      for (int function_version : function_versions) {
        auto function_body = s.GetFunction(function_version);
        VerifyFunction(s, function_body, verified_counter);
      }
    }
    // Catch by reference: the original caught ValidationError by value,
    // which copies the exception and risks slicing; other catch sites in
    // this file already use the by-reference form.
    ONNX_CATCH(ONNX_NAMESPACE::checker::ValidationError & e) {
      ONNX_HANDLE_EXCEPTION([&]() { FAIL() << e.what(); });
    }
  }
  std::cerr << "[ ] Verified " << verified_counter << "/" << function_counter << " Functions." << std::endl;
}
// Verify that FunctionExpandHelper obtains missing default attributes
// from schema and adds them to ops in expanded subgraph.
TEST(FunctionVerification, VerifyFunctionExpandHelper) {
GraphProto graph;
NodeProto* new_node = graph.add_node();
// The node deliberately carries no "axes" attribute: the expansion must pull
// the default value from the MeanVarianceNormalization schema (opset 9).
new_node->set_op_type("MeanVarianceNormalization");
const auto* schema = OpSchemaRegistry::Schema("MeanVarianceNormalization", 9, "");
const FunctionProto* func = schema->GetFunction();
const auto default_axes_attribute = schema->attributes().at("axes").default_value;
// Expand the function body of `new_node` into `graph`.
FunctionExpandHelper(*new_node, *func, graph);
for (const auto& node : graph.node()) {
if (node.op_type() == "ReduceMean") {
// NOTE(review): assumes "axes" is attribute 0 of the expanded ReduceMean;
// the name is verified on the next line.
auto attr = node.attribute(0);
EXPECT_EQ(attr.name(), "axes");
EXPECT_EQ(attr.ints().size(), default_axes_attribute.ints().size());
for (int i = 0; i < default_axes_attribute.ints().size(); ++i) {
EXPECT_EQ(attr.ints(i), default_axes_attribute.ints(i));
}
return;
}
}
FAIL() << "During expanding MeanVarianceNormalization function, "
<< "the default attribute `axes` has not been assigned to ReduceMean op.";
}
// Registers a fake "DynamicQuantizeLinear_Fake" op in the ai.onnx.ml domain
// whose function body mixes ops from the default ONNX domain (opset 13) with
// the ai.onnx.ml domain (opset 2), for testing multi-domain function bodies.
void RegisterFunctionSchema() {
ONNX_NAMESPACE::OpSchema function_schema;
function_schema.SetName("DynamicQuantizeLinear_Fake")
.SetDomain(AI_ONNX_ML_DOMAIN)
.SinceVersion(2)
.SetDoc("Test Op")
.Input(0, "x", "Input tensor", "T1")
.Output(0, "y", "Quantized output tensor", "T2")
.Output(
1, "y_scale", "Output scale. It's a scalar, which means a per-tensor/layer quantization.", "tensor(float)")
.Output(2, "y_zero_point", "Output zero point. It's a scalar, which means a per-tensor/layer quantization.", "T2")
.TypeConstraint("T1", {"tensor(float)"}, "Constrain 'x' to float tensor.")
.TypeConstraint("T2", {"tensor(uint8)"}, "Constrain 'y_zero_point' and 'y' to 8-bit unsigned integer tensor.")
.FunctionBody(
FunctionBodyHelper::BuildNodes(
{// nodes: {outputs, op, inputs, attributes}
FunctionBodyHelper::Const<float>("Q_Min", 0.f),
FunctionBodyHelper::Const<float>("Q_Max", 255.f),
{{"X_Min"}, "ReduceMin", {"x"}, {MakeAttribute("keepdims", int64_t(0))}},
{{"X_Min_Adjusted"}, "Min", {"X_Min", "Q_Min"}},
{{"X_Max"}, "ReduceMax", {"x"}, {MakeAttribute("keepdims", int64_t(0))}},
{{"X_Max_Adjusted"}, "Max", {"X_Max", "Q_Min"}},
{{"X_Range"}, "Sub", {"X_Max_Adjusted", "X_Min_Adjusted"}},
{{"Scale"}, "Div", {"X_Range", "Q_Max"}},
{{"Min_Scaled"}, "Div", {"X_Min_Adjusted", "Scale"}},
{{"Initial_ZeroPoint_FP"}, "Sub", {"Q_Min", "Min_Scaled"}},
{{"Clipped_ZeroPoint_FP"}, "Clip", {"Initial_ZeroPoint_FP", "Q_Min", "Q_Max"}},
{{"Rounded_ZeroPoint_FP"}, "Round", {"Clipped_ZeroPoint_FP"}},
{{"Zeropoint"}, "Cast", {"Rounded_ZeroPoint_FP"}, {MakeAttribute("to", int64_t(2))}},
{{"y_scale"}, "Identity", {"Scale"}},
{{"y_zero_point"}, "Identity", {"Zeropoint"}},
{{"y"}, "QuantizeLinear", {"x", "Scale", "Zeropoint"}}}),
// Opset imports for the function body: default ONNX domain plus the
// test op's own domain.
[]() {
std::vector<OperatorSetIdProto> operator_sets(2);
auto& onnx_opset = operator_sets[0];
onnx_opset.set_domain("");
onnx_opset.set_version(13);
auto& test_opset = operator_sets[1];
test_opset.set_domain(AI_ONNX_ML_DOMAIN);
test_opset.set_version(2);
return operator_sets;
}());
// Register once per process; the result is intentionally unused.
ONNX_NAMESPACE::OpSchemaRegistry::OpSchemaRegisterOnce unused(function_schema);
(void)unused;
}
// A function body that uses ops from two domains must pass check_function
// when both domains appear in the opset imports.
TEST(FunctionVerification, VerifyFunctionBodyWithMultipleDomains) {
  RegisterFunctionSchema();
  // Look up the fake schema registered above and sanity-check its function.
  const auto* schema = OpSchemaRegistry::Schema("DynamicQuantizeLinear_Fake", 2, AI_ONNX_ML_DOMAIN);
  EXPECT_TRUE(schema);
  EXPECT_TRUE(schema->HasFunction());
  EXPECT_FALSE(schema->HasContextDependentFunction());
  const FunctionProto* function_body = schema->GetFunction();
  EXPECT_EQ(function_body->node_size(), 16);
  // Import both the default ONNX domain and AI_ONNX_ML_DOMAIN for the check.
  std::unordered_map<std::string, int> imports({{AI_ONNX_ML_DOMAIN, 2}, {"", 13}});
  CheckerContext checker_context;
  checker_context.set_opset_imports(imports);
  checker_context.set_ir_version(7);
  LexicalScopeContext scope;
  check_function(*function_body, checker_context, scope);
}
// A model whose graph calls model-local functions in two custom domains must
// pass check_model and full shape inference (with strict mode and data
// propagation enabled).
TEST(FunctionVerification, VerifyModelLocalFunctions) {
const char* code = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 13, "custom_domain_1" : 1, "custom_domain_2" : 1],
producer_name: "FunctionProtoTest",
producer_version: "1.0",
model_version: 1,
doc_string: "A test model for model local functions."
>
agraph (float[N] x) => (uint8[N] out)
{
o1, o2 = custom_domain_1.bar(x)
o3 = Add(o1, o2)
o4 = custom_domain_2.foo(o3)
out = Identity(o4)
}
<
domain: "custom_domain_1",
opset_import: [ "" : 13],
doc_string: "Test function proto"
>
bar (x) => (o1, o2) {
o1 = Identity (x)
o2 = Identity (o1)
}
<
domain: "custom_domain_2",
opset_import: [ "" : 13],
doc_string: "Test function proto"
>
foo (x) => (y) {
Q_Min = Constant <value = float[1] {0.0}> ()
Q_Max = Constant <value = float[1] {255.0}> ()
X_Min = ReduceMin <keepdims = 0> (x)
X_Max = ReduceMax <keepdims = 0> (x)
X_Range = Sub (X_Max, X_Min)
Scale = Div (X_Range, Q_Max)
ZeroPoint_FP = Sub (Q_Min, Scale)
Zeropoint = Cast <to = 2> (ZeroPoint_FP)
y = QuantizeLinear (x, Scale, Zeropoint)
}
)ONNX";
ModelProto model;
auto status = OnnxParser::Parse(model, code);
EXPECT_TRUE(status.IsOK()) << status.ErrorMessage();
// Both the checker and shape inference must accept the model-local functions.
check_model(model);
ShapeInferenceOptions options{true, 1, true};
ONNX_NAMESPACE::shape_inference::InferShapes(model, OpSchemaRegistry::Instance(), options);
}
// Like VerifyModelLocalFunctions, but with one model-local function
// (custom_domain_2.foo) calling another model-local function in a third
// domain, exercising nested function resolution in checker and inference.
TEST(FunctionVerification, VerifyNestedModelLocalFunctions) {
const char* code = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 13, "custom_domain_1" : 1, "custom_domain_2" : 1],
producer_name: "FunctionProtoTest",
producer_version: "1.0",
model_version: 1,
doc_string: "A test model for model local functions."
>
agraph (float[N] x) => (uint8[N] out)
{
o1, o2 = custom_domain_1.bar(x)
o3 = Add(o1, o2)
o4 = custom_domain_2.foo(o3)
out = Identity(o4)
}
<
domain: "custom_domain_1",
opset_import: [ "" : 13],
doc_string: "Test function proto"
>
bar (x) => (o1, o2) {
o1 = Identity (x)
o2 = Identity (o1)
}
<
domain: "custom_domain_2",
opset_import: [ "" : 13, "custom_domain_3" : 1],
doc_string: "Test function proto"
>
foo (x) => (o4) {
o1 = custom_domain_3.foo (x)
o4 = Identity (o1)
}
<
domain: "custom_domain_3",
opset_import: [ "" : 13],
doc_string: "Test function proto"
>
foo (x) => (y) {
Q_Min = Constant <value = float[1] {0.0}> ()
Q_Max = Constant <value = float[1] {255.0}> ()
X_Min = ReduceMin <keepdims = 0> (x)
X_Max = ReduceMax <keepdims = 0> (x)
X_Range = Sub (X_Max, X_Min)
Scale = Div (X_Range, Q_Max)
ZeroPoint_FP = Sub (Q_Min, Scale)
Zeropoint = Cast <to = 2> (ZeroPoint_FP)
y = QuantizeLinear (x, Scale, Zeropoint)
}
)ONNX";
ModelProto model;
auto status = OnnxParser::Parse(model, code);
EXPECT_TRUE(status.IsOK()) << status.ErrorMessage();
// Both the checker and shape inference must resolve the nested calls.
check_model(model);
ShapeInferenceOptions options{true, 1, true};
ONNX_NAMESPACE::shape_inference::InferShapes(model, OpSchemaRegistry::Instance(), options);
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,376 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
#include "onnx/checker.h"
#include "onnx/common/constants.h"
#include "onnx/defs/parser.h"
#include "onnx/defs/printer.h"
#include "onnx/defs/schema.h"
#include "onnx/inliner/inliner.h"
#include "onnx/shape_inference/implementation.h"
namespace ONNX_NAMESPACE {
namespace Test {
// Parses `input` into `model`, checks and shape-infers it, then inlines either
// the selected functions (when `to_inline` is non-null) or all model-local
// functions, and finally re-checks the model so that post-inlining invariants
// (e.g. no duplicate value names) are verified.
static void InlineFunctions(ModelProto& model, const char* input, const inliner::FunctionIdSet* to_inline = nullptr) {
OnnxParser parser(input);
auto status = parser.Parse(model);
EXPECT_TRUE(status.IsOK()) << status.ErrorMessage();
EXPECT_TRUE(parser.EndOfInput()) << "Extra unparsed input unexpected.";
checker::check_model(model, false, true);
shape_inference::InferShapes(model);
// std::cout << "Before inlining:\n" << ProtoToString(model) << "\n";
if (to_inline != nullptr)
inliner::InlineSelectedFunctions(model, *to_inline);
else
inliner::InlineLocalFunctions(model, true);
// std::cout << "After inlining:\n" << ProtoToString(model) << "\n";
// The following will ensure basic safety checks hold after inlining, including
// absence of duplicate names (multiple assignments to same name).
checker::check_model(model, true, true);
}
// Inlining a graph that calls two local functions must leave the expanded
// nodes in the main graph and remove both local functions from the model.
TEST(FunctionInliner, BasicTest) {
  const char* model_text = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 10, "local" : 1 ]
>
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N, 10] C)
{
T = local.foo (X, W, B)
C = local.square(T)
}
<
opset_import: [ "" : 10 ],
domain: "local",
doc_string: "Function foo."
>
foo (x, w, b) => (c) {
T = MatMul(x, w)
S = Add(T, b)
c = Softmax(S)
}
<
opset_import: [ "" : 10 ],
domain: "local",
doc_string: "Function square."
>
square (x) => (y) {
y = Mul (x, x)
}
)ONNX";
  ModelProto inlined_model;
  InlineFunctions(inlined_model, model_text);
  // foo expands to three nodes and square to one.
  ASSERT_EQ(inlined_model.graph().node_size(), 4);
  // Both local functions were fully inlined and removed.
  ASSERT_EQ(inlined_model.functions_size(), 0);
}
// Test that inlining processes subgraphs.
TEST(FunctionInliner, SubgraphTest) {
const char* code = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 10, "local" : 1 ]
>
agraph (bool cond, float[N] X) => (float[N] Y)
{
Y = If (cond) <
then_branch = then_graph () => (y) {
y = local.square (X)
},
else_branch = else_graph () => (y) {
y = local.square (X)
}
>
}
<
opset_import: [ "" : 10 ],
domain: "local",
doc_string: "Function square."
>
square (x) => (y) {
y = Mul (x, x)
}
)ONNX";
ModelProto model;
InlineFunctions(model, code);
// Both branches of the If node called local.square; after inlining each
// branch subgraph must contain the expanded Mul node.
auto& if_node = model.graph().node(0);
auto& graph1 = if_node.attribute(0).g();
ASSERT_EQ(graph1.node(0).op_type(), "Mul");
auto& graph2 = if_node.attribute(1).g();
ASSERT_EQ(graph2.node(0).op_type(), "Mul");
// The fully-inlined function is removed from the model.
auto num_functions = model.functions_size();
ASSERT_EQ(num_functions, 0);
}
// A local function that calls another local function: both levels of calls
// must be inlined into the main graph.
TEST(FunctionInliner, Nested) {
  ModelProto inlined;
  InlineFunctions(inlined, R"ONNX(
<ir_version: 8, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
Y = local.foo (X)
}
<opset_import: [ "" : 17, "local" : 1 ], domain: "local">
foo (x) => (y) {
temp = Add(x, x)
y = local.bar(temp)
}
<opset_import: [ "" : 17 ], domain: "local">
bar (x) => (y) {
y = Mul (x, x)
}
)ONNX");
  // foo's Add plus bar's Mul, fully expanded into the main graph.
  ASSERT_EQ(inlined.graph().node_size(), 2);
  // No local functions remain.
  ASSERT_EQ(inlined.functions_size(), 0);
}
// Test that inlining renames function-internal values to avoid clashes with
// names already present in the main graph.
TEST(FunctionInliner, Renaming) {
const char* code = R"ONNX(
<ir_version: 8, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
temp = local.foo (X)
temp__1 = Mul (temp, temp)
Y = Abs (temp__1)
}
<opset_import: [ "" : 17, "local" : 1 ], domain: "local">
foo (x) => (y) {
temp = Add(x, x)
y = Neg (temp)
}
)ONNX";
ModelProto model;
// Check that renaming handles accidental collision of names: when "temp" in "foo" is
// inlined, it will be renamed into something distinct from "temp" and "temp__1" as
// both these names occur in the main graph.
// (InlineFunctions re-runs check_model, which fails on duplicate names.)
InlineFunctions(model, code);
}
// Test that value_info declared inside a function body survives inlining and
// is attached (under the renamed value) to the main graph.
TEST(FunctionInliner, ValueInfoPropagation) {
const char* code = R"ONNX(
<ir_version: 10, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
result = local.foo (X)
Y = Abs (result)
}
<opset_import: [ "" : 17, "local" : 1 ], domain: "local">
foo (x) => (y)
<float[N] temp> {
temp = Add(x, x)
y = Neg (temp)
}
)ONNX";
ModelProto model;
InlineFunctions(model, code);
// Check that valueinfo is propagated from function to main graph.
auto& graph = model.graph();
// Output 0 of the first inlined node corresponds to (renamed) "temp".
auto& temp_new_name = graph.node(0).output(0);
auto& valueinfos = graph.value_info();
for (auto& valueinfo : valueinfos) {
if (valueinfo.name() == temp_new_name) {
ASSERT_TRUE(valueinfo.has_type());
ASSERT_TRUE(valueinfo.type().has_tensor_type());
ASSERT_TRUE(valueinfo.type().tensor_type().has_shape());
ASSERT_TRUE(valueinfo.type().tensor_type().shape().dim_size() == 1);
return;
}
}
ASSERT_TRUE(false) << "ValueInfo not found";
}
// Test that inlining the same function twice generates distinct names for each
// expansion's internal values.
TEST(FunctionInliner, TwoCallsToSameFunction) {
const char* code = R"ONNX(
<ir_version: 8, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
temp = local.foo (X)
Y = local.foo (temp)
}
<opset_import: [ "" : 17, "local" : 1 ], domain: "local">
foo (x) => (y) {
temp = Add(x, x)
y = Neg (temp)
}
)ONNX";
ModelProto model;
// The call below will check that multiple assignments to same name does not happen
// after inlining two calls to same function.
InlineFunctions(model, code);
}
// Both local functions must be inlined even though foo imports opset 18 while
// the model itself imports opset 17.
TEST(FunctionInliner, OpsetMismatch) {
  ModelProto inlined;
  InlineFunctions(inlined, R"ONNX(
<ir_version: 8, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
temp = local.foo (X)
Y = local.bar (temp)
}
<opset_import: [ "" : 18], domain: "local">
foo (x) => (y) {
y = Add(x, x)
}
<opset_import: [ "" : 17], domain: "local">
bar (x) => (y) {
y = Add(x, x)
}
)ONNX");
  // Each call site was replaced by the Add node from its function body.
  ASSERT_EQ(inlined.graph().node(0).op_type(), "Add");
  ASSERT_EQ(inlined.graph().node(1).op_type(), "Add");
  // No local functions remain.
  ASSERT_EQ(inlined.functions_size(), 0);
}
// Test InlineSelectedFunctions: only the functions listed in the id-set are
// inlined (including their call sites inside other, retained functions).
TEST(FunctionInliner, SelectiveInlining) {
const char* code = R"ONNX(
<ir_version: 8, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
temp = local.foo (X)
Y = local.bar (temp)
}
<opset_import: [ "" : 17], domain: "local">
foo (x) => (y) {
y = Add(x, x)
}
<opset_import: [ "" : 17, "local" : 1], domain: "local">
bar (x) => (y) {
y = local.foo(x)
}
)ONNX";
ModelProto model;
// Request inlining of local.foo only.
inliner::FunctionIdVector to_inline = {{"local", "foo"}};
auto to_inline_set = inliner::FunctionIdSet::Create(std::move(to_inline));
InlineFunctions(model, code, to_inline_set.get());
// The first node's call, to foo, must be inlined.
auto& first_node = model.graph().node(0);
// Check that it is a call to Add
ASSERT_EQ(first_node.op_type(), "Add");
// The second node's call, to bar, must not be inlined.
auto& second_node = model.graph().node(1);
// Check that it is a call to bar
ASSERT_EQ(second_node.op_type(), "bar");
// foo will be removed, bar will remain, in model.functions()
ASSERT_EQ(model.functions_size(), 1);
auto& bar_node = model.functions(0).node(0);
// Check that it is a call to Add, due to inlining
// the call to foo in bar.
ASSERT_EQ(bar_node.op_type(), "Add");
}
// Test that inlining a function registered against an older opset converts its
// ops to the model's opset version.
TEST(FunctionInliner, VersionConversion) {
const char* code = R"ONNX(
<ir_version: 8, opset_import: [ "" : 18, "local" : 1 ]>
agraph (float[N,M] X) => (float[N,M] Y)
{
Y = local.foo (X)
}
<opset_import: [ "" : 17], domain: "local">
foo (x) => (y) {
y = ReduceLogSum <axes = [0]> (x)
}
)ONNX";
ModelProto model;
InlineFunctions(model, code);
// Inlining ReduceLogSum (version 17) should convert it to ReduceLogSum (version 18)
// by promoting axes from attribute to input.
// (Hence node(1) here: node(0) is expected to be the node supplying the
// promoted axes input; see NestedVersionConversion below where it is a Constant.)
auto& node = model.graph().node(1);
ASSERT_EQ(node.op_type(), "ReduceLogSum");
ASSERT_EQ(node.input_size(), 2);
ASSERT_EQ(node.attribute_size(), 0);
}
// Version conversion must also apply to ops inlined from nested function calls.
TEST(FunctionInliner, NestedVersionConversion) {
  const char* code = R"ONNX(
<ir_version: 8, opset_import: [ "" : 18, "local" : 1 ]>
agraph (float[N,M] X) => (float[N,M] Y)
{
Y = local.foo (X)
}
<opset_import: [ "" : 17, "local" : 1], domain: "local">
foo (x) => (y) {
t = ReduceLogSum <axes = [0]> (x)
y = local.bar (t)
}
<opset_import: [ "" : 17], domain: "local">
bar (x) => (y) {
y = ReduceLogSum <axes = [1]> (x)
}
)ONNX";
  ModelProto model;
  InlineFunctions(model, code);
  // Inlining ReduceLogSum (version 17) should convert it to ReduceLogSum (version 18)
  // by promoting axes from attribute to input, with a preceding Constant node for
  // the axes value.
  // Check that both ReduceLogSum nodes have been converted.
  ASSERT_EQ(model.graph().node_size(), 4);
  ASSERT_EQ(model.graph().node(0).op_type(), "Constant");
  auto& node = model.graph().node(1);
  ASSERT_EQ(node.op_type(), "ReduceLogSum");
  ASSERT_EQ(node.input_size(), 2);
  ASSERT_EQ(node.attribute_size(), 0);
  ASSERT_EQ(model.graph().node(2).op_type(), "Constant");
  // Bind by const-reference: the original `auto node2 = ...` silently copied
  // the NodeProto, inconsistent with the `auto&` used for node(1) above.
  const auto& node2 = model.graph().node(3);
  ASSERT_EQ(node2.op_type(), "ReduceLogSum");
  ASSERT_EQ(node2.input_size(), 2);
  ASSERT_EQ(node2.attribute_size(), 0);
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,60 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
#include "onnx/common/ir.h"
#include "onnx/common/ir_pb_converter.h"
#include "onnx/defs/printer.h"
namespace ONNX_NAMESPACE {
namespace Test {
// Returns true if `name` is a valid C-style identifier: non-empty, starting
// with a letter or underscore, and containing only letters, digits, and
// underscores thereafter.
static bool IsValidIdentifier(const std::string& name) {
  if (name.empty()) {
    return false;
  }
  // Cast to unsigned char before calling the <cctype> classification
  // functions: passing a negative plain char (possible for bytes >= 0x80
  // when char is signed) is undefined behavior.
  if (!isalpha(static_cast<unsigned char>(name[0])) && name[0] != '_') {
    return false;
  }
  for (size_t i = 1; i < name.size(); ++i) {
    if (!isalnum(static_cast<unsigned char>(name[i])) && name[i] != '_') {
      return false;
    }
  }
  return true;
}
// Builds a small IR graph with auto-generated output names and checks that
// every node output name exported to ModelProto is a valid identifier.
TEST(IR, ValidIdentifierTest) {
  // Hold the Graph in a shared_ptr from the start: the original used a raw
  // `new Graph()` that was only adopted by shared_ptr at the ExportModelProto
  // call, leaking the graph if any earlier call threw.
  std::shared_ptr<Graph> g(new Graph());
  g->setName("test");
  Value* x = g->addInput();
  x->setUniqueName("x");
  x->setElemType(ONNX_NAMESPACE::TensorProto_DataType_FLOAT);
  x->setSizes({Dimension("M"), Dimension("N")});
  // Two chained Neg nodes whose outputs get auto-generated names.
  Node* node1 = g->create(kNeg, 1);
  node1->addInput(x);
  g->appendNode(node1);
  Value* temp1 = node1->outputs()[0];
  Node* node2 = g->create(kNeg, 1);
  node2->addInput(temp1);
  g->appendNode(node2);
  Value* y = node2->outputs()[0];
  g->registerOutput(y);
  ModelProto model;
  ExportModelProto(&model, g);
  // Every exported output name must be a syntactically valid identifier.
  for (auto& node : model.graph().node()) {
    for (auto& name : node.output()) {
      EXPECT_TRUE(IsValidIdentifier(name));
    }
  }
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,28 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
#include "onnx/defs/schema.h"
namespace ONNX_NAMESPACE {
namespace Test {
// Checks the registered Gemm schema: three inputs, input/output type-constraint
// sharing, and float-typed alpha/beta attributes among its four attributes.
TEST(OpRegistrationTest, GemmOp) {
  const auto* schema = OpSchemaRegistry::Schema("Gemm");
  EXPECT_TRUE(nullptr != schema);
  EXPECT_EQ(schema->inputs().size(), 3);
  // Input 0 and output 0 share the same set of allowed types.
  EXPECT_EQ(schema->inputs()[0].GetTypes(), schema->outputs()[0].GetTypes());
  EXPECT_EQ(schema->attributes().size(), 4);
  for (const char* attr_name : {"alpha", "beta"}) {
    EXPECT_NE(schema->attributes().count(attr_name), 0);
    EXPECT_EQ(schema->attributes().at(attr_name).type, AttributeProto_AttributeType_FLOAT);
  }
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,667 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include "gtest/gtest.h"
#include "onnx/checker.h"
#include "onnx/defs/parser.h"
#include "onnx/defs/printer.h"
using namespace ONNX_NAMESPACE;
namespace ONNX_NAMESPACE {
namespace Test {
// Parses `input` (ONNX textual syntax) into `parsedData` and verifies the
// parser consumed all input. Also round-trips the result through the printer
// and parser again to check printer/parser consistency.
template <typename T>
static void Parse(T& parsedData, const char* input) {
OnnxParser parser(input);
auto status = parser.Parse(parsedData);
EXPECT_TRUE(status.IsOK()) << status.ErrorMessage();
EXPECT_TRUE(parser.EndOfInput()) << "Extra unparsed input unexpected.";
// Extra checks for printer:
// Check we can convert data back to text form.
std::string text1 = ProtoToString(parsedData);
// Check that we can round-trip between the two representations.
// We cannot expect equality between text1 and input due to white-space and syntactic sugar,
// so, we convert it once more, and check for equality.
T temp;
status = OnnxParser::Parse(temp, text1.c_str());
EXPECT_TRUE(status.IsOK()) << status.ErrorMessage();
std::string text2 = ProtoToString(temp);
EXPECT_EQ(text1, text2);
}
// Expects the parser to reject `input` when parsing into a proto of type T.
template <typename T>
static void ExpectParseFailure(T& parsedData, const char* input) {
  EXPECT_FALSE(OnnxParser::Parse(parsedData, input).IsOK());
}
static void CheckModel(const char* code) {
ModelProto model;
Parse(model, code);
checker::check_model(model);
}
// A quoted string literal with escaped quote and backslash must parse to the
// unescaped character sequence.
TEST(ParserTest, EscapeStringLiteral) {
OnnxParser parser(R"(
"123\"56\\89"
)");
std::string s;
auto status = parser.ParserBase::Parse(s);
EXPECT_TRUE(status.IsOK()) << status.ErrorMessage();
EXPECT_TRUE(parser.EndOfInput()) << "Extra unparsed input unexpected.";
EXPECT_EQ(s, std::string("123\"56\\89"));
}
// Exercises the type-expression parser across all supported type kinds:
// tensor (with symbolic/unknown/unspecified dims), scalar, sequence,
// optional, sparse tensor, and map.
TEST(ParserTest, TypeTest) {
TypeProto type;
// 1-dimensional tensor type with symbolic dimension:
Parse(type, "float[N]");
EXPECT_TRUE(type.has_tensor_type());
int float_type = static_cast<int>(TensorProto_DataType::TensorProto_DataType_FLOAT);
int int32_type = static_cast<int>(TensorProto_DataType::TensorProto_DataType_INT32);
EXPECT_EQ(type.tensor_type().elem_type(), float_type);
EXPECT_TRUE(type.tensor_type().has_shape());
EXPECT_EQ(type.tensor_type().shape().dim_size(), 1);
EXPECT_EQ(type.tensor_type().shape().dim(0).dim_param(), "N");
// scalar type:
Parse(type, "float");
EXPECT_TRUE(type.has_tensor_type());
EXPECT_EQ(type.tensor_type().elem_type(), float_type);
EXPECT_TRUE(type.tensor_type().has_shape());
EXPECT_EQ(type.tensor_type().shape().dim_size(), 0);
// tensor type with unknown rank:
Parse(type, "float[]");
EXPECT_TRUE(type.has_tensor_type());
EXPECT_EQ(type.tensor_type().elem_type(), float_type);
EXPECT_FALSE(type.tensor_type().has_shape());
// 3-dimensional tensor
Parse(type, "float[N,M,K]");
EXPECT_EQ(type.tensor_type().shape().dim_size(), 3);
// Unspecified dimension (neither symbolic nor constant)
Parse(type, "float[N,?,K]");
EXPECT_FALSE(type.tensor_type().shape().dim(1).has_dim_param());
EXPECT_FALSE(type.tensor_type().shape().dim(1).has_dim_value());
// sequence type:
Parse(type, "seq(float[])");
EXPECT_TRUE(type.has_sequence_type());
auto& elttype = type.sequence_type().elem_type();
EXPECT_TRUE(elttype.has_tensor_type());
EXPECT_EQ(elttype.tensor_type().elem_type(), float_type);
EXPECT_FALSE(elttype.tensor_type().has_shape());
// optional type:
Parse(type, "optional(float)");
EXPECT_TRUE(type.has_optional_type());
auto& optelttype = type.optional_type().elem_type();
EXPECT_TRUE(optelttype.has_tensor_type());
EXPECT_EQ(optelttype.tensor_type().elem_type(), float_type);
EXPECT_TRUE(optelttype.tensor_type().has_shape());
// sparse tensor type: (the original comment said "optional type", a copy-paste slip)
Parse(type, "sparse_tensor(float[1000])");
EXPECT_TRUE(type.has_sparse_tensor_type());
EXPECT_EQ(type.sparse_tensor_type().elem_type(), float_type);
EXPECT_EQ(type.sparse_tensor_type().shape().dim_size(), 1);
// map type:
Parse(type, "map(int32, float[N])");
EXPECT_TRUE(type.has_map_type());
EXPECT_EQ(type.map_type().key_type(), int32_type);
auto& valtype = type.map_type().value_type();
EXPECT_TRUE(valtype.has_tensor_type());
EXPECT_EQ(valtype.tensor_type().elem_type(), float_type);
EXPECT_EQ(valtype.tensor_type().shape().dim_size(), 1);
}
// Exercises tensor-literal parsing: dimensions must be numeric and concrete,
// and int/float/string element lists (including escapes) must parse.
TEST(ParserTest, TensorProtoTest) {
TensorProto tensorProto;
// Concrete tensor-type with numeric dimensions expected:
ExpectParseFailure(tensorProto, "int32[] {1, 2, 3, 4, 5}");
// Symbolic dimensions are not allowed.
ExpectParseFailure(tensorProto, "int32[N] {1, 2, 3, 4, 5}");
Parse(tensorProto, "int32[5] {1, 2, 3, 4, 5}");
// An optional name may precede the element list.
Parse(tensorProto, "int32[5] T {1, 2, 3, 4, 5}");
EXPECT_EQ(tensorProto.name(), "T");
Parse(tensorProto, "float[5] {1, 2.0, 3.1, 4, 5.5}");
// Scientific notation in float literals.
Parse(tensorProto, "float[5] {1e1, 2.0e-1, 3.1E-1, 4E+1, 5.5e-10}");
Parse(tensorProto, "string[2] { \"Hello\", \"World\" }");
// String literals with escape character
Parse(tensorProto, R"(
string[2] { "Use a \"quoted\" word", "Use a backslash \\ like this." }
)");
}
// Exercises attribute parsing for every attribute kind: int, float, int/float/
// string lists, tensor, string, attribute reference, empty typed list, graph,
// and type-proto attributes.
TEST(ParserTest, AttributeTest) {
AttributeProto attr;
Parse(attr, "x = 2");
EXPECT_EQ(attr.name(), "x");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_INT);
EXPECT_EQ(attr.i(), 2);
Parse(attr, "x = 0.625");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT);
EXPECT_FLOAT_EQ(attr.f(), 0.625);
Parse(attr, "x = [2, 4, 6]");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_INTS);
EXPECT_EQ(attr.ints_size(), 3);
Parse(attr, "x = [0.125, 0.625]");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_FLOATS);
EXPECT_EQ(attr.floats_size(), 2);
Parse(attr, "x = float[3] {2.1, 4.1, 6.1}");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_TENSOR);
Parse(attr, "x = \"astring\"");
EXPECT_EQ(attr.name(), "x");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
EXPECT_EQ(attr.s(), "astring");
Parse(attr, "x = [\"abc\", \"def\"]");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_STRINGS);
// Reference to an enclosing function's attribute, with an explicit type.
Parse(attr, "x : ints = @xyz");
EXPECT_EQ(attr.ref_attr_name(), "xyz");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_INTS);
// Empty list requires an explicit type annotation.
Parse(attr, "x : ints = []");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_INTS);
EXPECT_EQ(attr.ints_size(), 0);
// Graph-valued attribute (as used for If/Loop bodies).
Parse(attr, R"ONNX(
body = somegraph (float[N] y, float[N] z) => (float[N] w)
{
x = foo(y, z)
w = bar(x, y)
}
)ONNX");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_GRAPH);
EXPECT_EQ(attr.g().node_size(), 2);
// Type-proto-valued attribute.
Parse(attr, "type = float[3]");
EXPECT_EQ(attr.type(), AttributeProto_AttributeType::AttributeProto_AttributeType_TYPE_PROTO);
EXPECT_TRUE(attr.tp().has_tensor_type());
int float_type = static_cast<int>(TensorProto_DataType::TensorProto_DataType_FLOAT);
EXPECT_EQ(attr.tp().tensor_type().elem_type(), float_type);
}
// Parses an angle-bracketed attribute list and checks both entries by name.
TEST(ParserTest, AttrListTest) {
  AttrList parsed_attrs;
  Parse(parsed_attrs, R"ONNX(
<
x = 2,
w = 3
>
)ONNX");
  EXPECT_EQ(parsed_attrs.size(), 2);
  EXPECT_EQ(parsed_attrs.Get(0).name(), "x");
  EXPECT_EQ(parsed_attrs.Get(1).name(), "w");
}
// A call qualified with a single-segment domain prefix must parse with the
// prefix as the node's domain.
TEST(ParserTest, DomainOpCallTest) {
  const char* code = "x = somedomain.foo(y, z)";
  NodeProto n;
  Parse(n, code);
  // The original test parsed but asserted nothing; verify the domain/op split
  // (mirroring QualifiedOpNameTest for the multi-segment case).
  EXPECT_EQ(n.domain(), "somedomain");
  EXPECT_EQ(n.op_type(), "foo");
}
// Parses a single node and checks its inputs/outputs/op-type, then parses a
// larger node list containing attributes and multi-output ops (Loop).
TEST(ParserTest, NodeTest) {
const char* code = "x = foo(y, z)";
NodeProto n;
Parse(n, code);
EXPECT_EQ(n.input_size(), 2);
EXPECT_EQ(n.input(0), "y");
EXPECT_EQ(n.input(1), "z");
EXPECT_EQ(n.output_size(), 1);
EXPECT_EQ(n.output(0), "x");
EXPECT_EQ(n.op_type(), "foo");
// A longer node list; success of Parse (including its round-trip through the
// printer) is the assertion here.
NodeList nl;
Parse(nl, R"ONNX(
{
sub_result = Sub(limit, start)
sub_result_casted = Cast<to = 1>(sub_result)
delta_casted = Cast<to = 1>(delta)
div_result = Div(sub_result_casted, delta_casted)
ceil_result = Ceil(div_result)
ceil_result_relu = Relu(ceil_result)
ceil_result_relu_int = Cast<to = 7>(ceil_result_relu)
ceil_result_relu_bool = Cast<to = 9>(ceil_result_relu)
variadic_output, output = Loop (ceil_result_relu_int, ceil_result_relu_bool, start)
}
)ONNX");
}
// A dotted multi-segment prefix on the op name parses as the node's domain.
TEST(ParserTest, QualifiedOpNameTest) {
  NodeProto node;
  Parse(node, "x = com.example.foo(y, z)");
  EXPECT_EQ(node.domain(), "com.example");
  EXPECT_EQ(node.op_type(), "foo");
}
// A braced statement block parses directly into a graph's repeated node
// field, one NodeProto per statement, preserving order.
TEST(ParserTest, NodeListTest) {
const char* code = R"ONNX(
{
x = foo(y, z)
w = bar(x, y)
}
)ONNX";
GraphProto graph;
Parse(*graph.mutable_node(), code);
EXPECT_EQ(graph.node_size(), 2);
EXPECT_EQ(graph.node(0).op_type(), "foo");
EXPECT_EQ(graph.node(1).op_type(), "bar");
}
// Scalar node attributes (int, float, string) parse into the node's
// attribute list in declaration order.
TEST(ParserTest, NodeAttrTest1) {
const char* code = "x = foo <a = 100, b = 200.5, c = \"astring\"> (y, z)";
NodeProto n;
Parse(n, code);
EXPECT_EQ(n.attribute_size(), 3);
EXPECT_EQ(n.attribute(0).name(), "a");
EXPECT_EQ(n.attribute(1).name(), "b");
EXPECT_EQ(n.attribute(2).name(), "c");
}
// List-valued node attributes (ints, floats, strings) also parse; only the
// attribute count is verified here.
TEST(ParserTest, NodeAttrTest2) {
const char* code = "x = foo <d = [5, 10], e = [0.55, 0.66], f = [\"str1\", \"str2\"]> (y, z)";
NodeProto n;
Parse(n, code);
EXPECT_EQ(n.attribute_size(), 3);
}
// Full graph syntax: name, typed inputs/outputs, an initializer block in
// <...>, nodes, and '#' comments. Entries in <...> with values (w1, w2)
// become initializers; the value-less entry (x) becomes a value_info.
TEST(ParserTest, GraphTest) {
const char* code = R"ONNX(
agraph (float[N] y, float[N] z) => (float[N] w)
<float[2] w1 = {1.0, 2.0}, float[3] w2 = {4.0, 5.0, 6.0}, float[N] x>
{
# This is a comment.
x = foo(y, z, w1) # More comments.
w = bar(x, y, w2)
}
)ONNX";
GraphProto graph;
Parse(graph, code);
EXPECT_EQ(graph.name(), "agraph");
EXPECT_EQ(graph.input_size(), 2);
EXPECT_EQ(graph.output_size(), 1);
EXPECT_EQ(graph.node_size(), 2);
EXPECT_EQ(graph.initializer_size(), 2);
EXPECT_EQ(graph.value_info_size(), 1);
}
// Graph inputs may omit their type (input z has no type annotation); the
// graph must still parse with both inputs present.
TEST(ParserTest, GraphPartialTypeTest) {
const char* code = R"ONNX(
agraph (float[N] y, z) => (float[N] w)
{
x = foo(y, z)
w = bar(x, y)
}
)ONNX";
GraphProto graph;
Parse(graph, code);
EXPECT_EQ(graph.name(), "agraph");
EXPECT_EQ(graph.input_size(), 2);
EXPECT_EQ(graph.output_size(), 1);
}
// Function syntax: a <...> prologue with opset_import/domain/doc_string,
// then untyped inputs/outputs and a node body. No attributes are declared,
// so attribute_size() is 0.
TEST(ParserTest, FunctionTest) {
const char* code = R"ONNX(
<
opset_import: [ "" : 10 ],
domain: "ai.onnx.ml",
doc_string: "A function test case."
>
f (y, z) => (w)
{
x = Add(y, z)
w = Mul(x, y)
}
)ONNX";
FunctionProto fp;
Parse(fp, code);
EXPECT_EQ(fp.name(), "f");
EXPECT_EQ(fp.input_size(), 2);
EXPECT_EQ(fp.output_size(), 1);
EXPECT_EQ(fp.node_size(), 2);
EXPECT_EQ(fp.attribute_size(), 0);
EXPECT_EQ(fp.opset_import_size(), 1);
}
// When function inputs/outputs carry type annotations, the parser records
// them as value_info entries (inputs first, then outputs).
TEST(ParserTest, FunctionValueInfoTest) {
const char* code = R"ONNX(
<
opset_import: [ "" : 10 ],
domain: "ai.onnx.ml",
doc_string: "A function test case."
>
f (float[N] y, float[N] z) => (float[N] w)
{
x = Add(y, z)
w = Mul(x, y)
}
)ONNX";
FunctionProto fp;
Parse(fp, code);
EXPECT_EQ(fp.input_size(), 2);
EXPECT_EQ(fp.output_size(), 1);
ASSERT_EQ(fp.value_info_size(), 3);
EXPECT_EQ(fp.value_info(0).name(), "y");
EXPECT_EQ(fp.value_info(1).name(), "z");
EXPECT_EQ(fp.value_info(2).name(), "w");
}
// A <...> block after the signature declares value_info for an internal
// value (x); it is appended after the typed inputs and outputs.
TEST(ParserTest, FunctionValueInfoTest2) {
const char* code = R"ONNX(
<
opset_import: [ "" : 10 ],
domain: "ai.onnx.ml",
doc_string: "A function test case."
>
f (float[N] y, float[N] z) => (float[N] w)
<float[N] x>
{
x = Add(y, z)
w = Mul(x, y)
}
)ONNX";
FunctionProto fp;
Parse(fp, code);
EXPECT_EQ(fp.input_size(), 2);
EXPECT_EQ(fp.value_info_size(), 4);
ASSERT_EQ(fp.output_size(), 1);
EXPECT_EQ(fp.value_info(0).name(), "y");
EXPECT_EQ(fp.value_info(1).name(), "z");
EXPECT_EQ(fp.value_info(2).name(), "w");
EXPECT_EQ(fp.value_info(3).name(), "x");
}
// Mixed case: untyped input z gets no value_info, while typed input y,
// typed output w, and the declared internals x and t do — hence 4 entries
// in the order y, w, x, t.
TEST(ParserTest, FunctionValueInfoTest3) {
const char* code = R"ONNX(
<
opset_import: [ "" : 10 ],
domain: "ai.onnx.ml",
doc_string: "A function test case."
>
f (float[N] y, z) => (float[N] w)
<float[N] x, float[N] t>
{
x = Add(y, z)
t = Add(x, x)
w = Mul(t, y)
}
)ONNX";
FunctionProto fp;
Parse(fp, code);
EXPECT_EQ(fp.input_size(), 2);
ASSERT_EQ(fp.value_info_size(), 4);
EXPECT_EQ(fp.output_size(), 1);
EXPECT_EQ(fp.value_info(0).name(), "y");
EXPECT_EQ(fp.value_info(1).name(), "w");
EXPECT_EQ(fp.value_info(2).name(), "x");
EXPECT_EQ(fp.value_info(3).name(), "t");
}
// A graph input with a default value ("float y = {1.0}") is recorded both
// as an input and as an initializer, alongside the w1/w2 initializers
// declared in the <...> block.
TEST(ParserTest, InitializerTest) {
const char* code = R"ONNX(
agraph (float y = {1.0}, float[N] z) => (float[N] w)
<float[2] w1 = {1.0, 2.0}, float[3] w2 = {4.0, 5.0, 6.0}, float[N] x>
{
x = foo(y, z, w1)
w = bar(x, y, w2)
}
)ONNX";
GraphProto graph;
Parse(graph, code);
EXPECT_EQ(graph.input_size(), 2);
EXPECT_EQ(graph.output_size(), 1);
EXPECT_EQ(graph.initializer_size(), 3); // y, w1, w2
EXPECT_EQ(graph.value_info_size(), 1); // x
}
// Graph-valued attributes: an If node's then/else branches are inline
// graphs, yielding two attributes on the node.
TEST(ParserTest, IfNodeTest) {
const char* code = R"ONNX(
z = If (b) <
then_branch = g1 () => (float[N] z_then)
{
z_then = foo(y)
},
else_branch = g2 () => (float[N] z_else)
{
z_else = bar(x)
}
>
)ONNX";
NodeProto node;
Parse(node, code);
EXPECT_EQ(node.input_size(), 1);
EXPECT_EQ(node.output_size(), 1);
EXPECT_EQ(node.attribute_size(), 2);
}
// Whole-model syntax: a model prologue (ir_version, opset_import, producer
// info, metadata_props) followed by the main graph.
TEST(ParserTest, ModelTest) {
const char* code = R"ONNX(
<
ir_version: 7,
opset_import: [ "ai.onnx.ml" : 10 ],
producer_name: "ParserTest",
producer_version: "1.0",
domain: "ai.onnx.ml",
model_version: 1,
doc_string: "A parser test case model.",
metadata_props: [ "somekey" : "somevalue", "key2" : "value2" ]
>
agraph (float[N] y, float[N] z) => (float[N] w)
{
x = foo(y, z)
w = bar(x, y)
}
)ONNX";
ModelProto model;
Parse(model, code);
EXPECT_EQ(model.graph().input_size(), 2);
EXPECT_EQ(model.graph().output_size(), 1);
EXPECT_EQ(model.graph().node_size(), 2);
}
// A parsed model with standard ops must pass the model checker
// (CheckModel parses and validates in one step).
TEST(ParserTest, ModelCheckTest) {
const char* code = R"ONNX(
<
ir_version: 7,
opset_import: [ "" : 10 ]
>
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul(X, W)
S = Add(T, B)
C = Softmax(S)
}
)ONNX";
CheckModel(code);
}
// A model whose If branches close over outer-scope values (X, Y) must
// parse and pass the checker.
TEST(ParserTest, IfModelTest) {
const char* code = R"ONNX(
<
ir_version: 7,
opset_import: [ "" : 13 ]
>
iftest (bool b, float[128] X, float[128] Y) => (float[128] Z)
{
Z = If (b) <
then_branch = g1 () => (float[128] z_then) { z_then = Identity(X) },
else_branch = g2 () => (float[128] z_else) { z_else = Identity(Y) }
>
}
)ONNX";
CheckModel(code);
}
// Models with local function definitions: the first model defines two
// functions in domain "local"; the second defines a function with declared
// attributes (alpha with default, gamma without) referenced via @attr.
TEST(ParserTest, FunModelTest) {
const char* code = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 10, "local" : 1 ]
>
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = local.foo (X, W, B)
C = local.square(T)
}
<
opset_import: [ "" : 10 ],
domain: "local",
doc_string: "Function foo."
>
foo (x, w, b) => (c) {
T = MatMul(x, w)
S = Add(T, b)
c = Softmax(S)
}
<
opset_import: [ "" : 10 ],
domain: "local",
doc_string: "Function square."
>
square (x) => (y) {
y = Mul (x, x)
}
)ONNX";
CheckModel(code);
const char* code_function_with_attributes = R"ONNX(
<
ir_version: 9,
opset_import: [ "" : 15, "custom_domain" : 1]
>
agraph (float[N] x) => (float[N] out)
{
out = custom_domain.foo<alpha=2.0, gamma=3.0>(x)
}
<
domain: "custom_domain",
opset_import: [ "" : 15],
doc_string: "function foo"
>
foo
<alpha: float=4.0, gamma>
(X) => (C)
{
constant_alpha = Constant<value_float: float=@alpha>()
constant_gamma = Constant<value_float: float=@gamma>()
constant_alpha_x = Mul(constant_alpha, X)
C = Add(constant_alpha_x, constant_gamma)
}
)ONNX";
CheckModel(code_function_with_attributes);
}
// Sequence-typed model input (seq(float[N])) must parse and validate.
TEST(ParserTest, TypesModelTest1) {
const char* code = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 18 ]
>
agraph (seq(float[N]) seqX) => (float[M, N] X)
{
X = ConcatFromSequence < axis = 0, new_axis = 1 >(seqX)
}
)ONNX";
CheckModel(code);
}
// All composite type constructors — seq, map, optional, sparse_tensor —
// must be accepted as model input types.
TEST(ParserTest, TypesModelTest2) {
const char* code = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 18 ]
>
agraph (float[N] tensorX, seq(float[N]) seqX, map(int32, float[N]) mapX, optional(float[N]) optionalX, sparse_tensor(float[N]) sparseX) => (float[N] X)
{
X = Identity (tensorX)
}
)ONNX";
CheckModel(code);
}
// Initializers can reference external data via ["key": "value"] pairs;
// the parser sets data_location to EXTERNAL and records each pair in
// external_data. Initializer order is y (input default), m1, m2, so m1 is
// initializer index 1.
TEST(ParserTest, ExternalDataTest) {
const char* code = R"ONNX(
agraph (float y = {1.0}, float[N] z) => (w) <
float[3, 2] m1 = ["location": "weight_1.bin", "offset": "17"],
float[2, 1] m2 = {1.0, 2.0}
>
{
x = Add(y, z)
m = Mul(m1, m1)
}
)ONNX";
GraphProto graph;
Parse(graph, code);
EXPECT_EQ(graph.input_size(), 2);
EXPECT_EQ(graph.output_size(), 1);
EXPECT_EQ(graph.initializer_size(), 3); // y, m1, m2
EXPECT_EQ(graph.value_info_size(), 0); // no value-less <...> entries declared
EXPECT_EQ(graph.initializer().Get(1).data_location(), TensorProto_DataLocation::TensorProto_DataLocation_EXTERNAL);
EXPECT_EQ(graph.initializer().Get(1).external_data().Get(0).key(), "location");
EXPECT_EQ(graph.initializer().Get(1).external_data().Get(0).value(), "weight_1.bin");
EXPECT_EQ(graph.initializer().Get(1).external_data().Get(1).key(), "offset");
EXPECT_EQ(graph.initializer().Get(1).external_data().Get(1).value(), "17");
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,275 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
#include "onnx/defs/operator_sets.h"
#include "onnx/defs/schema.h"
using namespace ONNX_NAMESPACE;
namespace ONNX_NAMESPACE {
namespace Test {
// IsOnnxStaticRegistrationDisabled() must agree with the
// __ONNX_DISABLE_STATIC_REGISTRATION compile-time switch.
TEST(SchemaRegistrationTest, DisabledOnnxStaticRegistrationAPICall) {
#ifdef __ONNX_DISABLE_STATIC_REGISTRATION
EXPECT_TRUE(IsOnnxStaticRegistrationDisabled());
#else
EXPECT_FALSE(IsOnnxStaticRegistrationDisabled());
#endif
}
// Schemas of all versions are registered by default.
// Further schema manipulation is expected to be error-free.
// (Loaded-schema version 0 means "all opset versions loaded".)
TEST(SchemaRegistrationTest, RegisterAllByDefaultAndManipulateSchema) {
#ifndef __ONNX_DISABLE_STATIC_REGISTRATION
// Expects all opset registered by default
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 0);
// Should find schema for all versions
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 1));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 6));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 7));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 13));
// Clear all opset schema registration
DeregisterOnnxOperatorSetSchema();
// Should not find any opset
EXPECT_EQ(nullptr, OpSchemaRegistry::Schema("Add"));
// Register all opset versions
RegisterOnnxOperatorSetSchema();
// Should find all opset
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add"));
#endif
}
// By default ONNX registers all opset versions and selective schema loading cannot be tested
// So these tests are run only when static registration is disabled
// (GetLoadedSchemaVersion() == -1 means "nothing registered").
TEST(SchemaRegistrationTest, RegisterAndDeregisterAllOpsetSchemaVersion) {
#ifdef __ONNX_DISABLE_STATIC_REGISTRATION
// Clear all opset schema registration
DeregisterOnnxOperatorSetSchema();
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == -1);
// Should not find schema for any op
EXPECT_EQ(nullptr, OpSchemaRegistry::Schema("Acos"));
EXPECT_EQ(nullptr, OpSchemaRegistry::Schema("Add"));
EXPECT_EQ(nullptr, OpSchemaRegistry::Schema("Trilu"));
// Register all opset versions (argument 0 == "all")
RegisterOnnxOperatorSetSchema(0);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 0);
// Should find schema for all ops. Available versions are:
// Acos-7
// Add-1,6,7,13,14
// Trilu-14
auto schema = OpSchemaRegistry::Schema("Acos", 7);
EXPECT_NE(nullptr, schema);
EXPECT_EQ(schema->SinceVersion(), 7);
schema = OpSchemaRegistry::Schema("Add", 14);
EXPECT_NE(nullptr, schema);
EXPECT_EQ(schema->SinceVersion(), 14);
schema = OpSchemaRegistry::Schema("Trilu");
EXPECT_NE(nullptr, schema);
EXPECT_EQ(schema->SinceVersion(), 14);
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 1));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 6));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 7));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 13));
// Clear all opset schema registration
DeregisterOnnxOperatorSetSchema();
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == -1);
// Should not find schema for any op
EXPECT_EQ(nullptr, OpSchemaRegistry::Schema("Acos"));
EXPECT_EQ(nullptr, OpSchemaRegistry::Schema("Add"));
EXPECT_EQ(nullptr, OpSchemaRegistry::Schema("Trilu"));
#endif
}
// Registering a single target opset (13) loads, for each op, the latest
// schema at or before that version — and nothing newer or older.
TEST(SchemaRegistrationTest, RegisterSpecifiedOpsetSchemaVersion) {
#ifdef __ONNX_DISABLE_STATIC_REGISTRATION
DeregisterOnnxOperatorSetSchema();
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == -1);
RegisterOnnxOperatorSetSchema(13);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 13);
auto opSchema = OpSchemaRegistry::Schema("Add");
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 13);
// Should not find opset 12
opSchema = OpSchemaRegistry::Schema("Add", 12);
EXPECT_EQ(nullptr, opSchema);
// Should not find opset 14
opSchema = OpSchemaRegistry::Schema("Trilu");
EXPECT_EQ(nullptr, opSchema);
// Acos-7 is the latest Acos before specified 13
opSchema = OpSchemaRegistry::Schema("Acos", 13);
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 7);
#endif
}
// Register opset-11, then opset-14
// Expects Reg(11, 14) == Reg(11) U Reg(14)
TEST(SchemaRegistrationTest, RegisterMultipleOpsetSchemaVersions_UpgradeVersion) {
#ifdef __ONNX_DISABLE_STATIC_REGISTRATION
DeregisterOnnxOperatorSetSchema();
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == -1);
// Register opset 11
RegisterOnnxOperatorSetSchema(11);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 11);
// Register opset 14
// Do not fail on duplicate schema registration request
RegisterOnnxOperatorSetSchema(14, false);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 14);
// Acos-7 is the latest before/at opset 11 and 14
auto opSchema = OpSchemaRegistry::Schema("Acos");
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 7);
// Add-7 is the latest before/at opset 11
// Add-14 is the latest before/at opset 14
// Should find both Add-7,14
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 7));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 14));
// Should find the max version 14
opSchema = OpSchemaRegistry::Schema("Add");
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 14);
// Should find Add-7 as the max version <=13
opSchema = OpSchemaRegistry::Schema("Add", 13);
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 7);
// Should find opset 14
opSchema = OpSchemaRegistry::Schema("Trilu");
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 14);
#endif
}
// Register opset-14, then opset-11
// Expects Reg(14, 11) == Reg(11) U Reg(14)
TEST(SchemaRegistrationTest, RegisterMultipleOpsetSchemaVersions_DowngradeVersion) {
#ifdef __ONNX_DISABLE_STATIC_REGISTRATION
DeregisterOnnxOperatorSetSchema();
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == -1);
// Register opset 14
RegisterOnnxOperatorSetSchema(14);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 14);
// Register opset 11
// Do not fail on duplicate schema registration request
RegisterOnnxOperatorSetSchema(11, false);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 11);
// Acos-7 is the latest before/at opset 11 and 14
auto opSchema = OpSchemaRegistry::Schema("Acos");
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 7);
// Add-7 is the latest before/at opset 11
// Add-14 is the latest before/at opset 14
// Should find both Add-7,14
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 7));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 14));
// Should find the max version 14
opSchema = OpSchemaRegistry::Schema("Add");
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 14);
// Should find Add-7 as the max version <=13
opSchema = OpSchemaRegistry::Schema("Add", 13);
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 7);
// Should find opset 14
opSchema = OpSchemaRegistry::Schema("Trilu");
EXPECT_NE(nullptr, opSchema);
EXPECT_EQ(opSchema->SinceVersion(), 14);
#endif
}
// Register opset-11, then all versions
// Expects no error
TEST(SchemaRegistrationTest, RegisterSpecificThenAllVersion) {
#ifdef __ONNX_DISABLE_STATIC_REGISTRATION
DeregisterOnnxOperatorSetSchema();
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == -1);
// Register opset 11
RegisterOnnxOperatorSetSchema(11);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 11);
// Register all opset versions
// Do not fail on duplicate schema registration request
RegisterOnnxOperatorSetSchema(0, false);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 0);
// Should find schema for all ops
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Acos"));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add"));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Trilu"));
// Should find schema for all versions
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 1));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 6));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 7));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 13));
#endif
}
// Register all versions, then opset 11
// Expects no error; note GetLoadedSchemaVersion() reflects the most recent
// registration call (11), while previously loaded schemas remain findable.
TEST(SchemaRegistrationTest, RegisterAllThenSpecificVersion) {
#ifdef __ONNX_DISABLE_STATIC_REGISTRATION
DeregisterOnnxOperatorSetSchema();
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == -1);
// Register all opset versions
RegisterOnnxOperatorSetSchema(0);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 0);
// Register opset 11
// Do not fail on duplicate schema registration request
RegisterOnnxOperatorSetSchema(11, false);
EXPECT_TRUE(OpSchemaRegistry::Instance()->GetLoadedSchemaVersion() == 11);
// Should find schema for all ops
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Acos"));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add"));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Trilu"));
// Should find schema for all versions
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 1));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 6));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 7));
EXPECT_NE(nullptr, OpSchemaRegistry::Schema("Add", 13));
#endif
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,660 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
#include "onnx/defs/parser.h"
#include "onnx/defs/schema.h"
#include "onnx/defs/shape_inference.h"
#include "onnx/onnx_pb.h"
#include "onnx/shape_inference/implementation.h"
using namespace ONNX_NAMESPACE::shape_inference;
namespace ONNX_NAMESPACE {
// onnx/defs/controlflow/old.cc
void ScanInferenceFunctionOpset8(InferenceContext& ctx);
// onnx/defs/controlflow/defs.cc
void ScanInferenceFunction(InferenceContext& ctx);
namespace Test {
// Test helper: reset `proto`'s shape and populate it with `num_dims`
// anonymous (unset) dimensions.
template <class Type>
void CreateDims(Type& proto, int num_dims) {
  auto* shape_ptr = proto.mutable_shape();
  shape_ptr->clear_dim();
  for (int dim_index = 0; dim_index < num_dims; ++dim_index) {
    shape_ptr->add_dim();
  }
}
// Test helper: assign concrete dim_values to an already-created shape.
// A value of -1 means "leave this dimension unset" (no dim_value/dim_param).
// Fix: cast values.size() to int to avoid a signed/unsigned comparison
// warning against the protobuf int dim_size().
template <class Type>
void SetDimValues(Type& proto, const std::vector<int>& values) {
  auto* mutable_shape = proto.mutable_shape();
  // The caller must have created exactly one dim per value (via CreateDims).
  EXPECT_TRUE(mutable_shape->dim_size() == static_cast<int>(values.size()));
  int idx = 0;
  for (auto value : values) {
    auto mutable_dim = mutable_shape->mutable_dim(idx++);
    if (value != -1)
      mutable_dim->set_dim_value(value);
  }
}
// Test helper: assign symbolic dim_params to an already-created shape.
// A null pointer means "leave this dimension unset".
// Fix: cast values.size() to int to avoid a signed/unsigned comparison
// warning against the protobuf int dim_size().
template <class Type>
void SetDimParams(Type& proto, const std::vector<const std::string*>& values) {
  auto mutable_shape = proto.mutable_shape();
  // The caller must have created exactly one dim per entry (via CreateDims).
  EXPECT_TRUE(mutable_shape->dim_size() == static_cast<int>(values.size()));
  int idx = 0;
  for (auto value : values) {
    auto mutable_dim = mutable_shape->mutable_dim(idx++);
    if (value)
      mutable_dim->set_dim_param(*value);
  }
}
// Debug helper: print each dimension of t's shape as either its concrete
// value or its symbolic parameter (or "<unset>" for whichever is absent).
template <class Type>
void Dump(const Type& t) {
  auto& s_shape = t.shape();
  auto num_dims = s_shape.dim_size();
  std::cout << num_dims << " dims. ";
  for (int i = 0; i < num_dims; ++i) {
    // Bug fix: previously indexed dim(0) on every iteration, so all rows
    // reported the first dimension. Use dim(i).
    auto x = s_shape.dim(i);
    auto y = x.has_dim_value();
    auto z = x.has_dim_param();
    std::cout << "Dim " << i << " Value:" << (y ? ONNX_NAMESPACE::to_string(x.dim_value()) : "<unset>")
              << ", Param:" << (z ? x.dim_param() : "<unset>") << "\n";
  }
}
// mergeInShapeInfo: when exactly one side has a shape, the target ends up
// with that shape. Checked for both dense and sparse tensor types.
TEST(ShapeInferenceTest, mergeShapeInfo_HasShape) {
// source has shape, target doesn't
{
TypeProto_Tensor source;
TypeProto_Tensor target;
CreateDims(source, 1);
SetDimValues(source, {1});
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim_size() == 1 && shape.dim(0).dim_value() == 1);
}
// source has no shape, target does
{
TypeProto_Tensor source;
TypeProto_Tensor target;
CreateDims(target, 1);
SetDimValues(target, {1});
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim_size() == 1 && shape.dim(0).dim_value() == 1);
}
// source has shape, target doesn't
{
TypeProto_SparseTensor source;
TypeProto_SparseTensor target;
CreateDims(source, 1);
SetDimValues(source, {1});
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim_size() == 1 && shape.dim(0).dim_value() == 1);
}
// source has no shape, target does
{
TypeProto_SparseTensor source;
TypeProto_SparseTensor target;
CreateDims(target, 1);
SetDimValues(target, {1});
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim_size() == 1 && shape.dim(0).dim_value() == 1);
}
}
// mergeInShapeInfo: a concrete dim_value wins over a symbolic dim_param
// regardless of which side carries the value.
TEST(ShapeInferenceTest, mergeShapeInfo_PreferValueOverParam) {
std::string param = "A";
// source has value, target has param. prefer value
{
TypeProto_Tensor source;
TypeProto_Tensor target;
CreateDims(source, 1);
SetDimValues(source, {1});
CreateDims(target, 1);
SetDimParams(target, {&param});
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim_size() == 1 && shape.dim(0).dim_value() == 1);
}
// source has param, target has value.
{
TypeProto_Tensor source;
TypeProto_Tensor target;
CreateDims(source, 1);
SetDimParams(source, {&param});
CreateDims(target, 1);
SetDimValues(target, {1});
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim_size() == 1 && shape.dim(0).dim_value() == 1);
}
}
// mergeInShapeInfo: per-dimension merge — an unset dim (-1 in the helper)
// is filled from the other side, and a concrete value beats a param.
// Checked for both dense and sparse tensor types.
TEST(ShapeInferenceTest, mergeShapeInfo_CombineShapes) {
// merge from both sides, preferring real value over -1
{
TypeProto_Tensor source;
TypeProto_Tensor target;
CreateDims(source, 2);
SetDimValues(source, {-1, 2});
CreateDims(target, 2);
SetDimValues(target, {1, -1});
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim(0).dim_value() == 1 && shape.dim(1).dim_value() == 2);
}
{
TypeProto_SparseTensor source;
TypeProto_SparseTensor target;
CreateDims(source, 2);
SetDimValues(source, {-1, 2});
CreateDims(target, 2);
SetDimValues(target, {1, -1});
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim(0).dim_value() == 1 && shape.dim(1).dim_value() == 2);
}
// prefer value over param,
{
TypeProto_Tensor source;
TypeProto_Tensor target;
CreateDims(source, 2);
SetDimValues(source, {-1, 2});
CreateDims(target, 2);
SetDimValues(target, {1, 0});
// replace second dim with a param. the value from the source should be
// preferred
const std::string param = "A";
target.mutable_shape()->mutable_dim(1)->set_dim_param(param);
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim(0).dim_value() == 1 && shape.dim(1).dim_value() == 2);
}
{
TypeProto_SparseTensor source;
TypeProto_SparseTensor target;
CreateDims(source, 2);
SetDimValues(source, {-1, 2});
CreateDims(target, 2);
SetDimValues(target, {1, 0});
// replace second dim with a param. the value from the source should be
// preferred
const std::string param = "A";
target.mutable_shape()->mutable_dim(1)->set_dim_param(param);
mergeInShapeInfo(source, target);
Dump(target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim(0).dim_value() == 1 && shape.dim(1).dim_value() == 2);
}
}
// mergeInShapeInfo failure modes: mismatched rank or conflicting concrete
// dim values throw InferenceError (only when exceptions are enabled);
// conflicting dim_params do NOT throw — the target's param is kept.
TEST(ShapeInferenceTest, mergeShapeInfo_Mismatches) {
#ifndef ONNX_NO_EXCEPTIONS
// mismatched num dims
{
TypeProto_Tensor source;
TypeProto_Tensor target;
CreateDims(source, 2);
SetDimValues(source, {-1, 2});
CreateDims(target, 3);
SetDimValues(target, {1, -1, 1});
EXPECT_THROW(mergeInShapeInfo(source, target), ONNX_NAMESPACE::InferenceError);
}
{
TypeProto_SparseTensor source;
TypeProto_SparseTensor target;
CreateDims(source, 2);
SetDimValues(source, {-1, 2});
CreateDims(target, 3);
SetDimValues(target, {1, -1, 1});
EXPECT_THROW(mergeInShapeInfo(source, target), ONNX_NAMESPACE::InferenceError);
}
// mismatched dim values
{
TypeProto_Tensor source;
TypeProto_Tensor target;
CreateDims(source, 2);
SetDimValues(source, {2, 2});
CreateDims(target, 2);
SetDimValues(target, {2, 1});
EXPECT_THROW(mergeInShapeInfo(source, target), ONNX_NAMESPACE::InferenceError);
}
{
TypeProto_SparseTensor source;
TypeProto_SparseTensor target;
CreateDims(source, 2);
SetDimValues(source, {2, 2});
CreateDims(target, 2);
SetDimValues(target, {2, 1});
EXPECT_THROW(mergeInShapeInfo(source, target), ONNX_NAMESPACE::InferenceError);
}
#endif
// mismatched param value. prefer target
{
TypeProto_Tensor source;
TypeProto_Tensor target;
const std::string param_a = "A";
const std::string param_b = "B";
CreateDims(source, 1);
SetDimParams(source, {&param_a});
CreateDims(target, 1);
SetDimParams(target, {&param_b});
mergeInShapeInfo(source, target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim(0).dim_param() == "B");
}
{
TypeProto_SparseTensor source;
TypeProto_SparseTensor target;
const std::string param_a = "A";
const std::string param_b = "B";
CreateDims(source, 1);
SetDimParams(source, {&param_a});
CreateDims(target, 1);
SetDimParams(target, {&param_b});
mergeInShapeInfo(source, target);
auto& shape = target.shape();
EXPECT_TRUE(shape.dim(0).dim_param() == "B");
}
}
// Check subgraph inferencing via GraphInferencer using a Scan
//
// Builds a two-node Identity subgraph (one loop-state variable, one scan
// value), runs GraphInferencerImpl::doInferencing on it directly, then wraps
// the same subgraph in a Scan node and runs the operator's shape-inference
// function. use_scan_opset8 toggles the opset-8 Scan variant, which takes an
// extra leading (optional) sequence-lens input and a batch dimension.
static void doInferencingTest(bool use_scan_opset8) {
// NOTE(review): schemaRegistry appears unused below — possibly kept for the
// side effect of touching the registry singleton; confirm before removing.
auto* schemaRegistry = OpSchemaRegistry::Instance();
GraphProto subgraph;
// simple tensor without shape info
TypeProto simple_tensor_no_shape;
auto* tensor_type = simple_tensor_no_shape.mutable_tensor_type();
tensor_type->set_elem_type(TensorProto_DataType_FLOAT);
// simple tensor with shape info (single dimension of size 2)
TypeProto simple_tensor = simple_tensor_no_shape;
simple_tensor.mutable_tensor_type()->mutable_shape()->add_dim()->set_dim_value(2);
// setup simple graph that can be used with Scan containing two Identity
// nodes. one for the loop state variable. one for the scan output.
{
NodeProto loop_state_identity;
loop_state_identity.set_name("loop_state_identity");
loop_state_identity.set_domain(ONNX_DOMAIN);
loop_state_identity.set_op_type("Identity");
loop_state_identity.set_doc_string("loop state identity");
loop_state_identity.add_input("loop_state_in");
loop_state_identity.add_output("loop_state_out");
*subgraph.add_node() = loop_state_identity;
NodeProto scan_in_out_identity;
scan_in_out_identity.set_name("scan_in_out_identity");
scan_in_out_identity.set_domain(ONNX_DOMAIN);
scan_in_out_identity.set_op_type("Identity");
scan_in_out_identity.set_doc_string("scan identity");
scan_in_out_identity.add_input("scan_in");
scan_in_out_identity.add_output("scan_out");
*subgraph.add_node() = scan_in_out_identity;
// Inputs carry full shape info; outputs are deliberately left without a
// shape so inferencing has something to fill in.
ValueInfoProto loop_state_in;
loop_state_in.set_name("loop_state_in");
*loop_state_in.mutable_type() = simple_tensor;
*subgraph.add_input() = loop_state_in;
ValueInfoProto scan_in;
scan_in.set_name("scan_in");
*scan_in.mutable_type() = simple_tensor;
*subgraph.add_input() = scan_in;
ValueInfoProto loop_state_out = loop_state_in;
loop_state_out.set_name("loop_state_out");
*loop_state_out.mutable_type() = simple_tensor_no_shape;
*subgraph.add_output() = loop_state_out;
ValueInfoProto scan_state_out = scan_in;
scan_state_out.set_name("scan_out");
*scan_state_out.mutable_type() = simple_tensor_no_shape;
*subgraph.add_output() = scan_state_out;
}
std::unordered_map<std::string, int> opset_imports;
opset_imports[ONNX_DOMAIN] = 8; // Scan is v8
const std::unordered_map<std::string, TypeProto*> outer_scope_value_types;
SymbolTableImpl symbolTable;
symbolTable.addFromGraph(subgraph);
GraphInferenceContext graphInfCtx(outer_scope_value_types, opset_imports, &symbolTable);
GraphInferencerImpl graphInferencer(subgraph, graphInfCtx);
// loop_state_in and scan_in are the two inputs.
// order in subgraphInputTypes matches their order as graph inputs.
std::vector<const TypeProto*> subgraphInputTypes = {&simple_tensor, &simple_tensor};
std::vector<const TensorProto*> subgraphInputData = {};
ShapeInferenceOptions options{false, 0, false};
auto output = graphInferencer.doInferencing(subgraphInputTypes, subgraphInputData);
// check the subgraph outputs had their shape inferred when we called
// doInferencing directly
EXPECT_TRUE(output.size() == 2);
// Shared checker: the inferred type must be a tensor with the expected
// element type and identical concrete dims.
auto checkType = [](const TypeProto& type, const TypeProto_Tensor& expect) {
auto checkDims = [](const TensorShapeProto& l, const TensorShapeProto& r) {
EXPECT_TRUE(l.dim_size() == r.dim_size());
for (int i = 0, end = l.dim_size(); i < end; ++i) {
// if (l.dim().Get(i).dim_value() != r.dim().Get(i).dim_value())
// break;
EXPECT_TRUE(l.dim().Get(i).dim_value() == r.dim().Get(i).dim_value());
}
};
EXPECT_TRUE(type.has_tensor_type());
EXPECT_TRUE(type.tensor_type().elem_type() == expect.elem_type());
checkDims(type.tensor_type().shape(), expect.shape());
};
checkType(*output[0], simple_tensor.tensor_type());
checkType(*output[1], simple_tensor.tensor_type());
// setup Scan node to test subgraph inferencing works as expected when called
// from the operators type/shape inferencing function
NodeProto scan;
{
AttributeProto num_scan_inputs;
num_scan_inputs.set_name("num_scan_inputs");
num_scan_inputs.set_i(1);
AttributeProto body;
body.set_name("body");
*body.mutable_g() = subgraph;
*scan.add_attribute() = num_scan_inputs;
*scan.add_attribute() = body;
scan.set_name("Scan");
scan.set_domain(ONNX_DOMAIN);
scan.set_doc_string("Scan node");
scan.set_op_type("Scan");
if (use_scan_opset8)
scan.add_input(""); // optional sequence lens
scan.add_input("loop_state_start");
scan.add_input("scan_op_in");
scan.add_output("loop_state_final");
scan.add_output("scan_op_out");
}
TypeProto loop_state_in_tensor = simple_tensor_no_shape;
auto* shape = loop_state_in_tensor.mutable_tensor_type()->mutable_shape();
if (use_scan_opset8)
shape->add_dim()->set_dim_value(1); // batch size
shape->add_dim()->set_dim_value(2); // input size. must match subgraph
TypeProto loop_state_out_tensor = loop_state_in_tensor; // should be unchanged
TypeProto scan_in_tensor = simple_tensor_no_shape;
shape = scan_in_tensor.mutable_tensor_type()->mutable_shape();
if (use_scan_opset8)
shape->add_dim()->set_dim_value(1); // batch size
shape->add_dim()->set_dim_value(1); // sequence length
shape->add_dim()->set_dim_value(2); // input size. must match subgraph
TypeProto scan_out_tensor = scan_in_tensor; // should be unchanged
std::unordered_map<std::string, TypeProto*> valueTypesByName;
valueTypesByName["loop_state_start"] = &loop_state_in_tensor;
valueTypesByName["scan_op_in"] = &scan_in_tensor;
InferenceContextImpl ctx(scan, valueTypesByName, {}, {}, options, {}, &graphInfCtx);
if (use_scan_opset8)
ScanInferenceFunctionOpset8(ctx);
else
ScanInferenceFunction(ctx);
EXPECT_TRUE(ctx.getNumOutputs() == 2);
checkType(*ctx.getOutputType(0), loop_state_out_tensor.tensor_type());
checkType(*ctx.getOutputType(1), scan_out_tensor.tensor_type());
}
// Check subgraph inferencing via GraphInferencer using a Scan (from opset 8)
TEST(GraphInferencerImplTest, Scan8_BasicTest) {
doInferencingTest(true);
}
// Check subgraph inferencing via GraphInferencer using a Scan (from opset 9)
TEST(GraphInferencerImplTest, Scan9_BasicTest) {
doInferencingTest(false);
}
// Test helper: parse `modelStr` into `model` (failing the test on parse
// errors or trailing input), then run shape inference with strict-mode and
// data-propagation enabled (ShapeInferenceOptions{true, 1, true}).
void ParseAndInfer(ModelProto& model, const char* modelStr) {
OnnxParser parser(modelStr);
auto status = parser.Parse(model);
EXPECT_TRUE(status.IsOK()) << status.ErrorMessage();
EXPECT_TRUE(parser.EndOfInput()) << "Extra unparsed input unexpected.";
ShapeInferenceOptions options{true, 1, true};
ONNX_NAMESPACE::shape_inference::InferShapes(model, ONNX_NAMESPACE::OpSchemaRegistry::Instance(), options);
}
// Test helper: parse + infer `modelStr`, then compare the inferred shape of
// the first graph output against `expectedShape` dim by dim — each dim must
// match in kind (value vs param) and in content.
void RunReshapeShapeInfTest(const char* modelStr, TensorShapeProto& expectedShape) {
ModelProto model;
ParseAndInfer(model, modelStr);
const auto inferredShape = model.graph().output(0).type().tensor_type().shape();
EXPECT_TRUE(inferredShape.dim_size() == expectedShape.dim_size());
for (int i = 0; i < inferredShape.dim_size(); i++) {
EXPECT_TRUE(
(inferredShape.dim(i).has_dim_value() && expectedShape.dim(i).has_dim_value()) ||
(inferredShape.dim(i).has_dim_param() && expectedShape.dim(i).has_dim_param()));
EXPECT_TRUE(
inferredShape.dim(i).has_dim_value() ? inferredShape.dim(i).dim_value() == expectedShape.dim(i).dim_value()
: inferredShape.dim(i).dim_param() == expectedShape.dim(i).dim_param());
}
}
TEST(ShapeInferenceTest, ReshapeTestWithShapeAsSymInput) {
  // Shape<start=0,end=3>(x) yields ["batch_size", 256, 768]; data propagation
  // must carry that partially-symbolic value into Reshape's output shape.
  const char* modelStr = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 15],
producer_name: "DataPropagationTest",
producer_version: "1.0",
model_version: 1,
doc_string: "A test model for data propagation."
>
agraph (float[batch_size, 256, 768, 3] x, float[batch_size, 196608] m) => (float[?, ?, ?] z)
{
y = Shape<start = 0, end = 3>(x)
z = Reshape(m, y)
}
)ONNX";
  TensorShapeProto expectedShape;
  auto* dims = expectedShape.mutable_dim();
  dims->Add()->set_dim_param("batch_size");
  dims->Add()->set_dim_value(256);
  dims->Add()->set_dim_value(768);
  RunReshapeShapeInfTest(modelStr, expectedShape);
}
TEST(ShapeInferenceTest, ReshapeTestWithShapeAsInitializer) {
  // The target shape is a fully-concrete initializer, so the inferred output
  // shape is exactly {1, 768, 256}.
  const char* modelStr = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 15],
producer_name: "DataPropagationTest",
producer_version: "1.0",
model_version: 1,
doc_string: "A test model for data propagation."
>
agraph (float[1, 196608] m) => (float[?, ?, ?] z)
<int64[3] shape = {1, 768, 256}>
{
z = Reshape(m, shape)
}
)ONNX";
  TensorShapeProto expectedShape;
  auto* dims = expectedShape.mutable_dim();
  dims->Add()->set_dim_value(1);
  dims->Add()->set_dim_value(768);
  dims->Add()->set_dim_value(256);
  RunReshapeShapeInfTest(modelStr, expectedShape);
}
TEST(ShapeInferenceTest, ReshapeTestWithShapeAsInitializer1) {
  // The -1 entry in the shape initializer means "infer this dimension":
  // 196608 / (1 * 256) == 768, so the result must be {1, 768, 256}.
  const char* modelStr = R"ONNX(
<
ir_version: 8,
opset_import: [ "" : 15],
producer_name: "DataPropagationTest",
producer_version: "1.0",
model_version: 1,
doc_string: "A test model for data propagation."
>
agraph (float[1, 196608] m) => (float[?, ?, ?] z)
<int64[3] shape = {1, -1, 256}>
{
z = Reshape(m, shape)
}
)ONNX";
  TensorShapeProto expectedShape;
  auto* dims = expectedShape.mutable_dim();
  dims->Add()->set_dim_value(1);
  dims->Add()->set_dim_value(768);
  dims->Add()->set_dim_value(256);
  RunReshapeShapeInfTest(modelStr, expectedShape);
}
TEST(ShapeInferenceTest, CheckShapesAndTypesTest) {
#ifndef ONNX_NO_EXCEPTIONS
  // Mismatched tensor element types (FLOAT vs UINT8) must raise InferenceError.
  TypeProto inferred_proto;
  inferred_proto.mutable_tensor_type()->set_elem_type(TensorProto_DataType_FLOAT);
  TypeProto existing_proto;
  existing_proto.mutable_tensor_type()->set_elem_type(TensorProto_DataType_UINT8);
  EXPECT_THROW(checkShapesAndTypes(inferred_proto, existing_proto), ONNX_NAMESPACE::InferenceError);
#endif
}
TEST(ShapeInferenceTest, CustomOpTest) {
  const char* modelStr = R"ONNX(
<ir_version: 8, opset_import: ["" : 15, "custom.domain" : 1]>
agraph (float[256, 768, 3] x) => (z1, z2)
{
z1 = custom.domain.CustomOp (x)
# Inference cannot determine the type/shape of z1
z2 = Abs(x)
# Inference SHOULD determine the type/shape of z2 (same as that of x)
}
)ONNX";
  ModelProto model;
  ParseAndInfer(model, modelStr);
  // z1: the custom op has no registered schema, so inference leaves only a
  // placeholder TypeProto with no tensor type filled in.
  const auto& z1_info = model.graph().output(0);
  ASSERT_TRUE(z1_info.has_type());
  ASSERT_FALSE(z1_info.type().has_tensor_type());
  // z2 = Abs(x): element type and shape must match x exactly.
  const auto& z2_info = model.graph().output(1);
  ASSERT_TRUE(z2_info.has_type());
  ASSERT_TRUE(z2_info.type().has_tensor_type());
  const auto& z2_tensor = z2_info.type().tensor_type();
  EXPECT_EQ(z2_tensor.elem_type(), TensorProto_DataType_FLOAT);
  const auto& z2_shape = z2_tensor.shape();
  EXPECT_EQ(z2_shape.dim_size(), 3);
  EXPECT_EQ(z2_shape.dim(0).dim_value(), 256);
  EXPECT_EQ(z2_shape.dim(1).dim_value(), 768);
  EXPECT_EQ(z2_shape.dim(2).dim_value(), 3);
}
} // namespace Test
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,15 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#include <iostream>
#include "gtest/gtest.h"
// Custom gtest entry point for the ONNX C++ test binary.
GTEST_API_ int main(int argc, char** argv) {
  // Announce which main() is linked in — useful when several test mains exist.
  std::cout << "Running main() from test_main.cc" << std::endl;
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,137 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
# TODO: remove the following ignore after mypy upgrade in ONNX
from shape_inference_test import TestShapeInferenceHelper
import onnx.parser
from onnx import TensorProto
from onnx.helper import make_node, make_tensor, make_tensor_value_info
class TestDataPropagation(TestShapeInferenceHelper):
    """Shape-inference tests requiring data (value) propagation: the runtime
    *value* produced by a Shape node must flow into its consumer (Expand,
    ConstantOfShape) for the consumer's output shape to be resolved."""

    def test_expand_symbolic_input(self) -> None:
        # Expand's target shape is Shape(y); propagating its value lets
        # inference resolve z to broadcast(x:(3,1,2), (1,4,2)) == (3,4,2).
        graph = self._make_graph(
            [("x", TensorProto.INT32, (3, 1, 2)), ("y", TensorProto.INT32, (1, 4, 2))],
            [
                make_node("Shape", ["y"], ["shape"]),
                make_node("Expand", ["x", "shape"], ["z"]),
            ],
            [],
        )
        self._assert_inferred(
            graph,
            [
                make_tensor_value_info("shape", TensorProto.INT64, (3,)),
                make_tensor_value_info("z", TensorProto.INT32, (3, 4, 2)),
            ],
            data_prop=True,
        )

    def test_constantofshape_with_symbolic_shape(self) -> None:
        # ConstantOfShape's output shape is the propagated value of Shape(x),
        # i.e. (3, 4, 5); its element type comes from the `value` attribute.
        graph = self._make_graph(
            [("x", TensorProto.FLOAT, (3, 4, 5))],
            [
                make_node("Shape", ["x"], ["shape"]),
                make_node(
                    "ConstantOfShape",
                    ["shape"],
                    ["y"],
                    value=make_tensor("value", TensorProto.INT32, (1,), (2,)),
                ),
            ],
            [],
        )
        self._assert_inferred(
            graph,
            [
                make_tensor_value_info("shape", TensorProto.INT64, (3,)),
                make_tensor_value_info("y", TensorProto.INT32, (3, 4, 5)),
            ],
            data_prop=True,
        )  # type: ignore

    def test_model_data_propagation(self) -> None:
        """Infer the shape of z by propagating the value of xshape."""
        model = onnx.parser.parse_model(
            """
<ir_version: 7, opset_import: [ "" : 18]>
agraph (float[4, 1, 16] x, float[1, 8, 16] y) => () {
xshape = Shape (x)
z = Expand (y, xshape)
}
"""
        )
        self._assert_inferred(
            model,
            [
                make_tensor_value_info("xshape", TensorProto.INT64, (3,)),
                make_tensor_value_info("z", TensorProto.FLOAT, (4, 8, 16)),
            ],
            data_prop=True,
        )

    def test_data_prop_via_function(self) -> None:
        """Test value-propagation through function calls.

        Underlying core example is same as previous test_model_data_propagation.
        """
        model = onnx.parser.parse_model(
            """
<ir_version: 7, opset_import: [ "" : 18, "local" : 1 ]>
agraph (float[4, 1, 16] x, float[1, 8, 16] y) => () {
xshape = local.GetShape (x)
z = Expand (y, xshape)
}
<domain: "local", opset_import: [ "" : 18 ]>
GetShape (x) => (shapeval) {
shapeval = Shape(x)
}
"""
        )
        self._assert_inferred(
            model,
            [
                make_tensor_value_info("xshape", TensorProto.INT64, (3,)),
                make_tensor_value_info("z", TensorProto.FLOAT, (4, 8, 16)),
            ],
            data_prop=True,
        )

    def test_multiple_calls_to_function(self) -> None:
        """Test value-propagation handles multiple calls to same function correctly.

        Underlying core example is same as previous test_model_data_propagation.
        """
        # Two calls to local.GetShape must propagate distinct values (x's shape
        # vs y's shape) without interfering with each other.
        model = onnx.parser.parse_model(
            """
<ir_version: 7, opset_import: [ "" : 18, "local" : 1 ]>
agraph (float[4, 1, 16] x, float[1, 8, 16] y) => () {
yshape = local.GetShape (y)
xshape = local.GetShape (x)
z = Expand (y, xshape)
w = Expand (y, yshape)
}
<domain: "local", opset_import: [ "" : 18 ]>
GetShape (x) => (shapeval) {
shapeval = Shape(x)
}
"""
        )
        self._assert_inferred(
            model,
            [
                make_tensor_value_info("yshape", TensorProto.INT64, (3,)),
                make_tensor_value_info("xshape", TensorProto.INT64, (3,)),
                make_tensor_value_info("z", TensorProto.FLOAT, (4, 8, 16)),
                make_tensor_value_info("w", TensorProto.FLOAT, (1, 8, 16)),
            ],
            data_prop=True,
        )


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,19 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
from onnx import checker, defs, helper
class TestRelu(unittest.TestCase):
    """Basic schema sanity checks for activation operators."""

    def test_elu(self) -> None:
        """Elu must have a registered op schema, and a node using it must
        pass the node checker."""
        self.assertTrue(defs.has("Elu"))
        elu_node = helper.make_node("Elu", ["X"], ["Y"], alpha=1.0)
        checker.check_node(elu_node)


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,119 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
from typing import Sequence
from shape_inference_test import TestShapeInferenceHelper
import onnx
import onnx.helper
import onnx.parser
import onnx.shape_inference
from onnx import AttributeProto, TypeProto
# Shape-less tensor types keyed by their TensorProto elem_type code
# (the numeric literal is the enum value the variable name describes).
float_type_ = onnx.helper.make_tensor_type_proto(1, None)
uint8_type_ = onnx.helper.make_tensor_type_proto(2, None)
int8_type_ = onnx.helper.make_tensor_type_proto(3, None)
int32_type_ = onnx.helper.make_tensor_type_proto(6, None)
float16_type_ = onnx.helper.make_tensor_type_proto(10, None)
# Empty TypeProto: used as the stand-in for an omitted optional input.
no_type_ = TypeProto()


class TestFunctionInference(TestShapeInferenceHelper):
    """Tests for onnx.shape_inference.infer_function_output_types."""

    def _check(
        self,
        function_text: str,
        input_types: Sequence[TypeProto],
        attributes: Sequence[AttributeProto],
        expected_output_types: Sequence[TypeProto],
    ):
        """Parse the function text, run output-type inference, and compare the
        result against the expected output types element-wise."""
        function = onnx.parser.parse_function(function_text)
        result = onnx.shape_inference.infer_function_output_types(
            function, input_types, attributes
        )
        self.assertEqual(len(expected_output_types), len(result))
        for expected, actual in zip(expected_output_types, result):
            self._compare_value_infos(expected, actual)

    def _check_fails(
        self,
        function_text: str,
        input_types: Sequence[TypeProto],
        attributes: Sequence[AttributeProto],
    ):
        """Assert that output-type inference raises InferenceError for the
        given function/inputs/attributes combination."""
        function = onnx.parser.parse_function(function_text)

        def invoke_inference():
            onnx.shape_inference.infer_function_output_types(
                function, input_types, attributes
            )

        self.assertRaises(onnx.shape_inference.InferenceError, invoke_inference)

    def test_fi_basic(self):
        code = """
<opset_import: [ "" : 18 ], domain: "local">
f (y, z) => (w) {
x = Add(y, z)
w = Mul(x, y)
}
"""
        # Output element type follows the (consistent) input element type.
        self._check(code, [float_type_, float_type_], [], [float_type_])
        self._check(code, [int32_type_, int32_type_], [], [int32_type_])
        # Mismatched input element types must fail inference.
        self._check_fails(code, [float_type_, int32_type_], [])

    def test_fi_attribute(self):
        # The function attribute `dtype` is forwarded to Cast's `to` attribute,
        # so it selects the output element type.
        code = """
<opset_import: [ "" : 18 ], domain: "local">
CastTo <dtype> (x) => (y) {
y = Cast <to : int = @dtype> (x)
}
"""
        dtype_6 = onnx.helper.make_attribute("dtype", 6)
        self._check(code, [float_type_], [dtype_6], [int32_type_])
        dtype_10 = onnx.helper.make_attribute("dtype", 10)
        self._check(code, [float_type_], [dtype_10], [float16_type_])

    def test_fi_optional_input(self):
        code = """
<opset_import: [ "" : 18 ], domain: "local">
DoReduce (x, axes) => (y) {
y = ReduceMax (x, axes)
}
"""
        # We can omit the type for a missing trailing optional parameter
        self._check(code, [float_type_], [], [float_type_])
        # Or, we can pass in a default-value of TypeProto() for a missing optional parameter
        self._check(code, [float_type_, no_type_], [], [float_type_])
        code = """
<opset_import: [ "" : 18 ], domain: "local">
Quantize (x, scale, zero_point) => (y) {
y = QuantizeLinear (x, scale, zero_point)
}
"""
        # If the optional third parameter is specified, it determines the output type.
        self._check(code, [float_type_, float_type_, int8_type_], [], [int8_type_])
        self._check(code, [float_type_, float_type_, uint8_type_], [], [uint8_type_])
        # If the optional third parameter is omitted, the output type is uint8 (default).
        self._check(code, [float_type_, float_type_, no_type_], [], [uint8_type_])
        code = """
<opset_import: [ "" : 18 ], domain: "local">
DoClip (x, min, max) => (y) {
y = Clip (x, min, max)
}
"""
        # A test-case with a non-trailing missing optional parameter
        self._check(code, [float_type_, no_type_, float_type_], [], [float_type_])
        # A failing test-case with a non-trailing missing optional parameter
        self._check_fails(code, [float_type_, no_type_, int8_type_], [])


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,226 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) ONNX Project Contributors
from __future__ import annotations
import unittest
import onnx
from onnx import checker, utils
class TestFunction(unittest.TestCase):
    """Tests that utils.Extractor carries the correct set of local functions
    (including transitively referenced ones) into extracted sub-models.

    Fixes in review: misspelled local names ("funcion" -> "function") and an
    empty dict literal where an empty set was meant.
    """

    def _verify_function_set(self, extracted_model, function_set, func_domain):  # type: ignore
        """Check the extracted model is valid and contains exactly the
        functions named in `function_set` within `func_domain`."""
        checker.check_model(extracted_model)
        self.assertEqual(len(extracted_model.functions), len(function_set))
        for function in function_set:
            self.assertIsNotNone(
                next(
                    (
                        f
                        for f in extracted_model.functions
                        if f.name == function and f.domain == func_domain
                    ),
                    None,
                )
            )

    def test_extract_model_with_local_function(self) -> None:
        r"""# 1. build a model with graph below. extract models with output combinations
        # 2. validate extracted models' local functions
        #
        # model graph:
        #   i0     i1                 i2
        #   |  ____|___________________|____________
        #   | |        |    |        / |
        #   | |        |    |       /  |
        # func_add func_identity  add  identity
        #   |  ___\________\___________|_________ |
        #   | |    \        \          | _______|_|
        #   | |     \        \         ||       |
        #  add  function_nested_identity_add   add  function_nested_identity_add
        #   |          |                        |          |
        #   |          |                        |          |
        # o_func_add  o_all_func0             o_no_func   o_all_func1
        #
        # where function_nested_identity_add is a function that is defined with functions:
        #   a             b
        #   |             |
        # func_identity func_identity
        #       \        /
        #        func_add
        #           |
        #           c
        """
        # function common
        func_domain = "local"
        func_opset_imports = [onnx.helper.make_opsetid("", 14)]
        func_nested_opset_imports = [
            onnx.helper.make_opsetid("", 14),
            onnx.helper.make_opsetid(func_domain, 1),
        ]
        # add function
        func_add_name = "func_add"
        func_add_inputs = ["a", "b"]
        func_add_outputs = ["c"]
        func_add_nodes = [onnx.helper.make_node("Add", ["a", "b"], ["c"])]
        func_add = onnx.helper.make_function(
            func_domain,
            func_add_name,
            func_add_inputs,
            func_add_outputs,
            func_add_nodes,
            func_opset_imports,
        )
        # identity function
        func_identity_name = "func_identity"
        func_identity_inputs = ["a"]
        func_identity_outputs = ["b"]
        func_identity_nodes = [onnx.helper.make_node("Identity", ["a"], ["b"])]
        func_identity = onnx.helper.make_function(
            func_domain,
            func_identity_name,
            func_identity_inputs,
            func_identity_outputs,
            func_identity_nodes,
            func_opset_imports,
        )
        # nested identity/add function: calls the two functions above, so
        # extracting a model that uses it must also pull in its dependencies.
        func_nested_identity_add_name = "func_nested_identity_add"
        func_nested_identity_add_inputs = ["a", "b"]
        func_nested_identity_add_outputs = ["c"]
        func_nested_identity_add_nodes = [
            onnx.helper.make_node("func_identity", ["a"], ["a1"], domain=func_domain),
            onnx.helper.make_node("func_identity", ["b"], ["b1"], domain=func_domain),
            onnx.helper.make_node("func_add", ["a1", "b1"], ["c"], domain=func_domain),
        ]
        func_nested_identity_add = onnx.helper.make_function(
            func_domain,
            func_nested_identity_add_name,
            func_nested_identity_add_inputs,
            func_nested_identity_add_outputs,
            func_nested_identity_add_nodes,
            func_nested_opset_imports,
        )
        # create graph nodes
        node_func_add = onnx.helper.make_node(
            func_add_name, ["i0", "i1"], ["t0"], domain=func_domain
        )
        node_add0 = onnx.helper.make_node("Add", ["i1", "i2"], ["t2"])
        node_add1 = onnx.helper.make_node("Add", ["t0", "t2"], ["o_func_add"])
        node_func_identity = onnx.helper.make_node(
            func_identity_name, ["i1"], ["t1"], domain=func_domain
        )
        node_identity = onnx.helper.make_node("Identity", ["i1"], ["t3"])
        node_add2 = onnx.helper.make_node("Add", ["t3", "t2"], ["o_no_func"])
        node_func_nested0 = onnx.helper.make_node(
            func_nested_identity_add_name,
            ["t0", "t1"],
            ["o_all_func0"],
            domain=func_domain,
        )
        node_func_nested1 = onnx.helper.make_node(
            func_nested_identity_add_name,
            ["t3", "t2"],
            ["o_all_func1"],
            domain=func_domain,
        )
        graph_name = "graph_with_imbedded_functions"
        ir_version = 8
        opset_imports = [
            onnx.helper.make_opsetid("", 14),
            onnx.helper.make_opsetid("local", 1),
        ]
        tensor_type_proto = onnx.helper.make_tensor_type_proto(elem_type=2, shape=[5])
        graph = onnx.helper.make_graph(
            [
                node_func_add,
                node_add0,
                node_add1,
                node_func_identity,
                node_identity,
                node_func_nested0,
                node_func_nested1,
                node_add2,
            ],
            graph_name,
            [
                onnx.helper.make_value_info(name="i0", type_proto=tensor_type_proto),
                onnx.helper.make_value_info(name="i1", type_proto=tensor_type_proto),
                onnx.helper.make_value_info(name="i2", type_proto=tensor_type_proto),
            ],
            [
                onnx.helper.make_value_info(
                    name="o_no_func", type_proto=tensor_type_proto
                ),
                onnx.helper.make_value_info(
                    name="o_func_add", type_proto=tensor_type_proto
                ),
                onnx.helper.make_value_info(
                    name="o_all_func0", type_proto=tensor_type_proto
                ),
                onnx.helper.make_value_info(
                    name="o_all_func1", type_proto=tensor_type_proto
                ),
            ],
        )
        meta = {
            "ir_version": ir_version,
            "opset_imports": opset_imports,
            "producer_name": "test_extract_model_with_local_function",
            "functions": [func_identity, func_add, func_nested_identity_add],
        }
        model = onnx.helper.make_model(graph, **meta)
        checker.check_model(model)
        # o_no_func only uses plain ops: no local functions may be carried over.
        extracted_with_no_function = utils.Extractor(model).extract_model(
            ["i0", "i1", "i2"], ["o_no_func"]
        )
        self._verify_function_set(extracted_with_no_function, set(), func_domain)
        # o_func_add reaches only func_add.
        extracted_with_add_function = utils.Extractor(model).extract_model(
            ["i0", "i1", "i2"], ["o_func_add"]
        )
        self._verify_function_set(
            extracted_with_add_function, {func_add_name}, func_domain
        )
        # o_all_func0 reaches the nested function, which transitively needs
        # func_identity and func_add as well.
        extracted_with_o_all_function0 = utils.Extractor(model).extract_model(
            ["i0", "i1", "i2"], ["o_all_func0"]
        )
        self._verify_function_set(
            extracted_with_o_all_function0,
            {func_add_name, func_identity_name, func_nested_identity_add_name},
            func_domain,
        )
        extracted_with_o_all_function1 = utils.Extractor(model).extract_model(
            ["i0", "i1", "i2"], ["o_all_func1"]
        )
        self._verify_function_set(
            extracted_with_o_all_function1,
            {func_add_name, func_identity_name, func_nested_identity_add_name},
            func_domain,
        )
        # Extracting all outputs at once must union the required functions.
        extracted_with_o_all_function2 = utils.Extractor(model).extract_model(
            ["i0", "i1", "i2"],
            ["o_no_func", "o_func_add", "o_all_func0", "o_all_func1"],
        )
        self._verify_function_set(
            extracted_with_o_all_function2,
            {func_add_name, func_identity_name, func_nested_identity_add_name},
            func_domain,
        )


if __name__ == "__main__":
    unittest.main()

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,118 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import glob
import os
import unittest
from os.path import join
import pytest
from onnx import ModelProto, hub
@pytest.mark.skipif(
    "TEST_HUB" not in os.environ or not os.environ["TEST_HUB"],
    reason="Conserving Git LFS quota",
)
class TestModelHub(unittest.TestCase):
    """Network-dependent tests for onnx.hub; opt in via the TEST_HUB env var."""

    def setUp(self) -> None:
        # All tests fetch MNIST from the official onnx/models repo at `main`.
        self.name = "MNIST"
        self.repo = "onnx/models:main"
        self.opset = 7

    def test_force_reload(self) -> None:
        """force_reload must re-download and leave the model in the cache dir."""
        model = hub.load(self.name, self.repo, force_reload=True)
        self.assertIsInstance(model, ModelProto)
        cached_files = list(
            glob.glob(join(hub.get_dir(), "**", "*.onnx"), recursive=True)
        )
        self.assertGreaterEqual(len(cached_files), 1)

    def test_listing_models(self) -> None:
        """Progressively looser filters must yield strictly larger listings."""
        model_info_list_1 = hub.list_models(self.repo, model="mnist", tags=["vision"])
        model_info_list_2 = hub.list_models(self.repo, tags=["vision"])
        model_info_list_3 = hub.list_models(self.repo)
        self.assertGreater(len(model_info_list_1), 1)
        self.assertGreater(len(model_info_list_2), len(model_info_list_1))
        self.assertGreater(len(model_info_list_3), len(model_info_list_2))

    def test_basic_usage(self) -> None:
        """Plain hub.load returns a ModelProto and populates the cache."""
        model = hub.load(self.name, self.repo)
        self.assertIsInstance(model, ModelProto)
        cached_files = list(
            glob.glob(join(hub.get_dir(), "**", "*.onnx"), recursive=True)
        )
        self.assertGreaterEqual(len(cached_files), 1)

    def test_custom_cache(self) -> None:
        """set_dir redirects the cache; the old cache dir is restored at the end."""
        old_cache = hub.get_dir()
        new_cache = join(old_cache, "custom")
        hub.set_dir(new_cache)
        model = hub.load(self.name, self.repo)
        self.assertIsInstance(model, ModelProto)
        cached_files = list(glob.glob(join(new_cache, "**", "*.onnx"), recursive=True))
        self.assertGreaterEqual(len(cached_files), 1)
        hub.set_dir(old_cache)

    def test_download_with_opset(self) -> None:
        """An explicit opset selects a specific model version."""
        model = hub.load(self.name, self.repo, opset=8)
        self.assertIsInstance(model, ModelProto)

    def test_opset_error(self) -> None:
        """A nonexistent opset must be rejected with an AssertionError."""
        self.assertRaises(
            AssertionError, lambda: hub.load(self.name, self.repo, opset=-1)
        )

    def test_manifest_not_found(self) -> None:
        """An unknown repo ref must fail even in silent mode."""
        self.assertRaises(
            AssertionError,
            lambda: hub.load(self.name, "onnx/models:unknown", silent=True),
        )

    def test_verify_repo_ref(self) -> None:
        """Only the official onnx/models ref counts as a trusted repo."""
        # Not trusted repo:
        verified = hub._verify_repo_ref("mhamilton723/models")
        self.assertFalse(verified)
        # Not trusted repo:
        verified = hub._verify_repo_ref("onnx/models:unknown")
        self.assertFalse(verified)
        # Trusted repo:
        verified = hub._verify_repo_ref(self.repo)
        self.assertTrue(verified)

    def test_get_model_info(self) -> None:
        """Model info lookup works with and without an opset; bad opset fails."""
        hub.get_model_info("mnist", self.repo, opset=8)
        hub.get_model_info("mnist", self.repo)
        self.assertRaises(
            AssertionError, lambda: hub.get_model_info("mnist", self.repo, opset=-1)
        )

    def test_download_model_with_test_data(self) -> None:
        """The downloaded bundle must contain the model plus a test data set."""
        directory = hub.download_model_with_test_data("mnist")
        files = os.listdir(directory)
        self.assertIsInstance(directory, str)
        self.assertIn(member="model.onnx", container=files, msg="Onnx model not found")
        self.assertIn(
            member="test_data_set_0", container=files, msg="Test data not found"
        )

    def test_model_with_preprocessing(self) -> None:
        """Composite loading stitches a preprocessing model onto the main model."""
        model = hub.load_composite_model(
            "ResNet50-fp32", preprocessing_model="ResNet-preproc"
        )
        self.assertIsInstance(model, ModelProto)


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,298 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) ONNX Project Contributors
from __future__ import annotations
import unittest
import numpy as np
import onnx
from onnx import TensorProto, TypeProto
from onnx.checker import ValidationError
from onnx.defs import OpSchema, get_all_schemas_with_history, get_schema
from onnx.helper import (
make_graph,
make_node,
make_opsetid,
make_tensor_type_proto,
make_tensor_value_info,
)
from onnx.numpy_helper import from_array
from onnx.shape_inference import InferenceError, infer_node_outputs
# Latest default-domain schema for each op under test, chosen by the highest
# since_version among all historical schema revisions.
ADD_SCHEMA = max(
    (s for s in get_all_schemas_with_history() if s.name == "Add" and s.domain == ""),
    key=lambda s: s.since_version,
)
RESHAPE_SCHEMA = max(
    (
        s
        for s in get_all_schemas_with_history()
        if s.name == "Reshape" and s.domain == ""
    ),
    key=lambda s: s.since_version,
)
CLIP_SCHEMA = max(
    (s for s in get_all_schemas_with_history() if s.name == "Clip" and s.domain == ""),
    key=lambda s: s.since_version,
)
def _to_tensor_types(
    tensor_types: dict[str, tuple[int, tuple[int | str | None, ...]]]
) -> dict[str, TypeProto]:
    """Convert (elem_type, shape) specs into TypeProto values, keyed by name."""
    converted: dict[str, TypeProto] = {}
    for name, spec in tensor_types.items():
        converted[name] = make_tensor_type_proto(*spec)
    return converted
def _run_case(
    schema: OpSchema,
    input_names: list[str],
    output_names: list[str],
    input_types: dict[str, TypeProto],
    input_data: dict[str, np.ndarray] | None = None,
) -> dict[str, TypeProto]:
    """Run single-node shape inference for `schema` and return the inferred
    output types; `input_data` supplies known input values (as numpy arrays)
    for data propagation."""
    node = make_node(schema.name, input_names, output_names, domain=schema.domain)
    if input_data is None:
        tensor_data = {}
    else:
        tensor_data = {name: from_array(arr) for name, arr in input_data.items()}
    return infer_node_outputs(schema, node, input_types, tensor_data)
class TestInferenceFunctionCall(unittest.TestCase):
    """Tests for single-node inference (infer_node_outputs) and for model-level
    shape inference across local functions and subgraphs."""

    def test_add_inference(self) -> None:
        # (input types -> expected output types) covering scalars, broadcasting
        # against None/1 dims, and symbolic (named) dimensions.
        cases = [
            (
                {"A": (TensorProto.FLOAT, ()), "B": (TensorProto.FLOAT, ())},
                {"C": (TensorProto.FLOAT, ())},
            ),
            (
                {
                    "A": (TensorProto.FLOAT, (None, 2)),
                    "B": (TensorProto.FLOAT, (2,)),
                },
                {"C": (TensorProto.FLOAT, (None, 2))},
            ),
            (
                {
                    "A": (TensorProto.FLOAT, (None, 2)),
                    "B": (TensorProto.FLOAT, (1, 2)),
                },
                {"C": (TensorProto.FLOAT, (None, 2))},
            ),
            (
                {
                    "A": (TensorProto.DOUBLE, ("n", "m")),
                    "B": (TensorProto.DOUBLE, (1, "n", "m")),
                },
                {"C": (TensorProto.DOUBLE, (1, "n", "m"))},
            ),
            (
                # Distinct symbolic dims ("x" vs "y") broadcast to an unknown dim.
                {
                    "A": (TensorProto.FLOAT, ("x", 2)),
                    "B": (TensorProto.FLOAT, ("y", 2)),
                },
                {"C": (TensorProto.FLOAT, (None, 2))},
            ),
        ]
        for ins, outs in cases:
            assert _run_case(ADD_SCHEMA, ["A", "B"], ["C"], _to_tensor_types(ins)) == _to_tensor_types(outs)  # type: ignore

    def test_clip_inference_with_optional_input(self) -> None:
        # Test case where the second input is optional
        # ("" marks the omitted `min` input of Clip).
        input_names = ["X", "", "max"]
        output_names = ["Y"]
        input_types = _to_tensor_types(
            {"X": (TensorProto.FLOAT, (3, 4)), "max": (TensorProto.FLOAT, ())}
        )
        expected_output_types = _to_tensor_types({"Y": (TensorProto.FLOAT, (3, 4))})
        assert (
            _run_case(CLIP_SCHEMA, input_names, output_names, input_types)
            == expected_output_types
        )

    def test_add_inference_raises_errors(self) -> None:
        # Too few inputs for the schema -> ValidationError.
        with self.assertRaises(ValidationError):
            _run_case(
                ADD_SCHEMA,
                ["A"],
                ["C"],
                _to_tensor_types({"A": (TensorProto.FLOAT, (3, 4))}),
            )
        # Malformed type spec (2 is not a valid shape tuple) -> ValidationError.
        with self.assertRaises(ValidationError):
            _run_case(
                ADD_SCHEMA,
                ["A", "B"],
                ["C"],
                _to_tensor_types({"A": (TensorProto.FLOAT, (3, 4)), "B": (2, (3, 4))}),
            )
        # Incompatible (non-broadcastable) shapes -> InferenceError.
        with self.assertRaises(InferenceError):
            _run_case(
                ADD_SCHEMA,
                ["A", "B"],
                ["C"],
                _to_tensor_types(
                    {
                        "A": (TensorProto.FLOAT, (2, 4)),
                        "B": (TensorProto.FLOAT, (3, 4)),
                    }
                ),
            )
        # Input "B" referenced but its type is missing -> KeyError.
        with self.assertRaises(KeyError):
            _run_case(
                ADD_SCHEMA,
                ["A", "B"],
                ["C"],
                _to_tensor_types({"A": (TensorProto.FLOAT, (3, 4))}),
            )

    def test_reshape_inference(self) -> None:
        # Supplying the concrete value of "t" lets inference resolve the
        # fully-specified output shape (2, 2, 5).
        assert _run_case(
            RESHAPE_SCHEMA,
            ["x", "t"],
            ["y"],
            _to_tensor_types(
                {
                    "x": (TensorProto.FLOAT, (5, 4)),
                    "t": (TensorProto.INT64, (3,)),
                }
            ),
            {"t": np.array([2, 2, 5], dtype=np.int64)},
        ) == _to_tensor_types({"y": (TensorProto.FLOAT, (2, 2, 5))})

    def test_scan_inference_with_subgraph(self) -> None:
        """Scan's output types must be derived by inferring through its body
        subgraph (UNDEFINED subgraph inputs get their types from the node)."""
        seq_len = "sequence"
        input_size = 2
        loop_state_size = 3
        input_value_infos = [
            make_tensor_value_info("loop_state_in", TensorProto.UNDEFINED, None),
            make_tensor_value_info("input", TensorProto.UNDEFINED, None),
            make_tensor_value_info("outer", TensorProto.UNDEFINED, None),
        ]
        output_value_infos = [
            make_tensor_value_info("loop_state_out", TensorProto.UNDEFINED, None),
            make_tensor_value_info("output", TensorProto.FLOAT, (seq_len, input_size)),
        ]
        subgraph = make_graph(
            [
                make_node("Identity", ["loop_state_in"], ["loop_state_out"]),
                make_node("Add", ["input", "outer"], ["output"]),
            ],
            "subgraph",
            input_value_infos,
            output_value_infos,
        )
        assert infer_node_outputs(
            get_schema("Scan", 9),
            make_node(
                "Scan",
                ["loop_state_orig", "scan_input", "scan_outer"],
                ["loop_state_final", "scan_output"],
                num_scan_inputs=1,
                body=subgraph,
            ),
            _to_tensor_types(
                {
                    "loop_state_orig": (TensorProto.FLOAT, (loop_state_size,)),
                    "scan_input": (TensorProto.FLOAT, (seq_len, input_size)),
                    "scan_outer": (TensorProto.FLOAT, (input_size,)),
                }
            ),
            # Same as default value in Scan-9
            opset_imports=[make_opsetid("", 9)],
            ir_version=4,
        ) == _to_tensor_types(
            {
                "loop_state_final": (TensorProto.FLOAT, (loop_state_size,)),
                "scan_output": (TensorProto.FLOAT, (seq_len, input_size)),
            }
        )

    def test_inference_with_conflow(self) -> None:
        """Non-strict inference tolerates this If-based model; strict mode
        must raise InferenceError for it."""
        model_script = """
<
ir_version: 8,
opset_import: ["" : 18, "onnxscript.atenlib" : 1],
producer_name: "pytorch",
producer_version: "2.1.0"
>
torch_jit (float input_0) => (float reault, int64 index)
{
reault, index = onnxscript.atenlib.aten_min_dim <dim = 0, keepdim = 1> (input_0)
}
<
domain: "onnxscript.atenlib",
opset_import: ["" : 18]
>
aten_min_dim <dim>(self) => (result_7, indices_6)
{
tmp = Shape (self)
tmp_0 = Size (tmp)
tmp_1 = Constant <value = int64 tmp_1 {0}> ()
tmp_1_cast = CastLike (tmp_1, tmp_0)
tmp_2 = Equal (tmp_0, tmp_1_cast)
cond = Not (tmp_2)
indices_6, result_7 = If (cond) <
then_branch = thenGraph_4 () => ( indices, result) {
dim = Constant <value_int: int = @dim> ()
tmp_3 = Constant <value_ints = [-1]> ()
dims = Reshape (dim, tmp_3)
result = ReduceMin <keepdims: int = @keepdim> (self, dims)
indices = ArgMin <axis: int = @dim, keepdims: int = @keepdim> (self)
}, else_branch = elseGraph_4 () => ( indices_4, result_5) {
indices_4 = Constant <value_int = 0> ()
result_5 = Identity (self)
}
>
}
"""
        model = onnx.parser.parse_model(model_script)
        onnx.shape_inference.infer_shapes(model, strict_mode=False)
        with self.assertRaises(onnx.shape_inference.InferenceError):
            onnx.shape_inference.infer_shapes(model, strict_mode=True)

    def test_inference_with_attribute(self) -> None:
        """A referenced function attribute (@axes) feeding a Constant must be
        resolved during inference; strict mode must succeed."""
        model_script = """
<
ir_version: 8,
opset_import: ["" : 18, "custom" : 1],
producer_name: "",
producer_version: "1.0"
>
MeanVarianceNormalization (float[N] x) => (float[M] y)
{
y = custom.custom_mvn <axes = [0]> (x)
}
<
domain: "custom",
opset_import: ["" : 18]
>
custom_mvn <axes>(X) => (Y)
{
Exponent = Constant <value = float {2.0}>()
Epsilon = Constant <value = float {1e-9}>()
axes = Constant <value_ints: ints = @axes>()
X_RM = ReduceMean (X, axes)
EX_squared = Pow (X_RM, Exponent)
X_squared = Pow (X, Exponent)
E_Xsquared = ReduceMean (X_squared, axes)
Variance = Sub (E_Xsquared, EX_squared)
STD = Sqrt (Variance)
X_variance = Sub (X, X_RM)
Processed_STD = Add (STD, Epsilon)
Y = Div (X_variance, Processed_STD)
}
"""
        model = onnx.parser.parse_model(model_script)
        onnx.shape_inference.infer_shapes(model, strict_mode=True)


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,116 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
from onnx import inliner, parser
class InlinerTest(unittest.TestCase):
    """Tests for onnx.inliner: full and selective inlining of local functions."""

    def test_basic(self):
        """Inlining everything expands nested calls: foo -> Add + bar -> Add + Mul."""
        model = parser.parse_model(
            """
<ir_version: 8, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
Y = local.foo (X)
}
<opset_import: [ "" : 17, "local" : 1 ], domain: "local">
foo (x) => (y) {
temp = Add(x, x)
y = local.bar(temp)
}
<opset_import: [ "" : 17 ], domain: "local">
bar (x) => (y) {
y = Mul (x, x)
}
"""
        )
        inlined = inliner.inline_local_functions(model)
        inlined_nodes = inlined.graph.node
        # function-call should be replaced by Add, followed by Mul
        self.assertEqual(len(inlined_nodes), 2)
        self.assertEqual(inlined_nodes[0].op_type, "Add")
        self.assertEqual(inlined_nodes[1].op_type, "Mul")

    def test_selective_inlining(self):
        """With exclude=False, only the listed functions ("square") are inlined;
        other calls — including calls *inside* retained functions — are updated."""
        model = parser.parse_model(
            """
<ir_version: 8, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
T = local.square (X)
Y = local.double_and_square (T)
}
<opset_import: [ "" : 17, "local" : 1 ], domain: "local">
double_and_square (x) => (y) {
double = Add(x, x)
y = local.square(double)
}
<opset_import: [ "" : 17 ], domain: "local">
square (x) => (y) {
y = Mul (x, x)
}
"""
        )
        inlined = inliner.inline_selected_functions(
            model, [("local", "square")], exclude=False
        )
        inlined_nodes = inlined.graph.node
        # function-call to square should be replaced by Add, but not the one to double_and_square
        self.assertEqual(len(inlined_nodes), 2)
        self.assertEqual(inlined_nodes[0].op_type, "Mul")
        self.assertEqual(inlined_nodes[1].op_type, "double_and_square")
        # check call to square inside double_and_square was inlined:
        function_nodes = inlined.functions[0].node
        self.assertEqual(len(function_nodes), 2)
        self.assertEqual(function_nodes[0].op_type, "Add")
        self.assertEqual(function_nodes[1].op_type, "Mul")

    def test_selective_exclusion(self):
        """With exclude=True, everything *except* the listed functions is
        inlined — the mirror image of test_selective_inlining."""
        model = parser.parse_model(
            """
<ir_version: 8, opset_import: [ "" : 17, "local" : 1 ]>
agraph (float[N] X) => (float[N] Y)
{
T = local.square (X)
Y = local.double_and_square (T)
}
<opset_import: [ "" : 17, "local" : 1 ], domain: "local">
double_and_square (x) => (y) {
double = Add(x, x)
y = local.square(double)
}
<opset_import: [ "" : 17 ], domain: "local">
square (x) => (y) {
y = Mul (x, x)
}
"""
        )
        inlined = inliner.inline_selected_functions(
            model, [("local", "double_and_square")], exclude=True
        )
        inlined_nodes = inlined.graph.node
        # function-call to square should be replaced by Add, but not the one to double_and_square
        self.assertEqual(len(inlined_nodes), 2)
        self.assertEqual(inlined_nodes[0].op_type, "Mul")
        self.assertEqual(inlined_nodes[1].op_type, "double_and_square")
        # check call to square inside double_and_square was inlined:
        function_nodes = inlined.functions[0].node
        self.assertEqual(len(function_nodes), 2)
        self.assertEqual(function_nodes[0].op_type, "Add")
        self.assertEqual(function_nodes[1].op_type, "Mul")


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,140 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
import numpy.testing as npt
import onnx
import onnx.helper
import onnx.model_container
import onnx.numpy_helper
import onnx.reference
def _linear_regression():
    """Build and check a small float32 model computing Y = ((X @ A) @ B) @ C.

    A, B and C are fixed 3x3 inline initializers; X and Y have symbolic shapes.
    """
    x_info = onnx.helper.make_tensor_value_info(
        "X", onnx.TensorProto.FLOAT, [None, None]
    )
    y_info = onnx.helper.make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [None])
    nodes = [
        onnx.helper.make_node("MatMul", ["X", "A"], ["XA"]),
        onnx.helper.make_node("MatMul", ["XA", "B"], ["XB"]),
        onnx.helper.make_node("MatMul", ["XB", "C"], ["Y"]),
    ]
    weight_a = onnx.numpy_helper.from_array(
        np.arange(9).astype(np.float32).reshape((-1, 3)), name="A"
    )
    weight_b = onnx.numpy_helper.from_array(
        (np.arange(9) * 100).astype(np.float32).reshape((-1, 3)),
        name="B",
    )
    weight_c = onnx.numpy_helper.from_array(
        (np.arange(9) + 10).astype(np.float32).reshape((-1, 3)),
        name="C",
    )
    graph = onnx.helper.make_graph(
        nodes, "mm", [x_info], [y_info], [weight_a, weight_b, weight_c]
    )
    onnx_model = onnx.helper.make_model(graph)
    onnx.checker.check_model(onnx_model)
    return onnx_model
def _large_linear_regression():
    """Build a ModelContainer for the 3-MatMul model with external weights.

    A and C are declared as large-tensor placeholders (keys "#loc0"/"#loc1")
    whose real arrays are supplied to make_large_model; B stays inline.
    """
    X = onnx.helper.make_tensor_value_info("X", onnx.TensorProto.FLOAT, [None, None])
    Y = onnx.helper.make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [None])
    graph = onnx.helper.make_graph(
        [
            onnx.helper.make_node("MatMul", ["X", "A"], ["XA"]),
            onnx.helper.make_node("MatMul", ["XA", "B"], ["XB"]),
            onnx.helper.make_node("MatMul", ["XB", "C"], ["Y"]),
        ],
        "mm",
        [X],
        [Y],
        [
            onnx.model_container.make_large_tensor_proto(
                "#loc0", "A", onnx.TensorProto.FLOAT, (3, 3)
            ),
            onnx.numpy_helper.from_array(
                np.arange(9).astype(np.float32).reshape((-1, 3)), name="B"
            ),
            onnx.model_container.make_large_tensor_proto(
                "#loc1", "C", onnx.TensorProto.FLOAT, (3, 3)
            ),
        ],
    )
    onnx_model = onnx.helper.make_model(graph)
    # attach the actual arrays for the two placeholder keys
    large_model = onnx.model_container.make_large_model(
        onnx_model.graph,
        {
            "#loc0": (np.arange(9) * 100).astype(np.float32).reshape((-1, 3)),
            "#loc1": (np.arange(9) + 10).astype(np.float32).reshape((-1, 3)),
        },
    )
    large_model.check_model()
    return large_model
class TestLargeOnnxReferenceEvaluator(unittest.TestCase):
    """Evaluate ModelContainer-held models with the reference evaluator."""

    def common_check_reference_evaluator(self, container):
        """Run the 3-MatMul model on a fixed input, compare to precomputed output."""
        X = np.arange(9).astype(np.float32).reshape((-1, 3))
        ref = onnx.reference.ReferenceEvaluator(container)
        got = ref.run(None, {"X": X})
        expected = np.array(
            [
                [945000, 1015200, 1085400],
                [2905200, 3121200, 3337200],
                [4865400, 5227200, 5589000],
            ],
            dtype=np.float32,
        )
        npt.assert_allclose(expected, got[0])  # type: ignore[index]

    def test_large_onnx_no_large_initializer(self):
        """Container without large tensors still saves/loads/evaluates."""
        model_proto = _linear_regression()
        large_model = onnx.model_container.make_large_model(model_proto.graph)
        self.common_check_reference_evaluator(large_model)
        # indexing with an unknown large-tensor key must raise
        with self.assertRaises(ValueError):
            large_model["#anymissingkey"]
        with tempfile.TemporaryDirectory() as temp:
            filename = os.path.join(temp, "model.onnx")
            large_model.save(filename)
            copy = onnx.model_container.ModelContainer()
            copy.load(filename)
            self.common_check_reference_evaluator(copy)

    def test_large_one_weight_file(self):
        """Round-trip with all large tensors in one external file (save(..., True))."""
        large_model = _large_linear_regression()
        self.common_check_reference_evaluator(large_model)
        with tempfile.TemporaryDirectory() as temp:
            filename = os.path.join(temp, "model.onnx")
            large_model.save(filename, True)
            copy = onnx.model_container.ModelContainer()
            copy.load(filename)
            # previously `copy` was loaded but never evaluated; check it too,
            # mirroring the other tests of this class
            self.common_check_reference_evaluator(copy)
            loaded_model = onnx.load_model(filename, load_external_data=True)
            self.common_check_reference_evaluator(loaded_model)

    def test_large_multi_files(self):
        """Round-trip with one external file per large tensor (save(..., False))."""
        large_model = _large_linear_regression()
        self.common_check_reference_evaluator(large_model)
        with tempfile.TemporaryDirectory() as temp:
            filename = os.path.join(temp, "model.onnx")
            large_model.save(filename, False)
            copy = onnx.load_model(filename)
            self.common_check_reference_evaluator(copy)
            loaded_model = onnx.load_model(filename, load_external_data=True)
            self.common_check_reference_evaluator(loaded_model)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)

View File

@ -0,0 +1,137 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
import onnx
import onnx.external_data_helper as ext_data
import onnx.helper
import onnx.model_container
import onnx.numpy_helper
def _linear_regression():
    """Build and check a small float32 model computing Y = ((X @ A) @ B) @ C.

    A, B and C are fixed 3x3 inline initializers.  Note B and C are built
    from the same expression (np.arange(9) * 10) in this file's variant.
    """
    X = onnx.helper.make_tensor_value_info("X", onnx.TensorProto.FLOAT, [None, None])
    Y = onnx.helper.make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [None])
    graph = onnx.helper.make_graph(
        [
            onnx.helper.make_node("MatMul", ["X", "A"], ["XA"]),
            onnx.helper.make_node("MatMul", ["XA", "B"], ["XB"]),
            onnx.helper.make_node("MatMul", ["XB", "C"], ["Y"]),
        ],
        "mm",
        [X],
        [Y],
        [
            onnx.numpy_helper.from_array(
                np.arange(9).astype(np.float32).reshape((-1, 3)), name="A"
            ),
            onnx.numpy_helper.from_array(
                (np.arange(9) * 10).astype(np.float32).reshape((-1, 3)),
                name="B",
            ),
            onnx.numpy_helper.from_array(
                (np.arange(9) * 10).astype(np.float32).reshape((-1, 3)),
                name="C",
            ),
        ],
    )
    onnx_model = onnx.helper.make_model(graph)
    onnx.checker.check_model(onnx_model)
    return onnx_model
def _large_linear_regression():
    """Build a ModelContainer for the 3-MatMul model with external weights.

    A and C are declared as large-tensor placeholders (keys "#loc0"/"#loc1")
    whose real arrays are supplied to make_large_model; B stays inline.
    """
    X = onnx.helper.make_tensor_value_info("X", onnx.TensorProto.FLOAT, [None, None])
    Y = onnx.helper.make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [None])
    graph = onnx.helper.make_graph(
        [
            onnx.helper.make_node("MatMul", ["X", "A"], ["XA"]),
            onnx.helper.make_node("MatMul", ["XA", "B"], ["XB"]),
            onnx.helper.make_node("MatMul", ["XB", "C"], ["Y"]),
        ],
        "mm",
        [X],
        [Y],
        [
            onnx.model_container.make_large_tensor_proto(
                "#loc0", "A", onnx.TensorProto.FLOAT, (3, 3)
            ),
            onnx.numpy_helper.from_array(
                np.arange(9).astype(np.float32).reshape((-1, 3)), name="B"
            ),
            onnx.model_container.make_large_tensor_proto(
                "#loc1", "C", onnx.TensorProto.FLOAT, (3, 3)
            ),
        ],
    )
    onnx_model = onnx.helper.make_model(graph)
    # attach the actual arrays for the two placeholder keys
    large_model = onnx.model_container.make_large_model(
        onnx_model.graph,
        {
            "#loc0": (np.arange(9) * 100).astype(np.float32).reshape((-1, 3)),
            "#loc1": (np.arange(9) + 10).astype(np.float32).reshape((-1, 3)),
        },
    )
    large_model.check_model()
    return large_model
class TestLargeOnnx(unittest.TestCase):
    """Save/load round-trips for ModelContainer (structure checks only)."""

    def test_large_onnx_no_large_initializer(self):
        model_proto = _linear_regression()
        assert isinstance(model_proto, onnx.ModelProto)
        large_model = onnx.model_container.make_large_model(model_proto.graph)
        assert isinstance(large_model, onnx.model_container.ModelContainer)
        with tempfile.TemporaryDirectory() as temp:
            filename = os.path.join(temp, "model.onnx")
            large_model.save(filename)
            copy = onnx.model_container.ModelContainer()
            # accessing model_proto before load() must fail
            with self.assertRaises(RuntimeError):
                assert copy.model_proto
            copy.load(filename)
            assert copy.model_proto is not None
            onnx.checker.check_model(copy.model_proto)

    def test_large_one_weight_file(self):
        # save(filename, True): all large tensors stored in a single external file
        large_model = _large_linear_regression()
        assert isinstance(large_model, onnx.model_container.ModelContainer)
        with tempfile.TemporaryDirectory() as temp:
            filename = os.path.join(temp, "model.onnx")
            saved_proto = large_model.save(filename, True)
            assert isinstance(saved_proto, onnx.ModelProto)
            copy = onnx.model_container.ModelContainer()
            copy.load(filename)
            copy.check_model()
            loaded_model = onnx.load_model(filename, load_external_data=True)
            onnx.checker.check_model(loaded_model)

    def test_large_multi_files(self):
        # save(filename, False): one external file per large tensor
        large_model = _large_linear_regression()
        assert isinstance(large_model, onnx.model_container.ModelContainer)
        with tempfile.TemporaryDirectory() as temp:
            filename = os.path.join(temp, "model.onnx")
            saved_proto = large_model.save(filename, False)
            assert isinstance(saved_proto, onnx.ModelProto)
            copy = onnx.load_model(filename)
            onnx.checker.check_model(copy)
            for tensor in ext_data._get_all_tensors(copy):
                if ext_data.uses_external_data(tensor):
                    tested = 0
                    for ext in tensor.external_data:
                        if ext.key == "location":  # type: ignore[attr-defined]
                            # NOTE(review): checked as-is — presumably save()
                            # records a resolvable path; confirm behavior for
                            # relative locations
                            assert os.path.exists(ext.value)
                            tested += 1
                    # exactly one "location" entry per externalized tensor
                    self.assertEqual(tested, 1)
            loaded_model = onnx.load_model(filename, load_external_data=True)
            onnx.checker.check_model(loaded_model)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)

View File

@ -0,0 +1,273 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import typing
import unittest
import onnx
import onnx.parser
import onnx.shape_inference
class TestModelInference(unittest.TestCase):
    """Shape/type inference tests driven by textual models (onnx.parser)."""

    def _check(self, model_text: str, *expected: int):
        """Check that the model inference infers the expected types for outputs.

        Restricted to the simple case of tensor types, so expected types specify
        only the element type (ints corresponding to onnx.TensorProto.DataType).
        """
        model = onnx.parser.parse_model(model_text)
        inferred = onnx.shape_inference.infer_shapes(model)
        outputs = inferred.graph.output
        # zip stops at the shorter side: outputs without an expectation are skipped
        for output, expected_elem_type in zip(outputs, expected):
            inferred_type = output.type
            self.assertTrue(inferred_type.HasField("tensor_type"))
            tensor_type = inferred_type.tensor_type
            self.assertTrue(tensor_type.HasField("elem_type"))
            elem_type = tensor_type.elem_type
            self.assertEqual(elem_type, expected_elem_type)

    def _check_inference_error(self, model_text: str):
        """Check that the model inference raises an InferenceError."""
        model = onnx.parser.parse_model(model_text)
        with self.assertRaises(onnx.shape_inference.InferenceError):
            onnx.shape_inference.infer_shapes(model, True, True)

    def test_unknown_op(self):
        """Test that model inference handles unknown ops.

        This special treatment is to support custom ops.
        See comments in shape inference code for details.
        """
        model = """
            <ir_version: 7, opset_import: [ "" : 17]>
            agraph (float[N] x) => (y)
            {
                y = SomeUnknownOp (x)
            }
        """
        # No output types are inferred for unknown ops.
        # But ensure that the inference does not fail.
        self._check(model)

    def test_mi_basic(self):
        """Test that model inference infers model output type."""
        model = """
            <
            ir_version: 7,
            opset_import: [ "" : 17]
            >
            agraph (float[N] x) => (y)
            {
                y = Cast<to=6> (x)
            }
        """
        self._check(model, onnx.TensorProto.INT32)

    def test_mi_function(self):
        """Test use of functions."""
        model = """
            <
            ir_version: 7,
            opset_import: [ "" : 17, "local" : 1]
            >
            agraph (float[N] x) => (y)
            {
                y = local.cast(x)
            }
            <
            opset_import: [ "" : 17 ],
            domain: "local"
            >
            cast (x) => (y)
            {
                y = Cast<to=6> (x)
            }
        """
        self._check(model, onnx.TensorProto.INT32)

    def test_mi_function_attr(self):
        """Test use of functions with attribute parameters."""
        model = """
            <
            ir_version: 7,
            opset_import: [ "" : 17, "local" : 1]
            >
            agraph (float[N] x) => (y)
            {
                y = local.cast<target=6>(x)
            }
            <
            opset_import: [ "" : 17 ],
            domain: "local"
            >
            cast<target>(x) => (y)
            {
                y = Cast<to:int = @target> (x)
            }
        """
        self._check(model, onnx.TensorProto.INT32)

    def test_mi_function_subgraph_attr(self):
        """Test use of function attributes within subgraphs."""
        model = """
            <
            ir_version: 7,
            opset_import: [ "" : 17, "local" : 1]
            >
            agraph (float[N] x, bool flag) => (y)
            {
                y = local.cast<target=6>(x, flag)
            }
            <
            opset_import: [ "" : 17 ],
            domain: "local"
            >
            cast<target>(x, flag) => (y)
            {
                y = If (flag) <
                    then_branch = g1 () => (z_then) { z_then = Cast<to:int = @target> (x) },
                    else_branch = g2 () => (z_else) { z_else = Cast<to:int = @target> (x) }
                >
            }
        """
        self._check(model, onnx.TensorProto.INT32)

    def test_mi_function_multiple_calls(self):
        """Test use of multiple invocation of functions."""
        model = """
            <
            ir_version: 7,
            opset_import: [ "" : 17, "local" : 1]
            >
            agraph (float[N] x, bool flag) => (y, z)
            {
                y = local.cast<target=6>(x, flag)
                z = local.cast<target=7>(x, flag)
            }
            <
            opset_import: [ "" : 17 ],
            domain: "local"
            >
            cast<target>(x, flag) => (y)
            {
                y = If (flag) <
                    then_branch = g1 () => (z_then) { z_then = Cast<to:int = @target> (x) },
                    else_branch = g2 () => (z_else) { z_else = Cast<to:int = @target> (x) }
                >
            }
        """
        self._check(model, onnx.TensorProto.INT32, onnx.TensorProto.INT64)

    def _check_shape(self, model_text: str, *expected: typing.Sequence[int]):
        """Check that the model inference infers the expected shapes for outputs.

        Restricted to the simple case of tensor type outputs with completely
        known shapes.
        """
        model = onnx.parser.parse_model(model_text)
        # strict mode + data propagation so constant-folded shapes are inferred
        inferred = onnx.shape_inference.infer_shapes(model, True, True, True)
        outputs = inferred.graph.output
        for output, expected_shape in zip(outputs, expected):
            inferred_type = output.type
            self.assertTrue(inferred_type.HasField("tensor_type"))
            tensor_type = inferred_type.tensor_type
            self.assertTrue(tensor_type.HasField("shape"))
            inferred_shape = tensor_type.shape
            self.assertEqual(len(inferred_shape.dim), len(expected_shape))
            for inferred_dim, expected_dim in zip(inferred_shape.dim, expected_shape):
                self.assertTrue(inferred_dim.HasField("dim_value"))
                self.assertEqual(inferred_dim.dim_value, expected_dim)

    def test_mi_constant(self):
        # Reshape target comes from a Constant node: requires data propagation.
        model = """
            <
            ir_version: 7,
            opset_import: [ "" : 17]
            >
            mymodel (float[4, 8, 16] x) => (y) {
                shape = Constant<value_ints=[8,4,16]>()
                y = Reshape(x, shape)
            }
        """
        self._check_shape(model, [8, 4, 16])

    def test_mi_constant_2(self):
        # Reshape target is computed (Constant * Constant): propagation through Mul.
        model = """
            <
            ir_version: 7,
            opset_import: [ "" : 17]
            >
            mymodel (float[4, 8, 16] x) => (y) {
                shape = Constant<value_ints=[4,2,8]>()
                two = Constant<value_int=2>()
                shape2 = Mul(shape, two)
                y = Reshape(x, shape2)
            }
        """
        self._check_shape(model, [8, 4, 16])

    def test_mi_constant_in_function(self):
        # Constants defined inside a function body must propagate to the caller.
        model = """
            <
            ir_version: 7,
            opset_import: [ "" : 17, "local" : 1]
            >
            main (float x) => (y, z) {
                y, z = local.expand(x)
            }
            <
            opset_import: [ "" : 17 ],
            domain: "local"
            >
            expand (x) => (y, z) {
                shape1 = Constant<value = int64[2] {4,4}>()
                shape2 = Constant<value = int64[3] {8,8,8}>()
                z = Expand (x, shape2)
                y = Expand (x, shape1)
            }
        """
        self._check_shape(model, [4, 4], [8, 8, 8])

    def test_mi_function_default_attr(self):
        """Test use of default values of function attributes."""
        model = """
            <ir_version: 7, opset_import: [ "" : 17, "local" : 1]>
            agraph (float[N] x) => (y, z)
            {
                y = local.cast <target=6> (x) # casts to INT32 type (encoding value 6)
                z = local.cast (x) # uses default-attribute value of 1 (FLOAT type)
            }
            <opset_import: [ "" : 17 ], domain: "local">
            cast <target: int = 1> (x) => (y)
            {
                y = Cast <to:int = @target> (x)
            }
        """
        self._check(model, onnx.TensorProto.INT32, onnx.TensorProto.FLOAT)

    def test_mi_overloaded_function(self):
        """Test use of functions."""
        # same function name, disambiguated by the "overload" attribute (IR 10)
        model = """
            <ir_version: 10, opset_import: [ "" : 17, "local" : 1]>
            agraph (float[N] x) => (y, z)
            {
                y = local.cast:to_int32 (x)
                z = local.cast:to_int64 (x)
            }
            <opset_import: [ "" : 17 ], domain: "local", overload: "to_int32">
            cast (x) => (y)
            {
                y = Cast<to=6> (x)
            }
            <opset_import: [ "" : 17 ], domain: "local", overload: "to_int64">
            cast (x) => (y)
            {
                y = Cast<to=7> (x)
            }
        """
        self._check(model, onnx.TensorProto.INT32, onnx.TensorProto.INT64)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,656 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
from typing import Any
import numpy as np
import parameterized
import onnx
import onnx.reference
from onnx import helper, numpy_helper
def bfloat16_to_float32(ival: int) -> Any:
    """Pure-Python reference decoder: bfloat16 bit pattern (int) -> np.float32.

    bfloat16 layout: 1 sign bit, 8 exponent bits (bias 127), 7 mantissa bits.

    Fixes over the previous version:
    - input 0 now decodes to 0.0 (was 2**-127),
    - subnormals (exponent field 0) are decoded correctly,
    - exponent field 0xFF decodes to +/-inf or NaN for any mantissa
      (previously only the single pattern 0x7FC0 was treated as NaN and
      infinities were computed via an overflowing 2.0**128).
    """
    if not 0 <= ival <= 0xFFFF:
        raise ValueError(f"{ival} is not a bfloat16.")
    sign = ival & 0x8000
    expo = (ival >> 7) & 0xFF
    mant = ival & 0x7F
    if expo == 0xFF:
        # all-ones exponent: infinity when mantissa is 0, NaN otherwise
        if mant:
            return np.float32(np.nan)
        return np.float32(-np.inf) if sign else np.float32(np.inf)
    if expo == 0:
        # subnormal (or zero): no implicit leading 1, fixed exponent -126
        fval = float(mant) * 2.0 ** (-7) * 2.0 ** (-126)
    else:
        fval = (1.0 + mant * 2.0 ** (-7)) * 2.0 ** (expo - 127)
    return np.float32(-fval) if sign else np.float32(fval)
def float8e4m3_to_float32(ival: int) -> Any:
    """Pure-Python reference decoder for float8 E4M3FN bit patterns.

    Layout: 1 sign bit, 4 exponent bits (bias 7), 3 mantissa bits.
    The only NaN encodings are 0x7F and 0xFF; there are no infinities.
    """
    if not 0 <= ival <= 255:
        raise ValueError(f"{ival} is not a float8.")
    if ival == 255:
        return np.float32(-np.nan)
    if ival == 127:
        return np.float32(np.nan)
    negative = bool(ival & 0x80)
    expo = (ival >> 3) & 0x0F
    mant = ival & 0x07
    if expo == 0 and mant == 0:
        # both 0x00 and 0x80 decode to (positive) zero
        return np.float32(0)
    if expo == 0:
        # subnormal: no implicit leading 1, fixed exponent -6
        fval = (mant / 8.0) * 2.0 ** (-6)
    else:
        fval = (1.0 + mant / 8.0) * 2.0 ** (expo - 7)
    return np.float32(-fval) if negative else np.float32(fval)
def float8e5m2_to_float32(ival: int) -> Any:
    """Pure-Python reference decoder for float8 E5M2 bit patterns.

    Layout: 1 sign bit, 5 exponent bits (bias 15), 2 mantissa bits.
    Magnitudes 125-127 are NaN, magnitude 124 is infinity.
    """
    if not 0 <= ival <= 255:
        raise ValueError(f"{ival} is not a float8.")
    negative = bool(ival & 0x80)
    magnitude = ival & 0x7F
    if magnitude in (125, 126, 127):
        return np.float32(-np.nan) if negative else np.float32(np.nan)
    if magnitude == 124:
        return -np.float32(np.inf) if negative else np.float32(np.inf)
    if magnitude == 0:
        # both 0x00 and 0x80 decode to (positive) zero
        return np.float32(0)
    expo = magnitude >> 2
    mant = magnitude & 0x03
    if expo == 0:
        # subnormal: no implicit leading 1, fixed exponent -14
        fval = (mant / 4.0) * 2.0 ** (-14)
    else:
        fval = (1.0 + mant / 4.0) * 2.0 ** (expo - 15)
    return np.float32(-fval) if negative else np.float32(fval)
class TestNumpyHelper(unittest.TestCase):
def _test_numpy_helper_float_type(self, dtype: np.number) -> None:
a = np.random.rand(13, 37).astype(dtype)
tensor_def = numpy_helper.from_array(a, "test")
self.assertEqual(tensor_def.name, "test")
a_recover = numpy_helper.to_array(tensor_def)
np.testing.assert_equal(a, a_recover)
def _test_numpy_helper_int_type(self, dtype: np.number) -> None:
a = np.random.randint(
np.iinfo(dtype).min, np.iinfo(dtype).max, dtype=dtype, size=(13, 37)
)
tensor_def = numpy_helper.from_array(a, "test")
self.assertEqual(tensor_def.name, "test")
a_recover = numpy_helper.to_array(tensor_def)
np.testing.assert_equal(a, a_recover)
    # One round-trip test per supported numeric dtype.
    def test_float(self) -> None:
        self._test_numpy_helper_float_type(np.float32)

    def test_uint8(self) -> None:
        self._test_numpy_helper_int_type(np.uint8)

    def test_int8(self) -> None:
        self._test_numpy_helper_int_type(np.int8)

    def test_uint16(self) -> None:
        self._test_numpy_helper_int_type(np.uint16)

    def test_int16(self) -> None:
        self._test_numpy_helper_int_type(np.int16)

    def test_int32(self) -> None:
        self._test_numpy_helper_int_type(np.int32)

    def test_int64(self) -> None:
        self._test_numpy_helper_int_type(np.int64)
    def test_string(self) -> None:
        # String tensors round-trip through object-dtype numpy arrays.
        a = np.array(["Amy", "Billy", "Cindy", "David"]).astype(object)
        tensor_def = numpy_helper.from_array(a, "test")
        self.assertEqual(tensor_def.name, "test")
        a_recover = numpy_helper.to_array(tensor_def)
        np.testing.assert_equal(a, a_recover)

    def test_bool(self) -> None:
        # Random 0/1 matrix cast to bool, round-tripped through TensorProto.
        a = np.random.randint(2, size=(13, 37)).astype(bool)
        tensor_def = numpy_helper.from_array(a, "test")
        self.assertEqual(tensor_def.name, "test")
        a_recover = numpy_helper.to_array(tensor_def)
        np.testing.assert_equal(a, a_recover)
def test_float16(self) -> None:
self._test_numpy_helper_float_type(np.float32)
    # Complex dtypes go through the same float round-trip helper.
    def test_complex64(self) -> None:
        self._test_numpy_helper_float_type(np.complex64)

    def test_complex128(self) -> None:
        self._test_numpy_helper_float_type(np.complex128)
    # --- round-trip tests for the small-float encoders/decoders -------------
    @parameterized.parameterized.expand(
        [
            (1,),
            (0.100097656,),
            (130048,),
            (1.2993813e-5,),
            (np.nan,),
            (np.inf,),
        ]
    )
    def test_bfloat16_to_float32(self, f):
        # float32 -> bfloat16 -> float32, decoded both by numpy_helper and by
        # the local pure-Python reference; NaN compared via isnan.
        f32 = np.float32(f)
        bf16 = helper.float32_to_bfloat16(f32)
        assert isinstance(bf16, int)
        f32_1 = numpy_helper.bfloat16_to_float32(np.array([bf16]))[0]
        f32_2 = bfloat16_to_float32(bf16)
        if np.isnan(f32):
            assert np.isnan(f32_1)
            assert np.isnan(f32_2)
        else:
            self.assertEqual(f32, f32_1)
            self.assertEqual(f32, f32_2)

    def test_float8e4m3_to_float32(self):
        # spot-check known E4M3FN bit patterns (max finite, subnormals)...
        self.assertEqual(numpy_helper.float8e4m3_to_float32(int("1111110", 2)), 448)
        self.assertEqual(numpy_helper.float8e4m3_to_float32(int("1000", 2)), 2 ** (-6))
        self.assertEqual(numpy_helper.float8e4m3_to_float32(int("1", 2)), 2 ** (-9))
        self.assertEqual(
            numpy_helper.float8e4m3_to_float32(int("111", 2)), 0.875 * 2 ** (-6)
        )
        # ...then round-trip values exactly representable in E4M3FN (and NaN)
        for f in [
            0,
            1,
            -1,
            0.5,
            -0.5,
            0.1015625,
            -0.1015625,
            2,
            3,
            -2,
            -3,
            448,
            2 ** (-6),
            2 ** (-9),
            0.875 * 2 ** (-6),
            np.nan,
        ]:
            with self.subTest(f=f):
                f32 = np.float32(f)
                f8 = helper.float32_to_float8e4m3(f32)
                assert isinstance(f8, int)
                f32_1 = numpy_helper.float8e4m3_to_float32(np.array([f8]))[0]
                f32_2 = float8e4m3_to_float32(f8)
                if np.isnan(f32):
                    assert np.isnan(f32_1)
                    assert np.isnan(f32_2)
                else:
                    self.assertEqual(f32, f32_1)
                    self.assertEqual(f32, f32_2)

    @parameterized.parameterized.expand(
        [
            (0.00439453125, 0.00390625),
            (0.005859375, 0.005859375),
            (0.005759375, 0.005859375),
            (0.0046875, 0.00390625),
            (0.001953125, 0.001953125),
            (0.0029296875, 0.00390625),
            (0.002053125, 0.001953125),
            (0.00234375, 0.001953125),
            (0.0087890625, 0.0078125),
            (0.001171875, 0.001953125),
            (1.8131605, 1.875),
        ]
    )
    def test_float8e4m3_to_float32_round(self, val, expected):
        # values NOT exactly representable in E4M3FN must round to `expected`
        f8 = helper.float32_to_float8e4m3(val)
        f32 = numpy_helper.float8e4m3_to_float32(f8)
        self.assertEqual(f32, expected)

    def test_float8e5m2_to_float32(self):
        # spot-check known E5M2 patterns: max finite, subnormals, NaNs, infinities
        self.assertEqual(numpy_helper.float8e5m2_to_float32(int("1111011", 2)), 57344)
        self.assertEqual(numpy_helper.float8e5m2_to_float32(int("100", 2)), 2 ** (-14))
        self.assertEqual(
            numpy_helper.float8e5m2_to_float32(int("11", 2)), 0.75 * 2 ** (-14)
        )
        self.assertEqual(numpy_helper.float8e5m2_to_float32(int("1", 2)), 2 ** (-16))
        self.assertTrue(np.isnan(numpy_helper.float8e5m2_to_float32(int("1111101", 2))))
        self.assertTrue(np.isnan(numpy_helper.float8e5m2_to_float32(int("1111110", 2))))
        self.assertTrue(np.isnan(numpy_helper.float8e5m2_to_float32(int("1111111", 2))))
        self.assertTrue(
            np.isnan(numpy_helper.float8e5m2_to_float32(int("11111101", 2)))
        )
        self.assertTrue(
            np.isnan(numpy_helper.float8e5m2_to_float32(int("11111110", 2)))
        )
        self.assertTrue(
            np.isnan(numpy_helper.float8e5m2_to_float32(int("11111111", 2)))
        )
        self.assertEqual(numpy_helper.float8e5m2_to_float32(int("1111100", 2)), np.inf)
        self.assertEqual(
            numpy_helper.float8e5m2_to_float32(int("11111100", 2)), -np.inf
        )
        # round-trip values exactly representable in E5M2 (and NaN)
        for f in [
            0,
            0.0017089844,
            20480,
            14,
            -3584,
            np.nan,
        ]:
            with self.subTest(f=f):
                f32 = np.float32(f)
                f8 = helper.float32_to_float8e5m2(f32)
                assert isinstance(f8, int)
                f32_1 = numpy_helper.float8e5m2_to_float32(np.array([f8]))[0]
                f32_2 = float8e5m2_to_float32(f8)
                if np.isnan(f32):
                    assert np.isnan(f32_1)
                    assert np.isnan(f32_2)
                else:
                    self.assertEqual(f32, f32_1)
                    self.assertEqual(f32, f32_2)
    def test_float8_e4m3fn_inf(self):
        # E4M3FN has no infinity: +/-inf saturates to +/-448,
        # or becomes NaN when saturate=False.
        x = np.float32(np.inf)
        to = helper.float32_to_float8e4m3(x)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertEqual(back, 448)
        x = np.float32(np.inf)
        to = helper.float32_to_float8e4m3(x, saturate=False)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertTrue(np.isnan(back))
        x = np.float32(-np.inf)
        to = helper.float32_to_float8e4m3(x)
        self.assertEqual(to & 0x80, 0x80)  # sign bit preserved
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertEqual(back, -448)
        x = np.float32(-np.inf)
        to = helper.float32_to_float8e4m3(x, saturate=False)
        self.assertEqual(to & 0x80, 0x80)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertTrue(np.isnan(back))

    def test_float8_e4m3fnuz_inf(self):
        # FNUZ variant: max finite magnitude is 240.
        x = np.float32(np.inf)
        to = helper.float32_to_float8e4m3(x, uz=True)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertEqual(back, 240)
        x = np.float32(np.inf)
        to = helper.float32_to_float8e4m3(x, uz=True, saturate=False)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertTrue(np.isnan(back))
        x = np.float32(-np.inf)
        to = helper.float32_to_float8e4m3(x, uz=True)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertEqual(back, -240)
        x = np.float32(-np.inf)
        to = helper.float32_to_float8e4m3(x, uz=True, saturate=False)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertTrue(np.isnan(back))

    def test_float8_e5m2_inf(self):
        # E5M2 has real infinities; saturate clamps to +/-57344 instead.
        x = np.float32(np.inf)
        to = helper.float32_to_float8e5m2(x)
        back = numpy_helper.float8e5m2_to_float32(to)
        self.assertEqual(back, 57344)
        x = np.float32(np.inf)
        to = helper.float32_to_float8e5m2(x, saturate=False)
        back = numpy_helper.float8e5m2_to_float32(to)
        self.assertTrue(np.isinf(back))
        x = np.float32(-np.inf)
        to = helper.float32_to_float8e5m2(x)
        self.assertEqual(to & 0x80, 0x80)  # sign bit preserved
        back = numpy_helper.float8e5m2_to_float32(to)
        self.assertEqual(back, -57344)
        x = np.float32(-np.inf)
        to = helper.float32_to_float8e5m2(x, saturate=False)
        self.assertEqual(to & 0x80, 0x80)
        back = numpy_helper.float8e5m2_to_float32(to)
        self.assertTrue(np.isinf(back))
        self.assertLess(back, 0)

    def test_float8_e5m2fnuz_inf(self):
        # E5M2FNUZ: no infinity; saturates to +/-57344, NaN when saturate=False.
        x = np.float32(np.inf)
        to = helper.float32_to_float8e5m2(x, fn=True, uz=True)
        back = numpy_helper.float8e5m2_to_float32(to, fn=True, uz=True)
        self.assertEqual(back, 57344)
        x = np.float32(np.inf)
        to = helper.float32_to_float8e5m2(x, fn=True, uz=True, saturate=False)
        back = numpy_helper.float8e5m2_to_float32(to, fn=True, uz=True)
        self.assertTrue(np.isnan(back))
        x = np.float32(-np.inf)
        to = helper.float32_to_float8e5m2(x, fn=True, uz=True)
        back = numpy_helper.float8e5m2_to_float32(to, fn=True, uz=True)
        self.assertEqual(back, -57344)
        x = np.float32(-np.inf)
        to = helper.float32_to_float8e5m2(x, fn=True, uz=True, saturate=False)
        back = numpy_helper.float8e5m2_to_float32(to, fn=True, uz=True)
        self.assertTrue(np.isnan(back))
    def test_float8_e4m3fn_out_of_range(self):
        # values beyond the E4M3FN range saturate to +/-448,
        # or become NaN with saturate=False
        x = np.float32(1000000)
        to = helper.float32_to_float8e4m3(x)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertEqual(back, 448)
        x = np.float32(1000000)
        to = helper.float32_to_float8e4m3(x, saturate=False)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertTrue(np.isnan(back))
        x = np.float32(-1000000)
        to = helper.float32_to_float8e4m3(x)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertEqual(back, -448)
        x = np.float32(-1000000)
        to = helper.float32_to_float8e4m3(x, saturate=False)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertTrue(np.isnan(back))

    def test_float8_e4m3fnuz_out_of_range(self):
        # FNUZ variant saturates to +/-240
        x = np.float32(1000000)
        to = helper.float32_to_float8e4m3(x, uz=True)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertEqual(back, 240)
        x = np.float32(1000000)
        to = helper.float32_to_float8e4m3(x, uz=True, saturate=False)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertTrue(np.isnan(back))
        x = np.float32(-1000000)
        to = helper.float32_to_float8e4m3(x, uz=True)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertEqual(back, -240)
        x = np.float32(-1000000)
        to = helper.float32_to_float8e4m3(x, uz=True, saturate=False)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertTrue(np.isnan(back))

    def test_float8_e5m2_out_of_range(self):
        # E5M2 saturates to +/-57344, or overflows to +/-inf with saturate=False
        x = np.float32(1000000)
        to = helper.float32_to_float8e5m2(x)
        back = numpy_helper.float8e5m2_to_float32(to)
        self.assertEqual(back, 57344)
        x = np.float32(1000000)
        to = helper.float32_to_float8e5m2(x, saturate=False)
        back = numpy_helper.float8e5m2_to_float32(to)
        self.assertTrue(np.isinf(back))
        x = np.float32(-1000000)
        to = helper.float32_to_float8e5m2(x)
        back = numpy_helper.float8e5m2_to_float32(to)
        self.assertEqual(back, -57344)
        x = np.float32(-1000000)
        to = helper.float32_to_float8e5m2(x, saturate=False)
        back = numpy_helper.float8e5m2_to_float32(to)
        self.assertTrue(np.isinf(back))

    def test_float8_e5m2fnuz_out_of_range(self):
        # E5M2FNUZ saturates to +/-57344, or NaN with saturate=False (no inf)
        x = np.float32(1000000)
        to = helper.float32_to_float8e5m2(x, fn=True, uz=True)
        back = numpy_helper.float8e5m2_to_float32(to, fn=True, uz=True)
        self.assertEqual(back, 57344)
        x = np.float32(1000000)
        to = helper.float32_to_float8e5m2(x, fn=True, uz=True, saturate=False)
        back = numpy_helper.float8e5m2_to_float32(to, fn=True, uz=True)
        self.assertTrue(np.isnan(back))
        x = np.float32(-1000000)
        to = helper.float32_to_float8e5m2(x, fn=True, uz=True)
        back = numpy_helper.float8e5m2_to_float32(to, fn=True, uz=True)
        self.assertEqual(back, -57344)
        x = np.float32(-1000000)
        to = helper.float32_to_float8e5m2(x, fn=True, uz=True, saturate=False)
        back = numpy_helper.float8e5m2_to_float32(to, fn=True, uz=True)
        self.assertTrue(np.isnan(back))
    def test_float8_e4m3fn_negative_zero(self):
        # -0 survives the E4M3FN round-trip as bit pattern 0x80, decoding to 0.
        x = numpy_helper.float8e5m2_to_float32(0x80)  # -0
        to = helper.float32_to_float8e4m3(x)
        self.assertEqual(to, 0x80)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertEqual(back, 0)
        x = numpy_helper.float8e5m2_to_float32(0x80)  # -0
        to = helper.float32_to_float8e4m3(x, saturate=False)
        self.assertEqual(to, 0x80)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertEqual(back, 0)

    def test_float8_e4m3fnuz_negative_zero(self):
        # FNUZ has no -0: it must be encoded as +0 (bit pattern 0).
        x = numpy_helper.float8e5m2_to_float32(0x80)  # -0
        to = helper.float32_to_float8e4m3(x, uz=True)
        self.assertEqual(to, 0)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertEqual(back, 0)
        x = numpy_helper.float8e5m2_to_float32(0x80)  # -0
        to = helper.float32_to_float8e4m3(x, uz=True, saturate=False)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertEqual(back, 0)
        self.assertEqual(to, 0)
def test_float8_e5m2_negative_zero(self):
x = numpy_helper.float8e5m2_to_float32(0x80) # -0
to = helper.float32_to_float8e5m2(x)
self.assertEqual(to, 0x80)
back = numpy_helper.float8e4m3_to_float32(to)
self.assertEqual(back, 0)
x = numpy_helper.float8e5m2_to_float32(0x80) # -0
to = helper.float32_to_float8e5m2(x, saturate=False)
self.assertEqual(to, 0x80)
back = numpy_helper.float8e4m3_to_float32(to)
self.assertEqual(back, 0)
def test_float8_e5m2fnuz_negative_zero(self):
x = numpy_helper.float8e5m2_to_float32(0x80) # -0
to = helper.float32_to_float8e5m2(x, fn=True, uz=True)
self.assertEqual(to, 0)
back = numpy_helper.float8e4m3_to_float32(to, fn=True, uz=True)
self.assertEqual(back, 0)
x = numpy_helper.float8e5m2_to_float32(0x80) # -0
to = helper.float32_to_float8e5m2(x, fn=True, uz=True, saturate=False)
self.assertEqual(to, 0)
back = numpy_helper.float8e4m3_to_float32(to, fn=True, uz=True)
self.assertEqual(back, 0)
    def test_float8_e4m3fn_negative_nan(self):
        # -NaN (pattern 255) stays -NaN through the E4M3FN round-trip.
        x = numpy_helper.float8e5m2_to_float32(255)  # -nan
        to = helper.float32_to_float8e4m3(x)
        self.assertEqual(to, 255)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertTrue(np.isnan(back))
        x = numpy_helper.float8e5m2_to_float32(255)  # -nan
        to = helper.float32_to_float8e4m3(x, saturate=False)
        self.assertEqual(to, 255)
        back = numpy_helper.float8e4m3_to_float32(to)
        self.assertTrue(np.isnan(back))

    def test_float8_e4m3fnuz_negative_nan(self):
        # FNUZ encodes NaN as the single pattern 0x80 regardless of sign.
        x = numpy_helper.float8e5m2_to_float32(255)  # -nan
        to = helper.float32_to_float8e4m3(x, uz=True)
        self.assertEqual(to, 0x80)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertTrue(np.isnan(back))
        x = numpy_helper.float8e5m2_to_float32(255)  # -nan
        to = helper.float32_to_float8e4m3(x, uz=True, saturate=False)
        self.assertEqual(to, 0x80)
        back = numpy_helper.float8e4m3_to_float32(to, uz=True)
        self.assertTrue(np.isnan(back))
def test_float8_e5m2_negative_nan(self):
x = numpy_helper.float8e5m2_to_float32(255) # -nan
to = helper.float32_to_float8e5m2(x)
self.assertEqual(to, 255)
back = numpy_helper.float8e4m3_to_float32(to)
self.assertTrue(np.isnan(back))
x = numpy_helper.float8e5m2_to_float32(255) # -nan
to = helper.float32_to_float8e5m2(x, saturate=False)
self.assertEqual(to, 255)
back = numpy_helper.float8e4m3_to_float32(to)
self.assertTrue(np.isnan(back))
def test_float8_e5m2fnuz_negative_nan(self):
x = numpy_helper.float8e5m2_to_float32(255) # -nan
to = helper.float32_to_float8e5m2(x, fn=True, uz=True)
self.assertEqual(to, 0x80)
back = numpy_helper.float8e4m3_to_float32(to, fn=True, uz=True)
self.assertTrue(np.isnan(back))
x = numpy_helper.float8e5m2_to_float32(255) # -nan
to = helper.float32_to_float8e5m2(x, fn=True, uz=True, saturate=False)
self.assertEqual(to, 0x80)
back = numpy_helper.float8e4m3_to_float32(to, fn=True, uz=True)
self.assertTrue(np.isnan(back))
def test_from_dict_values_are_np_arrays_of_float(self):
map_proto = numpy_helper.from_dict({0: np.array(0.1), 1: np.array(0.9)})
self.assertIsInstance(map_proto, onnx.MapProto)
self.assertEqual(
numpy_helper.to_array(map_proto.values.tensor_values[0]), np.array(0.1)
)
self.assertEqual(
numpy_helper.to_array(map_proto.values.tensor_values[1]), np.array(0.9)
)
def test_from_dict_values_are_np_arrays_of_int(self):
map_proto = numpy_helper.from_dict({0: np.array(1), 1: np.array(9)})
self.assertIsInstance(map_proto, onnx.MapProto)
self.assertEqual(
numpy_helper.to_array(map_proto.values.tensor_values[0]), np.array(1)
)
self.assertEqual(
numpy_helper.to_array(map_proto.values.tensor_values[1]), np.array(9)
)
def test_from_dict_values_are_np_arrays_of_ints(self):
zero_array = np.array([1, 2])
one_array = np.array([9, 10])
map_proto = numpy_helper.from_dict({0: zero_array, 1: one_array})
self.assertIsInstance(map_proto, onnx.MapProto)
out_tensor = numpy_helper.to_array(map_proto.values.tensor_values[0])
self.assertEqual(out_tensor[0], zero_array[0])
self.assertEqual(out_tensor[1], zero_array[1])
out_tensor = numpy_helper.to_array(map_proto.values.tensor_values[1])
self.assertEqual(out_tensor[0], one_array[0])
self.assertEqual(out_tensor[1], one_array[1])
def test_from_dict_raises_type_error_when_values_are_not_np_arrays(self):
with self.assertRaises(TypeError):
# from_dict/from_array expects tensors to be numpy array's or similar.
numpy_helper.from_dict({0: 0.1, 1: 0.9})
def test_from_dict_differing_key_types(self):
    """Mixing int and float keys must raise a TypeError."""
    self.assertRaises(
        TypeError, numpy_helper.from_dict, {0: np.array(0.1), 1.1: np.array(0.9)}
    )
def test_from_dict_differing_value_types(self):
    """Mixing int- and float-valued arrays must raise a TypeError."""
    self.assertRaises(
        TypeError, numpy_helper.from_dict, {0: np.array(1), 1: np.array(0.9)}
    )
def _to_array_from_array(self, value: int, check_dtype: bool = True):
    """Round-trip helper: Cast float32 data to elem type ``value`` with the
    reference evaluator, then from_array -> to_array -> from_array and check
    the TensorProto is reproduced exactly.

    Args:
        value: a TensorProto elem type constant (e.g. onnx.TensorProto.FLOAT16).
        check_dtype: set False to skip the numpy dtype equality check
            (used for STRING, where the round-tripped dtype differs).
    """
    onnx_model = helper.make_model(
        helper.make_graph(
            [helper.make_node("Cast", ["X"], ["Y"], to=value)],
            "test",
            [helper.make_tensor_value_info("X", onnx.TensorProto.FLOAT, [4])],
            [helper.make_tensor_value_info("Y", value, [4])],
        )
    )
    ref = onnx.reference.ReferenceEvaluator(onnx_model)
    start = ref.run(None, {"X": np.array([0, 1, -2, 3], dtype=np.float32)})
    # First conversion: numpy array -> TensorProto.
    tp = numpy_helper.from_array(start[0], name="check")
    self.assertEqual(tp.data_type, value)
    back = numpy_helper.to_array(tp)
    self.assertEqual(start[0].shape, back.shape)
    if check_dtype:
        self.assertEqual(start[0].dtype, back.dtype)
    # Second conversion must reproduce the proto byte-for-byte.
    again = numpy_helper.from_array(back, name="check")
    self.assertEqual(tp.data_type, again.data_type)
    self.assertEqual(tp.name, again.name)
    self.assertEqual(len(tp.raw_data), len(again.raw_data))
    self.assertEqual(list(tp.raw_data), list(again.raw_data))
    self.assertEqual(tp.raw_data, again.raw_data)
    self.assertEqual(tuple(tp.dims), tuple(again.dims))
    self.assertEqual(tp.SerializeToString(), again.SerializeToString())
    # The numpy dtype must map back to the same elem type.
    self.assertEqual(tp.data_type, helper.np_dtype_to_tensor_dtype(back.dtype))
@parameterized.parameterized.expand([(att,) for att in dir(onnx.TensorProto)])
def test_to_array_from_array(self, att):
    """Round-trip every numeric TensorProto elem type through the helper.

    Skips types with dedicated tests below (INT4/UINT4/STRING), the
    non-type constants (UNDEFINED/DEFAULT/NAME_FIELD_NUMBER), and every
    attribute that is not an upper-case integer constant.
    """
    if att in {
        "INT4",
        "UINT4",
        "STRING",
        "UNDEFINED",
        "DEFAULT",
        "NAME_FIELD_NUMBER",
    }:
        return
    # Elem types are named with a leading upper-case letter (FLOAT, INT8, ...).
    if att[0] < "A" or att[0] > "Z":
        return
    value = getattr(onnx.TensorProto, att)
    # Filter out methods, nested message types and other non-int attributes.
    if not isinstance(value, int):
        return
    self._to_array_from_array(value)
def test_to_array_from_array_subtype(self):
    """INT4/UINT4 (sub-byte types) round-trip through the shared helper."""
    for elem_type in (onnx.TensorProto.INT4, onnx.TensorProto.UINT4):
        self._to_array_from_array(elem_type)
def test_to_array_from_array_string(self):
    """STRING tensors round-trip; the dtype check is skipped because the
    round-tripped numpy dtype differs for strings."""
    self._to_array_from_array(onnx.TensorProto.STRING, check_dtype=False)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)

View File

@ -0,0 +1,317 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
from parameterized import parameterized
import onnx
from onnx import GraphProto, OperatorSetIdProto, TensorProto, checker
class TestBasicFunctions(unittest.TestCase):
    """Tests for onnx.parser: parsing graphs, models, functions and nodes
    from the ONNX textual representation."""

    def check_graph(self, graph: GraphProto) -> None:
        # Shared assertion helper: the parsed graph must be the 3-node
        # MatMul -> Add -> Softmax chain used by several tests below.
        self.assertEqual(len(graph.node), 3)
        self.assertEqual(graph.node[0].op_type, "MatMul")
        self.assertEqual(graph.node[1].op_type, "Add")
        self.assertEqual(graph.node[2].op_type, "Softmax")

    def test_parse_graph(self) -> None:
        # Graph text without a model header.
        input = """
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul(X, W)
S = Add(T, B)
C = Softmax(S)
}
"""
        graph = onnx.parser.parse_graph(input)
        self.check_graph(graph)

    def test_parse_model(self) -> None:
        # Model header (< ... >) followed by the graph.
        input = """
<
ir_version: 7,
opset_import: [ "" : 10, "com.microsoft": 1]
>
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul(X, W)
S = Add(T, B)
C = Softmax(S)
}
"""
        model = onnx.parser.parse_model(input)
        self.assertEqual(model.ir_version, 7)
        self.assertEqual(len(model.opset_import), 2)
        self.check_graph(model.graph)

    def test_parse_graph_error(self) -> None:
        # "MatMul[X, W]" uses square brackets instead of parentheses and
        # must be rejected.
        input = """
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul[X, W]
S = Add(T, B)
C = Softmax(S)
}
"""
        self.assertRaises(
            onnx.parser.ParseError, lambda: onnx.parser.parse_graph(input)
        )

    def test_parse_model_error(self) -> None:
        # Missing comma between the two opset_import entries.
        input = """
<
ir_version: 7,
opset_import: [ "" : 10 "com.microsoft": 1]
>
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul(X, W)
S = Add(T, B)
C = Softmax(S)
}
"""
        self.assertRaises(
            onnx.parser.ParseError, lambda: onnx.parser.parse_model(input)
        )

    def test_parse_function_with_attributes(self) -> None:
        # A model with a model-local function ("custom_domain.Selu") whose
        # attributes have defaults; the parsed model must pass the checker.
        input = """
<
ir_version: 9,
opset_import: [ "" : 15, "custom_domain" : 1],
producer_name: "FunctionProtoTest",
producer_version: "1.0",
model_version: 1,
doc_string: "A test model for model local functions."
>
agraph (float[N] x) => (float[N] out)
{
out = custom_domain.Selu<alpha=2.0, gamma=3.0>(x)
}
<
domain: "custom_domain",
opset_import: [ "" : 15],
doc_string: "Test function proto"
>
Selu
<alpha: float=1.67326319217681884765625, gamma: float=1.05070102214813232421875>
(X) => (C)
{
constant_alpha = Constant<value_float: float=@alpha>()
constant_gamma = Constant<value_float: float=@gamma>()
alpha_x = CastLike(constant_alpha, X)
gamma_x = CastLike(constant_gamma, X)
exp_x = Exp(X)
alpha_x_exp_x = Mul(alpha_x, exp_x)
alpha_x_exp_x_ = Sub(alpha_x_exp_x, alpha_x)
neg = Mul(gamma_x, alpha_x_exp_x_)
pos = Mul(gamma_x, X)
_zero = Constant<value_float=0.0>()
zero = CastLike(_zero, X)
less_eq = LessOrEqual(X, zero)
C = Where(less_eq, neg, pos)
}
"""
        model = onnx.parser.parse_model(input)
        checker.check_model(model)

    @parameterized.expand(
        [
            (
                "agraph (float[N] x) => (float[N] out) { out = custom_domain.Selu(x) }",
                {},
            ),
            (
                "agraph (float[N] x) => (float[N] out) { out = custom_domain.Selu<alpha=2.0>(x) }",
                {"alpha": 2.0},
            ),
            (
                "agraph (float[N] x) => (float[N] out) { out = custom_domain.Selu<gamma=3.0>(x) }",
                {"gamma": 3.0},
            ),
            (
                "agraph (float[N] x) => (float[N] out) { out = custom_domain.Selu<alpha=2.0, gamma=3.0>(x) }",
                {"alpha": 2.0, "gamma": 3.0},
            ),
        ]
    )
    def test_composite_parse_function_with_attributes(
        self, graph_text: str, expected_attribute: dict
    ) -> None:
        # Parse function and graph separately, assemble a model, and check
        # both the function's declared defaults and the node's overrides.
        default_alpha = 1.67326319217681884765625
        default_gamma = 1.05070102214813232421875

        def expect_custom_node_attribute(node, attributes):
            # Each supplied attribute must appear exactly once on the node.
            for key in attributes:
                match_attr = [attr for attr in node.attribute if attr.name == key]
                assert len(match_attr) == 1
                assert match_attr[0].f == attributes[key]

        def expect_model_function_attribute(model):
            # The function always declares both attributes with defaults.
            assert len(model.functions[0].attribute_proto) == 2
            attr_proto_alpha = [
                attr_proto
                for attr_proto in model.functions[0].attribute_proto
                if attr_proto.name == "alpha"
            ]
            assert len(attr_proto_alpha) == 1 and attr_proto_alpha[0].f == default_alpha
            attr_proto_gamma = [
                attr_proto
                for attr_proto in model.functions[0].attribute_proto
                if attr_proto.name == "gamma"
            ]
            assert len(attr_proto_gamma) == 1 and attr_proto_gamma[0].f == default_gamma

        function_text = f"""
<
domain: "custom_domain",
opset_import: [ "" : 15],
doc_string: "Test function proto"
>
Selu
<alpha: float={default_alpha}, gamma: float={default_gamma}>
(X) => (C)
{{
constant_alpha = Constant<value_float: float=@alpha>()
constant_gamma = Constant<value_float: float=@gamma>()
alpha_x = CastLike(constant_alpha, X)
gamma_x = CastLike(constant_gamma, X)
exp_x = Exp(X)
alpha_x_exp_x = Mul(alpha_x, exp_x)
alpha_x_exp_x_ = Sub(alpha_x_exp_x, alpha_x)
neg = Mul(gamma_x, alpha_x_exp_x_)
pos = Mul(gamma_x, X)
_zero = Constant<value_float=0.0>()
zero = CastLike(_zero, X)
less_eq = LessOrEqual(X, zero)
C = Where(less_eq, neg, pos)
}}
"""
        functions = [onnx.parser.parse_function(function_text)]
        graph = onnx.parser.parse_graph(graph_text)
        opset_imports = [
            OperatorSetIdProto(domain="", version=15),
            OperatorSetIdProto(domain="custom_domain", version=1),
        ]
        model = onnx.helper.make_model(
            graph, functions=functions, opset_imports=opset_imports
        )
        checker.check_model(model)
        expect_model_function_attribute(model)
        expect_custom_node_attribute(model.graph.node[0], expected_attribute)

    def test_parse_node(self):
        # Single-node parse: inputs, outputs, attribute, domain and op_type.
        node = onnx.parser.parse_node(
            "out1, out2 = SomeDomain.SomeOp <attr1 = 1> (in1, in2)"
        )
        self.assertEqual(list(node.input), ["in1", "in2"])
        self.assertEqual(list(node.output), ["out1", "out2"])
        self.assertEqual(len(node.attribute), 1)
        attr_val = onnx.helper.get_node_attr_value(node, "attr1")
        self.assertEqual(attr_val, 1)
        self.assertEqual(node.domain, "SomeDomain")
        self.assertEqual(node.op_type, "SomeOp")

    @parameterized.expand(
        [
            # (float literal, expect ParseError)
            ("not_a_good_float", True),
            ("inf1", True),
            ("-inf1", True),
            ("nan0", True),
            ("-nan0", True),
            ("naninf", True),
            ("inf", False),
            ("-inf", False),
            ("infinity", False),
            ("-infinity", False),
            ("nan", False),
            ("-NaN", False),
        ]
    )
    def test_parse_various_float_values(self, test_literal, expect_exception):
        # inf/nan spellings (any case) are valid float literals; anything
        # with trailing garbage such as "inf1" must be rejected.
        model_text = f"""
<
ir_version: 8,
opset_import: ["" : 18, "this" : 1],
producer_name: "FunctionProtoTest",
producer_version: "1.0"
>
_func () => ()
{{
tmp = Constant <value_float = {test_literal}>()
}}
"""
        if expect_exception:
            self.assertRaises(
                onnx.parser.ParseError, lambda: onnx.parser.parse_model(model_text)
            )
        else:
            model = onnx.parser.parse_model(model_text)
            self.assertEqual(model.ir_version, 8)
            self.assertEqual(model.producer_name, "FunctionProtoTest")
            self.assertEqual(model.producer_version, "1.0")
            self.assertEqual(len(model.graph.node), 1)
            self.assertEqual(len(model.graph.node[0].attribute), 1)
            self.assertEqual(model.graph.node[0].attribute[0].name, "value_float")
            self.assertEqual(
                model.graph.node[0].attribute[0].type, onnx.AttributeProto.FLOAT
            )
            # Compare via str() so nan == nan and spelling case don't matter.
            self.assertEqual(
                str(model.graph.node[0].attribute[0].f), str(float(test_literal))
            )

    @parameterized.expand(
        [
            ("bfloat16", TensorProto.BFLOAT16),
            ("bool", TensorProto.BOOL),
            ("complex64", TensorProto.COMPLEX64),
            ("complex128", TensorProto.COMPLEX128),
            ("double", TensorProto.DOUBLE),
            ("float16", TensorProto.FLOAT16),
            ("float", TensorProto.FLOAT),
            ("float8e4m3fn", TensorProto.FLOAT8E4M3FN),
            ("float8e4m3fnuz", TensorProto.FLOAT8E4M3FNUZ),
            ("float8e5m2", TensorProto.FLOAT8E5M2),
            ("float8e5m2fnuz", TensorProto.FLOAT8E5M2FNUZ),
            ("int4", TensorProto.INT4),
            ("int8", TensorProto.INT8),
            ("int16", TensorProto.INT16),
            ("int32", TensorProto.INT32),
            ("int64", TensorProto.INT64),
            ("string", TensorProto.STRING),
            ("uint4", TensorProto.UINT4),
            ("uint8", TensorProto.UINT8),
            ("uint16", TensorProto.UINT16),
            ("uint32", TensorProto.UINT32),
            ("uint64", TensorProto.UINT64),
        ]
    )
    def test_parse_graph_types(self, name, itype) -> None:
        # Each elem-type keyword must parse both as an output type and as
        # an initializer type; STRING initializer values need quoting.
        w = '{"0"}' if itype == TensorProto.STRING else "{0}"
        text_graph = f"""
<
ir_version: 10,
opset_import: [ "" : 19]
>
agraph (float[N] X) => ({name}[N] C)
<
{name}[1] weight = {w}
>
{{
C = Cast<to={itype}>(X)
}}
"""
        graph = onnx.parser.parse_model(text_graph)
        self.assertEqual(len(graph.graph.node), 1)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)

View File

@ -0,0 +1,40 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
import onnx
from onnx import parser, printer
class TestBasicFunctions(unittest.TestCase):
    """Round-trip tests for onnx.printer / onnx.parser."""

    def check_graph(self, graph: onnx.GraphProto) -> None:
        # The graph must be the MatMul -> Add -> Softmax chain.
        self.assertEqual(len(graph.node), 3)
        self.assertEqual(graph.node[0].op_type, "MatMul")
        self.assertEqual(graph.node[1].op_type, "Add")
        self.assertEqual(graph.node[2].op_type, "Softmax")

    def test_parse_graph(self) -> None:
        source = """
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul(X, W)
S = Add(T, B)
C = Softmax(S)
}
"""
        first_graph = parser.parse_graph(source)
        first_text = printer.to_text(first_graph)
        second_graph = parser.parse_graph(first_text)
        second_text = printer.to_text(second_graph)
        # The hand-written source and the printed text may differ in
        # whitespace and other syntactic sugar, but printing a reparsed
        # print must be a fixed point: first_text == second_text.
        self.assertEqual(first_text, second_text)
        self.check_graph(second_graph)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,132 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# type: ignore
from __future__ import annotations
import unittest
import numpy as np
import onnx
import onnx.helper as oh
import onnx.numpy_helper as onh
import onnx.reference as orf
def create_model():
    """Build a model with a Loop node driven only by its condition input.

    The model is equivalent to the following onnxscript function:

    .. code-block:: python

        from onnx import TensorProto
        from onnx.helper import make_tensor
        from onnxscript import script
        from onnxscript.onnx_opset import opset15 as op
        from onnxscript.onnx_types import FLOAT

        @script()
        def loop_range_cond_only(A: FLOAT["N"]) -> FLOAT["N"]:
            T = A
            cond = op.Constant(value=make_tensor("true", TensorProto.BOOL, [1], [1]))
            while cond:
                T = T + A
                cond = op.ReduceSum(T) > -10
            return T

        model = loop_range_cond_only.to_model_proto()
    """
    opset_imports = [
        oh.make_opsetid("", 15),
    ]
    inputs = []
    outputs = []
    nodes = []
    initializers = []
    sparse_initializers = []
    functions = []
    inputs.append(oh.make_tensor_value_info("A", onnx.TensorProto.FLOAT, shape=("N",)))
    # NOTE(review): this "cond" constant is never consumed below (the Loop
    # node takes "true" as its condition) — presumably generator residue.
    nodes.append(
        oh.make_node(
            "Constant",
            [],
            ["cond"],
            value=onh.from_array(np.array([True], dtype=np.bool_), name="value"),
        )
    )
    # Scalar True used as the Loop's initial condition.
    nodes.append(
        oh.make_node(
            "Constant",
            [],
            ["true"],
            value=onh.from_array(np.array(True, dtype=np.bool_), name="value"),
        )
    )

    def _make_local_graph_body():
        # Loop body: T_0 = T + A; cond_out = (ReduceSum(T_0) > -10).
        inputs = []
        outputs = []
        nodes = []
        initializers = []
        sparse_initializers = []
        inputs.append(
            oh.make_tensor_value_info("infinite_loop", onnx.TensorProto.INT64, shape=[])
        )
        inputs.append(
            oh.make_tensor_value_info("cond", onnx.TensorProto.BOOL, shape=[])
        )
        inputs.append(oh.make_tensor_value_info("T", onnx.TensorProto.UNDEFINED, []))
        nodes.append(oh.make_node("Add", ["T", "A"], ["T_0"]))
        nodes.append(oh.make_node("ReduceSum", ["T_0"], ["tmp"]))
        nodes.append(
            oh.make_node(
                "Constant",
                [],
                ["int64_m10"],
                value=onh.from_array(np.array(-10, dtype=np.int64), name="value"),
            )
        )
        # CastLike so the -10 threshold matches the summed dtype.
        nodes.append(oh.make_node("CastLike", ["int64_m10", "tmp"], ["int64_m10_cast"]))
        nodes.append(oh.make_node("Greater", ["tmp", "int64_m10_cast"], ["cond_1"]))
        nodes.append(oh.make_node("Identity", ["cond_1"], ["cond_out"]))
        outputs.append(
            oh.make_tensor_value_info("cond_out", onnx.TensorProto.BOOL, shape=[])
        )
        outputs.append(oh.make_tensor_value_info("T_0", onnx.TensorProto.UNDEFINED, []))
        graph = oh.make_graph(
            nodes,
            "loop_body",
            inputs,
            outputs,
            initializers,
            sparse_initializer=sparse_initializers,
        )
        return graph

    body = _make_local_graph_body()
    # Trip-count input is "" (absent): the loop runs until cond_out is false.
    nodes.append(oh.make_node("Loop", ["", "true", "A"], ["T_2"], body=body))
    outputs.append(
        oh.make_tensor_value_info("T_2", onnx.TensorProto.FLOAT, shape=("N",))
    )
    graph = oh.make_graph(
        nodes,
        "loop_range_cond_only",
        inputs,
        outputs,
        initializers,
        sparse_initializer=sparse_initializers,
    )
    model = oh.make_model(graph, functions=functions, opset_imports=opset_imports)
    return model
class TestReferenceEvaluatorModel(unittest.TestCase):
    """Runs the condition-driven Loop model through the reference evaluator."""

    # NOTE(review): the name mentions "fft" but the model only exercises a
    # condition-driven Loop — consider renaming.
    def test_loop_fft(self):
        model = create_model()
        session = orf.ReferenceEvaluator(model)
        # Negative inputs make ReduceSum decrease, so the loop terminates.
        session.run(None, {"A": -np.arange(10).astype(np.float32)})
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,18 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
from onnx import defs, helper
class TestRelu(unittest.TestCase):
    """Smoke test: the Relu op schema exists and a Relu node can be built."""

    def test_relu(self) -> None:
        self.assertTrue(defs.has("Relu"))
        # make_node raises on malformed input; no further assertion needed.
        helper.make_node("Relu", ["X"], ["Y"])
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,429 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import contextlib
import unittest
from typing import Sequence
import parameterized
import onnx
from onnx import defs
class TestSchema(unittest.TestCase):
    """Sanity checks on the schemas shipped with onnx.defs."""

    def test_get_schema(self) -> None:
        # Raises if the Relu schema is missing.
        defs.get_schema("Relu")

    def test_typecheck(self) -> None:
        # Raises if the Conv schema is missing.
        defs.get_schema("Conv")

    def test_attr_default_value(self) -> None:
        epsilon = defs.get_schema("BatchNormalization").attributes["epsilon"]
        default = epsilon.default_value
        self.assertEqual(type(default), onnx.AttributeProto)
        self.assertEqual(default.type, onnx.AttributeProto.FLOAT)

    def test_function_body(self) -> None:
        body = defs.get_schema("Selu").function_body
        self.assertEqual(type(body), onnx.FunctionProto)
class TestOpSchema(unittest.TestCase):
    """Constructing defs.OpSchema from Python and reading its accessors."""

    def test_init(self):
        # Test that the constructor creates an OpSchema object
        schema = defs.OpSchema("test_op", "test_domain", 1)
        self.assertIsInstance(schema, defs.OpSchema)

    def test_init_with_inputs(self) -> None:
        # A single formal input typed by the constraint "T".
        op_schema = defs.OpSchema(
            "test_op",
            "test_domain",
            1,
            inputs=[defs.OpSchema.FormalParameter("input1", "T")],
            type_constraints=[("T", ["tensor(int64)"], "")],
        )
        self.assertEqual(op_schema.name, "test_op")
        self.assertEqual(op_schema.domain, "test_domain")
        self.assertEqual(op_schema.since_version, 1)
        self.assertEqual(len(op_schema.inputs), 1)
        self.assertEqual(op_schema.inputs[0].name, "input1")
        self.assertEqual(op_schema.inputs[0].type_str, "T")
        self.assertEqual(len(op_schema.type_constraints), 1)
        self.assertEqual(op_schema.type_constraints[0].type_param_str, "T")
        self.assertEqual(
            op_schema.type_constraints[0].allowed_type_strs, ["tensor(int64)"]
        )

    def test_init_creates_multi_input_output_schema(self) -> None:
        # Two inputs, two outputs and an INTS attribute, all preserved.
        op_schema = defs.OpSchema(
            "test_op",
            "test_domain",
            1,
            inputs=[
                defs.OpSchema.FormalParameter("input1", "T"),
                defs.OpSchema.FormalParameter("input2", "T"),
            ],
            outputs=[
                defs.OpSchema.FormalParameter("output1", "T"),
                defs.OpSchema.FormalParameter("output2", "T"),
            ],
            type_constraints=[("T", ["tensor(int64)"], "")],
            attributes=[
                defs.OpSchema.Attribute(
                    "attr1", defs.OpSchema.AttrType.INTS, "attr1 description"
                )
            ],
        )
        self.assertEqual(len(op_schema.inputs), 2)
        self.assertEqual(op_schema.inputs[0].name, "input1")
        self.assertEqual(op_schema.inputs[0].type_str, "T")
        self.assertEqual(op_schema.inputs[1].name, "input2")
        self.assertEqual(op_schema.inputs[1].type_str, "T")
        self.assertEqual(len(op_schema.outputs), 2)
        self.assertEqual(op_schema.outputs[0].name, "output1")
        self.assertEqual(op_schema.outputs[0].type_str, "T")
        self.assertEqual(op_schema.outputs[1].name, "output2")
        self.assertEqual(op_schema.outputs[1].type_str, "T")
        self.assertEqual(len(op_schema.type_constraints), 1)
        self.assertEqual(op_schema.type_constraints[0].type_param_str, "T")
        self.assertEqual(
            op_schema.type_constraints[0].allowed_type_strs, ["tensor(int64)"]
        )
        self.assertEqual(len(op_schema.attributes), 1)
        self.assertEqual(op_schema.attributes["attr1"].name, "attr1")
        self.assertEqual(
            op_schema.attributes["attr1"].type, defs.OpSchema.AttrType.INTS
        )
        self.assertEqual(op_schema.attributes["attr1"].description, "attr1 description")

    def test_init_without_optional_arguments(self) -> None:
        # Defaults: no inputs, outputs or type constraints.
        op_schema = defs.OpSchema("test_op", "test_domain", 1)
        self.assertEqual(op_schema.name, "test_op")
        self.assertEqual(op_schema.domain, "test_domain")
        self.assertEqual(op_schema.since_version, 1)
        self.assertEqual(len(op_schema.inputs), 0)
        self.assertEqual(len(op_schema.outputs), 0)
        self.assertEqual(len(op_schema.type_constraints), 0)

    def test_name(self):
        # Test that the name parameter is required and is a string
        with self.assertRaises(TypeError):
            defs.OpSchema(domain="test_domain", since_version=1)  # type: ignore
        with self.assertRaises(TypeError):
            defs.OpSchema(123, "test_domain", 1)  # type: ignore
        schema = defs.OpSchema("test_op", "test_domain", 1)
        self.assertEqual(schema.name, "test_op")

    def test_domain(self):
        # Test that the domain parameter is required and is a string
        with self.assertRaises(TypeError):
            defs.OpSchema(name="test_op", since_version=1)  # type: ignore
        with self.assertRaises(TypeError):
            defs.OpSchema("test_op", 123, 1)  # type: ignore
        schema = defs.OpSchema("test_op", "test_domain", 1)
        self.assertEqual(schema.domain, "test_domain")

    def test_since_version(self):
        # Test that the since_version parameter is required and is an integer
        with self.assertRaises(TypeError):
            defs.OpSchema("test_op", "test_domain")  # type: ignore
        schema = defs.OpSchema("test_op", "test_domain", 1)
        self.assertEqual(schema.since_version, 1)

    def test_doc(self):
        schema = defs.OpSchema("test_op", "test_domain", 1, doc="test_doc")
        self.assertEqual(schema.doc, "test_doc")

    def test_inputs(self):
        # Test that the inputs parameter is optional and is a sequence of FormalParameter tuples
        inputs = [
            defs.OpSchema.FormalParameter(
                name="input1", type_str="T", description="The first input."
            )
        ]
        schema = defs.OpSchema(
            "test_op",
            "test_domain",
            1,
            inputs=inputs,
            type_constraints=[("T", ["tensor(int64)"], "")],
        )
        self.assertEqual(len(schema.inputs), 1)
        self.assertEqual(schema.inputs[0].name, "input1")
        self.assertEqual(schema.inputs[0].type_str, "T")
        self.assertEqual(schema.inputs[0].description, "The first input.")

    def test_outputs(self):
        # Test that the outputs parameter is optional and is a sequence of FormalParameter tuples
        outputs = [
            defs.OpSchema.FormalParameter(
                name="output1", type_str="T", description="The first output."
            )
        ]
        schema = defs.OpSchema(
            "test_op",
            "test_domain",
            1,
            outputs=outputs,
            type_constraints=[("T", ["tensor(int64)"], "")],
        )
        self.assertEqual(len(schema.outputs), 1)
        self.assertEqual(schema.outputs[0].name, "output1")
        self.assertEqual(schema.outputs[0].type_str, "T")
        self.assertEqual(schema.outputs[0].description, "The first output.")
class TestFormalParameter(unittest.TestCase):
    """Construction of OpSchema.FormalParameter preserves all fields."""

    def test_init(self):
        options = dict(
            param_option=defs.OpSchema.FormalParameterOption.Single,
            is_homogeneous=True,
            min_arity=1,
            differentiation_category=defs.OpSchema.DifferentiationCategory.Unknown,
        )
        parameter = defs.OpSchema.FormalParameter(
            "input1", "tensor(float)", "The first input.", **options
        )
        self.assertEqual(parameter.name, "input1")
        self.assertEqual(parameter.type_str, "tensor(float)")
        self.assertEqual(parameter.description, "The first input.")
        self.assertEqual(parameter.option, options["param_option"])
        self.assertEqual(parameter.is_homogeneous, options["is_homogeneous"])
        self.assertEqual(parameter.min_arity, options["min_arity"])
        self.assertEqual(
            parameter.differentiation_category, options["differentiation_category"]
        )
class TestTypeConstraintParam(unittest.TestCase):
    """Construction of OpSchema.TypeConstraintParam preserves all fields."""

    @parameterized.parameterized.expand(
        [
            ("single_type", "T", ["tensor(float)"], "Test description"),
            (
                "double_types",
                "T",
                ["tensor(float)", "tensor(int64)"],
                "Test description",
            ),
            ("tuple", "T", ("tensor(float)", "tensor(int64)"), "Test description"),
        ]
    )
    def test_init(
        self,
        _: str,
        type_param_str: str,
        allowed_types: Sequence[str],
        description: str,
    ) -> None:
        constraint = defs.OpSchema.TypeConstraintParam(
            type_param_str, allowed_types, description
        )
        # allowed_type_strs normalizes tuples to lists.
        self.assertEqual(constraint.type_param_str, type_param_str)
        self.assertEqual(constraint.allowed_type_strs, list(allowed_types))
        self.assertEqual(constraint.description, description)
class TestAttribute(unittest.TestCase):
    """Construction of OpSchema.Attribute."""

    def test_init(self):
        attribute = defs.OpSchema.Attribute(
            "test_attr", defs.OpSchema.AttrType.STRINGS, "Test attribute"
        )
        self.assertEqual(attribute.name, "test_attr")
        self.assertEqual(attribute.type, defs.OpSchema.AttrType.STRINGS)
        self.assertEqual(attribute.description, "Test attribute")

    def test_init_with_default_value(self):
        # Reuse a real default (BatchNormalization.epsilon) so a genuine
        # AttributeProto round-trips through the Attribute constructor.
        default_value = (
            defs.get_schema("BatchNormalization").attributes["epsilon"].default_value
        )
        self.assertIsInstance(default_value, onnx.AttributeProto)
        attribute = defs.OpSchema.Attribute("attr1", default_value, "attr1 description")
        self.assertEqual(attribute.default_value, default_value)
        self.assertEqual(attribute.name, "attr1")
        self.assertEqual(attribute.description, "attr1 description")
@parameterized.parameterized_class(
    [
        # register to exist domain
        {
            "op_type": "CustomOp",
            "op_version": 5,
            "op_domain": "",
            "trap_op_version": [1, 2, 6, 7],
        },
        # register to new domain
        {
            "op_type": "CustomOp",
            "op_version": 5,
            "op_domain": "test",
            "trap_op_version": [1, 2, 6, 7],
        },
    ]
)
class TestOpSchemaRegister(unittest.TestCase):
    """Dynamic schema registration/deregistration through onnx.defs.

    Parameterized over the default domain ("") and a custom domain.
    """

    op_type: str
    op_version: int
    op_domain: str
    # register some fake schema to check behavior
    trap_op_version: list[int]

    def setUp(self) -> None:
        # Ensure the schema is unregistered
        self.assertFalse(onnx.defs.has(self.op_type, self.op_domain))

    def tearDown(self) -> None:
        # Clean up the registered schema
        for version in [*self.trap_op_version, self.op_version]:
            with contextlib.suppress(onnx.defs.SchemaError):
                onnx.defs.deregister_schema(self.op_type, version, self.op_domain)

    def test_register_multi_schema(self):
        # Register one schema per version, then verify each is retrievable
        # and stringifies identically to a freshly built equivalent.
        for version in [*self.trap_op_version, self.op_version]:
            op_schema = defs.OpSchema(
                self.op_type,
                self.op_domain,
                version,
            )
            onnx.defs.register_schema(op_schema)
            self.assertTrue(onnx.defs.has(self.op_type, version, self.op_domain))
        for version in [*self.trap_op_version, self.op_version]:
            # Also make sure the `op_schema` is accessible after register
            registered_op = onnx.defs.get_schema(
                op_schema.name, version, op_schema.domain
            )
            op_schema = defs.OpSchema(
                self.op_type,
                self.op_domain,
                version,
            )
            self.assertEqual(str(registered_op), str(op_schema))

    def test_using_the_specified_version_in_onnx_check(self):
        # The checker must pick the schema whose since_version matches the
        # model's opset import, not one of the "trap" versions.
        input = f"""
<
ir_version: 7,
opset_import: [
"{self.op_domain}" : {self.op_version}
]
>
agraph (float[N, 128] X, int32 Y) => (float[N] Z)
{{
Z = {self.op_domain}.{self.op_type}<attr1=[1,2]>(X, Y)
}}
"""
        model = onnx.parser.parse_model(input)
        op_schema = defs.OpSchema(
            self.op_type,
            self.op_domain,
            self.op_version,
            inputs=[
                defs.OpSchema.FormalParameter("input1", "T"),
                defs.OpSchema.FormalParameter("input2", "int32"),
            ],
            outputs=[
                defs.OpSchema.FormalParameter("output1", "T"),
            ],
            type_constraints=[("T", ["tensor(float)"], "")],
            attributes=[
                defs.OpSchema.Attribute(
                    "attr1", defs.OpSchema.AttrType.INTS, "attr1 description"
                )
            ],
        )
        # Before registration the custom op must fail the check.
        with self.assertRaises(onnx.checker.ValidationError):
            onnx.checker.check_model(model, check_custom_domain=True)
        onnx.defs.register_schema(op_schema)
        # The fake schema will raise check exception if selected in checker
        for version in self.trap_op_version:
            onnx.defs.register_schema(
                defs.OpSchema(
                    self.op_type,
                    self.op_domain,
                    version,
                    outputs=[
                        defs.OpSchema.FormalParameter("output1", "int32"),
                    ],
                )
            )
        onnx.checker.check_model(model, check_custom_domain=True)

    def test_register_schema_raises_error_when_registering_a_schema_twice(self):
        op_schema = defs.OpSchema(
            self.op_type,
            self.op_domain,
            self.op_version,
        )
        onnx.defs.register_schema(op_schema)
        with self.assertRaises(onnx.defs.SchemaError):
            onnx.defs.register_schema(op_schema)

    def test_deregister_the_specified_schema(self):
        # Deregistering one version must leave the other versions intact.
        for version in [*self.trap_op_version, self.op_version]:
            op_schema = defs.OpSchema(
                self.op_type,
                self.op_domain,
                version,
            )
            onnx.defs.register_schema(op_schema)
            self.assertTrue(onnx.defs.has(op_schema.name, version, op_schema.domain))
        onnx.defs.deregister_schema(op_schema.name, self.op_version, op_schema.domain)
        for version in self.trap_op_version:
            self.assertTrue(onnx.defs.has(op_schema.name, version, op_schema.domain))
        # Maybe has lesser op version in trap list
        if onnx.defs.has(op_schema.name, self.op_version, op_schema.domain):
            schema = onnx.defs.get_schema(
                op_schema.name, self.op_version, op_schema.domain
            )
            self.assertLess(schema.since_version, self.op_version)

    def test_deregister_schema_raises_error_when_opschema_does_not_exist(self):
        with self.assertRaises(onnx.defs.SchemaError):
            onnx.defs.deregister_schema(self.op_type, self.op_version, self.op_domain)

    def test_legacy_schema_accessible_after_deregister(self):
        # A previously fetched schema must remain readable through every
        # accessor even after registration churn.
        op_schema = defs.OpSchema(
            self.op_type,
            self.op_domain,
            self.op_version,
        )
        onnx.defs.register_schema(op_schema)
        schema_a = onnx.defs.get_schema(
            op_schema.name, op_schema.since_version, op_schema.domain
        )
        schema_b = onnx.defs.get_schema(op_schema.name, op_schema.domain)

        def filter_schema(schemas):
            return [op for op in schemas if op.name == op_schema.name]

        schema_c = filter_schema(onnx.defs.get_all_schemas())
        schema_d = filter_schema(onnx.defs.get_all_schemas_with_history())
        self.assertEqual(len(schema_c), 1)
        self.assertEqual(len(schema_d), 1)
        # Avoid memory residue and access storage as much as possible
        self.assertEqual(str(schema_a), str(op_schema))
        self.assertEqual(str(schema_b), str(op_schema))
        self.assertEqual(str(schema_c[0]), str(op_schema))
        self.assertEqual(str(schema_d[0]), str(op_schema))
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,97 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import os
import tempfile
import unittest
import onnx
# Textual ONNX model used by the serializer tests below: a main graph that
# calls the model-local function "local.foo", which in turn calls "local.bar".
_TEST_MODEL = """\
<
ir_version: 8,
opset_import: ["" : 17, "local" : 1]
>
agraph (float[N] X) => (float[N] Y) {
Y = local.foo (X)
}
<opset_import: ["" : 17, "local" : 1], domain: "local">
foo (x) => (y) {
temp = Add(x, x)
y = local.bar(temp)
}
<opset_import: ["" : 17], domain: "local">
bar (x) => (y) {
y = Mul (x, x)
}"""
class _OnnxTestTextualSerializer(onnx.serialization.ProtoSerializer):
    """Serialize and deserialize the ONNX textual representation."""

    supported_format = "onnxtext"
    file_extensions = frozenset({".onnxtext"})

    def serialize_proto(self, proto) -> bytes:
        # Render the proto as ONNX text and return its UTF-8 encoding.
        return onnx.printer.to_text(proto).encode("utf-8")

    def deserialize_proto(self, serialized: bytes, proto):
        # Dispatch on the concrete proto type requested by the caller.
        text = serialized.decode("utf-8")
        dispatch = (
            (onnx.ModelProto, onnx.parser.parse_model),
            (onnx.GraphProto, onnx.parser.parse_graph),
            (onnx.FunctionProto, onnx.parser.parse_function),
            (onnx.NodeProto, onnx.parser.parse_node),
        )
        for proto_type, parse in dispatch:
            if isinstance(proto, proto_type):
                return parse(text)
        raise ValueError(f"Unsupported proto type: {type(proto)}")
class TestRegistry(unittest.TestCase):
    """The serialization registry dispatches to registered serializers."""

    def setUp(self) -> None:
        self.serializer = _OnnxTestTextualSerializer()
        onnx.serialization.registry.register(self.serializer)

    def test_get_returns_the_registered_instance(self) -> None:
        self.assertIs(onnx.serialization.registry.get("onnxtext"), self.serializer)

    def test_get_raises_for_unsupported_format(self) -> None:
        with self.assertRaises(ValueError):
            onnx.serialization.registry.get("unsupported")

    def test_onnx_save_load_model_uses_the_custom_serializer(self) -> None:
        model = onnx.parser.parse_model(_TEST_MODEL)
        with tempfile.TemporaryDirectory() as tmpdir:
            model_path = os.path.join(tmpdir, "model.onnx")
            onnx.save_model(model, model_path, format="onnxtext")
            # The file on disk must be the textual rendering of the model.
            with open(model_path, encoding="utf-8") as stream:
                self.assertEqual(stream.read(), onnx.printer.to_text(model))
            round_tripped = onnx.load_model(model_path, format="onnxtext")
        self.assertEqual(
            model.SerializeToString(deterministic=True),
            round_tripped.SerializeToString(deterministic=True),
        )
class TestCustomSerializer(unittest.TestCase):
    """Round-trip a model through the textual serializer directly."""

    def test_serialize_deserialize_model(self) -> None:
        serializer = _OnnxTestTextualSerializer()
        original = onnx.parser.parse_model(_TEST_MODEL)
        round_tripped = serializer.deserialize_proto(
            serializer.serialize_proto(original), onnx.ModelProto()
        )
        self.assertEqual(
            original.SerializeToString(deterministic=True),
            round_tripped.SerializeToString(deterministic=True),
        )

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,199 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
import onnx.shape_inference
from onnx import ModelProto, TensorProto, TensorShapeProto, ValueInfoProto, helper
from onnx.helper import make_model, make_tensor_value_info
class TestSymbolicShape(unittest.TestCase):
def _assert_valueinfo_shape(
    self, onnx_model: ModelProto, value_infos: list[ValueInfoProto]
) -> None:
    """Assert onnx_model's inferred shapes match the expected value_infos.

    In the expected value_infos a dim_value of -1 stands for "any symbolic
    dimension" (the exact dim_param symbol is not compared).

    Fix: previously an expected ValueInfoProto with neither tensor_type nor
    sparse_tensor_type left ``expected_shape`` unbound and crashed with
    UnboundLocalError; fail with an explicit message instead.
    """
    for expected_vi in value_infos:
        shape = self._get_shape_from_name(onnx_model, expected_vi.name)
        assert shape is not None, f"{onnx_model}"
        if expected_vi.type.HasField("tensor_type"):
            expected_shape = expected_vi.type.tensor_type.shape
        elif expected_vi.type.HasField("sparse_tensor_type"):
            expected_shape = expected_vi.type.sparse_tensor_type.shape
        else:
            raise AssertionError(
                f"Expected value_info {expected_vi.name!r} has neither "
                "tensor_type nor sparse_tensor_type"
            )
        assert len(shape.dim) == len(expected_shape.dim), f"{onnx_model}"
        for dim_i, dim in enumerate(shape.dim):
            expected_dim = expected_shape.dim[dim_i]
            # -1 means it's a symbolic shape
            if expected_dim.dim_value == -1:
                # symbolic dimension must exist
                assert dim.dim_param, f"{onnx_model}"
            else:
                assert dim.dim_value == expected_dim.dim_value, f"{onnx_model}"
def _count_unique_dim_param_number(self, onnx_model: ModelProto) -> int:
"""Return the total number of unique symbolic shape"""
symbol_shape_set = set()
inputs = list(onnx_model.graph.input)
outputs = list(onnx_model.graph.output)
valueinfos = list(onnx_model.graph.value_info)
for v in inputs + outputs + valueinfos:
for dim in v.type.tensor_type.shape.dim:
if dim.dim_param:
symbol_shape_set.add(dim.dim_param)
return len(symbol_shape_set)
def _get_shape_from_name(
self, onnx_model: ModelProto, name: str
) -> TensorShapeProto | None:
"""Get shape from tensor_type or sparse_tensor_type according to given name"""
inputs = list(onnx_model.graph.input)
outputs = list(onnx_model.graph.output)
valueinfos = list(onnx_model.graph.value_info)
for v in inputs + outputs + valueinfos:
if v.name == name:
if v.type.HasField("tensor_type"):
return v.type.tensor_type.shape # type: ignore
if v.type.HasField("sparse_tensor_type"):
return v.type.sparse_tensor_type.shape # type: ignore
return None
def test_concat_enable_symbolic(self) -> None:
concat = helper.make_node(
"Concat", inputs=["A", "B"], outputs=["C"], name="Concat", axis=1
)
cast = onnx.helper.make_node(
"Cast", inputs=["C"], outputs=["output"], to=TensorProto.FLOAT
)
graph_def = helper.make_graph(
name="test_graph",
nodes=[concat, cast],
inputs=[
helper.make_tensor_value_info("A", TensorProto.FLOAT, [2, "A"]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [2, 3]),
],
outputs=[
helper.make_tensor_value_info("output", TensorProto.FLOAT, [2, None])
],
)
onnx_model = make_model(graph_def)
inferred_model = onnx.shape_inference.infer_shapes(onnx_model, strict_mode=True)
self._assert_valueinfo_shape(
inferred_model, [make_tensor_value_info("C", TensorProto.FLOAT, (2, -1))]
)
# the symbolic shape of C and output should be the same
assert self._get_shape_from_name(
inferred_model, "C"
) == self._get_shape_from_name(inferred_model, "output")
def test_two_symbolic_concat(self) -> None:
concat1 = helper.make_node(
"Concat", inputs=["A", "B"], outputs=["C"], name="Concat", axis=1
)
concat2 = helper.make_node(
"Concat", inputs=["C", "D"], outputs=["E"], name="Concat", axis=1
)
cast = onnx.helper.make_node(
"Cast", inputs=["E"], outputs=["output"], to=TensorProto.FLOAT
)
graph_def = helper.make_graph(
name="test_graph",
nodes=[concat1, concat2, cast],
inputs=[
helper.make_tensor_value_info("A", TensorProto.FLOAT, [2, "A"]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [2, 3]),
helper.make_tensor_value_info("D", TensorProto.FLOAT, [2, "D"]),
],
outputs=[
helper.make_tensor_value_info("output", TensorProto.FLOAT, [2, None])
],
)
onnx_model = make_model(graph_def)
inferred_model = onnx.shape_inference.infer_shapes(onnx_model, strict_mode=True)
self._assert_valueinfo_shape(
inferred_model,
[
make_tensor_value_info("C", TensorProto.FLOAT, (2, -1)),
make_tensor_value_info("E", TensorProto.FLOAT, (2, -1)),
],
)
# the symbolic shape of E and output should be the same
assert self._get_shape_from_name(
inferred_model, "E"
) == self._get_shape_from_name(inferred_model, "output")
def test_duplicate_symbolic_shape(self) -> None:
concat1 = helper.make_node(
"Concat", inputs=["A", "B"], outputs=["C"], name="Concat", axis=1
)
concat2 = helper.make_node(
"Concat", inputs=["C", "D"], outputs=["E"], name="Concat", axis=1
)
cast = onnx.helper.make_node(
"Cast", inputs=["E"], outputs=["output"], to=TensorProto.FLOAT
)
graph_def = helper.make_graph(
name="test_graph",
nodes=[concat1, concat2, cast],
inputs=[
helper.make_tensor_value_info("A", TensorProto.FLOAT, [2, "unk__0"]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [2, 3]),
helper.make_tensor_value_info("D", TensorProto.FLOAT, [2, "unk__1"]),
],
outputs=[
helper.make_tensor_value_info(
"output", TensorProto.FLOAT, [2, "unk__0"]
)
],
)
onnx_model = make_model(graph_def)
original_count = self._count_unique_dim_param_number(onnx_model)
inferred_model = onnx.shape_inference.infer_shapes(onnx_model, strict_mode=True)
inferred_count = self._count_unique_dim_param_number(inferred_model)
# to prevent duplicate so the inferred count will be count + 2
# new symbol 'unk__2' and 'unk__3' should be generated
# original: {'unk_0', 'unk__1'}
# inferred: {'unk_0', 'unk__1', 'unk__2', 'unk__3'}
assert inferred_count == original_count + 2, f"{inferred_model}{onnx_model}"
def test_unknown_shape(self) -> None:
concat = helper.make_node(
"Concat", inputs=["A", "B"], outputs=["C"], name="Concat", axis=1
)
cast = onnx.helper.make_node(
"Cast", inputs=["C"], outputs=["output"], to=TensorProto.FLOAT
)
graph_def = helper.make_graph(
name="test_graph",
nodes=[concat, cast],
inputs=[
helper.make_tensor_value_info(
"A", TensorProto.FLOAT, [3, None]
), # unknown shape
helper.make_tensor_value_info("B", TensorProto.FLOAT, [3, None]),
],
outputs=[
helper.make_tensor_value_info("output", TensorProto.FLOAT, [3, None])
],
)
onnx_model = make_model(graph_def)
inferred_model = onnx.shape_inference.infer_shapes(onnx_model, strict_mode=True)
self._assert_valueinfo_shape(
inferred_model, [make_tensor_value_info("C", TensorProto.FLOAT, (3, -1))]
)
# the symbolic shape of C and output should be the same
# ('unk__0', 'unk__1')
assert self._get_shape_from_name(
inferred_model, "C"
) == self._get_shape_from_name(inferred_model, "output")
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,599 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import platform
import unittest
from typing import Any
import numpy
from packaging.version import Version
import onnx.backend.base
import onnx.backend.test
import onnx.shape_inference
import onnx.version_converter
from onnx.backend.base import Device, DeviceType
try:
import onnxruntime as ort
ort_version = Version(ort.__version__)
except ImportError:
# onnxruntime is not installed, all tests are skipped.
ort: Any = None # type: ignore[no-redef]
ort_version: Any = None # type: ignore[no-redef]
# The following just executes a backend based on InferenceSession through the backend test
class InferenceSessionBackendRep(onnx.backend.base.BackendRep):
    """BackendRep adapter that forwards execution to an InferenceSession."""

    def __init__(self, session):
        self._session = session

    def run(self, inputs, **kwargs):
        """Run the session on *inputs* (ndarray, list of arrays, or feed dict)."""
        del kwargs  # Unused
        if isinstance(inputs, numpy.ndarray):
            inputs = [inputs]
        if isinstance(inputs, dict):
            feeds = inputs
        elif isinstance(inputs, list):
            names = [i.name for i in self._session.get_inputs()]
            shapes = [i.shape for i in self._session.get_inputs()]
            if len(inputs) == len(names):
                feeds = dict(zip(names, inputs))
            else:
                # Fewer arrays than model inputs: greedily assign each array to
                # the first remaining input whose declared shape matches.
                feeds = {}
                cursor = 0
                for name, shape in zip(names, shapes):
                    if shape == inputs[cursor].shape:
                        feeds[name] = inputs[cursor]
                        cursor += 1
                        if cursor >= len(inputs):
                            break
        else:
            raise TypeError(f"Unexpected input type {type(inputs)!r}.")
        return self._session.run(None, feeds)
def _create_inference_session(model: onnx.ModelProto, device: str):
    """Build an onnxruntime InferenceSession for *model* on *device*.

    Raises ValueError for an unknown device name and RuntimeError (with the
    model's textual form attached) when session creation fails.
    """
    providers_by_device = {
        "CPU": ("CPUExecutionProvider",),
        "CUDA": ("CUDAExecutionProvider",),
    }
    if device not in providers_by_device:
        raise ValueError(f"Unexpected device {device!r}.")
    try:
        return ort.InferenceSession(
            model.SerializeToString(), providers=providers_by_device[device]
        )
    except Exception as e:
        raise RuntimeError(
            f"Unable to create inference session. Model is:\n\n{onnx.printer.to_text(model)}"
        ) from e
class InferenceSessionBackend(onnx.backend.base.Backend):
    """Backend-test adapter built on top of onnxruntime."""

    @classmethod
    def supports_device(cls, device: str) -> bool:
        """Report whether onnxruntime ships a provider for *device*."""
        available = set(ort.get_available_providers())
        dev = Device(device)
        return (
            dev.type == DeviceType.CPU and "CPUExecutionProvider" in available
        ) or (dev.type == DeviceType.CUDA and "CUDAExecutionProvider" in available)

    @classmethod
    def prepare(
        cls, model: onnx.ModelProto, device: str = "CPU", **kwargs: Any
    ) -> InferenceSessionBackendRep:
        """Wrap *model* in an InferenceSessionBackendRep bound to *device*."""
        del kwargs  # Unused
        if not isinstance(model, (str, bytes, onnx.ModelProto)):
            raise TypeError(f"Unexpected type {type(model)} for model.")
        return InferenceSessionBackendRep(_create_inference_session(model, device))

    @classmethod
    def run_model(cls, model: onnx.ModelProto, inputs, device=None, **kwargs):
        """Delegate to the base implementation (prepare, then run)."""
        return super().run_model(model, inputs, device=device, **kwargs)

    @classmethod
    def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs):
        """Single-node execution is not supported by this backend."""
        raise NotImplementedError("Unable to run the model node by node.")
# Register the backend test suite only when onnxruntime is importable.
if ort is not None:
    backend_test = onnx.backend.test.BackendTest(InferenceSessionBackend, __name__)

    # Heavy model tests are excluded on constrained platforms.
    if platform.architecture()[0] == "32bit":
        backend_test.exclude("(test_vgg19|test_zfnet|test_bvlc_alexnet)")
    if platform.system() == "Windows":
        backend_test.exclude("test_sequence_model")

    # The following tests cannot pass because they consist in generating random numbers.
    backend_test.exclude("(test_bernoulli)")

    # The following tests are not supported by onnxruntime.
    backend_test.exclude(
        "("
        "test_adagrad"
        "|test_adam"
        "|test_add_uint8"
        "|bitshift_left_uint16"
        "|bitshift_right_uint16"
        "|cast_BFLOAT16_to_FLOAT"
        "|cast_FLOAT_to_BFLOAT16"
        "|castlike_BFLOAT16_to_FLOAT"
        "|castlike_FLOAT_to_BFLOAT16"
        "|clip_default_int8_min_expanded"
        "|clip_default_int8_max_expanded"
        "|div_uint8"
        "|gru_batchwise"  # Batchwise recurrent operations (layout == 1) are not supported.
        "|loop16_seq_none"  # The graph is missing type information needed to construct the ORT tensor.
        "|lstm_batchwise"  # Batchwise recurrent operations (layout == 1) are not supported.
        "|m(in|ax)_u?int(16|8)"
        "|momentum"
        "|mul_uint8"
        "|pow_types_float32_uint32"
        "|pow_types_float32_uint64"
        "|simple_rnn_batchwise"  # Batchwise recurrent operations (layout == 1) are not supported.
        "|sub_uint8"
        "|gradient_of_add"
        "|test_batchnorm_epsilon_training_mode"  # Training mode does not support BN opset 14 (or higher) yet.
        "|test_batchnorm_example_training_mode"  # Training mode does not support BN opset 14 (or higher) yet.
        "|_to_FLOAT8E4M3FN"  # No corresponding Numpy type for Tensor Type.
        "|_to_FLOAT8E5M2"  # No corresponding Numpy type for Tensor Type.
        "|cast_FLOAT8E"  # No corresponding Numpy type for Tensor Type.
        "|castlike_FLOAT8E"  # No corresponding Numpy type for Tensor Type.
        "|test_dequantizelinear_axis"  # y_scale must be a scalar or 1D tensor of size 1.
        "|test_dequantizelinear"  # No corresponding Numpy type for Tensor Type.
        "|test_quantizelinear_axis"  # y_scale must be a scalar or 1D tensor of size 1.
        "|test_quantizelinear"  # No corresponding Numpy type for Tensor Type.
        "|test_affine_grid_"  # new IR version 9 and opset version 20 not supported yet.
        "|test_quantizelinear_uint4"  # No corresponding Numpy type for Tensor Type.
        "|test_quantizelinear_int4"  # No corresponding Numpy type for Tensor Type.
        "|test_dequantizelinear_uint4"  # No corresponding Numpy type for Tensor Type.
        "|test_dequantizelinear_int4"  # No corresponding Numpy type for Tensor Type.
        "|test_cast_UINT4_to_FLOAT"  # No corresponding Numpy type for Tensor Type.
        "|test_cast_INT4_to_FLOAT"  # No corresponding Numpy type for Tensor Type.
        "|test_cast_UINT4_to_FLOAT16"  # No corresponding Numpy type for Tensor Type.
        "|test_cast_INT4_to_FLOAT16"  # No corresponding Numpy type for Tensor Type.
        "|test_maxpool_2d_ceil_output_size_reduce_by_one"  # TODO: remove after https://github.com/microsoft/onnxruntime/pull/18377 in Ort release.
        ")"
    )
# Exclude all tests that require IR10 until onnxruntime aligns
# TODO: Unwaive tests once onnxruntime supports Opset21/IR10 https://github.com/onnx/onnx/issues/5840
backend_test.exclude(
"("
"test_cast_"
"|test_castlike_"
"|test_constant"
"|test_edge_pad_cpu"
"|test_flatten_"
"|test_identity"
"|test_reflect_pad"
"|test_reshape_"
"|test_shape_"
"|test_size_"
"|test_squeeze_"
"|test_transpose_"
"|test_unsqueeze_"
"|test_wrap_pad_"
"|test_acos_cpu"
"|test_acos_example_cpu"
"|test_acosh_cpu"
"|test_acosh_example_cpu"
"|test_asin_cpu"
"|test_asin_example_cpu"
"|test_asinh_cpu"
"|test_asinh_example_cpu"
"|test_atan_cpu"
"|test_atan_example_cpu"
"|test_atanh_cpu"
"|test_atanh_example_cpu"
"|test_averagepool_1d_default_cpu"
"|test_averagepool_2d_ceil_cpu"
"|test_averagepool_2d_default_cpu"
"|test_averagepool_2d_dilations_cpu"
"|test_averagepool_2d_pads_count_include_pad_cpu"
"|test_averagepool_2d_pads_cpu"
"|test_averagepool_2d_precomputed_pads_count_include_pad_cpu"
"|test_averagepool_2d_precomputed_pads_cpu"
"|test_averagepool_2d_precomputed_same_upper_cpu"
"|test_averagepool_2d_precomputed_strides_cpu"
"|test_averagepool_2d_same_lower_cpu"
"|test_averagepool_2d_same_upper_cpu"
"|test_averagepool_2d_strides_cpu"
"|test_averagepool_3d_default_cpu"
"|test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_False_cpu"
"|test_averagepool_3d_dilations_large_count_include_pad_is_0_ceil_mode_is_True_cpu"
"|test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_False_cpu"
"|test_averagepool_3d_dilations_large_count_include_pad_is_1_ceil_mode_is_True_cpu"
"|test_averagepool_3d_dilations_small_cpu"
"|test_basic_conv_with_padding_cpu"
"|test_basic_conv_without_padding_cpu"
"|test_conv_with_autopad_same_cpu"
"|test_conv_with_strides_and_asymmetric_padding_cpu"
"|test_conv_with_strides_no_padding_cpu"
"|test_conv_with_strides_padding_cpu"
"|test_convtranspose_1d_cpu"
"|test_convtranspose_3d_cpu"
"|test_convtranspose_autopad_same_cpu"
"|test_convtranspose_cpu"
"|test_convtranspose_dilations_cpu"
"|test_convtranspose_kernel_shape_cpu"
"|test_convtranspose_output_shape_cpu"
"|test_convtranspose_pad_cpu"
"|test_convtranspose_pads_cpu"
"|test_cos_cpu"
"|test_cos_example_cpu"
"|test_cosh_cpu"
"|test_cosh_example_cpu"
"|test_det_2d_cpu"
"|test_det_nd_cpu"
"|test_dropout_default_cpu"
"|test_dropout_default_mask_cpu"
"|test_dropout_default_mask_ratio_cpu"
"|test_dropout_default_ratio_cpu"
"|test_elu_cpu"
"|test_elu_default_cpu"
"|test_elu_example_cpu"
"|test_eyelike_populate_off_main_diagonal_cpu"
"|test_eyelike_with_dtype_cpu"
"|test_eyelike_without_dtype_cpu"
"|test_globalaveragepool_cpu"
"|test_globalaveragepool_precomputed_cpu"
"|test_gridsample_aligncorners_true_cpu"
"|test_gridsample_bicubic_align_corners_0_additional_1_cpu"
"|test_gridsample_bicubic_align_corners_1_additional_1_cpu"
"|test_gridsample_bicubic_cpu"
"|test_gridsample_bilinear_align_corners_0_additional_1_cpu"
"|test_gridsample_bilinear_align_corners_1_additional_1_cpu"
"|test_gridsample_bilinear_cpu"
"|test_gridsample_border_padding_cpu"
"|test_gridsample_cpu"
"|test_gridsample_nearest_align_corners_0_additional_1_cpu"
"|test_gridsample_nearest_align_corners_1_additional_1_cpu"
"|test_gridsample_nearest_cpu"
"|test_gridsample_reflection_padding_cpu"
"|test_gridsample_volumetric_bilinear_align_corners_0_cpu"
"|test_gridsample_volumetric_bilinear_align_corners_1_cpu"
"|test_gridsample_volumetric_nearest_align_corners_0_cpu"
"|test_gridsample_volumetric_nearest_align_corners_1_cpu"
"|test_gridsample_zeros_padding_cpu"
"|test_gru_defaults_cpu"
"|test_gru_seq_length_cpu"
"|test_gru_with_initial_bias_cpu"
"|test_hardsigmoid_cpu"
"|test_hardsigmoid_default_cpu"
"|test_hardsigmoid_example_cpu"
"|test_hardswish_cpu"
"|test_hardswish_expanded_cpu"
"|test_lppool_1d_default_cpu"
"|test_lppool_2d_default_cpu"
"|test_lppool_2d_dilations_cpu"
"|test_lppool_2d_pads_cpu"
"|test_lppool_2d_same_lower_cpu"
"|test_lppool_2d_same_upper_cpu"
"|test_lppool_2d_strides_cpu"
"|test_lppool_3d_default_cpu"
"|test_lstm_defaults_cpu"
"|test_lstm_with_initial_bias_cpu"
"|test_lstm_with_peepholes_cpu"
"|test_maxpool_1d_default_cpu"
"|test_maxpool_2d_ceil_cpu"
"|test_maxpool_2d_default_cpu"
"|test_maxpool_2d_dilations_cpu"
"|test_maxpool_2d_pads_cpu"
"|test_maxpool_2d_precomputed_pads_cpu"
"|test_maxpool_2d_precomputed_same_upper_cpu"
"|test_maxpool_2d_precomputed_strides_cpu"
"|test_maxpool_2d_same_lower_cpu"
"|test_maxpool_2d_same_upper_cpu"
"|test_maxpool_2d_strides_cpu"
"|test_maxpool_2d_uint8_cpu"
"|test_maxpool_3d_default_cpu"
"|test_maxpool_3d_dilations_cpu"
"|test_maxpool_3d_dilations_use_ref_impl_cpu"
"|test_maxpool_3d_dilations_use_ref_impl_large_cpu"
"|test_maxpool_with_argmax_2d_precomputed_pads_cpu"
"|test_maxpool_with_argmax_2d_precomputed_strides_cpu"
"|test_maxunpool_export_without_output_shape_cpu"
"|test_mish_cpu"
"|test_mish_expanded_cpu"
"|test_nllloss_NC_cpu"
"|test_nllloss_NC_expanded_cpu"
"|test_nllloss_NCd1_cpu"
"|test_nllloss_NCd1_expanded_cpu"
"|test_nllloss_NCd1_ii_cpu"
"|test_nllloss_NCd1_ii_expanded_cpu"
"|test_nllloss_NCd1_mean_weight_negative_ii_cpu"
"|test_nllloss_NCd1_mean_weight_negative_ii_expanded_cpu"
"|test_nllloss_NCd1_weight_cpu"
"|test_nllloss_NCd1_weight_expanded_cpu"
"|test_nllloss_NCd1_weight_ii_cpu"
"|test_nllloss_NCd1_weight_ii_expanded_cpu"
"|test_nllloss_NCd1d2_cpu"
"|test_nllloss_NCd1d2_expanded_cpu"
"|test_nllloss_NCd1d2_no_weight_reduction_mean_ii_cpu"
"|test_nllloss_NCd1d2_no_weight_reduction_mean_ii_expanded_cpu"
"|test_nllloss_NCd1d2_reduction_mean_cpu"
"|test_nllloss_NCd1d2_reduction_mean_expanded_cpu"
"|test_nllloss_NCd1d2_reduction_sum_cpu"
"|test_nllloss_NCd1d2_reduction_sum_expanded_cpu"
"|test_nllloss_NCd1d2_with_weight_cpu"
"|test_nllloss_NCd1d2_with_weight_expanded_cpu"
"|test_nllloss_NCd1d2_with_weight_reduction_mean_cpu"
"|test_nllloss_NCd1d2_with_weight_reduction_mean_expanded_cpu"
"|test_nllloss_NCd1d2_with_weight_reduction_sum_cpu"
"|test_nllloss_NCd1d2_with_weight_reduction_sum_expanded_cpu"
"|test_nllloss_NCd1d2_with_weight_reduction_sum_ii_cpu"
"|test_nllloss_NCd1d2_with_weight_reduction_sum_ii_expanded_cpu"
"|test_nllloss_NCd1d2d3_none_no_weight_negative_ii_cpu"
"|test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded_cpu"
"|test_nllloss_NCd1d2d3_sum_weight_high_ii_cpu"
"|test_nllloss_NCd1d2d3_sum_weight_high_ii_expanded_cpu"
"|test_nllloss_NCd1d2d3d4d5_mean_weight_cpu"
"|test_nllloss_NCd1d2d3d4d5_mean_weight_expanded_cpu"
"|test_nllloss_NCd1d2d3d4d5_none_no_weight_cpu"
"|test_nllloss_NCd1d2d3d4d5_none_no_weight_expanded_cpu"
"|test_rnn_seq_length_cpu"
"|test_roialign_aligned_false_cpu"
"|test_roialign_aligned_true_cpu"
"|test_roialign_mode_max_cpu"
"|test_round_cpu"
"|test_selu_cpu"
"|test_selu_default_cpu"
"|test_selu_example_cpu"
"|test_simple_rnn_defaults_cpu"
"|test_simple_rnn_with_initial_bias_cpu"
"|test_sin_cpu"
"|test_sin_example_cpu"
"|test_sinh_cpu"
"|test_sinh_example_cpu"
"|test_softplus_cpu"
"|test_softplus_example_cpu"
"|test_softsign_cpu"
"|test_softsign_example_cpu"
"|test_tan_cpu"
"|test_tan_example_cpu"
"|test_thresholdedrelu_cpu"
"|test_thresholdedrelu_default_cpu"
"|test_thresholdedrelu_example_cpu"
"|test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu"
"|test_resize_downsample_scales_cubic_antialias_cpu"
"|test_resize_downsample_scales_cubic_cpu"
"|test_resize_downsample_scales_linear_antialias_cpu"
"|test_resize_downsample_scales_linear_cpu"
"|test_resize_downsample_scales_linear_half_pixel_symmetric_cpu"
"|test_resize_downsample_scales_nearest_cpu"
"|test_resize_downsample_sizes_cubic_antialias_cpu"
"|test_resize_downsample_sizes_cubic_cpu"
"|test_resize_downsample_sizes_linear_antialias_cpu"
"|test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu"
"|test_resize_downsample_sizes_nearest_cpu"
"|test_resize_downsample_sizes_nearest_not_larger_cpu"
"|test_resize_downsample_sizes_nearest_not_smaller_cpu"
"|test_resize_tf_crop_and_resize_axes_2_3_cpu"
"|test_resize_tf_crop_and_resize_axes_3_2_cpu"
"|test_resize_tf_crop_and_resize_cpu"
"|test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu"
"|test_resize_upsample_scales_cubic_align_corners_cpu"
"|test_resize_upsample_scales_cubic_asymmetric_cpu"
"|test_resize_upsample_scales_cubic_cpu"
"|test_resize_upsample_scales_linear_align_corners_cpu"
"|test_resize_upsample_scales_linear_cpu"
"|test_resize_upsample_scales_linear_half_pixel_symmetric_cpu"
"|test_resize_upsample_scales_nearest_axes_2_3_cpu"
"|test_resize_upsample_scales_nearest_axes_3_2_cpu"
"|test_resize_upsample_scales_nearest_cpu"
"|test_resize_upsample_sizes_cubic_cpu"
"|test_resize_upsample_sizes_nearest_axes_2_3_cpu"
"|test_resize_upsample_sizes_nearest_axes_3_2_cpu"
"|test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu"
"|test_resize_upsample_sizes_nearest_cpu"
"|test_resize_upsample_sizes_nearest_floor_align_corners_cpu"
"|test_resize_upsample_sizes_nearest_not_larger_cpu"
"|test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu"
"|test_qlinearmatmul_2D_uint8_float32_cuda"
"|test_qlinearmatmul_2D_int8_float32_cpu"
"|test_image_decoder_decode_jpeg_rgb_cpu"
"|test_basic_deform_conv_without_padding_cuda"
"|test_qlinearmatmul_3D_int8_float16_cuda"
"|test_image_decoder_decode_bmp_rgb_cuda"
"|test_qlinearmatmul_2D_uint8_float16_cpu"
"|test_image_decoder_decode_jpeg2k_rgb_cuda"
"|test_image_decoder_decode_jpeg_bgr_cuda"
"|test_qlinearmatmul_3D_uint8_float32_cpu"
"|test_qlinearmatmul_3D_uint8_float16_cuda"
"|test_deform_conv_with_mask_bias_cpu"
"|test_qlinearmatmul_2D_int8_float16_cuda"
"|test_image_decoder_decode_jpeg_grayscale_cpu"
"|test_basic_deform_conv_without_padding_cpu"
"|test_qlinearmatmul_3D_int8_float32_cuda"
"|test_qlinearmatmul_3D_int8_float16_cpu"
"|test_qlinearmatmul_2D_int8_float32_cuda"
"|test_deform_conv_with_mask_bias_cuda"
"|test_image_decoder_decode_tiff_rgb_cuda"
"|test_image_decoder_decode_jpeg2k_rgb_cpu"
"|test_image_decoder_decode_jpeg_rgb_cuda"
"|test_image_decoder_decode_jpeg_grayscale_cuda"
"|test_qlinearmatmul_3D_uint8_float32_cuda"
"|test_image_decoder_decode_png_rgb_cpu"
"|test_image_decoder_decode_png_rgb_cuda"
"|test_image_decoder_decode_bmp_rgb_cpu"
"|test_qlinearmatmul_3D_uint8_float16_cpu"
"|test_deform_conv_with_multiple_offset_groups_cuda"
"|test_image_decoder_decode_webp_rgb_cpu"
"|test_basic_deform_conv_with_padding_cpu"
"|test_qlinearmatmul_2D_uint8_float16_cuda"
"|test_image_decoder_decode_webp_rgb_cuda"
"|test_basic_deform_conv_with_padding_cuda"
"|test_image_decoder_decode_pnm_rgb_cpu"
"|test_qlinearmatmul_3D_int8_float32_cpu"
"|test_image_decoder_decode_jpeg_bgr_cpu"
"|test_qlinearmatmul_2D_int8_float16_cpu"
"|test_image_decoder_decode_pnm_rgb_cuda"
"|test_deform_conv_with_multiple_offset_groups_cpu"
"|test_qlinearmatmul_2D_uint8_float32_cpu"
"|test_image_decoder_decode_tiff_rgb_cpu"
"|test_globalmaxpool_cpu"
"|test_globalmaxpool_precomputed_cpu"
"|test_instancenorm_example_cpu"
"|test_instancenorm_epsilon_cpu"
")"
)
    # The following tests fail due to small discrepancies.
    backend_test.exclude("(cast_FLOAT_to_STRING|castlike_FLOAT_to_STRING|stft)")

    # The following tests fail due to huge discrepancies.
    backend_test.exclude(
        "("
        "resize_downsample_scales_cubic_align_corners"
        "|resize_downsample_scales_linear_align_corners"
        "|training_dropout"
        ")"
    )

    # The following tests fail due to a bug in onnxruntime in handling reduction
    # ops that perform reduction over an empty set of values.
    backend_test.exclude(
        "("
        "test_reduce_sum_empty_set"
        "|test_reduce_prod_empty_set"
        "|test_reduce_min_empty_set"
        "|test_reduce_max_empty_set"
        "|test_reduce_sum_square_empty_set"
        "|test_reduce_log_sum_empty_set"
        "|test_reduce_log_sum_exp_empty_set"
        "|test_reduce_l1_empty_set"
        "|test_reduce_l2_empty_set"
        ")"
    )

    # The following tests fail for no obvious reason.
    backend_test.exclude(
        "("
        "maxunpool_export_with_output_shape"  # not the same expected output
        "|softplus_example_expanded"  # Could not find an implementation for Exp(1) node with name ''
        "|softplus_expanded"  # Could not find an implementation for Exp(1) node with name ''
        "|AvgPool[1-3]d"  # Could not find an implementation for AveragePool(1) node with name ''
        "|BatchNorm1d_3d_input_eval"  # Could not find an implementation for BatchNormalization(6) node with name ''
        "|BatchNorm[2-3]d_eval"  # Could not find an implementation for BatchNormalization(6) node with name ''
        "|GLU"  # Could not find an implementation for Mul(6) node with name ''
        "|Linear"  # Could not find an implementation for Gemm(6) node with name ''
        "|PReLU"  # Could not find an implementation for PRelu(6) node with name ''
        "|PoissonNLL"  # Could not find an implementation for Mul(6) node with name ''
        "|Softsign"  # Could not find an implementation for Gemm(6) node with name ''
        "|operator_add_broadcast"  # Could not find an implementation for Gemm(6) node with name ''
        "|operator_add_size1"  # Could not find an implementation for Gemm(6) node with name ''
        "|operator_addconstant"  # Could not find an implementation for Gemm(6) node with name ''
        "|operator_addmm"  # Could not find an implementation for Gemm(6) node with name ''
        "|operator_basic"  # Could not find an implementation for Add(6) node with name ''
        "|operator_mm"  # Could not find an implementation for Gemm(6) node with name ''
        "|operator_non_float_params"  # Could not find an implementation for Add(6) node with name ''
        "|operator_params"  # Could not find an implementation for Add(6) node with name ''
        "|operator_pow"  # Could not find an implementation for Pow(1) node with name ''
        ")"
    )

    # The following tests are new with opset 19 and 20, or ai.onnx.ml 4
    if ort_version is not None and ort_version < Version("1.16"):
        backend_test.exclude(
            "("
            "averagepool"
            "|_pad_"
            "|_resize_"
            "|_size_"
            "|cast"
            "|castlike"
            "|equal_string_broadcast"
            "|equal_string"
            "|equal"
            "|half_pixel_symmetric"
            "|identity"
            "|reshape"
            ")"
        )
    if ort_version is not None and ort_version < Version("1.17"):
        backend_test.exclude(
            "("
            "deform_conv"
            "|dequantizelinear_uint16"
            "|dequantizelinear_int16"
            "|quantizelinear_uint16"
            "|quantizelinear_int16"
            "|dft"
            "|gelu"
            "|gridsample"
            "|group_normalization"
            "|identity_opt"
            "|image_decoder"
            "|isinf_float16"
            "|label_encoder"
            "|optional_get_element_optional_sequence"
            "|qlinearmatmul_2D_int8"
            "|qlinearmatmul_2D_uint8_float16"
            "|qlinearmatmul_3D_int8"
            "|qlinearmatmul_3D_uint8_float16"
            "|reduce_max_bool_inputs"
            "|reduce_min_bool_inputs"
            "|regex_full_match"
            "|string_concat"
            "|string_split"
            "|constantofshape_float_ones"
            "|constantofshape_int_shape_zero"
            "|constantofshape_int_zeros"
            "|isinf"
            "|isinf_negative"
            "|isinf_positive"
            "|isnan"
            "|isnan_float16"
            "|qlinearmatmul_2D_uint8_float32"
            "|qlinearmatmul_3D_uint8_float32"
            ")"
        )
    if ort_version is not None and ort_version < Version("1.18"):
        # when adding new tests to the list, please add a comment with the reason for exclusion
        # for tests that "not supported by onnxruntime 1.17", it will be solved in the next
        # onnxruntime release with ONNX 1.16.0 integrated. The work is covered in ONNX integration procedure.
        backend_test.exclude(
            "("
            "deform_conv"  # deform_conv is not supported in onnxruntime
            "|group_normalization"  # new/updated test cases with opset and/or IR version not supported by onnxruntime 1.17
            "|identity_opt"  # fixed in ort 1.18 (https://github.com/microsoft/onnxruntime/pull/19273)
            "|image_decoder"  # image_decoder is not supported in onnxruntime
            "|optional_get_element_optional_sequence"  # fixed in ort 1.18 (https://github.com/microsoft/onnxruntime/pull/19273)
            "|qlinearmatmul_2D_int8"  # new/updated test cases with opset and/or IR version not supported by onnxruntime 1.17
            "|qlinearmatmul_2D_uint8_float16"  # new/updated test cases with opset and/or IR version not supported by onnxruntime 1.17
            "|qlinearmatmul_3D_int8"  # new/updated test cases with opset and/or IR version not supported by onnxruntime 1.17
            "|qlinearmatmul_3D_uint8_float16"  # new/updated test cases with opset and/or IR version not supported by onnxruntime 1.17
            "|qlinearmatmul_2D_uint8_float32"  # new/updated test cases with opset and/or IR version not supported by onnxruntime 1.17
            "|qlinearmatmul_3D_uint8_float32"  # new/updated test cases with opset and/or IR version not supported by onnxruntime 1.17
            "|tree_ensemble"  # tree_ensemble not yet implemented in ort
            ")"
        )
    if ort_version is not None and ort_version < Version("1.20"):
        backend_test.exclude(
            "("
            "tree_ensemble_set_membership"
            "|tree_ensemble_single_tree"
            "|convtranspose_group_2"
            "|dft"
            ")"
        )

    # Import all test cases at global scope to make them visible to python.unittest
    globals().update(backend_test.test_cases)

# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,243 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import os
import platform
import sys
import unittest
from typing import Any
import numpy
import version_utils
import onnx.backend.base
import onnx.backend.test
import onnx.shape_inference
import onnx.version_converter
from onnx import ModelProto
from onnx.backend.base import Device, DeviceType
from onnx.reference import ReferenceEvaluator
# The following just executes a backend based on ReferenceEvaluator through the backend test
class ReferenceEvaluatorBackendRep(onnx.backend.base.BackendRep):
    """BackendRep adapter that runs models through ReferenceEvaluator."""

    def __init__(self, session):
        self._session = session

    def run(self, inputs, **kwargs):  # noqa: ARG002
        """Execute the wrapped evaluator on *inputs* and return its outputs."""
        if isinstance(inputs, numpy.ndarray):
            inputs = [inputs]
        if isinstance(inputs, dict):
            feeds = inputs
        elif isinstance(inputs, list):
            if len(inputs) == len(self._session.input_names):
                feeds = dict(zip(self._session.input_names, inputs))
            else:
                # Fewer arrays than model inputs: greedily assign each array to
                # the first remaining input whose declared shape matches.
                feeds = {}
                cursor = 0
                for name, ttype in zip(
                    self._session.input_names, self._session.input_types
                ):
                    declared = tuple(d.dim_value for d in ttype.tensor_type.shape.dim)
                    if declared == inputs[cursor].shape:
                        feeds[name] = inputs[cursor]
                        cursor += 1
                        if cursor >= len(inputs):
                            break
        else:
            raise TypeError(f"Unexpected input type {type(inputs)!r}.")
        return self._session.run(None, feeds)
class ReferenceEvaluatorBackend(onnx.backend.base.Backend):
    """Backend-test adapter executing models with the ONNX ReferenceEvaluator."""

    @classmethod
    def is_opset_supported(cls, model):  # noqa: ARG003
        """Accept every opset; the evaluator decides at run time."""
        return True, ""

    @classmethod
    def supports_device(cls, device: str) -> bool:
        """Only CPU execution is available for the reference evaluator."""
        return Device(device).type == DeviceType.CPU  # type: ignore[no-any-return]

    @classmethod
    def create_inference_session(cls, model):
        """Build the ReferenceEvaluator used to execute *model*."""
        return ReferenceEvaluator(model)

    @classmethod
    def prepare(
        cls, model: Any, device: str = "CPU", **kwargs: Any
    ) -> ReferenceEvaluatorBackendRep:
        """Normalize *model* (evaluator, path, bytes, or proto) into a rep."""
        if isinstance(model, ReferenceEvaluator):
            return ReferenceEvaluatorBackendRep(model)
        if isinstance(model, (str, bytes, ModelProto)):
            return cls.prepare(cls.create_inference_session(model), device, **kwargs)
        raise TypeError(f"Unexpected type {type(model)} for model.")

    @classmethod
    def run_model(cls, model, inputs, device=None, **kwargs):
        """Prepare *model* and run it on *inputs*."""
        rep = cls.prepare(model, device, **kwargs)
        return rep.run(inputs, **kwargs)

    @classmethod
    def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs):
        """Single-node execution is not supported."""
        raise NotImplementedError("Unable to run the model node by node.")
# DFT reference results show larger platform-dependent rounding off Linux,
# so a looser absolute tolerance is used there.
dft_atol = 1e-3 if sys.platform != "linux" else 1e-6
backend_test = onnx.backend.test.BackendTest(
    ReferenceEvaluatorBackend,
    __name__,
    test_kwargs={
        "test_dft": {"atol": dft_atol},
        "test_dft_axis": {"atol": dft_atol},
        "test_dft_axis_opset19": {"atol": dft_atol},
        "test_dft_inverse": {"atol": dft_atol},
        "test_dft_inverse_opset19": {"atol": dft_atol},
        "test_dft_opset19": {"atol": dft_atol},
    },
)
# Large vision models are too heavy for AppVeyor and 32-bit hosts.
if os.getenv("APPVEYOR"):
    backend_test.exclude("(test_vgg19|test_zfnet)")
if platform.architecture()[0] == "32bit":
    backend_test.exclude("(test_vgg19|test_zfnet|test_bvlc_alexnet)")
if platform.system() == "Windows":
    backend_test.exclude("test_sequence_model")
# The following tests are not supported.
backend_test.exclude(
    "(test_gradient"
    "|test_if_opt"
    "|test_loop16_seq_none"
    "|test_range_float_type_positive_delta_expanded"
    "|test_range_int32_type_negative_delta_expanded"
    "|test_scan_sum)"
)
# The following tests are about deprecated operators.
backend_test.exclude("(test_scatter_with_axis|test_scatter_without)")
# The following tests are using types not supported by numpy.
# They could be if method to_array is extended to support custom
# types the same as the reference implementation does
# (see onnx.reference.op_run.to_array_extended).
backend_test.exclude(
    "(test_cast_FLOAT_to_FLOAT8"
    "|test_cast_FLOAT16_to_FLOAT8"
    "|test_castlike_FLOAT_to_FLOAT8"
    "|test_castlike_FLOAT16_to_FLOAT8"
    "|test_cast_FLOAT_to_UINT4"
    "|test_cast_FLOAT16_to_UINT4"
    "|test_cast_FLOAT_to_INT4"
    "|test_cast_FLOAT16_to_INT4"
    "|test_cast_no_saturate_FLOAT_to_FLOAT8"
    "|test_cast_no_saturate_FLOAT16_to_FLOAT8"
    "|test_cast_BFLOAT16_to_FLOAT"
    "|test_castlike_BFLOAT16_to_FLOAT"
    "|test_quantizelinear_e4m3"
    "|test_quantizelinear_e5m2"
    "|test_quantizelinear_uint4"
    "|test_quantizelinear_int4"
    ")"
)
# The following tests are using types not supported by NumPy.
# They could be if method to_array is extended to support custom
# types the same as the reference implementation does
# (see onnx.reference.op_run.to_array_extended).
backend_test.exclude(
    "(test_cast_FLOAT_to_BFLOAT16"
    "|test_castlike_FLOAT_to_BFLOAT16"
    "|test_castlike_FLOAT_to_BFLOAT16_expanded"
    ")"
)
# The following tests are too slow with the reference implementation (Conv).
backend_test.exclude(
    "(test_bvlc_alexnet"
    "|test_densenet121"
    "|test_inception_v1"
    "|test_inception_v2"
    "|test_resnet50"
    "|test_shufflenet"
    "|test_squeezenet"
    "|test_vgg19"
    "|test_zfnet512)"
)
# The following tests cannot pass because they consists in generating random number.
backend_test.exclude("(test_bernoulli)")
# The following tests fail due to a bug in the backend test comparison.
backend_test.exclude(
    "(test_cast_FLOAT_to_STRING|test_castlike_FLOAT_to_STRING|test_strnorm)"
)
# The following tests fail due to a shape mismatch.
backend_test.exclude(
    "(test_center_crop_pad_crop_axes_hwc_expanded"
    "|test_lppool_2d_dilations"
    "|test_averagepool_2d_dilations)"
)
# The following tests fail due to a type mismatch.
backend_test.exclude("(test_eyelike_without_dtype)")
# The following tests fail due to discrepancies (small but still higher than 1e-7).
backend_test.exclude("test_adam_multiple")  # 1e-2
# Currently google-re2/Pillow is not supported on Win32 and is required for the reference implementation of RegexFullMatch.
if sys.platform == "win32":
    backend_test.exclude("test_regex_full_match_basic_cpu")
    backend_test.exclude("test_regex_full_match_email_domain_cpu")
    backend_test.exclude("test_regex_full_match_empty_cpu")
    backend_test.exclude("test_image_decoder_decode_")
# NOTE(review): version_info <= (3, 10) is False for any 3.10.x release,
# because the 5-field version_info tuple compares greater than (3, 10);
# confirm whether the intended cutoff is < (3, 11).
if sys.version_info <= (3, 10):
    # AttributeError: module 'numpy.typing' has no attribute 'NDArray'
    # NOTE(review): this pattern is also excluded above under win32; the
    # duplication is harmless but could be consolidated.
    backend_test.exclude("test_image_decoder_decode_")
if sys.platform == "darwin":
    # FIXME: https://github.com/onnx/onnx/issues/5792
    backend_test.exclude("test_qlinearmatmul_3D_int8_float16_cpu")
    backend_test.exclude("test_qlinearmatmul_3D_int8_float32_cpu")
# op_dft and op_stft requires numpy >= 1.21.5
if version_utils.numpy_older_than("1.21.5"):
    backend_test.exclude("test_stft")
    backend_test.exclude("test_stft_with_window")
    backend_test.exclude("test_stft_cpu")
    backend_test.exclude("test_dft")
    backend_test.exclude("test_dft_axis")
    backend_test.exclude("test_dft_inverse")
    backend_test.exclude("test_dft_opset19")
    backend_test.exclude("test_dft_axis_opset19")
    backend_test.exclude("test_dft_inverse_opset19")
# Older Pillow cannot decode the webp/jpeg2k fixtures used by these tests.
if version_utils.pillow_older_than("10.0"):
    backend_test.exclude("test_image_decoder_decode_webp_rgb")
    backend_test.exclude("test_image_decoder_decode_jpeg2k_rgb")
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test.test_cases)
if __name__ == "__main__":
    # Run the whole suite and print a one-line summary of the outcome.
    res = unittest.main(verbosity=2, exit=False)
    tests_run = res.result.testsRun
    errors = len(res.result.errors)
    skipped = len(res.result.skipped)
    unexpected_successes = len(res.result.unexpectedSuccesses)
    expected_failures = len(res.result.expectedFailures)
    print("---------------------------------")
    print(
        f"tests_run={tests_run} errors={errors} skipped={skipped} "
        f"unexpected_successes={unexpected_successes} "
        f"expected_failures={expected_failures}"
    )

View File

@ -0,0 +1,129 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import itertools
import os
import platform
import unittest
from typing import Any, Sequence
import numpy
import onnx.backend.base
import onnx.backend.test
import onnx.shape_inference
import onnx.version_converter
from onnx import ModelProto, NodeProto, TensorProto
from onnx.backend.base import Device, DeviceType
from onnx.backend.test.runner import BackendIsNotSupposedToImplementIt
# The following just executes the fake backend through the backend test
# infrastructure. Since we don't have full reference implementation of all ops
# in ONNX repo, it's impossible to produce the proper results. However, we can
# run 'checker' (that's what base Backend class does) to verify that all tests
# fed are actually well-formed ONNX models.
#
# If everything is fine, all the tests would be marked as "skipped".
#
# We don't enable report in this test because the report collection logic itself
# fails when models are mal-formed.
class DummyBackend(onnx.backend.base.Backend):
    """Backend that only validates models and then refuses to execute them."""

    @classmethod
    def prepare(
        cls, model: ModelProto, device: str = "CPU", **kwargs: Any
    ) -> onnx.backend.base.BackendRep | None:
        """Check and shape-infer *model*, then signal execution is unsupported."""
        super().prepare(model, device, **kwargs)
        onnx.checker.check_model(model)
        # Strict, typed shape inference by default; callers may override.
        inference_kwargs = {"check_type": True, "strict_mode": True, **kwargs}
        inferred = onnx.shape_inference.infer_shapes(model, **inference_kwargs)
        value_infos = {
            vi.name: vi
            for vi in itertools.chain(
                inferred.graph.value_info, inferred.graph.output
            )
        }
        if do_enforce_test_coverage_safelist(inferred):
            # Safelisted models must have a fully specified static shape for
            # every node output.
            for node in inferred.graph.node:
                for index, output in enumerate(node.output):
                    # Dropout's optional trailing outputs are exempt.
                    if node.op_type == "Dropout" and index != 0:
                        continue
                    assert output in value_infos
                    tensor_type = value_infos[output].type.tensor_type
                    assert tensor_type.elem_type != TensorProto.UNDEFINED
                    for dim in tensor_type.shape.dim:
                        assert dim.WhichOneof("value") == "dim_value"
        raise BackendIsNotSupposedToImplementIt(
            "This is the dummy backend test that doesn't verify the results but does run the checker"
        )

    @classmethod
    def run_node(
        cls,
        node: NodeProto,
        inputs: Any,
        device: str = "CPU",
        outputs_info: Sequence[tuple[numpy.dtype, tuple[int, ...]]] | None = None,
        **kwargs: Any,  # noqa: ARG003
    ) -> tuple[Any, ...] | None:
        """Per-node execution is likewise checker-only."""
        super().run_node(node, inputs, device=device, outputs_info=outputs_info)
        raise BackendIsNotSupposedToImplementIt(
            "This is the dummy backend test that doesn't verify the results but does run the checker"
        )

    @classmethod
    def supports_device(cls, device: str) -> bool:
        """Only CPU devices are claimed as supported."""
        return Device(device).type == DeviceType.CPU
# Models whose backend tests are required to carry complete, fully static
# shape information (see DummyBackend.prepare).
test_coverage_safelist = {
    "SingleRelu",
    "bvlc_alexnet",
    "densenet121",
    "inception_v1",
    "inception_v2",
    "resnet50",
    "shufflenet",
    "squeezenet_old",
    "vgg19",
    "zfnet",
}


def do_enforce_test_coverage_safelist(model: ModelProto) -> bool:
    """Return True when full shape-coverage checks apply to *model*.

    A model qualifies when its graph name is safelisted and it contains no
    recurrent operators (whose output shapes are not fully static).
    """
    if model.graph.name not in test_coverage_safelist:
        return False
    recurrent_ops = {"RNN", "LSTM", "GRU"}
    return not any(node.op_type in recurrent_ops for node in model.graph.node)
# Per-test keyword overrides forwarded to DummyBackend.prepare().
test_kwargs = {
    # https://github.com/onnx/onnx/issues/5510 (test_mvn fails with test_backend_test.py)
    "test_mvn": {"strict_mode": False},
}
backend_test = onnx.backend.test.BackendTest(
    DummyBackend, __name__, test_kwargs=test_kwargs
)
# Large vision models are too heavy for AppVeyor and 32-bit hosts.
if os.getenv("APPVEYOR"):
    backend_test.exclude(r"(test_vgg19|test_zfnet)")
if platform.architecture()[0] == "32bit":
    backend_test.exclude(r"(test_vgg19|test_zfnet|test_bvlc_alexnet)")
# Needs investigation on onnxruntime.
backend_test.exclude("test_dequantizelinear_e4m3fn_float16")
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test.test_cases)
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,887 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import itertools
import os
import pathlib
import tempfile
import unittest
import uuid
from typing import Any, Sequence
import numpy as np
import parameterized
import onnx
from onnx import (
ModelProto,
NodeProto,
TensorProto,
checker,
helper,
parser,
shape_inference,
)
from onnx.external_data_helper import (
convert_model_from_external_data,
convert_model_to_external_data,
load_external_data_for_model,
load_external_data_for_tensor,
set_external_data,
)
from onnx.numpy_helper import from_array, to_array
class TestLoadExternalDataBase(unittest.TestCase):
    """Base class for testing external data related behaviors.

    Builds, inside a throw-away temp directory, a model whose initializer and
    Constant-attribute tensors are stored in separate external ``.bin`` files.
    Subclasses should be parameterized with a serialization format.
    """
    # Format passed to onnx.save_model/load_model; parameterized subclasses
    # override this class attribute.
    serialization_format: str = "protobuf"
    def setUp(self) -> None:
        """Create the temp dir, the reference arrays and the on-disk model."""
        self._temp_dir_obj = tempfile.TemporaryDirectory()
        self.temp_dir: str = self._temp_dir_obj.name
        # The +512 / +256 offsets make the two arrays easy to tell apart.
        self.initializer_value = np.arange(6).reshape(3, 2).astype(np.float32) + 512
        self.attribute_value = np.arange(6).reshape(2, 3).astype(np.float32) + 256
        self.model_filename = self.create_test_model()
    def tearDown(self) -> None:
        """Remove the temp directory and everything written into it."""
        self._temp_dir_obj.cleanup()
    def get_temp_model_filename(self) -> str:
        """Return a fresh, unique ``.onnx`` path inside the temp directory."""
        return os.path.join(self.temp_dir, str(uuid.uuid4()) + ".onnx")
    def create_external_data_tensor(
        self, value: list[Any], tensor_name: str, location: str = ""
    ) -> TensorProto:
        """Write *value* to an external file and return the referencing tensor.

        The raw bytes go to *location* (default ``<tensor_name>.bin``) in the
        temp dir; they are then stripped from the proto so that only the
        external reference remains.
        """
        tensor = from_array(np.array(value))
        tensor.name = tensor_name
        tensor_filename = location or f"{tensor_name}.bin"
        set_external_data(tensor, location=tensor_filename)
        with open(os.path.join(self.temp_dir, tensor_filename), "wb") as data_file:
            data_file.write(tensor.raw_data)
        tensor.ClearField("raw_data")
        tensor.data_location = onnx.TensorProto.EXTERNAL
        return tensor
    def create_test_model(self, location: str = "") -> str:
        """Build and save a model with one external initializer and one
        external Constant attribute; return the saved model's path."""
        constant_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["values"],
            value=self.create_external_data_tensor(
                self.attribute_value, "attribute_value" # type: ignore[arg-type]
            ),
        )
        initializers = [
            self.create_external_data_tensor(
                self.initializer_value, "input_value", location # type: ignore[arg-type]
            )
        ]
        inputs = [
            helper.make_tensor_value_info(
                "input_value", onnx.TensorProto.FLOAT, self.initializer_value.shape
            )
        ]
        graph = helper.make_graph(
            [constant_node],
            "test_graph",
            inputs=inputs,
            outputs=[],
            initializer=initializers,
        )
        model = helper.make_model(graph)
        model_filename = os.path.join(self.temp_dir, "model.onnx")
        onnx.save_model(model, model_filename, self.serialization_format)
        return model_filename
    def test_check_model(self) -> None:
        """check_model only accepts protobuf when given a path; skip otherwise."""
        if self.serialization_format != "protobuf":
            self.skipTest(
                "check_model supports protobuf only as binary when provided as a path"
            )
        checker.check_model(self.model_filename)
@parameterized.parameterized_class(
    [
        {"serialization_format": "protobuf"},
        {"serialization_format": "textproto"},
    ]
)
class TestLoadExternalData(TestLoadExternalDataBase):
    """Round-trip tests for models whose tensors live in external files."""

    def _assert_tensors_match(self, model) -> None:
        # Shared assertions: the initializer and the Constant-node attribute
        # must both resolve to the original reference arrays.
        np.testing.assert_allclose(
            to_array(model.graph.initializer[0]), self.initializer_value
        )
        np.testing.assert_allclose(
            to_array(model.graph.node[0].attribute[0].t), self.attribute_value
        )

    def test_load_external_data(self) -> None:
        """External tensors are resolved eagerly by default."""
        model = onnx.load_model(self.model_filename, self.serialization_format)
        self._assert_tensors_match(model)

    def test_load_external_data_for_model(self) -> None:
        """External tensors can be resolved explicitly after a lazy load."""
        model = onnx.load_model(
            self.model_filename, self.serialization_format, load_external_data=False
        )
        load_external_data_for_model(model, self.temp_dir)
        self._assert_tensors_match(model)

    def test_save_external_data(self) -> None:
        """Resaving into a fresh directory keeps external tensors loadable."""
        model = onnx.load_model(self.model_filename, self.serialization_format)
        save_dir = os.path.join(self.temp_dir, "save_copy")
        os.mkdir(save_dir)
        resaved_filename = os.path.join(save_dir, "model.onnx")
        onnx.save_model(model, resaved_filename, self.serialization_format)
        reloaded = onnx.load_model(resaved_filename, self.serialization_format)
        self._assert_tensors_match(reloaded)
@parameterized.parameterized_class(
    [
        {"serialization_format": "protobuf"},
        {"serialization_format": "textproto"},
    ]
)
class TestLoadExternalDataSingleFile(TestLoadExternalDataBase):
    """Variant where all external tensors share one backing file."""
    def create_external_data_tensors(
        self, tensors_data: list[tuple[list[Any], Any]]
    ) -> list[TensorProto]:
        """Append all tensors to a single ``tensors.bin`` and return protos
        referencing it via (offset, length)."""
        tensor_filename = "tensors.bin"
        tensors = []
        with open(os.path.join(self.temp_dir, tensor_filename), "ab") as data_file:
            for value, tensor_name in tensors_data:
                tensor = from_array(np.array(value))
                offset = data_file.tell()
                # Pad so every tensor starts on a 4096-byte boundary.
                if offset % 4096 != 0:
                    data_file.write(b"\0" * (4096 - offset % 4096))
                    offset = offset + 4096 - offset % 4096
                data_file.write(tensor.raw_data)
                set_external_data(
                    tensor,
                    location=tensor_filename,
                    offset=offset,
                    length=data_file.tell() - offset,
                )
                tensor.name = tensor_name
                tensor.ClearField("raw_data")
                tensor.data_location = onnx.TensorProto.EXTERNAL
                tensors.append(tensor)
        return tensors
    def test_load_external_single_file_data(self) -> None:
        """Both tensors load correctly from the shared external file."""
        model = onnx.load_model(self.model_filename, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        np.testing.assert_allclose(to_array(initializer_tensor), self.initializer_value)
        attribute_tensor = model.graph.node[0].attribute[0].t
        np.testing.assert_allclose(to_array(attribute_tensor), self.attribute_value)
    def test_save_external_single_file_data(self) -> None:
        """Resaving to a new directory keeps the shared-file tensors readable."""
        model = onnx.load_model(self.model_filename, self.serialization_format)
        temp_dir = os.path.join(self.temp_dir, "save_copy")
        os.mkdir(temp_dir)
        new_model_filename = os.path.join(temp_dir, "model.onnx")
        onnx.save_model(model, new_model_filename, self.serialization_format)
        new_model = onnx.load_model(new_model_filename, self.serialization_format)
        initializer_tensor = new_model.graph.initializer[0]
        np.testing.assert_allclose(to_array(initializer_tensor), self.initializer_value)
        attribute_tensor = new_model.graph.node[0].attribute[0].t
        np.testing.assert_allclose(to_array(attribute_tensor), self.attribute_value)
    @parameterized.parameterized.expand(itertools.product((True, False), (True, False)))
    def test_save_external_invalid_single_file_data_and_check(
        self, use_absolute_path: bool, use_model_path: bool
    ) -> None:
        """Locations escaping the model directory must fail validation,
        whether given as an absolute path or as ``..`` traversal, and whether
        the model is loaded by path or its tensors resolved explicitly."""
        model = onnx.load_model(self.model_filename, self.serialization_format)
        model_dir = os.path.join(self.temp_dir, "save_copy")
        os.mkdir(model_dir)
        traversal_external_data_dir = os.path.join(
            self.temp_dir, "invlid_external_data"
        )
        os.mkdir(traversal_external_data_dir)
        if use_absolute_path:
            traversal_external_data_location = os.path.join(
                traversal_external_data_dir, "tensors.bin"
            )
        else:
            traversal_external_data_location = "../invlid_external_data/tensors.bin"
        external_data_dir = os.path.join(self.temp_dir, "external_data")
        os.mkdir(external_data_dir)
        new_model_filepath = os.path.join(model_dir, "model.onnx")
        # Bypass convert_model_to_external_data's own location validation so
        # the checker is the component actually exercised.
        def convert_model_to_external_data_no_check(model: ModelProto, location: str):
            for tensor in model.graph.initializer:
                if tensor.HasField("raw_data"):
                    set_external_data(tensor, location)
        convert_model_to_external_data_no_check(
            model,
            location=traversal_external_data_location,
        )
        onnx.save_model(model, new_model_filepath, self.serialization_format)
        if use_model_path:
            with self.assertRaises(onnx.checker.ValidationError):
                _ = onnx.load_model(new_model_filepath, self.serialization_format)
        else:
            onnx_model = onnx.load_model(
                new_model_filepath, self.serialization_format, load_external_data=False
            )
            with self.assertRaises(onnx.checker.ValidationError):
                load_external_data_for_model(onnx_model, external_data_dir)
@parameterized.parameterized_class(
    [
        {"serialization_format": "protobuf"},
        {"serialization_format": "textproto"},
    ]
)
class TestSaveAllTensorsAsExternalData(unittest.TestCase):
    """Tests for convert_model_to_external_data / save_model(save_as_external_data=...)
    starting from an in-memory model whose tensors hold raw_data.
    """
    # Overridden by the parameterized_class decorator above.
    serialization_format: str = "protobuf"
    # NOTE(review): no tearDown() -- cleanup of _temp_dir_obj relies on the
    # TemporaryDirectory finalizer; consider adding one as in the other classes.
    def setUp(self) -> None:
        """Create the temp dir, reference arrays and the in-memory model."""
        self._temp_dir_obj = tempfile.TemporaryDirectory()
        self.temp_dir: str = self._temp_dir_obj.name
        self.initializer_value = np.arange(6).reshape(3, 2).astype(np.float32) + 512
        self.attribute_value = np.arange(6).reshape(2, 3).astype(np.float32) + 256
        self.model = self.create_test_model_proto()
    def get_temp_model_filename(self):
        """Return a fresh, unique ``.onnx`` path inside the temp directory."""
        return os.path.join(self.temp_dir, str(uuid.uuid4()) + ".onnx")
    def create_data_tensors(
        self, tensors_data: list[tuple[list[Any], Any]]
    ) -> list[TensorProto]:
        """Build plain (non-external) tensors holding their raw data inline."""
        tensors = []
        for value, tensor_name in tensors_data:
            tensor = from_array(np.array(value))
            tensor.name = tensor_name
            tensors.append(tensor)
        return tensors
    def create_test_model_proto(self) -> ModelProto:
        """Model with one inline initializer and one inline Constant attribute."""
        tensors = self.create_data_tensors(
            [
                (self.attribute_value, "attribute_value"), # type: ignore[list-item]
                (self.initializer_value, "input_value"), # type: ignore[list-item]
            ]
        )
        constant_node = onnx.helper.make_node(
            "Constant", inputs=[], outputs=["values"], value=tensors[0]
        )
        inputs = [
            helper.make_tensor_value_info(
                "input_value", onnx.TensorProto.FLOAT, self.initializer_value.shape
            )
        ]
        graph = helper.make_graph(
            [constant_node],
            "test_graph",
            inputs=inputs,
            outputs=[],
            initializer=[tensors[1]],
        )
        return helper.make_model(graph)
    # NOTE(review): this skipIf evaluates the class-body default
    # ("protobuf"), not the value injected by parameterized_class, so the
    # condition is always False and the test is never skipped. It happens to
    # be harmless here because check_model is given a ModelProto, not a path;
    # a runtime self.skipTest (as in TestLoadExternalDataBase) would match
    # the stated intent.
    @unittest.skipIf(
        serialization_format != "protobuf",
        "check_model supports protobuf only when provided as a path",
    )
    def test_check_model(self) -> None:
        """The constructed model passes the checker."""
        checker.check_model(self.model)
    def test_convert_model_to_external_data_with_size_threshold(self) -> None:
        """Tensors below the size threshold stay inline."""
        model_file_path = self.get_temp_model_filename()
        convert_model_to_external_data(self.model, size_threshold=1024)
        onnx.save_model(self.model, model_file_path, self.serialization_format)
        model = onnx.load_model(model_file_path, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        self.assertFalse(initializer_tensor.HasField("data_location"))
    def test_convert_model_to_external_data_without_size_threshold(self) -> None:
        """With threshold 0 every tensor is externalized and still loads back."""
        model_file_path = self.get_temp_model_filename()
        convert_model_to_external_data(self.model, size_threshold=0)
        onnx.save_model(self.model, model_file_path, self.serialization_format)
        model = onnx.load_model(model_file_path, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        self.assertTrue(initializer_tensor.HasField("data_location"))
        np.testing.assert_allclose(to_array(initializer_tensor), self.initializer_value)
    def test_convert_model_to_external_data_from_one_file_with_location(self) -> None:
        """Externalize to one named file, then convert back to inline data."""
        model_file_path = self.get_temp_model_filename()
        external_data_file = str(uuid.uuid4())
        convert_model_to_external_data(
            self.model,
            size_threshold=0,
            all_tensors_to_one_file=True,
            location=external_data_file,
        )
        onnx.save_model(self.model, model_file_path, self.serialization_format)
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, external_data_file)))
        model = onnx.load_model(model_file_path, self.serialization_format)
        # test convert model from external data
        convert_model_from_external_data(model)
        model_file_path = self.get_temp_model_filename()
        onnx.save_model(model, model_file_path, self.serialization_format)
        model = onnx.load_model(model_file_path, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        self.assertFalse(len(initializer_tensor.external_data))
        self.assertEqual(initializer_tensor.data_location, TensorProto.DEFAULT)
        np.testing.assert_allclose(to_array(initializer_tensor), self.initializer_value)
        attribute_tensor = model.graph.node[0].attribute[0].t
        self.assertFalse(len(attribute_tensor.external_data))
        self.assertEqual(attribute_tensor.data_location, TensorProto.DEFAULT)
        np.testing.assert_allclose(to_array(attribute_tensor), self.attribute_value)
    def test_convert_model_to_external_data_from_one_file_without_location_uses_model_name(
        self,
    ) -> None:
        """Without an explicit location the data file is named after the model."""
        model_file_path = self.get_temp_model_filename()
        convert_model_to_external_data(
            self.model, size_threshold=0, all_tensors_to_one_file=True
        )
        onnx.save_model(self.model, model_file_path, self.serialization_format)
        self.assertTrue(os.path.isfile(model_file_path))
        # model_file_path is absolute, so os.path.join returns it unchanged.
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, model_file_path)))
    def test_convert_model_to_external_data_one_file_per_tensor_without_attribute(
        self,
    ) -> None:
        """Per-tensor files: attribute tensors stay inline when not converted."""
        model_file_path = self.get_temp_model_filename()
        convert_model_to_external_data(
            self.model,
            size_threshold=0,
            all_tensors_to_one_file=False,
            convert_attribute=False,
        )
        onnx.save_model(self.model, model_file_path, self.serialization_format)
        self.assertTrue(os.path.isfile(model_file_path))
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, "input_value")))
        self.assertFalse(os.path.isfile(os.path.join(self.temp_dir, "attribute_value")))
    def test_convert_model_to_external_data_one_file_per_tensor_with_attribute(
        self,
    ) -> None:
        """Per-tensor files: attribute tensors get their own file when converted."""
        model_file_path = self.get_temp_model_filename()
        convert_model_to_external_data(
            self.model,
            size_threshold=0,
            all_tensors_to_one_file=False,
            convert_attribute=True,
        )
        onnx.save_model(self.model, model_file_path, self.serialization_format)
        self.assertTrue(os.path.isfile(model_file_path))
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, "input_value")))
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, "attribute_value")))
    def test_convert_model_to_external_data_does_not_convert_attribute_values(
        self,
    ) -> None:
        """convert_attribute=False leaves node-attribute tensors inline."""
        model_file_path = self.get_temp_model_filename()
        convert_model_to_external_data(
            self.model,
            size_threshold=0,
            convert_attribute=False,
            all_tensors_to_one_file=False,
        )
        onnx.save_model(self.model, model_file_path, self.serialization_format)
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, "input_value")))
        self.assertFalse(os.path.isfile(os.path.join(self.temp_dir, "attribute_value")))
        model = onnx.load_model(model_file_path, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        self.assertTrue(initializer_tensor.HasField("data_location"))
        attribute_tensor = model.graph.node[0].attribute[0].t
        self.assertFalse(attribute_tensor.HasField("data_location"))
    def test_convert_model_to_external_data_converts_attribute_values(self) -> None:
        """convert_attribute=True externalizes attribute tensors too."""
        model_file_path = self.get_temp_model_filename()
        convert_model_to_external_data(
            self.model, size_threshold=0, convert_attribute=True
        )
        onnx.save_model(self.model, model_file_path, self.serialization_format)
        model = onnx.load_model(model_file_path, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        np.testing.assert_allclose(to_array(initializer_tensor), self.initializer_value)
        self.assertTrue(initializer_tensor.HasField("data_location"))
        attribute_tensor = model.graph.node[0].attribute[0].t
        np.testing.assert_allclose(to_array(attribute_tensor), self.attribute_value)
        self.assertTrue(attribute_tensor.HasField("data_location"))
    def test_save_model_does_not_convert_to_external_data_and_saves_the_model(
        self,
    ) -> None:
        """save_as_external_data=False keeps everything inline."""
        model_file_path = self.get_temp_model_filename()
        onnx.save_model(
            self.model,
            model_file_path,
            self.serialization_format,
            save_as_external_data=False,
        )
        self.assertTrue(os.path.isfile(model_file_path))
        model = onnx.load_model(model_file_path, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        self.assertFalse(initializer_tensor.HasField("data_location"))
        attribute_tensor = model.graph.node[0].attribute[0].t
        self.assertFalse(attribute_tensor.HasField("data_location"))
    def test_save_model_does_convert_and_saves_the_model(self) -> None:
        """save_as_external_data=True externalizes initializers on save."""
        model_file_path = self.get_temp_model_filename()
        onnx.save_model(
            self.model,
            model_file_path,
            self.serialization_format,
            save_as_external_data=True,
            all_tensors_to_one_file=True,
            location=None,
            size_threshold=0,
            convert_attribute=False,
        )
        model = onnx.load_model(model_file_path, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        self.assertTrue(initializer_tensor.HasField("data_location"))
        np.testing.assert_allclose(to_array(initializer_tensor), self.initializer_value)
        attribute_tensor = model.graph.node[0].attribute[0].t
        self.assertFalse(attribute_tensor.HasField("data_location"))
        np.testing.assert_allclose(to_array(attribute_tensor), self.attribute_value)
    def test_save_model_without_loading_external_data(self) -> None:
        """Resaving a lazily-loaded model preserves its external tensors."""
        model_file_path = self.get_temp_model_filename()
        onnx.save_model(
            self.model,
            model_file_path,
            self.serialization_format,
            save_as_external_data=True,
            location=None,
            size_threshold=0,
            convert_attribute=False,
        )
        # Save without load_external_data
        model = onnx.load_model(
            model_file_path, self.serialization_format, load_external_data=False
        )
        onnx.save_model(
            model,
            model_file_path,
            self.serialization_format,
            save_as_external_data=True,
            location=None,
            size_threshold=0,
            convert_attribute=False,
        )
        # Load the saved model again; Only works if the saved path is under the same directory
        model = onnx.load_model(model_file_path, self.serialization_format)
        initializer_tensor = model.graph.initializer[0]
        self.assertTrue(initializer_tensor.HasField("data_location"))
        np.testing.assert_allclose(to_array(initializer_tensor), self.initializer_value)
        attribute_tensor = model.graph.node[0].attribute[0].t
        self.assertFalse(attribute_tensor.HasField("data_location"))
        np.testing.assert_allclose(to_array(attribute_tensor), self.attribute_value)
    def test_save_model_with_existing_raw_data_should_override(self) -> None:
        """Loading external data replaces any stale raw_data on the tensor."""
        model_file_path = self.get_temp_model_filename()
        original_raw_data = self.model.graph.initializer[0].raw_data
        onnx.save_model(
            self.model,
            model_file_path,
            self.serialization_format,
            save_as_external_data=True,
            size_threshold=0,
        )
        self.assertTrue(os.path.isfile(model_file_path))
        model = onnx.load_model(
            model_file_path, self.serialization_format, load_external_data=False
        )
        initializer_tensor = model.graph.initializer[0]
        initializer_tensor.raw_data = b"dummpy_raw_data"
        # If raw_data and external tensor exist at the same time, override existing raw_data
        load_external_data_for_tensor(initializer_tensor, self.temp_dir)
        self.assertEqual(initializer_tensor.raw_data, original_raw_data)
@parameterized.parameterized_class(
    [
        {"serialization_format": "protobuf"},
        {"serialization_format": "textproto"},
    ]
)
class TestExternalDataToArray(unittest.TestCase):
    """Tests for to_array() on external tensors and for shape inference
    interacting with unloaded external data.
    """
    # Overridden by the parameterized_class decorator above.
    serialization_format: str = "protobuf"
    def setUp(self) -> None:
        """Create the temp dir, the large/small fixtures and the model."""
        self._temp_dir_obj = tempfile.TemporaryDirectory()
        self.temp_dir: str = self._temp_dir_obj.name
        self._model_file_path: str = os.path.join(self.temp_dir, "model.onnx")
        # large_data exceeds typical externalization thresholds; small_data
        # doubles as the Reshape target shape.
        self.large_data = np.random.rand(10, 60, 100).astype(np.float32)
        self.small_data = (200, 300)
        self.model = self.create_test_model()
    @property
    def model_file_path(self):
        """Read-only path of the model file used by every test."""
        return self._model_file_path
    def tearDown(self) -> None:
        """Remove the temp directory and everything written into it."""
        self._temp_dir_obj.cleanup()
    def create_test_model(self) -> ModelProto:
        """Reshape(X, Shape) -> Cast(INT64), with X large and Shape small."""
        X = helper.make_tensor_value_info("X", TensorProto.FLOAT, self.large_data.shape)
        input_init = helper.make_tensor(
            name="X",
            data_type=TensorProto.FLOAT,
            dims=self.large_data.shape,
            vals=self.large_data.tobytes(),
            raw=True,
        )
        shape_data = np.array(self.small_data, np.int64)
        shape_init = helper.make_tensor(
            name="Shape",
            data_type=TensorProto.INT64,
            dims=shape_data.shape,
            vals=shape_data.tobytes(),
            raw=True,
        )
        C = helper.make_tensor_value_info("C", TensorProto.INT64, self.small_data)
        reshape = onnx.helper.make_node(
            "Reshape",
            inputs=["X", "Shape"],
            outputs=["Y"],
        )
        cast = onnx.helper.make_node(
            "Cast", inputs=["Y"], outputs=["C"], to=TensorProto.INT64
        )
        graph_def = helper.make_graph(
            [reshape, cast],
            "test-model",
            [X],
            [C],
            initializer=[input_init, shape_init],
        )
        model = helper.make_model(graph_def, producer_name="onnx-example")
        return model
    # NOTE(review): this skipIf evaluates the class-body default
    # ("protobuf"), not the value injected by parameterized_class, so it
    # never skips. Harmless here since check_model receives a ModelProto,
    # but a runtime self.skipTest would match the stated intent.
    @unittest.skipIf(
        serialization_format != "protobuf",
        "check_model supports protobuf only when provided as a path",
    )
    def test_check_model(self) -> None:
        """The constructed model passes the checker."""
        checker.check_model(self.model)
    def test_reshape_inference_with_external_data_fail(self) -> None:
        """Strict shape inference must fail when external data is unloaded."""
        onnx.save_model(
            self.model,
            self.model_file_path,
            self.serialization_format,
            save_as_external_data=True,
            all_tensors_to_one_file=False,
            size_threshold=0,
        )
        model_without_external_data = onnx.load(
            self.model_file_path, self.serialization_format, load_external_data=False
        )
        # Shape inference of Reshape uses ParseData
        # ParseData cannot handle external data and should throw the error as follows:
        # Cannot parse data from external tensors. Please load external data into raw data for tensor: Shape
        self.assertRaises(
            shape_inference.InferenceError,
            shape_inference.infer_shapes,
            model_without_external_data,
            strict_mode=True,
        )
    def test_to_array_with_external_data(self) -> None:
        """to_array resolves external data when given the base directory."""
        onnx.save_model(
            self.model,
            self.model_file_path,
            self.serialization_format,
            save_as_external_data=True,
            all_tensors_to_one_file=False,
            size_threshold=0,
        )
        # raw_data of external tensor is not loaded
        model = onnx.load(
            self.model_file_path, self.serialization_format, load_external_data=False
        )
        # Specify self.temp_dir to load external tensor
        loaded_large_data = to_array(model.graph.initializer[0], self.temp_dir)
        np.testing.assert_allclose(loaded_large_data, self.large_data)
    def test_save_model_with_external_data_multiple_times(self) -> None:
        """Repeated saves must handle inline and already-external tensors."""
        # Test onnx.save should respectively handle typical tensor and external tensor properly
        # 1st save: save two tensors which have raw_data
        # Only w_large will be stored as external tensors since it's larger than 1024
        onnx.save_model(
            self.model,
            self.model_file_path,
            self.serialization_format,
            save_as_external_data=True,
            all_tensors_to_one_file=False,
            location=None,
            size_threshold=1024,
            convert_attribute=True,
        )
        model_without_loading_external = onnx.load(
            self.model_file_path, self.serialization_format, load_external_data=False
        )
        large_input_tensor = model_without_loading_external.graph.initializer[0]
        self.assertTrue(large_input_tensor.HasField("data_location"))
        np.testing.assert_allclose(
            to_array(large_input_tensor, self.temp_dir), self.large_data
        )
        small_shape_tensor = model_without_loading_external.graph.initializer[1]
        self.assertTrue(not small_shape_tensor.HasField("data_location"))
        np.testing.assert_allclose(to_array(small_shape_tensor), self.small_data)
        # 2nd save: one tensor has raw_data (small); one external tensor (large)
        # Save them both as external tensors this time
        onnx.save_model(
            model_without_loading_external,
            self.model_file_path,
            self.serialization_format,
            save_as_external_data=True,
            all_tensors_to_one_file=False,
            location=None,
            size_threshold=0,
            convert_attribute=True,
        )
        model_without_loading_external = onnx.load(
            self.model_file_path, self.serialization_format, load_external_data=False
        )
        large_input_tensor = model_without_loading_external.graph.initializer[0]
        self.assertTrue(large_input_tensor.HasField("data_location"))
        np.testing.assert_allclose(
            to_array(large_input_tensor, self.temp_dir), self.large_data
        )
        small_shape_tensor = model_without_loading_external.graph.initializer[1]
        self.assertTrue(small_shape_tensor.HasField("data_location"))
        np.testing.assert_allclose(
            to_array(small_shape_tensor, self.temp_dir), self.small_data
        )
class TestNotAllowToLoadExternalDataOutsideModelDirectory(TestLoadExternalDataBase):
    """Essential test to check that onnx (validate) C++ code will not allow to load external_data outside the model
    directory.
    """

    def create_external_data_tensor(
        self, value: list[Any], tensor_name: str, location: str = ""
    ) -> TensorProto:
        """Build a tensor whose payload is referenced by ``location``
        instead of being stored inline, WITHOUT writing the file itself."""
        tensor = from_array(np.array(value))
        tensor.name = tensor_name
        tensor_filename = location or f"{tensor_name}.bin"
        set_external_data(tensor, location=tensor_filename)
        # Drop the inline payload so the (possibly malicious) path is the
        # only data source the checker sees.
        tensor.ClearField("raw_data")
        tensor.data_location = onnx.TensorProto.EXTERNAL
        return tensor

    def test_check_model(self) -> None:
        """We only test the model validation as onnxruntime uses this to load the model."""
        # Path traversal escaping the model directory must be rejected.
        self.model_filename = self.create_test_model("../../file.bin")
        with self.assertRaises(onnx.checker.ValidationError):
            checker.check_model(self.model_filename)

    def test_check_model_relative(self) -> None:
        """More relative path test."""
        # Even a path that dips into a subdirectory and back out is rejected.
        self.model_filename = self.create_test_model("../test/../file.bin")
        with self.assertRaises(onnx.checker.ValidationError):
            checker.check_model(self.model_filename)

    def test_check_model_absolute(self) -> None:
        """ONNX checker disallows using absolute path as location in external tensor."""
        self.model_filename = self.create_test_model("//file.bin")
        with self.assertRaises(onnx.checker.ValidationError):
            checker.check_model(self.model_filename)
@unittest.skipIf(os.name != "nt", reason="Skip Windows test")
class TestNotAllowToLoadExternalDataOutsideModelDirectoryOnWindows(
    TestNotAllowToLoadExternalDataOutsideModelDirectory
):
    """Essential test to check that onnx (validate) C++ code will not allow to load external_data outside the model
    directory.

    Same checks as the parent class, but with Windows-style path separators
    and drive-letter absolute paths; only runs on Windows (os.name == "nt").
    """

    def test_check_model(self) -> None:
        """We only test the model validation as onnxruntime uses this to load the model."""
        # Backslash-separated traversal must be rejected on Windows too.
        self.model_filename = self.create_test_model("..\\..\\file.bin")
        with self.assertRaises(onnx.checker.ValidationError):
            checker.check_model(self.model_filename)

    def test_check_model_relative(self) -> None:
        """More relative path test."""
        self.model_filename = self.create_test_model("..\\test\\..\\file.bin")
        with self.assertRaises(onnx.checker.ValidationError):
            checker.check_model(self.model_filename)

    def test_check_model_absolute(self) -> None:
        """ONNX checker disallows using absolute path as location in external tensor."""
        # Drive-letter absolute path.
        self.model_filename = self.create_test_model("C:/file.bin")
        with self.assertRaises(onnx.checker.ValidationError):
            checker.check_model(self.model_filename)
class TestSaveAllTensorsAsExternalDataWithPath(TestSaveAllTensorsAsExternalData):
    """Re-run all external-data save tests with a pathlib.Path model file
    name instead of a plain string."""

    def get_temp_model_filename(self) -> pathlib.Path:
        # Wrap the base class's string path so every inherited test
        # exercises the os.PathLike code path.
        return pathlib.Path(super().get_temp_model_filename())
class TestExternalDataToArrayWithPath(TestExternalDataToArray):
    """Re-run the to_array/external-data round-trip tests with a
    pathlib.Path model path instead of a plain string."""

    @property
    def model_file_path(self) -> pathlib.Path:
        # Expose the base class's string path as os.PathLike for all
        # inherited tests.
        return pathlib.Path(self._model_file_path)
class TestFunctionsAndSubGraphs(unittest.TestCase):
    """Check that convert_model_to_external_data reaches Constant tensors
    inside model-local functions and If-node subgraphs, not only top-level
    graph initializers."""

    def setUp(self) -> None:
        self._temp_dir_obj = tempfile.TemporaryDirectory()
        temp_dir = self._temp_dir_obj.name
        self._model_file_path: str = os.path.join(temp_dir, "model.onnx")
        # 4096 float32 values: non-trivial payload to externalize.
        array = np.arange(4096).astype(np.float32)
        self._tensor = from_array(array, "tensor")

    def tearDown(self) -> None:
        self._temp_dir_obj.cleanup()

    def _check_is_internal(self, tensor: TensorProto) -> None:
        # DEFAULT data_location means the payload is stored inline.
        self.assertEqual(tensor.data_location, TensorProto.DEFAULT)

    def _check_is_external(self, tensor: TensorProto) -> None:
        self.assertEqual(tensor.data_location, TensorProto.EXTERNAL)

    def _check(self, model: ModelProto, nodes: Sequence[NodeProto]) -> None:
        """Check that the tensors in the model are externalized.

        The tensors in the specified sequence of Constant nodes are set to self._tensor,
        an internal tensor. The model is then converted to external data format.
        The tensors are then checked to ensure that they are externalized.

        Arguments:
            model: The model to check.
            nodes: A sequence of Constant nodes.
        """
        for node in nodes:
            self.assertEqual(node.op_type, "Constant")
            tensor = node.attribute[0].t
            tensor.CopyFrom(self._tensor)
            self._check_is_internal(tensor)
        # size_threshold=0 and convert_attribute=True force every tensor,
        # including attribute tensors, to be externalized.
        convert_model_to_external_data(model, size_threshold=0, convert_attribute=True)
        for node in nodes:
            tensor = node.attribute[0].t
            self._check_is_external(tensor)

    def test_function(self) -> None:
        """Constant node inside a model-local function gets externalized."""
        model_text = """
            <ir_version: 7, opset_import: ["": 15, "local": 1]>
            agraph (float[N] X) => (float[N] Y)
            {
                Y = local.add(X)
            }
            <opset_import: ["" : 15], domain: "local">
            add (float[N] X) => (float[N] Y) {
                C = Constant <value = float[1] {1.0}> ()
                Y = Add (X, C)
            }
        """
        model = parser.parse_model(model_text)
        self._check(model, [model.functions[0].node[0]])

    def test_subgraph(self) -> None:
        """Constant nodes inside both If branches get externalized."""
        # NOTE(review): the then_branch defines "B" but adds "C" — looks
        # like a typo, though harmless here since only the Constant nodes
        # are inspected (the model is never executed). Confirm upstream.
        model_text = """
            <ir_version: 7, opset_import: ["": 15, "local": 1]>
            agraph (bool flag, float[N] X) => (float[N] Y)
            {
                Y = if (flag) <
                    then_branch = g1 () => (float[N] Y_then) {
                        B = Constant <value = float[1] {0.0}> ()
                        Y_then = Add (X, C)
                    },
                    else_branch = g2 () => (float[N] Y_else) {
                        C = Constant <value = float[1] {1.0}> ()
                        Y_else = Add (X, C)
                    }
                >
            }
        """
        model = parser.parse_model(model_text)
        if_node = model.graph.node[0]
        # First node of each branch subgraph is the Constant under test.
        constant_nodes = [attr.g.node[0] for attr in if_node.attribute]
        self._check(model, constant_nodes)


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,51 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# This file is for testing ONNX with ONNX Runtime
# Create a general scenario to use ONNX Runtime with ONNX
from __future__ import annotations
import unittest
class TestONNXRuntime(unittest.TestCase):
    """End-to-end smoke test pairing the ONNX toolchain with onnxruntime."""

    def test_with_ort_example(self) -> None:
        """Load an ORT sample model, check/infer/convert it, then execute it."""
        # onnxruntime is an optional dependency; skip cleanly when absent.
        try:
            import onnxruntime

            del onnxruntime
        except ImportError:
            raise unittest.SkipTest("onnxruntime not installed") from None

        from numpy import float32, random
        from onnxruntime import InferenceSession
        from onnxruntime.datasets import get_example

        from onnx import checker, load, shape_inference, version_converter

        # Fetch the opset-9 sigmoid example bundled with onnxruntime.
        sample_path = get_example("sigmoid.onnx")

        # Exercise the ONNX side: load, validate, infer shapes, convert opset.
        model = load(sample_path)
        checker.check_model(model)
        checker.check_model(model, full_check=True)
        with_shapes = shape_inference.infer_shapes(
            model, check_type=True, strict_mode=True, data_prop=True
        )
        downgraded = version_converter.convert_version(with_shapes, 10)

        # Exercise the runtime side: build a CPU session and run one batch.
        session = InferenceSession(
            downgraded.SerializeToString(), providers=["CPUExecutionProvider"]
        )
        feed_name = session.get_inputs()[0].name
        fetch_name = session.get_outputs()[0].name
        batch = random.random((3, 4, 5)).astype(float32)
        session.run([fetch_name], {feed_name: batch})


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,327 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
import numpy as np
from numpy.testing import assert_allclose
import onnx
from onnx import TensorProto, helper, numpy_helper
from onnx.defs import onnx_opset_version
from onnx.reference import ReferenceEvaluator
from onnx.tools import update_model_dims
from onnx.tools.replace_constants import replace_initializer_by_constant_of_shape
class TestToolsFunctions(unittest.TestCase):
    """Tests for onnx.tools: update_model_dims and
    replace_initializer_by_constant_of_shape."""

    def test_update_inputs_outputs_dim(self) -> None:
        """Named and dynamic (-1) dims are applied to graph inputs/outputs."""
        node_def = helper.make_node(
            "Conv",
            inputs=["x", "W"],
            outputs=["y"],
            kernel_shape=[3, 3],
            strides=[2, 2],
        )
        graph_def = helper.make_graph(
            [node_def],
            "test",
            [
                helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 1, 5, 5]),
                helper.make_tensor_value_info("W", TensorProto.FLOAT, [1, 1, 3, 3]),
            ],
            [helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 1, 2, 2])],
        )
        model_def = helper.make_model(graph_def, producer_name="test")
        updated_def = update_model_dims.update_inputs_outputs_dims(
            model_def,
            {
                "x": [1, 1, "x1", -1],
                "W": [1, 1, 3, 3],
            },
            {
                "y": [1, 1, -1, -1],
            },
        )
        onnx.checker.check_model(updated_def)
        # Explicit name "x1" is preserved; -1 entries become generated
        # dim_params of the form "<name>_<axis>" ("x_3", "y_2", "y_3").
        self.assertEqual(
            updated_def.graph.input[0].type.tensor_type.shape.dim[2].dim_param, "x1"
        )
        self.assertEqual(
            updated_def.graph.input[0].type.tensor_type.shape.dim[3].dim_param, "x_3"
        )
        self.assertEqual(
            updated_def.graph.output[0].type.tensor_type.shape.dim[2].dim_param, "y_2"
        )
        self.assertEqual(
            updated_def.graph.output[0].type.tensor_type.shape.dim[3].dim_param, "y_3"
        )

    def test_replace_initializer(self):
        """Graph initializers are replaced by ConstantOfShape nodes."""
        dtype = np.float32
        value = np.random.randn(2, 100).astype(dtype)
        A = numpy_helper.from_array(value, name="A")
        value = np.array([1], dtype=dtype)
        C = numpy_helper.from_array(value, name="C")
        X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        node1 = helper.make_node("MatMul", ["X", "A"], ["AX"])
        node2 = helper.make_node("Sub", ["AX", "C"], ["Y"])
        graph = helper.make_graph([node1, node2], "lr", [X], [Y], [A, C])
        model_def = helper.make_model(graph)
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        oinf1 = ReferenceEvaluator(model_def)
        y1 = oinf1.run(None, {"X": x})[0]  # type: ignore[index]
        repl = replace_initializer_by_constant_of_shape(model_def)
        node_types = {n.op_type for n in repl.graph.node}
        self.assertIn("ConstantOfShape", node_types)
        oinf2 = ReferenceEvaluator(repl)
        # NOTE(review): expected values below appear to be the fill
        # constants the replacement tool uses — confirm against
        # onnx.tools.replace_constants before relying on them.
        y1[:, :] = 3.5
        y1[0, :] = 0.5
        y2 = oinf2.run(None, {"X": x})[0]  # type: ignore[index]
        assert_allclose(y1, y2)

    def test_replace_constant(self):
        """Constant nodes are replaced by ConstantOfShape nodes."""
        dtype = np.float32
        value = np.random.randn(2, 100).astype(dtype)
        A = numpy_helper.from_array(value, name="A")
        value = np.array([1], dtype=dtype)
        C = numpy_helper.from_array(value, name="C")
        X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        node0 = helper.make_node("Constant", [], ["A"], value=A)
        node1 = helper.make_node("MatMul", ["X", "A"], ["AX"])
        node2 = helper.make_node("Sub", ["AX", "C"], ["Y"])
        graph = helper.make_graph([node0, node1, node2], "lr", [X], [Y], [C])
        model_def = helper.make_model(graph)
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        oinf1 = ReferenceEvaluator(model_def)
        y1 = oinf1.run(None, {"X": x})[0]  # type: ignore[index]
        repl = replace_initializer_by_constant_of_shape(model_def)
        node_types = {n.op_type for n in repl.graph.node}
        self.assertIn("ConstantOfShape", node_types)
        oinf2 = ReferenceEvaluator(repl)
        # Same expected fill values as test_replace_initializer above.
        y1[:, :] = 3.5
        y1[0, :] = 0.5
        y2 = oinf2.run(None, {"X": x})[0]  # type: ignore[index]
        assert_allclose(y1, y2)

    def test_replace_range(self):
        """With use_range=True, Range nodes are emitted instead of
        ConstantOfShape; only shapes are compared afterwards."""
        dtype = np.float32
        value = np.random.randn(2, 100).astype(dtype)
        A = numpy_helper.from_array(value, name="A")
        value = np.array([1], dtype=dtype)
        C = numpy_helper.from_array(value, name="C")
        X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        node0 = helper.make_node("Constant", [], ["A"], value=A)
        node1 = helper.make_node("MatMul", ["X", "A"], ["AX"])
        node2 = helper.make_node("Sub", ["AX", "C"], ["Y"])
        graph = helper.make_graph([node0, node1, node2], "lr", [X], [Y], [C])
        model_def = helper.make_model(graph)
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        oinf1 = ReferenceEvaluator(model_def)
        y1 = oinf1.run(None, {"X": x})[0]  # type: ignore[index]
        repl = replace_initializer_by_constant_of_shape(model_def, use_range=True)
        node_types = {n.op_type for n in repl.graph.node}
        self.assertIn("Range", node_types)
        self.assertNotIn("ConstantOfShape", node_types)
        oinf2 = ReferenceEvaluator(repl)
        y2 = oinf2.run(None, {"X": x})[0]  # type: ignore[index]
        # Range values differ from the originals, so only shape is checked.
        assert_allclose(y1.shape, y2.shape)

    def test_replace_constant_function(self):
        """Replacement also reaches Constant nodes inside model functions."""
        dtype = np.float32
        value = np.random.randn(2, 100).astype(dtype)
        A = numpy_helper.from_array(value, name="A")
        value = np.array([1], dtype=dtype)
        C = numpy_helper.from_array(value, name="C")
        X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        nodeC = helper.make_node("Constant", [], ["C"], value=C)
        node0 = helper.make_node("Constant", [], ["A"], value=A)
        node1 = helper.make_node("MatMul", ["X", "A"], ["AX"])
        node2 = helper.make_node("Sub", ["AX", "C"], ["Y"])
        opset_imports = [
            helper.make_opsetid("", onnx_opset_version()),
            helper.make_opsetid("custom", 1),
        ]
        fct = helper.make_function(
            "custom",
            "unittest",
            ["X"],
            ["Y"],
            [nodeC, node0, node1, node2],
            opset_imports,
        )
        node = helper.make_node("unittest", ["X"], ["Y"], domain="custom")
        graph = helper.make_graph([node], "lr", [X], [Y], [C])
        model_def = helper.make_model(
            graph, functions=[fct], opset_imports=opset_imports
        )
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        oinf1 = ReferenceEvaluator(model_def)
        y1 = oinf1.run(None, {"X": x})[0]  # type: ignore[index]
        repl = replace_initializer_by_constant_of_shape(model_def)
        # Inspect the function body, not the top-level graph.
        node_types = {n.op_type for n in repl.functions[0].node}
        self.assertIn("ConstantOfShape", node_types)
        oinf2 = ReferenceEvaluator(repl)
        y1[:, :] = 3.5
        y1[0, :] = 0.5
        y2 = oinf2.run(None, {"X": x})[0]  # type: ignore[index]
        assert_allclose(y1, y2)

    def test_replace_range_function(self):
        """use_range=True replacement also reaches model functions."""
        dtype = np.float32
        value = np.random.randn(2, 100).astype(dtype)
        A = numpy_helper.from_array(value, name="A")
        value = np.array([1], dtype=dtype)
        C = numpy_helper.from_array(value, name="C")
        X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None])
        Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None])
        nodeC = helper.make_node("Constant", [], ["C"], value=C)
        node0 = helper.make_node("Constant", [], ["A"], value=A)
        node1 = helper.make_node("MatMul", ["X", "A"], ["AX"])
        node2 = helper.make_node("Sub", ["AX", "C"], ["Y"])
        opset_imports = [
            helper.make_opsetid("", onnx_opset_version()),
            helper.make_opsetid("custom", 1),
        ]
        fct = helper.make_function(
            "custom",
            "unittest",
            ["X"],
            ["Y"],
            [nodeC, node0, node1, node2],
            opset_imports,
        )
        node = helper.make_node("unittest", ["X"], ["Y"], domain="custom")
        graph = helper.make_graph([node], "lr", [X], [Y], [C])
        model_def = helper.make_model(
            graph, functions=[fct], opset_imports=opset_imports
        )
        x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
        oinf1 = ReferenceEvaluator(model_def)
        y1 = oinf1.run(None, {"X": x})[0]  # type: ignore[index]
        repl = replace_initializer_by_constant_of_shape(model_def, use_range=True)
        node_types = {n.op_type for n in repl.functions[0].node}
        self.assertIn("Range", node_types)
        self.assertNotIn("ConstantOfShape", node_types)
        oinf2 = ReferenceEvaluator(repl)
        y2 = oinf2.run(None, {"X": x})[0]  # type: ignore[index]
        assert_allclose(y1.shape, y2.shape)

    def test_replace_constant_graph(self):
        """Replacement reaches Constant nodes inside If-branch subgraphs."""
        value = np.array([0], dtype=np.float32)
        zero = numpy_helper.from_array(value, name="zero")
        X = helper.make_tensor_value_info("X", onnx.TensorProto.FLOAT, [None, None])
        Y = helper.make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [None])
        rsum = helper.make_node("ReduceSum", ["X"], ["rsum"])
        cond = helper.make_node("Greater", ["rsum", "zero"], ["cond"])
        then_out = helper.make_tensor_value_info(
            "then_out", onnx.TensorProto.FLOAT, None
        )
        # 129 elements: large enough to be subject to replacement.
        then_cst = numpy_helper.from_array(np.array([1] * 129).astype(np.float32))
        then_const_node = helper.make_node(
            "Constant", inputs=[], outputs=["then_out"], value=then_cst, name="cst1"
        )
        then_body = helper.make_graph([then_const_node], "then_body", [], [then_out])
        else_out = helper.make_tensor_value_info(
            "else_out", onnx.TensorProto.FLOAT, None
        )
        else_cst = numpy_helper.from_array(np.array([-1] * 129).astype(np.float32))
        else_const_node = helper.make_node(
            "Constant", inputs=[], outputs=["else_out"], value=else_cst, name="cst2"
        )
        else_body = helper.make_graph([else_const_node], "else_body", [], [else_out])
        if_node = onnx.helper.make_node(
            "If", ["cond"], ["Y"], then_branch=then_body, else_branch=else_body
        )
        graph = helper.make_graph([rsum, cond, if_node], "if", [X], [Y], [zero])
        onnx_model = helper.make_model(
            graph, opset_imports=[helper.make_opsetid("", onnx_opset_version())]
        )
        self.assertNotIn("ConstantOfShape", str(onnx_model))
        x = np.ones((3, 2), dtype=np.float32)
        oinf1 = ReferenceEvaluator(onnx_model)
        y1 = oinf1.run(None, {"X": x})[0]  # type: ignore[index]
        repl = replace_initializer_by_constant_of_shape(onnx_model)
        self.assertIn("ConstantOfShape", str(repl))
        oinf2 = ReferenceEvaluator(repl)
        y2 = oinf2.run(None, {"X": x})[0]  # type: ignore[index]
        # Expected fill value after replacement (see NOTE above).
        y1 = y1.copy()
        y1[:] = 0.5
        assert_allclose(y1, y2)

    def test_replace_range_graph(self):
        """use_range=True replacement reaches If-branch subgraphs."""
        value = np.array([0], dtype=np.float32)
        zero = numpy_helper.from_array(value, name="zero")
        X = helper.make_tensor_value_info("X", onnx.TensorProto.FLOAT, [None, None])
        Y = helper.make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [None])
        rsum = helper.make_node("ReduceSum", ["X"], ["rsum"])
        cond = helper.make_node("Greater", ["rsum", "zero"], ["cond"])
        then_out = helper.make_tensor_value_info(
            "then_out", onnx.TensorProto.FLOAT, None
        )
        then_cst = numpy_helper.from_array(np.array([1] * 129).astype(np.float32))
        then_const_node = helper.make_node(
            "Constant", inputs=[], outputs=["then_out"], value=then_cst, name="cst1"
        )
        then_body = helper.make_graph([then_const_node], "then_body", [], [then_out])
        else_out = helper.make_tensor_value_info(
            "else_out", onnx.TensorProto.FLOAT, None
        )
        else_cst = numpy_helper.from_array(np.array([-1] * 129).astype(np.float32))
        else_const_node = helper.make_node(
            "Constant", inputs=[], outputs=["else_out"], value=else_cst, name="cst2"
        )
        else_body = helper.make_graph([else_const_node], "else_body", [], [else_out])
        if_node = onnx.helper.make_node(
            "If", ["cond"], ["Y"], then_branch=then_body, else_branch=else_body
        )
        graph = helper.make_graph([rsum, cond, if_node], "if", [X], [Y], [zero])
        onnx_model = helper.make_model(
            graph, opset_imports=[helper.make_opsetid("", onnx_opset_version())]
        )
        self.assertNotIn("ConstantOfShape", str(onnx_model))
        x = np.ones((3, 2), dtype=np.float32)
        oinf1 = ReferenceEvaluator(onnx_model)
        y1 = oinf1.run(None, {"X": x})[0]  # type: ignore[index]
        repl = replace_initializer_by_constant_of_shape(onnx_model, use_range=True)
        self.assertNotIn("ConstantOfShape", str(repl))
        self.assertIn("Range", str(repl))
        oinf2 = ReferenceEvaluator(repl)
        y2 = oinf2.run(None, {"X": x})[0]  # type: ignore[index]
        # Values differ with Range fill; only shapes are compared.
        assert_allclose(y1.shape, y2.shape)


if __name__ == "__main__":
    unittest.main(verbosity=2)

View File

@ -0,0 +1,101 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
import numpy as np
import onnx
from onnx import TensorProto, helper, numpy_helper, shape_inference
class TestTrainingTool(unittest.TestCase):
    """Test that TrainingInfoProto can be attached to a model and that the
    concatenated inference+training graph passes the checker."""

    def test_training_info_proto(self) -> None:
        """Build an inference graph, attach a training graph via
        make_training_info, then validate the combined graph."""
        # Inference graph.
        A_shape = [2, 2]
        A_name = "A"
        A = np.random.rand(*A_shape).astype(np.float32)
        A_initializer = numpy_helper.from_array(A, name=A_name)
        A_value_info = helper.make_tensor_value_info(A_name, TensorProto.FLOAT, A_shape)
        B_shape = [2, 2]
        B_name = "B"
        B = np.random.rand(*B_shape).astype(np.float32)
        B_initializer = numpy_helper.from_array(B, name=B_name)
        B_value_info = helper.make_tensor_value_info(B_name, TensorProto.FLOAT, B_shape)
        C_shape = [2, 2]
        C_name = "C"
        C_value_info = helper.make_tensor_value_info(C_name, TensorProto.FLOAT, C_shape)
        inference_node = helper.make_node(
            "MatMul", inputs=[A_name, B_name], outputs=[C_name]
        )
        inference_graph = helper.make_graph(
            [inference_node],
            "simple_inference",
            [A_value_info, B_value_info],
            [C_value_info],
            [A_initializer, B_initializer],
        )
        # Training graph
        X_shape = [2, 2]
        X_name = "X"
        X = np.random.rand(*X_shape).astype(np.float32)
        X_initializer = numpy_helper.from_array(X, name=X_name)
        X_value_info = helper.make_tensor_value_info(X_name, TensorProto.FLOAT, X_shape)
        Y_shape = [2, 2]
        Y_name = "Y"
        Y_value_info = helper.make_tensor_value_info(Y_name, TensorProto.FLOAT, Y_shape)
        node = helper.make_node(
            "MatMul",
            inputs=[X_name, C_name],  # tensor "C" is from inference graph.
            outputs=[Y_name],
        )
        training_graph = helper.make_graph(
            [node], "simple_training", [X_value_info], [Y_value_info], [X_initializer]
        )
        # Capture assignment of B <--- Y.
        training_info = helper.make_training_info(
            training_graph, [(B_name, Y_name)], None, None
        )
        # Create a model with both inference and training information.
        model = helper.make_model(inference_graph)
        # Check if the inference-only part is correct.
        onnx.checker.check_model(model)
        # Insert training information.
        new_training_info = model.training_info.add()
        new_training_info.CopyFrom(training_info)
        # Generate the actual training graph from training information so that
        # we can run onnx checker to check if the full training graph is a valid
        # graph. As defined in spec, full training graph forms by concatenating
        # corresponding fields.
        full_training_graph = helper.make_graph(
            list(model.graph.node) + list(model.training_info[0].algorithm.node),
            "full_training_graph",
            list(model.graph.input) + list(model.training_info[0].algorithm.input),
            list(model.graph.output) + list(model.training_info[0].algorithm.output),
            list(model.graph.initializer)
            + list(model.training_info[0].algorithm.initializer),
        )
        # Wrap full training graph as a ModelProto so that we can run checker.
        full_training_model = helper.make_model(full_training_graph)
        full_training_model_with_shapes = shape_inference.infer_shapes(
            full_training_model
        )
        onnx.checker.check_model(full_training_model_with_shapes)


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,64 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import os
import shutil
import tempfile
import unittest
import onnx
from onnx import TensorProto, helper
class TestUtilityFunctions(unittest.TestCase):
    """Tests for onnx.utils.extract_model."""

    def test_extract_model(self) -> None:
        """Extract the middle layer of a 3-layer graph and verify its
        node count and input/output value infos."""

        def create_tensor(name):  # type: ignore
            # All tensors share the same FLOAT [1, 2] value info.
            return helper.make_tensor_value_info(name, TensorProto.FLOAT, [1, 2])

        A0 = create_tensor("A0")
        A1 = create_tensor("A1")
        B0 = create_tensor("B0")
        B1 = create_tensor("B1")
        B2 = create_tensor("B2")
        C0 = create_tensor("C0")
        C1 = create_tensor("C1")
        D0 = create_tensor("D0")
        # Three layers: A* -> B* (3 nodes), B* -> C* (2 nodes), C* -> D0.
        L0_0 = helper.make_node("Add", ["A0", "A1"], ["B0"])
        L0_1 = helper.make_node("Sub", ["A0", "A1"], ["B1"])
        L0_2 = helper.make_node("Mul", ["A0", "A1"], ["B2"])
        L1_0 = helper.make_node("Add", ["B0", "B1"], ["C0"])
        L1_1 = helper.make_node("Sub", ["B1", "B2"], ["C1"])
        L2_0 = helper.make_node("Mul", ["C0", "C1"], ["D0"])
        g0 = helper.make_graph(
            [L0_0, L0_1, L0_2, L1_0, L1_1, L2_0], "test", [A0, A1], [D0]
        )
        m0 = helper.make_model(g0, producer_name="test")
        tdir = tempfile.mkdtemp()
        p0 = os.path.join(tdir, "original.onnx")
        onnx.save(m0, p0)
        p1 = os.path.join(tdir, "extracted.onnx")
        # Cut out the middle layer only.
        input_names = ["B0", "B1", "B2"]
        output_names = ["C0", "C1"]
        onnx.utils.extract_model(p0, p1, input_names, output_names)
        m1 = onnx.load(p1)
        self.assertEqual(m1.producer_name, "onnx.utils.extract_model")
        self.assertEqual(m1.ir_version, m0.ir_version)
        self.assertEqual(m1.opset_import, m0.opset_import)
        # Only the two middle-layer nodes survive.
        self.assertEqual(len(m1.graph.node), 2)
        self.assertEqual(len(m1.graph.input), 3)
        self.assertEqual(len(m1.graph.output), 2)
        self.assertEqual(m1.graph.input[0], B0)
        self.assertEqual(m1.graph.input[1], B1)
        self.assertEqual(m1.graph.input[2], B2)
        self.assertEqual(m1.graph.output[0], C0)
        self.assertEqual(m1.graph.output[1], C1)
        shutil.rmtree(tdir, ignore_errors=True)


if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,146 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import string
import unittest
from typing import Any, List, Sequence, cast
import onnx
from onnx import TensorProto, ValueInfoProto, helper, shape_inference, version_converter
# Most recent opset version known to this onnx build; conversion targets.
LATEST_OPSET = onnx.defs.onnx_opset_version()


class TestAutomaticConversion(unittest.TestCase):
    """Shared base for version-converter tests: builds single-op models and
    round-trips them through version_converter + checker + shape inference."""

    def _test_model_conversion(
        self, to_opset: int, model: str | onnx.ModelProto
    ) -> None:
        """Convert ``model`` to ``to_opset``; both the original and the
        converted model must pass checker and strict shape inference."""
        if isinstance(model, str):
            model = onnx.parser.parse_model(model)
        onnx.checker.check_model(model)
        shape_inference.infer_shapes(model, strict_mode=True)
        converted = version_converter.convert_version(model, to_opset)
        onnx.checker.check_model(converted)
        shape_inference.infer_shapes(converted, strict_mode=True)

    def _test_model_conversion_fails(
        self, to_opset: int, model: str | onnx.ModelProto
    ) -> None:
        """The model itself must be valid, but conversion must raise."""
        if isinstance(model, str):
            model = onnx.parser.parse_model(model)
        onnx.checker.check_model(model)
        shape_inference.infer_shapes(model, strict_mode=True)
        with self.assertRaises(RuntimeError):
            version_converter.convert_version(model, to_opset)

    def _test_op_conversion(
        self,
        op: str,
        from_opset: int,
        input_shapes: Sequence[Sequence[int | None] | str] = ((3, 4, 5),),
        output_shapes: Sequence[Sequence[int | None]] = ((3, 4, 5),),
        input_types: Sequence[Any] | None = None,
        output_types: Sequence[Any] | None = None,
        initializer: Sequence[Any] = (),
        attrs: dict[str, Any] | None = None,
        seq_inputs: Sequence[int] = (),
        seq_outputs: Sequence[int] = (),
        optional_inputs: Sequence[int] = (),
        optional_outputs: Sequence[int] = (),
        is_upgrade: bool = True,
    ) -> None:
        """Test conversion.

        Args:
            op: A string representing the name of the operator to test.
            from_opset: An integer representing the lowest opset version to convert.
            input_shapes: A sequence of tuples or strings representing the shapes of the input tensors.
                The default value is ((3, 4, 5),).
            output_shapes: A sequence of tuples representing the shapes of the output tensors.
                The default value is ((3, 4, 5),).
            input_types: An optional sequence of types representing the data types of the input tensors.
            output_types: An optional sequence of types representing the data types of the output tensors.
            initializer: A sequence of values representing the initial values of the input tensors.
            attrs: An optional dictionary of attributes for the operator.
            seq_inputs: A sequence of integers representing the indices of the input tensors that are sequences.
            seq_outputs: A sequence of integers representing the indices of the output tensors that are sequences.
            optional_inputs: A sequence of integers representing the indices of the input tensors that are optional.
            optional_outputs: A sequence of integers representing the indices of the output tensors that are optional.
            is_upgrade: A boolean value indicating whether to run the version converter from from_opset to
                the most recent opset version (True) or from the most recent opset version to from_opset (False).
                The default value is True. In both cases, runs checker and shape inference on the final model.
        """
        if attrs is None:
            attrs = {}
        n_inputs = len(input_shapes)
        # Inputs are named "a", "b", ...; an empty-string shape marks an
        # omitted (absent) input, which keeps an empty name.
        letters = list(string.ascii_lowercase)[:n_inputs]
        input_names = [
            letter if shape != "" else ""
            for (letter, shape) in zip(letters, input_shapes)
        ]
        if input_types is None:
            input_types = [TensorProto.FLOAT] * n_inputs
        is_sequence = [0 if id not in seq_inputs else 1 for id in range(n_inputs)]
        is_optional = [0 if id not in optional_inputs else 1 for id in range(n_inputs)]
        # turn empty strings into [0] to ease type analysis, even though those entries
        # will be ignored
        input_shapes_cast = cast(
            List[List[int]],
            [[0] if isinstance(shape, str) else shape for shape in input_shapes],
        )
        inputs: list[ValueInfoProto] = []
        for name, ttype, shape, is_seq, is_opt in zip(
            input_names, input_types, input_shapes_cast, is_sequence, is_optional
        ):
            if name != "":
                if is_seq:
                    inputs += [
                        helper.make_tensor_sequence_value_info(name, ttype, shape)
                    ]
                elif is_opt:
                    type_proto = helper.make_tensor_type_proto(ttype, shape)
                    optional_type_proto = helper.make_optional_type_proto(type_proto)
                    inputs += [helper.make_value_info(name, optional_type_proto)]
                else:
                    inputs += [helper.make_tensor_value_info(name, ttype, shape)]
        # Outputs continue the alphabet after the inputs.
        n_outputs = len(output_shapes)
        output_names = list(string.ascii_lowercase)[n_inputs : n_inputs + n_outputs]
        if output_types is None:
            output_types = [TensorProto.FLOAT] * n_outputs
        is_sequence = [0 if id not in seq_outputs else 1 for id in range(n_outputs)]
        is_optional = [
            0 if id not in optional_outputs else 1 for id in range(n_outputs)
        ]
        output_shapes_cast = cast(
            List[List[int]],
            [[0] if isinstance(shape, str) else shape for shape in output_shapes],
        )
        outputs: list[ValueInfoProto] = []
        for name, ttype, shape, is_seq, is_opt in zip(
            output_names, output_types, output_shapes_cast, is_sequence, is_optional
        ):
            if is_seq:
                outputs += [helper.make_tensor_sequence_value_info(name, ttype, shape)]
            elif is_opt:
                type_proto = helper.make_tensor_type_proto(ttype, shape)
                optional_type_proto = helper.make_optional_type_proto(type_proto)
                outputs += [helper.make_value_info(name, optional_type_proto)]
            else:
                outputs += [helper.make_tensor_value_info(name, ttype, shape)]
        node = helper.make_node(op, input_names, output_names, **attrs)
        graph = helper.make_graph([node], op, inputs, outputs, initializer)
        # Direction of conversion depends on is_upgrade.
        start_opset = from_opset if is_upgrade else LATEST_OPSET
        end_opset = LATEST_OPSET if is_upgrade else from_opset
        original = helper.make_model(
            graph,
            producer_name="test",
            opset_imports=[helper.make_opsetid("", start_opset)],
        )
        self._test_model_conversion(end_opset, original)

View File

@ -0,0 +1,106 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import unittest
import automatic_conversion_test_base
import numpy as np
import parameterized
import onnx
from onnx import helper
#####################################################################################
# Every test calls _test_op_conversion to downgrade a model from the most recent opset version
# to a early version and runs checker + shape inference on the downgraded model.
####################################################################################
class TestAutomaticDowngrade(automatic_conversion_test_base.TestAutomaticConversion):
    """Downgrade tests: convert models from the latest opset DOWN to an
    earlier one, then run checker + shape inference."""

    def _test_op_downgrade(self, op: str, *args, **kwargs):
        # Same driver as the base class, but with the conversion reversed.
        self._test_op_conversion(op, *args, **kwargs, is_upgrade=False)

    @parameterized.parameterized.expand(
        [
            "ReduceL1",
            "ReduceL2",
            "ReduceLogSum",
            "ReduceLogSumExp",
            "ReduceMean",
            "ReduceMax",
            "ReduceMin",
            "ReduceProd",
            "ReduceSum",
            "ReduceSumSquare",
        ]
    )
    def test_reduce_ops(self, op) -> None:
        """Reduce* ops with an explicit axes input downgrade to opset 13."""
        # TODO: need to add test cases for missing axes input which depends on this pr:
        # https://github.com/onnx/onnx/pull/5613
        axes = helper.make_tensor(
            "b", onnx.TensorProto.INT64, dims=[3], vals=np.array([0, 1, 2])
        )
        self._test_op_downgrade(
            op,
            from_opset=13,
            input_shapes=[[3, 4, 5], [3]],
            output_shapes=[[1, 1, 1]],
            input_types=[onnx.TensorProto.FLOAT, onnx.TensorProto.INT64],
            initializer=[axes],
        )

    def test_dft20_no_axis(self) -> None:
        """DFT-20 without an axis input downgrades to opset 19."""
        self._test_model_conversion(
            to_opset=19,
            model="""
                <ir_version: 9, opset_import: [ "" : 20]>
                dft_no_axis (float[N, M, 1] x) => (float[N, M, 2] y)
                {
                    y = DFT (x)
                }
            """,
        )

    def test_dft20_initializer_axis(self) -> None:
        """DFT-20 whose axis is a graph initializer downgrades to 19."""
        self._test_model_conversion(
            to_opset=19,
            model="""
                <ir_version: 9, opset_import: [ "" : 20]>
                dft_no_axis (float[N, M, 1] x, int64 dft_length) => (float[N, K, 2] y)
                <int64 axis = {1}>
                {
                    y = DFT (x, dft_length, axis)
                }
            """,
        )

    def test_dft20_constant_axis(self) -> None:
        """DFT-20 whose axis comes from a Constant node downgrades to 19."""
        self._test_model_conversion(
            to_opset=19,
            model="""
                <ir_version: 9, opset_import: [ "" : 20]>
                dft_no_axis (float[N, M, 1] x, int64 dft_length) => (float[N, K, 2] y)
                {
                    axis = Constant <value = int64{1}>()
                    y = DFT (x, dft_length, axis)
                }
            """,
        )

    def test_dft20_unknown_axis(self) -> None:
        """A runtime (graph-input) axis cannot be downgraded: must raise."""
        self._test_model_conversion_fails(
            to_opset=19,
            model="""
                <ir_version: 9, opset_import: [ "" : 20]>
                dft_no_axis (float[N, M, 1] x, int64 dft_length, int64 axis) => (float[P, K, 2] y)
                {
                    y = DFT (x, dft_length, axis)
                }
            """,
        )


if __name__ == "__main__":
    unittest.main()

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,20 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from packaging.version import parse as version
def numpy_older_than(ver: str) -> bool:
    """Return True when the installed numpy release predates *ver*."""
    import numpy  # pylint: disable=import-outside-toplevel

    threshold = version(ver)
    installed = version(numpy.__version__)
    return installed < threshold
def pillow_older_than(ver: str) -> bool:
    """Return True when the installed pillow release predates *ver*."""
    import PIL  # pylint: disable=import-outside-toplevel

    threshold = version(ver)
    installed = version(PIL.__version__)
    return installed < threshold