I am done

Date: 2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

@@ -0,0 +1,8 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
__all__ = ["BackendTest"]
# for backward compatibility
from onnx.backend.test.runner import Runner as BackendTest
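
The shim above keeps the long-standing public entry point working. A typical consumer follows the conventional ONNX backend-test setup, sketched below (hedged: SomeBackend stands in for a concrete onnx.backend.base.Backend implementation):

import onnx.backend.test

# pytest plugin used for reporting; registered at module scope by convention.
pytest_plugins = "onnx.backend.test.report"

backend_test = onnx.backend.test.BackendTest(SomeBackend, __name__)
# Expose each generated case as a module-level test_* attribute so pytest
# discovers it.
globals().update(backend_test.enable_report().test_cases)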

@@ -0,0 +1,14 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import sys
from onnx.backend.test.case.base import Snippets
from onnx.backend.test.case.utils import import_recursive
def collect_snippets() -> dict[str, list[tuple[str, str]]]:
import_recursive(sys.modules[__name__])
return Snippets
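
collect_snippets walks every module under this package (via import_recursive) so that each Base subclass registers itself, then returns the accumulated Snippets mapping of case name to (snippet_name, source) pairs. A minimal consumption sketch (assuming the package is importable as onnx.backend.test.case, as the imports above suggest):

from onnx.backend.test.case import collect_snippets

for op_name, cases in sorted(collect_snippets().items()):
    for snippet_name, source in cases:
        print(f"{op_name}/{snippet_name}: {len(source.splitlines())} lines")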

@@ -0,0 +1,47 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import inspect
from collections import defaultdict
from textwrap import dedent
from typing import Any, ClassVar
import numpy as np
def process_snippet(op_name: str, name: str, export: Any) -> tuple[str, str]:
snippet_name = name[len("export_") :] or op_name.lower()
source_code = dedent(inspect.getsource(export))
# remove the function signature line
lines = source_code.splitlines()
assert lines[0] == "@staticmethod"
assert lines[1].startswith("def export")
return snippet_name, dedent("\n".join(lines[2:]))
Snippets: dict[str, list[tuple[str, str]]] = defaultdict(list)
class _Exporter(type):
exports: ClassVar[dict[str, list[tuple[str, str]]]] = defaultdict(list)
def __init__(
cls, name: str, bases: tuple[type[Any], ...], dct: dict[str, Any]
) -> None:
for k, v in dct.items():
if k.startswith("export"):
if not isinstance(v, staticmethod):
                    raise ValueError("Only staticmethods may be named export.*")
export = getattr(cls, k)
Snippets[name].append(process_snippet(name, k, export))
# export functions should call expect and so populate
# TestCases
np.random.seed(seed=0)
export()
super().__init__(name, bases, dct)
class Base(metaclass=_Exporter):
pass
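
Any subclass of Base is processed eagerly: at class-creation time the _Exporter metaclass records the de-indented body of every export* staticmethod in Snippets and then calls it (seeding numpy's RNG first so generated data is reproducible). A minimal sketch of the pattern, using a hypothetical case name:

class MyCase(Base):
    @staticmethod
    def export() -> None:
        # Runs immediately when the class is defined; a real case would
        # build a model here and call expect(...) to register test data.
        pass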

@@ -0,0 +1,78 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import sys
from typing import Sequence
import numpy as np
from onnx import ModelProto
from onnx.backend.test.case.test_case import TestCase
from onnx.backend.test.case.utils import import_recursive
_SimpleModelTestCases: list[TestCase] = []
def expect(
model: ModelProto,
inputs: Sequence[np.ndarray],
outputs: Sequence[np.ndarray],
name: str | None = None,
) -> None:
name = name or model.graph.name
_SimpleModelTestCases.append(
TestCase(
name=name,
model_name=model.graph.name,
url=None,
model_dir=None,
model=model,
data_sets=[(inputs, outputs)],
kind="simple",
rtol=1e-3,
atol=1e-7,
)
)
# BASE_URL = "https://download.onnxruntime.ai/onnx/models"
BASE_URL = "onnx/backend/test/data/light/light_%s.onnx"
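# For example, BASE_URL % "squeezenet" yields
# "onnx/backend/test/data/light/light_squeezenet.onnx".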
def collect_testcases() -> list[TestCase]:
"""Collect model test cases defined in python/numpy code."""
real_model_testcases = []
model_tests = [
("test_bvlc_alexnet", "bvlc_alexnet", 1e-3, 1e-7),
("test_densenet121", "densenet121", 2e-3, 1e-7),
("test_inception_v1", "inception_v1", 1e-3, 1e-7),
("test_inception_v2", "inception_v2", 1e-3, 1e-7),
("test_resnet50", "resnet50", 1e-3, 1e-7),
("test_shufflenet", "shufflenet", 1e-3, 1e-7),
("test_squeezenet", "squeezenet", 1e-3, 1e-7),
("test_vgg19", "vgg19", 1e-3, 1e-7),
("test_zfnet512", "zfnet512", 1e-3, 1e-7),
]
for test_name, model_name, rtol, atol in model_tests:
url = BASE_URL % model_name
real_model_testcases.append(
TestCase(
name=test_name,
model_name=model_name,
url=url,
model_dir=None,
model=None,
data_sets=None,
kind="real",
rtol=rtol,
atol=atol,
)
)
import_recursive(sys.modules[__name__])
return real_model_testcases + _SimpleModelTestCases

@@ -0,0 +1,89 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from typing import Sequence
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class ExpandDynamicShape(Base):
@staticmethod
def export() -> None:
def make_graph(
node: onnx.helper.NodeProto,
input_shape: Sequence[int],
shape_shape: Sequence[int],
output_shape: Sequence[int],
) -> onnx.helper.GraphProto:
graph = onnx.helper.make_graph(
nodes=[node],
name="Expand",
inputs=[
onnx.helper.make_tensor_value_info(
"X", onnx.TensorProto.FLOAT, input_shape
),
onnx.helper.make_tensor_value_info(
"shape", onnx.TensorProto.INT64, shape_shape
),
],
outputs=[
onnx.helper.make_tensor_value_info(
"Y", onnx.TensorProto.FLOAT, output_shape
)
],
)
return graph
node = onnx.helper.make_node("Expand", ["X", "shape"], ["Y"], name="test")
input_shape = [1, 3, 1]
x = np.ones(input_shape, dtype=np.float32)
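        # The expected outputs below are computed with numpy broadcasting,
        # which matches ONNX Expand semantics: x * ones(shape) broadcasts x
        # and shape to their common (expanded) shape.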
# 1st testcase
shape = np.array([3, 1], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model1")
# 2nd testcase
shape = np.array([1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model2")
# 3rd testcase
shape = np.array([3, 1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model3")
# 4th testcase
shape = np.array([3, 3, 1, 3], dtype=np.int64)
y = x * np.ones(shape, dtype=np.float32)
graph = make_graph(node, input_shape, shape.shape, y.shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x, shape], outputs=[y], name="test_expand_shape_model4")

@@ -0,0 +1,110 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
from onnx.defs import AI_ONNX_PREVIEW_TRAINING_DOMAIN, ONNX_DOMAIN
class Gradient(Base):
@staticmethod
def export_gradient_scalar_add() -> None:
add_node = onnx.helper.make_node("Add", ["a", "b"], ["c"], name="my_add")
gradient_node = onnx.helper.make_node(
"Gradient",
["a", "b"],
["dc_da", "dc_db"],
name="my_gradient",
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
xs=["a", "b"],
y="c",
)
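        # Gradient differentiates the tensor named by attribute y ("c") with
        # respect to each tensor listed in xs (["a", "b"]), producing dc_da
        # and dc_db alongside the forward output c.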
a = np.array(1.0).astype(np.float32)
b = np.array(2.0).astype(np.float32)
c = a + b
# dc / da = d(a+b) / da = 1
dc_da = np.array(1).astype(np.float32)
        # dc / db = d(a+b) / db = 1
dc_db = np.array(1).astype(np.float32)
graph = onnx.helper.make_graph(
nodes=[add_node, gradient_node],
name="GradientOfAdd",
inputs=[
onnx.helper.make_tensor_value_info("a", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("b", onnx.TensorProto.FLOAT, []),
],
outputs=[
onnx.helper.make_tensor_value_info("c", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("dc_da", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("dc_db", onnx.TensorProto.FLOAT, []),
],
)
opsets = [
onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),
onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1),
]
model = onnx.helper.make_model_gen_version(
graph, producer_name="backend-test", opset_imports=opsets
)
expect(
model, inputs=[a, b], outputs=[c, dc_da, dc_db], name="test_gradient_of_add"
)
@staticmethod
def export_gradient_scalar_add_and_mul() -> None:
add_node = onnx.helper.make_node("Add", ["a", "b"], ["c"], name="my_add")
mul_node = onnx.helper.make_node("Mul", ["c", "a"], ["d"], name="my_mul")
gradient_node = onnx.helper.make_node(
"Gradient",
["a", "b"],
["dd_da", "dd_db"],
name="my_gradient",
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
xs=["a", "b"],
y="d",
)
a = np.array(1.0).astype(np.float32)
b = np.array(2.0).astype(np.float32)
c = a + b
# d = a * c = a * (a + b)
d = a * c
# dd / da = d(a*a+a*b) / da = 2 * a + b
dd_da = (2 * a + b).astype(np.float32)
# dd / db = d(a*a+a*b) / db = a
dd_db = a
graph = onnx.helper.make_graph(
nodes=[add_node, mul_node, gradient_node],
name="GradientOfTwoOperators",
inputs=[
onnx.helper.make_tensor_value_info("a", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("b", onnx.TensorProto.FLOAT, []),
],
outputs=[
onnx.helper.make_tensor_value_info("d", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("dd_da", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("dd_db", onnx.TensorProto.FLOAT, []),
],
)
opsets = [
onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),
onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1),
]
model = onnx.helper.make_model_gen_version(
graph, producer_name="backend-test", opset_imports=opsets
)
expect(
model,
inputs=[a, b],
outputs=[d, dd_da, dd_db],
name="test_gradient_of_add_and_mul",
)

@@ -0,0 +1,457 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import typing
import numpy as np
import onnx
from onnx import TensorProto
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
def SequenceEmptyImpl() -> list[np.ndarray | None]:
return []
def SequenceConstructImpl(*tensors: np.ndarray) -> list[np.ndarray]:
return list(tensors)
def SequenceInsertImpl(
sequence: list[np.ndarray], tensor: np.ndarray, position: int | None = None
) -> list[np.ndarray]:
if position is None:
position = len(sequence)
sequence.insert(position, tensor)
return sequence
def SequenceAtImpl(sequence: list[np.ndarray], position: int) -> np.ndarray:
return sequence[position]
def SequenceEraseImpl(
sequence: list[np.ndarray], position: int | None = None
) -> list[np.ndarray | None]:
if position is None:
position = -1
del sequence[position]
return sequence
def SequenceLengthImpl(sequence: list[np.ndarray]) -> np.int64:
return np.int64(len(sequence))
def SplitToSequenceImpl(
tensor: np.ndarray,
split: int | list[int] | None = None,
axis: int = 0,
keepdims: int = 1,
) -> list[np.ndarray]:
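    # Note: only split=None is exercised by the model tests in this file; in
    # that case the tensor is split into size-1 chunks along `axis`.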
dim_size = tensor.shape[axis]
if split is None:
split = 1
split_indices = [
i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size
]
if not keepdims:
results = np.array_split(tensor, split_indices, axis)
return [np.squeeze(res, axis) for res in results]
if np.isscalar(split):
split_indices = [
i * split + 1 for i in range(dim_size) if i * split + 1 < dim_size
] # type: ignore
else:
split_indices = np.cumsum(split) + 1
return np.array_split(tensor, split_indices, axis) # type: ignore
def ConcatFromSequenceImpl(
sequence: list[np.ndarray], axis: int, new_axis: int | None = 0
) -> np.ndarray:
if not new_axis:
return np.concatenate(sequence, axis)
return np.stack(sequence, axis)
class Sequence(Base):
@staticmethod
def export() -> None:
def make_graph(
nodes: list[onnx.helper.NodeProto],
input_shapes: list[typing.Sequence[str | int] | None],
output_shapes: list[typing.Sequence[str | int] | None],
input_names: list[str],
output_names: list[str],
input_types: list[TensorProto.DataType],
output_types: list[TensorProto.DataType],
initializers: list[TensorProto] | None = None,
) -> onnx.helper.GraphProto:
graph = onnx.helper.make_graph(
nodes=nodes,
name="Sequence",
inputs=[
onnx.helper.make_tensor_value_info(name, input_type, input_shape)
for name, input_type, input_shape in zip(
input_names, input_types, input_shapes
)
],
outputs=[
onnx.helper.make_tensor_value_info(name, output_type, output_shape)
for name, output_type, output_shape in zip(
output_names, output_types, output_shapes
)
],
initializer=initializers,
)
return graph
# 1st testcase - insert and at.
# 1. SequenceEmpty: -> []
# 2. SequenceInsert(x): -> [x]
# 3. SequenceInsert(y): -> [x, y]
# 4. SequenceInsert(z, 1): -> [x, z, y]
# 5. SequenceAt(2): -> y
seq_empty_node = onnx.helper.make_node("SequenceEmpty", [], ["Seq_empty"])
seq_insert_node = onnx.helper.make_node(
"SequenceInsert", ["Seq_empty", "X"], ["Seq_1"]
)
seq_insert_node2 = onnx.helper.make_node(
"SequenceInsert", ["Seq_1", "Y"], ["Seq_2"]
)
seq_insert_node3 = onnx.helper.make_node(
"SequenceInsert", ["Seq_2", "Z", "pos"], ["Seq_3"]
)
seq_at_node = onnx.helper.make_node("SequenceAt", ["Seq_3", "pos_at"], ["out"])
x_shape = [2, 3, 4]
y_shape = [1, 3, 4]
z_shape = [3, 3, 4]
out_shape = [None, 3, 4]
x = np.ones(x_shape, dtype=np.float32)
y = np.zeros(y_shape, dtype=np.float32)
z = np.ones(z_shape, dtype=np.float32) * 2
pos_val = 1
pos_at_val = 2
out = SequenceEmptyImpl()
out = SequenceInsertImpl(out, x)
out = SequenceInsertImpl(out, y)
out = SequenceInsertImpl(out, z, pos_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, y)
pos = onnx.helper.make_tensor("pos", TensorProto.INT64, (), (pos_val,))
pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
graph = make_graph(
[
seq_empty_node,
seq_insert_node,
seq_insert_node2,
seq_insert_node3,
seq_at_node,
],
[x_shape, y_shape, z_shape, [], []], # type: ignore
[out_shape], # type: ignore
["X", "Y", "Z", "pos", "pos_at"],
["out"],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2, # type: ignore
[onnx.TensorProto.FLOAT],
[pos, pos_at],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model1")
# 2nd testcase - erase and at.
# 1. SequenceConstruct(x, y, z): -> [x, y, z]
# 2. SequenceErase(1): -> [x, z]
# 3. SequenceAt(1): -> z
seq_construct_node = onnx.helper.make_node(
"SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
)
seq_erase_node = onnx.helper.make_node(
"SequenceErase", ["seq_1", "pos_erase"], ["seq_2"]
)
seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_2", "pos_at"], ["out"])
tensor_shape = [2, 3, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
pos_erase_val = 1
pos_at_val = 1
out = SequenceConstructImpl(x, y, z)
out = SequenceEraseImpl(out, pos_erase_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, z)
pos_erase = onnx.helper.make_tensor(
"pos_erase", TensorProto.INT64, (), (pos_erase_val,)
)
pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
graph = make_graph(
[seq_construct_node, seq_erase_node, seq_at_node],
[tensor_shape, tensor_shape, tensor_shape, [], []], # type: ignore
[tensor_shape], # type: ignore
["X", "Y", "Z", "pos_erase", "pos_at"],
["out"],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 2, # type: ignore
[onnx.TensorProto.FLOAT],
[pos_erase, pos_at],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model2")
# 3rd testcase - erase, insert and at, with negative index value.
# 1. SequenceConstruct(x, y, z): -> [x, y, z]
# 2. SequenceErase(-3): -> [y, z]
# 3. SequenceInsert(x, -1): -> [y, x, z]
# 4. SequenceAt(-1): -> z
seq_construct_node = onnx.helper.make_node(
"SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
)
seq_erase_node = onnx.helper.make_node(
"SequenceErase", ["seq_1", "pos_erase"], ["seq_2"]
)
seq_insert_node = onnx.helper.make_node(
"SequenceInsert", ["seq_2", "X", "pos_insert"], ["seq_3"]
)
seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_3", "pos_at"], ["out"])
tensor_shape = [2, 3, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
pos_erase_val = -3
pos_insert_val = -1
pos_at_val = -1
out = SequenceConstructImpl(x, y, z)
out = SequenceEraseImpl(out, pos_erase_val)
out = SequenceInsertImpl(out, x, pos_insert_val)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, z)
pos_erase = onnx.helper.make_tensor(
"pos_erase", TensorProto.INT64, (), (pos_erase_val,)
)
pos_insert = onnx.helper.make_tensor(
"pos_insert", TensorProto.INT64, (), (pos_insert_val,)
)
pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
graph = make_graph(
[seq_construct_node, seq_erase_node, seq_insert_node, seq_at_node],
[tensor_shape, tensor_shape, tensor_shape, [], [], []], # type: ignore
[tensor_shape], # type: ignore
["X", "Y", "Z", "pos_erase", "pos_insert", "pos_at"],
["out"],
[onnx.TensorProto.FLOAT] * 3 + [onnx.TensorProto.INT64] * 3, # type: ignore
[onnx.TensorProto.FLOAT],
[pos_erase, pos_insert, pos_at],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x, y, z], outputs=[out], name="test_sequence_model3")
# 4th testcase - concat
seq_construct_node = onnx.helper.make_node(
"SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
)
seq_concat_node = onnx.helper.make_node(
"ConcatFromSequence", ["seq_1"], ["out"], axis=1
)
tensor_shape = [2, 3, 4]
concat_out_shape = [2, None, 4]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
out = SequenceConstructImpl(x, y, z)
concat_out = ConcatFromSequenceImpl(out, 1)
graph = make_graph(
[seq_construct_node, seq_concat_node],
[tensor_shape] * 3, # type: ignore
[concat_out_shape], # type: ignore
["X", "Y", "Z"],
["out"],
[onnx.TensorProto.FLOAT] * 3, # type: ignore
[onnx.TensorProto.FLOAT],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(
model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model4"
)
# 5th testcase - concat with new_axis = 1
seq_construct_node = onnx.helper.make_node(
"SequenceConstruct", ["X", "Y", "Z"], ["seq_1"]
)
seq_concat_node = onnx.helper.make_node(
"ConcatFromSequence", ["seq_1"], ["out"], axis=-1, new_axis=1
)
tensor_shape = [2, 3, 4]
concat_out_shape = [2, 3, 4, 3]
x = np.ones(tensor_shape, dtype=np.float32)
y = np.zeros(tensor_shape, dtype=np.float32)
z = np.ones(tensor_shape, dtype=np.float32) * 2
out = SequenceConstructImpl(x, y, z)
concat_out = ConcatFromSequenceImpl(out, -1, 1)
graph = make_graph(
[seq_construct_node, seq_concat_node],
[tensor_shape] * 3, # type: ignore
[concat_out_shape], # type: ignore
["X", "Y", "Z"],
["out"],
[onnx.TensorProto.FLOAT] * 3, # type: ignore
[onnx.TensorProto.FLOAT],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(
model, inputs=[x, y, z], outputs=[concat_out], name="test_sequence_model5"
)
# 6th testcase - split and len
seq_split_node = onnx.helper.make_node(
"SplitToSequence", ["X"], ["seq_1"], axis=-1
)
seq_len_node = onnx.helper.make_node("SequenceLength", ["seq_1"], ["len"])
tensor_shape = [2, 3, 4]
len_shape = [] # type: ignore
x = np.ones(tensor_shape, dtype=np.float32)
out = SplitToSequenceImpl(x, axis=-1)
out = SequenceLengthImpl(out)
assert np.array_equal(out, np.int64(4))
graph = onnx.helper.make_graph(
nodes=[seq_split_node, seq_len_node],
name="Sequence",
inputs=[
onnx.helper.make_tensor_value_info(
"X", onnx.TensorProto.FLOAT, tensor_shape
)
],
outputs=[
onnx.helper.make_tensor_value_info(
"len", onnx.TensorProto.INT64, len_shape
)
],
) # type: ignore
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x], outputs=[out], name="test_sequence_model6")
# 7th testcase - split with keepdims=0, and SequenceAt
seq_split_node = onnx.helper.make_node(
"SplitToSequence", ["X"], ["seq_1"], axis=0, keepdims=0
)
seq_at_node = onnx.helper.make_node("SequenceAt", ["seq_1", "pos_at"], ["out"])
tensor_shape = [2, 3, 4]
out_shape = [3, 4]
x = np.random.rand(*tensor_shape)
pos_at_val = 1
out = SplitToSequenceImpl(x, axis=0, keepdims=0)
out = SequenceAtImpl(out, pos_at_val)
assert np.array_equal(out, x[pos_at_val])
pos_at = onnx.helper.make_tensor("pos_at", TensorProto.INT64, (), (pos_at_val,))
graph = make_graph(
[seq_split_node, seq_at_node],
[tensor_shape, []], # type: ignore
[out_shape], # type: ignore
["X", "pos_at"],
["out"],
[onnx.TensorProto.DOUBLE, onnx.TensorProto.INT64],
[onnx.TensorProto.DOUBLE],
[pos_at],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(model, inputs=[x], outputs=[out], name="test_sequence_model7")
# 8th testcase - split zero length
seq_split_node = onnx.helper.make_node(
"SplitToSequence", ["X", "Splits"], ["seq_1"]
)
seq_len_node = onnx.helper.make_node("SequenceLength", ["seq_1"], ["len"])
tensor_shape = ["n"] # type: ignore
splits_shape = [3] # type: ignore
x = np.array([]).astype(np.float32)
splits = np.array([0, 0, 0]).astype(np.int64)
out_len = np.int64(3)
graph = onnx.helper.make_graph(
nodes=[seq_split_node, seq_len_node],
name="Sequence",
inputs=[
onnx.helper.make_tensor_value_info(
"X", onnx.TensorProto.FLOAT, tensor_shape
), # type: ignore
onnx.helper.make_tensor_value_info(
"Splits", onnx.TensorProto.INT64, splits_shape
),
], # type: ignore
outputs=[
onnx.helper.make_tensor_value_info(
"len", onnx.TensorProto.INT64, len_shape
)
],
) # type: ignore
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 12)],
)
expect(
model, inputs=[x, splits], outputs=[out_len], name="test_sequence_model8"
)
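
The numpy reference implementations at the top of this file can also be exercised on their own; a small illustrative sketch (not part of the generated test cases):

import numpy as np

seq = SequenceConstructImpl(np.ones(2), np.zeros(2))  # [ones, zeros]
seq = SequenceInsertImpl(seq, np.full(2, 2.0), 1)     # [ones, twos, zeros]
assert SequenceLengthImpl(seq) == np.int64(3)
assert np.array_equal(SequenceAtImpl(seq, -1), np.zeros(2))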

@@ -0,0 +1,42 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class ShrinkTest(Base):
@staticmethod
def export() -> None:
node = onnx.helper.make_node(
"Shrink",
["x"],
["y"],
lambd=1.5,
bias=1.5,
)
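        # Shrink computes y = x + bias where x < -lambd, y = x - bias where
        # x > lambd, and y = 0 otherwise; with lambd = bias = 1.5 this maps
        # [-2, -1, 0, 1, 2] to [-0.5, 0, 0, 0, 0.5], as encoded in x and y
        # below.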
graph = onnx.helper.make_graph(
nodes=[node],
name="Shrink",
inputs=[
onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [5])
],
outputs=[
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [5])
],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32)
y = np.array([-0.5, 0.0, 0.0, 0.0, 0.5], dtype=np.float32)
expect(model, inputs=[x], outputs=[y], name="test_shrink")

@@ -0,0 +1,36 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class SingleSign(Base):
@staticmethod
def export() -> None:
node = onnx.helper.make_node("Sign", ["x"], ["y"], name="test")
x = np.array([-1.0, 4.5, -4.5, 3.1, 0.0, 2.4, -5.5]).astype(np.float32)
y = np.array([-1.0, 1.0, -1.0, 1.0, 0.0, 1.0, -1.0]).astype(np.float32)
graph = onnx.helper.make_graph(
nodes=[node],
name="SingleSign",
inputs=[
onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [7])
],
outputs=[
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [7])
],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
expect(model, inputs=[x], outputs=[y], name="test_sign_model")

@@ -0,0 +1,36 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class SingleRelu(Base):
@staticmethod
def export() -> None:
node = onnx.helper.make_node("Relu", ["x"], ["y"], name="test")
graph = onnx.helper.make_graph(
nodes=[node],
name="SingleRelu",
inputs=[
onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [1, 2])
],
outputs=[
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1, 2])
],
)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 9)],
)
x = np.random.randn(1, 2).astype(np.float32)
y = np.maximum(x, 0)
expect(model, inputs=[x], outputs=[y], name="test_single_relu_model")

@@ -0,0 +1,203 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from typing import Sequence
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.model import expect
class NormalizeStrings(Base):
@staticmethod
def export() -> None:
def make_graph(
node: onnx.helper.NodeProto,
input_shape: Sequence[int],
output_shape: Sequence[int],
) -> onnx.helper.GraphProto:
graph = onnx.helper.make_graph(
nodes=[node],
name="StringNormalizer",
inputs=[
onnx.helper.make_tensor_value_info(
"x", onnx.TensorProto.STRING, input_shape
)
],
outputs=[
onnx.helper.make_tensor_value_info(
"y", onnx.TensorProto.STRING, output_shape
)
],
)
return graph
# 1st model_monday_casesensintive_nochangecase
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
is_case_sensitive=1,
stopwords=stopwords,
)
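        # With is_case_sensitive=1 and no case_change_action, only exact
        # (case-sensitive) matches of the stopwords are dropped and the
        # casing of the remaining strings is untouched.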
x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
y = np.array(["tuesday", "wednesday", "thursday"]).astype(object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_casesensintive_nochangecase",
)
# 2nd model_nostopwords_nochangecase
node = onnx.helper.make_node(
"StringNormalizer", inputs=["x"], outputs=["y"], is_case_sensitive=1
)
x = np.array(["monday", "tuesday"]).astype(object)
y = x
graph = make_graph(node, [2], [2])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_nostopwords_nochangecase",
)
# 3rd model_monday_casesensintive_lower
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
case_change_action="LOWER",
is_case_sensitive=1,
stopwords=stopwords,
)
x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
y = np.array(["tuesday", "wednesday", "thursday"]).astype(object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_casesensintive_lower",
)
# 4 model_monday_casesensintive_upper
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
case_change_action="UPPER",
is_case_sensitive=1,
stopwords=stopwords,
)
x = np.array(["monday", "tuesday", "wednesday", "thursday"]).astype(object)
y = np.array(["TUESDAY", "WEDNESDAY", "THURSDAY"]).astype(object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_casesensintive_upper",
)
# 5 monday_insensintive_upper_twodim
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
case_change_action="UPPER",
stopwords=stopwords,
)
input_shape = [1, 6]
output_shape = [1, 4]
x = (
np.array(
["Monday", "tuesday", "wednesday", "Monday", "tuesday", "wednesday"]
)
.astype(object)
.reshape(input_shape)
)
y = (
np.array(["TUESDAY", "WEDNESDAY", "TUESDAY", "WEDNESDAY"])
.astype(object)
.reshape(output_shape)
)
graph = make_graph(node, input_shape, output_shape)
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_insensintive_upper_twodim",
)
# 6 monday_empty_output
stopwords = ["monday"]
node = onnx.helper.make_node(
"StringNormalizer",
inputs=["x"],
outputs=["y"],
case_change_action="UPPER",
is_case_sensitive=0,
stopwords=stopwords,
)
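        # When every input element is filtered out, StringNormalizer emits a
        # one-element tensor containing the empty string rather than an
        # empty tensor.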
x = np.array(["monday", "monday"]).astype(object)
y = np.array([""]).astype(object)
graph = make_graph(node, [2], [1])
model = onnx.helper.make_model_gen_version(
graph,
producer_name="backend-test",
opset_imports=[onnx.helper.make_opsetid("", 10)],
)
expect(
model,
inputs=[x],
outputs=[y],
name="test_strnorm_model_monday_empty_output",
)

@@ -0,0 +1,428 @@
# Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import subprocess
import sys
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Sequence
import numpy as np
import onnx
from onnx.backend.test.case.test_case import TestCase
from onnx.backend.test.case.utils import import_recursive
from onnx.onnx_pb import (
AttributeProto,
FunctionProto,
GraphProto,
ModelProto,
NodeProto,
TensorProto,
TypeProto,
)
_NodeTestCases: list[TestCase] = []
_TargetOpType: str | None = None
_DiffOpTypes: list[str] | None = None
_existing_names: dict[str, onnx.NodeProto] = {}
def _rename_edges_helper(
internal_node: NodeProto,
rename_helper: Callable[[str], str],
attribute_map: dict[str, AttributeProto],
prefix: str,
) -> NodeProto:
new_node = NodeProto()
new_node.CopyFrom(internal_node)
new_node.ClearField("input")
new_node.ClearField("output")
new_node.ClearField("attribute")
for internal_name in internal_node.input:
new_node.input.append(rename_helper(internal_name))
for internal_name in internal_node.output:
new_node.output.append(rename_helper(internal_name))
for attr in internal_node.attribute:
if attr.HasField("ref_attr_name"):
if attr.ref_attr_name in attribute_map:
new_attr = AttributeProto()
new_attr.CopyFrom(attribute_map[attr.ref_attr_name]) # type: ignore
new_attr.name = attr.name
new_node.attribute.extend([new_attr])
else:
new_attr = AttributeProto()
new_attr.CopyFrom(attr)
if attr.type == AttributeProto.GRAPH:
new_graph = new_attr.g
sg_rename = {}
for in_desc in new_graph.input:
sg_rename[in_desc.name] = in_desc.name = prefix + in_desc.name
for out_desc in new_graph.output:
sg_rename[out_desc.name] = out_desc.name = prefix + out_desc.name
for init_desc in new_graph.initializer:
sg_rename[init_desc.name] = init_desc.name = prefix + init_desc.name
for sparse_init_desc in new_graph.sparse_initializer:
sg_rename[sparse_init_desc.values.name] = (
sparse_init_desc.values.name
) = (prefix + sparse_init_desc.values.name)
for sparse_init_desc in new_graph.sparse_initializer:
sg_rename[sparse_init_desc.indices.name] = (
sparse_init_desc.indices.name
) = (prefix + sparse_init_desc.indices.name)
def subgraph_rename_helper(name: str) -> Any:
if name in sg_rename: # noqa: B023
return sg_rename[name] # noqa: B023
return rename_helper(name)
new_nodes = [
_rename_edges_helper(
node_desc, subgraph_rename_helper, attribute_map, prefix
)
for node_desc in new_graph.node
]
new_graph.ClearField("node")
new_graph.node.extend(new_nodes)
new_node.attribute.extend([new_attr])
return new_node
# FIXME(TMVector): Any reason we can't get rid of this and use the C++ helper directly?
def function_expand_helper(
node: NodeProto, function_proto: FunctionProto, op_prefix: str
) -> list[NodeProto]:
io_names_map = {}
attribute_map = {a.name: a for a in node.attribute}
for idx in range(len(function_proto.input)):
io_names_map[function_proto.input[idx]] = (
node.input[idx] if idx in range(len(node.input)) else ""
)
for idx in range(len(function_proto.output)):
# Even if the node has been created with optional outputs missing, we
# can't assume that the function body handles this correctly, such as in
# the case that output is also an intermediate value.
# So we only add a name mapping if the output is present. An internal
# name will be generated if the missing output is used, the same as any
# other internal tensor.
if idx in range(len(node.output)) and node.output[idx] != "":
io_names_map[function_proto.output[idx]] = node.output[idx]
def rename_helper(internal_name: str) -> Any:
if internal_name in io_names_map:
return io_names_map[internal_name]
elif internal_name == "":
return ""
return op_prefix + internal_name
new_node_list = [
_rename_edges_helper(internal_node, rename_helper, attribute_map, op_prefix)
for internal_node in function_proto.node
]
return new_node_list
def function_testcase_helper(
node: NodeProto, input_types: list[TypeProto], name: str
) -> tuple[list[tuple[list[NodeProto], Any]], int]:
test_op = node.op_type
op_prefix = test_op + "_" + name + "_expanded_function_"
schema = onnx.defs.get_schema(test_op, domain=node.domain)
    # An op schema may have several function bodies, one per opset version.
    # These versions include the op's since_version, plus any other opset
    # versions for which the op needs its own function definition.
function_protos = []
for opset_version in schema.function_opset_versions: # type: ignore
function_proto_str = schema.get_function_with_opset_version(opset_version) # type: ignore
function_proto = FunctionProto()
function_proto.ParseFromString(function_proto_str)
function_protos.append(function_proto)
for opset_version in schema.context_dependent_function_opset_versions: # type: ignore
function_proto_str = schema.get_context_dependent_function_with_opset_version( # type: ignore
opset_version,
node.SerializeToString(),
[t.SerializeToString() for t in input_types],
)
function_proto = FunctionProto()
function_proto.ParseFromString(function_proto_str)
function_protos.append(function_proto)
expanded_tests = []
for function_proto in function_protos:
for attr in schema.attributes:
if attr in [a.name for a in node.attribute]:
continue
if schema.attributes[attr].default_value:
node.attribute.extend([schema.attributes[attr].default_value])
# function_proto.attributes
node_list = function_expand_helper(node, function_proto, op_prefix)
expanded_tests.append((node_list, function_proto.opset_import))
return expanded_tests, schema.since_version
def _extract_value_info(
input: list[Any] | np.ndarray | None,
name: str,
type_proto: TypeProto | None = None,
) -> onnx.ValueInfoProto:
if type_proto is None:
if input is None:
raise NotImplementedError(
"_extract_value_info: both input and type_proto arguments cannot be None."
)
elif isinstance(input, list):
elem_type = onnx.helper.np_dtype_to_tensor_dtype(input[0].dtype)
shape = None
tensor_type_proto = onnx.helper.make_tensor_type_proto(elem_type, shape)
type_proto = onnx.helper.make_sequence_type_proto(tensor_type_proto)
elif isinstance(input, TensorProto):
elem_type = input.data_type
shape = tuple(input.dims)
type_proto = onnx.helper.make_tensor_type_proto(elem_type, shape)
else:
elem_type = onnx.helper.np_dtype_to_tensor_dtype(input.dtype)
shape = input.shape
type_proto = onnx.helper.make_tensor_type_proto(elem_type, shape)
return onnx.helper.make_value_info(name, type_proto)
def _make_test_model_gen_version(graph: GraphProto, **kwargs: Any) -> ModelProto:
    (
        latest_onnx_version,
        latest_ml_version,
        latest_training_version,
    ) = onnx.helper.VERSION_TABLE[-1][2:5]  # type: ignore
if "opset_imports" in kwargs:
for opset in kwargs["opset_imports"]:
# If the test model uses an unreleased opset version (latest_version+1),
# directly use make_model to create a model with the latest ir version
if (
(
(opset.domain in {"", "ai.onnx"})
and opset.version == latest_onnx_version + 1
)
or (
opset.domain == "ai.onnx.ml"
and opset.version == latest_ml_version + 1
)
or (
(
opset.domain
in {"ai.onnx.training version", "ai.onnx.preview.training"}
)
and opset.version == latest_training_version + 1
)
):
return onnx.helper.make_model(graph, **kwargs)
# Otherwise, find and use the corresponding ir version according to given opset version
return onnx.helper.make_model_gen_version(graph, **kwargs)
# For ops with optional inputs and outputs, node_op.input and node_op.output indicate
# which inputs/outputs are present and which are omitted. However, the `inputs` and
# `outputs` parameters of this function hold values only for the inputs/outputs that
# are present. E.g., for an op with 3 inputs whose optional second input is omitted,
# node_op.input would look like ["Param1", "", "Param3"], while inputs would look like
# [input-1-value, input-3-value].
# Instead of creating models with the latest opset version, expect generates models
# for the op's since_version by default, so every model keeps the same opset version
# across opset changes. In addition, users can specify "use_max_opset_version" to
# generate models for the latest opset version supported at or below the targeted one.
def expect(
node_op: onnx.NodeProto,
inputs: Sequence[np.ndarray | TensorProto],
outputs: Sequence[np.ndarray | TensorProto],
name: str,
**kwargs: Any,
) -> None:
    # skip if the node_op's op_type is not the same as the given one
if _TargetOpType and node_op.op_type != _TargetOpType:
return
if _DiffOpTypes is not None and node_op.op_type.lower() not in _DiffOpTypes:
return
if name in _existing_names:
raise ValueError(
f"Name {name!r} is already using by one test case for node type {node_op.op_type!r}."
)
_existing_names[name] = node_op
# in case node_op is modified
node = deepcopy(node_op)
present_inputs = [x for x in node.input if (x != "")]
present_outputs = [x for x in node.output if (x != "")]
input_type_protos = [None] * len(inputs)
if "input_type_protos" in kwargs:
input_type_protos = kwargs["input_type_protos"]
del kwargs["input_type_protos"]
output_type_protos = [None] * len(outputs)
if "output_type_protos" in kwargs:
output_type_protos = kwargs["output_type_protos"]
del kwargs["output_type_protos"]
inputs_vi = [
_extract_value_info(arr, arr_name, input_type)
for arr, arr_name, input_type in zip(inputs, present_inputs, input_type_protos)
]
outputs_vi = [
_extract_value_info(arr, arr_name, output_type)
for arr, arr_name, output_type in zip(
outputs, present_outputs, output_type_protos
)
]
graph = onnx.helper.make_graph(
nodes=[node], name=name, inputs=inputs_vi, outputs=outputs_vi
)
kwargs["producer_name"] = "backend-test"
if "opset_imports" not in kwargs:
# To make sure the model will be produced with the same opset_version after opset changes
# By default, it uses since_version as opset_version for produced models
produce_opset_version = onnx.defs.get_schema(
node.op_type, domain=node.domain
).since_version
kwargs["opset_imports"] = [
onnx.helper.make_operatorsetid(node.domain, produce_opset_version)
]
model = _make_test_model_gen_version(graph, **kwargs)
_NodeTestCases.append(
TestCase(
name=name,
model_name=name,
url=None,
model_dir=None,
model=model,
data_sets=[(inputs, outputs)],
kind="node",
rtol=1e-3,
atol=1e-7,
)
)
# Create list of types for node.input, filling a default TypeProto for missing inputs:
# E.g. merge(["x", "", "y"], [x-value-info, y-value-info]) will return [x-type, default-type, y-type]
def merge(
node_inputs: list[str], present_value_info: list[onnx.ValueInfoProto]
) -> list[TypeProto]:
if node_inputs:
if node_inputs[0] != "":
return [
present_value_info[0].type,
*merge(node_inputs[1:], present_value_info[1:]),
]
else:
return [TypeProto(), *merge(node_inputs[1:], present_value_info)]
return []
merged_types = merge(list(node.input), inputs_vi)
(
expanded_tests,
since_version,
) = function_testcase_helper(node, merged_types, name)
for expanded_function_nodes, func_opset_import in expanded_tests:
kwargs["producer_name"] = "backend-test"
# TODO: if kwargs["opset_imports"] already exists, only generate test case for the opset version.
# replace opset versions with what are specified in function proto
if "opset_imports" not in kwargs:
kwargs["opset_imports"] = func_opset_import
else:
for opset_import in func_opset_import:
matches = [
opset
for opset in kwargs["opset_imports"]
if opset.domain == opset_import.domain
]
if matches:
matches[0].version = opset_import.version
else:
kwargs["opset_imports"].append(opset_import)
onnx_ai_opset_version = ""
if "opset_imports" in kwargs:
onnx_ai_opset_imports = [
oi for oi in kwargs["opset_imports"] if oi.domain in ("", "ai.onnx")
]
if len(onnx_ai_opset_imports) == 1:
onnx_ai_opset_version = onnx_ai_opset_imports[0].version
function_test_name = name + "_expanded"
if onnx_ai_opset_version and onnx_ai_opset_version != since_version:
function_test_name += f"_ver{onnx_ai_opset_version}"
graph = onnx.helper.make_graph(
nodes=expanded_function_nodes,
name=function_test_name,
inputs=inputs_vi,
outputs=outputs_vi,
)
model = _make_test_model_gen_version(graph, **kwargs)
_NodeTestCases.append(
TestCase(
name=function_test_name,
model_name=function_test_name,
url=None,
model_dir=None,
model=model,
data_sets=[(inputs, outputs)],
kind="node",
rtol=1e-3,
atol=1e-7,
)
)
def collect_testcases(op_type: str) -> list[TestCase]:
"""Collect node test cases"""
# only keep those tests related to this operator
global _TargetOpType # noqa: PLW0603
_TargetOpType = op_type
import_recursive(sys.modules[__name__])
return _NodeTestCases
def collect_diff_testcases() -> list[TestCase]:
"""Collect node test cases which are different from the main branch"""
global _DiffOpTypes # noqa: PLW0603
_DiffOpTypes = get_diff_op_types()
import_recursive(sys.modules[__name__])
return _NodeTestCases
def get_diff_op_types() -> list[str]:
cwd_path = Path.cwd()
# git fetch first for git diff on GitHub Action
subprocess.run(
["git", "fetch", "origin", "main:main"],
cwd=cwd_path,
capture_output=True,
check=True,
)
# obtain list of added or modified files in this PR
obtain_diff = subprocess.Popen(
["git", "diff", "--name-only", "--diff-filter=AM", "origin/main", "HEAD"],
cwd=cwd_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdoutput, _ = obtain_diff.communicate()
diff_list = stdoutput.split()
changed_op_types = []
for file in diff_list:
file_name = file.decode("utf-8")
if file_name.startswith("onnx/backend/test/case/node/") and file_name.endswith(
".py"
):
changed_op_types.append(file_name.split("/")[-1].replace(".py", ""))
return changed_op_types
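
A hedged sketch of driving the collection above (assuming this module is importable as onnx.backend.test.case.node; passing an empty string disables the op_type filter, since _TargetOpType is only consulted when truthy):

from onnx.backend.test.case.node import collect_testcases

for test_case in collect_testcases(""):
    print(test_case.name, test_case.kind)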

Some files were not shown because too many files have changed in this diff.