I am done

2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

View File

@@ -0,0 +1,10 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
# appended to the __init__.py in the onnxruntime module's 'tools' folder from /tools/python/util/__init__append.py
import importlib.util
have_torch = importlib.util.find_spec("torch")
if have_torch:
from .pytorch_export_helpers import infer_input_info # noqa: F401
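A hedged sketch of consuming the conditional re-export (assuming, per the comment above, this snippet is appended to onnxruntime/tools/__init__.py):

# infer_input_info is only re-exported when torch is installed
try:
    from onnxruntime.tools import infer_input_info
except ImportError:
    infer_input_info = None  # caller must handle the missing-torch case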

View File

@@ -0,0 +1,47 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import logging
import pathlib
# configure the logging format before the mobile helper imports so any logging they do uses it
logging.basicConfig(format="%(levelname)s: %(message)s")
from .mobile_helpers import usability_checker # noqa: E402
def check_usability():
parser = argparse.ArgumentParser(
description="""Analyze an ONNX model to determine how well it will work in mobile scenarios.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--log_level", choices=["debug", "info"], default="info", help="Logging level")
parser.add_argument("model_path", help="Path to ONNX model to check", type=pathlib.Path)
args = parser.parse_args()
logger = logging.getLogger("check_usability")
if args.log_level == "debug":
logger.setLevel(logging.DEBUG)
elif args.log_level == "info":
logger.setLevel(logging.INFO)
elif args.log_level == "warning":
logger.setLevel(logging.WARNING)
else:
logger.setLevel(logging.ERROR)
try_eps = usability_checker.analyze_model(args.model_path, skip_optimize=False, logger=logger)
if try_eps:
logger.info(
"As NNAPI or CoreML may provide benefits with this model it is recommended to compare the "
"performance of the model using the NNAPI EP on Android, and the CoreML EP on iOS, "
"against the performance using the CPU EP."
)
else:
logger.info("For optimal performance the model should be used with the CPU EP. ")
if __name__ == "__main__":
check_usability()
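A minimal programmatic sketch of the same check (assuming the helpers ship as onnxruntime.tools.mobile_helpers, per the relative import above; the model path is hypothetical):

import logging
import pathlib

from onnxruntime.tools.mobile_helpers import usability_checker

logging.basicConfig(format="%(levelname)s: %(message)s")
logger = logging.getLogger("check_usability")
logger.setLevel(logging.INFO)

# analyze_model returns True if NNAPI/CoreML may be worth trying for this model
try_eps = usability_checker.analyze_model(pathlib.Path("model.onnx"), skip_optimize=False, logger=logger)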

View File

@@ -0,0 +1,377 @@
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import annotations
import argparse
import contextlib
import enum
import os
import pathlib
import tempfile
import onnxruntime as ort
from .file_utils import files_from_file_or_dir, path_match_suffix_ignore_case
from .onnx_model_utils import get_optimization_level
from .ort_format_model import create_config_from_models
class OptimizationStyle(enum.Enum):
Fixed = 0
Runtime = 1
def _optimization_suffix(optimization_level_str: str, optimization_style: OptimizationStyle, suffix: str):
return "{}{}{}".format(
f".{optimization_level_str}" if optimization_level_str != "all" else "",
".with_runtime_opt" if optimization_style == OptimizationStyle.Runtime else "",
suffix,
)
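# Illustrative results of the above (derived from the format string):
#   _optimization_suffix("all", OptimizationStyle.Fixed, ".ort")     -> ".ort"
#   _optimization_suffix("basic", OptimizationStyle.Runtime, ".ort") -> ".basic.with_runtime_opt.ort"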
def _create_config_file_path(
model_path_or_dir: pathlib.Path,
output_dir: pathlib.Path | None,
optimization_level_str: str,
optimization_style: OptimizationStyle,
enable_type_reduction: bool,
):
config_name = "{}{}".format(
"required_operators_and_types" if enable_type_reduction else "required_operators",
_optimization_suffix(optimization_level_str, optimization_style, ".config"),
)
if model_path_or_dir.is_dir():
return (output_dir or model_path_or_dir) / config_name
model_config_path = model_path_or_dir.with_suffix(f".{config_name}")
if output_dir is not None:
return output_dir / model_config_path.name
return model_config_path
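# Illustrative results of the above (enable_type_reduction=True, level "all", Fixed style, no output_dir):
#   directory input /models       -> /models/required_operators_and_types.config
#   file input /models/mnist.onnx -> /models/mnist.required_operators_and_types.config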
def _create_session_options(
optimization_level: ort.GraphOptimizationLevel,
output_model_path: pathlib.Path,
custom_op_library: pathlib.Path,
session_options_config_entries: dict[str, str],
):
so = ort.SessionOptions()
so.optimized_model_filepath = str(output_model_path)
so.graph_optimization_level = optimization_level
if custom_op_library:
so.register_custom_ops_library(str(custom_op_library))
for key, value in session_options_config_entries.items():
so.add_session_config_entry(key, value)
return so
def _convert(
model_path_or_dir: pathlib.Path,
output_dir: pathlib.Path | None,
optimization_level_str: str,
optimization_style: OptimizationStyle,
custom_op_library: pathlib.Path,
create_optimized_onnx_model: bool,
allow_conversion_failures: bool,
target_platform: str,
session_options_config_entries: dict[str, str],
) -> list[pathlib.Path]:
model_dir = model_path_or_dir if model_path_or_dir.is_dir() else model_path_or_dir.parent
output_dir = output_dir or model_dir
optimization_level = get_optimization_level(optimization_level_str)
def is_model_file_to_convert(file_path: pathlib.Path):
if not path_match_suffix_ignore_case(file_path, ".onnx"):
return False
# ignore any files with an extension of .optimized.onnx which are presumably from previous executions
# of this script
if path_match_suffix_ignore_case(file_path, ".optimized.onnx"):
print(f"Ignoring '{file_path}'")
return False
return True
models = files_from_file_or_dir(model_path_or_dir, is_model_file_to_convert)
if len(models) == 0:
raise ValueError(f"No model files were found in '{model_path_or_dir}'")
providers = ["CPUExecutionProvider"]
# if the optimization level is 'all' we manually exclude the NCHWc transformer. It's not applicable to ARM
# devices, and creates a device-specific model which won't run on all hardware.
# If it is genuinely needed, an optimized ONNX model can be created manually first, or this code can be
# commented out.
optimizer_filter = None
if optimization_level == ort.GraphOptimizationLevel.ORT_ENABLE_ALL and target_platform != "amd64":
optimizer_filter = ["NchwcTransformer"]
converted_models = []
for model in models:
try:
relative_model_path = model.relative_to(model_dir)
(output_dir / relative_model_path).parent.mkdir(parents=True, exist_ok=True)
ort_target_path = (output_dir / relative_model_path).with_suffix(
_optimization_suffix(optimization_level_str, optimization_style, ".ort")
)
if create_optimized_onnx_model:
# Create an ONNX file with the same optimization level that will be used for the ORT format file.
# This allows the ONNX equivalent of the ORT format model to be easily viewed in Netron.
# If runtime optimizations are saved in the ORT format model, there may be some difference in the
# graphs at runtime between the ORT format model and this saved ONNX model.
optimized_target_path = (output_dir / relative_model_path).with_suffix(
_optimization_suffix(optimization_level_str, optimization_style, ".optimized.onnx")
)
so = _create_session_options(
optimization_level, optimized_target_path, custom_op_library, session_options_config_entries
)
if optimization_style == OptimizationStyle.Runtime:
# Limit the optimizations to those that can run in a model with runtime optimizations.
so.add_session_config_entry("optimization.minimal_build_optimizations", "apply")
print(f"Saving optimized ONNX model {model} to {optimized_target_path}")
_ = ort.InferenceSession(
str(model), sess_options=so, providers=providers, disabled_optimizers=optimizer_filter
)
# Load ONNX model, optimize, and save to ORT format
so = _create_session_options(
optimization_level, ort_target_path, custom_op_library, session_options_config_entries
)
so.add_session_config_entry("session.save_model_format", "ORT")
if optimization_style == OptimizationStyle.Runtime:
so.add_session_config_entry("optimization.minimal_build_optimizations", "save")
print(f"Converting optimized ONNX model {model} to ORT format model {ort_target_path}")
_ = ort.InferenceSession(
str(model), sess_options=so, providers=providers, disabled_optimizers=optimizer_filter
)
converted_models.append(ort_target_path)
# orig_size = os.path.getsize(onnx_target_path)
# new_size = os.path.getsize(ort_target_path)
# print("Serialized {} to {}. Sizes: orig={} new={} diff={} new:old={:.4f}:1.0".format(
# onnx_target_path, ort_target_path, orig_size, new_size, new_size - orig_size, new_size / orig_size))
except Exception as e:
print(f"Error converting {model}: {e}")
if not allow_conversion_failures:
raise
print(f"Converted {len(converted_models)}/{len(models)} models successfully.")
return converted_models
def parse_args():
parser = argparse.ArgumentParser(
os.path.basename(__file__),
description="""Convert the ONNX format model/s in the provided directory to ORT format models.
All files with a `.onnx` extension will be processed. For each one, an ORT format model will be created in the
output directory, if specified, otherwise in the same directory as the ONNX model.
A configuration file will also be created containing the list of required operators for all
converted models. This configuration file should be used as input to the minimal build via the
`--include_ops_by_config` parameter.
""",
)
parser.add_argument(
"--output_dir",
type=pathlib.Path,
help="Provide an output directory for the converted model/s and configuration file. "
"If unspecified, the converted ORT format model/s will be in the same directory as the ONNX model/s.",
)
parser.add_argument(
"--optimization_style",
nargs="+",
default=[OptimizationStyle.Fixed.name, OptimizationStyle.Runtime.name],
choices=[e.name for e in OptimizationStyle],
help="Style of optimization to perform on the ORT format model. "
"Multiple values may be provided. The conversion will run once for each value. "
"The general guidance is to use models optimized with "
f"'{OptimizationStyle.Runtime.name}' style when using NNAPI or CoreML and "
f"'{OptimizationStyle.Fixed.name}' style otherwise. "
f"'{OptimizationStyle.Fixed.name}': Run optimizations directly before saving the ORT "
"format model. This bakes in any platform-specific optimizations. "
f"'{OptimizationStyle.Runtime.name}': Run basic optimizations directly and save certain "
"other optimizations to be applied at runtime if possible. This is useful when using a "
"compiling EP like NNAPI or CoreML that may run an unknown (at model conversion time) "
"number of nodes. The saved optimizations can further optimize nodes not assigned to the "
"compiling EP at runtime.",
)
parser.add_argument(
"--enable_type_reduction",
action="store_true",
help="Add operator specific type information to the configuration file to potentially reduce "
"the types supported by individual operator implementations.",
)
parser.add_argument(
"--custom_op_library",
type=pathlib.Path,
default=None,
help="Provide path to shared library containing custom operator kernels to register.",
)
parser.add_argument(
"--save_optimized_onnx_model",
action="store_true",
help="Save the optimized version of each ONNX model. "
"This will have the same level of optimizations applied as the ORT format model.",
)
parser.add_argument(
"--allow_conversion_failures",
action="store_true",
help="Whether to proceed after encountering model conversion failures.",
)
parser.add_argument(
"--target_platform",
type=str,
default=None,
choices=["arm", "amd64"],
help="Specify the target platform where the exported model will be used. "
"This parameter can be used to choose between platform-specific options, "
"such as QDQIsInt8Allowed(arm), NCHWc (amd64) and NHWC (arm/amd64) format, different "
"optimizer level options, etc.",
)
parser.add_argument(
"model_path_or_dir",
type=pathlib.Path,
help="Provide path to ONNX model or directory containing ONNX model/s to convert. "
"All files with a .onnx extension, including those in subdirectories, will be "
"processed.",
)
parsed_args = parser.parse_args()
parsed_args.optimization_style = [OptimizationStyle[style_str] for style_str in parsed_args.optimization_style]
return parsed_args
def convert_onnx_models_to_ort(
model_path_or_dir: pathlib.Path,
output_dir: pathlib.Path | None = None,
optimization_styles: list[OptimizationStyle] | None = None,
custom_op_library_path: pathlib.Path | None = None,
target_platform: str | None = None,
save_optimized_onnx_model: bool = False,
allow_conversion_failures: bool = False,
enable_type_reduction: bool = False,
):
if output_dir is not None:
if not output_dir.is_dir():
output_dir.mkdir(parents=True)
output_dir = output_dir.resolve(strict=True)
optimization_styles = optimization_styles or []
# setting optimization level is not expected to be needed by typical users, but it can be set with this
# environment variable
optimization_level_str = os.getenv("ORT_CONVERT_ONNX_MODELS_TO_ORT_OPTIMIZATION_LEVEL", "all")
model_path_or_dir = model_path_or_dir.resolve()
custom_op_library = custom_op_library_path.resolve() if custom_op_library_path else None
if not model_path_or_dir.is_dir() and not model_path_or_dir.is_file():
raise FileNotFoundError(f"Model path '{model_path_or_dir}' is not a file or directory.")
if custom_op_library and not custom_op_library.is_file():
raise FileNotFoundError(f"Unable to find custom operator library '{custom_op_library}'")
session_options_config_entries = {}
if target_platform == "arm":
session_options_config_entries["session.qdqisint8allowed"] = "1"
else:
session_options_config_entries["session.qdqisint8allowed"] = "0"
for optimization_style in optimization_styles:
print(
f"Converting models with optimization style '{optimization_style.name}' and level '{optimization_level_str}'"
)
converted_models = _convert(
model_path_or_dir=model_path_or_dir,
output_dir=output_dir,
optimization_level_str=optimization_level_str,
optimization_style=optimization_style,
custom_op_library=custom_op_library,
create_optimized_onnx_model=save_optimized_onnx_model,
allow_conversion_failures=allow_conversion_failures,
target_platform=target_platform,
session_options_config_entries=session_options_config_entries,
)
with contextlib.ExitStack() as context_stack:
if optimization_style == OptimizationStyle.Runtime:
# Convert models again without runtime optimizations.
# Runtime optimizations may not end up being applied, so we need to use both converted models with and
# without runtime optimizations to get a complete set of ops that may be needed for the config file.
model_dir = model_path_or_dir if model_path_or_dir.is_dir() else model_path_or_dir.parent
temp_output_dir = context_stack.enter_context(
tempfile.TemporaryDirectory(dir=model_dir, suffix=".without_runtime_opt")
)
session_options_config_entries_for_second_conversion = session_options_config_entries.copy()
# Limit the optimizations to those that can run in a model with runtime optimizations.
session_options_config_entries_for_second_conversion["optimization.minimal_build_optimizations"] = (
"apply"
)
print(
"Converting models again without runtime optimizations to generate a complete config file. "
"These converted models are temporary and will be deleted."
)
converted_models += _convert(
model_path_or_dir=model_path_or_dir,
output_dir=temp_output_dir,
optimization_level_str=optimization_level_str,
optimization_style=OptimizationStyle.Fixed,
custom_op_library=custom_op_library,
create_optimized_onnx_model=False, # not useful as they would be created in a temp directory
allow_conversion_failures=allow_conversion_failures,
target_platform=target_platform,
session_options_config_entries=session_options_config_entries_for_second_conversion,
)
print(
f"Generating config file from ORT format models with optimization style '{optimization_style.name}' and level '{optimization_level_str}'"
)
config_file = _create_config_file_path(
model_path_or_dir,
output_dir,
optimization_level_str,
optimization_style,
enable_type_reduction,
)
create_config_from_models(converted_models, config_file, enable_type_reduction)
if __name__ == "__main__":
args = parse_args()
convert_onnx_models_to_ort(
args.model_path_or_dir,
output_dir=args.output_dir,
optimization_styles=args.optimization_style,
custom_op_library_path=args.custom_op_library,
target_platform=args.target_platform,
save_optimized_onnx_model=args.save_optimized_onnx_model,
allow_conversion_failures=args.allow_conversion_failures,
enable_type_reduction=args.enable_type_reduction,
)
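A minimal programmatic sketch of the same conversion (paths are hypothetical; assumes this file ships as the onnxruntime.tools.convert_onnx_models_to_ort module):

import pathlib

from onnxruntime.tools.convert_onnx_models_to_ort import OptimizationStyle, convert_onnx_models_to_ort

convert_onnx_models_to_ort(
    pathlib.Path("models"),  # directory containing .onnx models
    output_dir=pathlib.Path("models/ort"),
    optimization_styles=[OptimizationStyle.Fixed],
    enable_type_reduction=True,
)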

View File

@@ -0,0 +1,46 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pathlib
import typing
def path_match_suffix_ignore_case(path: typing.Union[pathlib.Path, str], suffix: str) -> bool:
"""
Returns whether `path` ends in `suffix`, ignoring case.
"""
if not isinstance(path, str):
path = str(path)
return path.casefold().endswith(suffix.casefold())
def files_from_file_or_dir(
file_or_dir_path: typing.Union[pathlib.Path, str], predicate: typing.Callable[[pathlib.Path], bool] = lambda _: True
) -> typing.List[pathlib.Path]:
"""
Gets the files in `file_or_dir_path` satisfying `predicate`.
If `file_or_dir_path` is a file, the single file is considered. Otherwise, all files in the directory are
considered.
:param file_or_dir_path: Path to a file or directory.
:param predicate: Predicate to determine if a file is included.
:return: A list of files.
"""
if not isinstance(file_or_dir_path, pathlib.Path):
file_or_dir_path = pathlib.Path(file_or_dir_path)
selected_files = []
def process_file(file_path: pathlib.Path):
if predicate(file_path):
selected_files.append(file_path)
if file_or_dir_path.is_dir():
for root, _, files in os.walk(file_or_dir_path):
for file in files:
file_path = pathlib.Path(root, file)
process_file(file_path)
else:
process_file(file_or_dir_path)
return selected_files
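A hedged sketch combining the two helpers above to collect ONNX models recursively (the directory name is hypothetical):

import pathlib

onnx_files = files_from_file_or_dir(
    pathlib.Path("models"),
    predicate=lambda p: path_match_suffix_ignore_case(p, ".onnx"),
)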

View File

@@ -0,0 +1,11 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
def get_logger(name, level=logging.DEBUG):
logging.basicConfig(format="%(asctime)s %(name)s [%(levelname)s] - %(message)s")
logger = logging.getLogger(name)
logger.setLevel(level)
return logger
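Usage sketch (the level defaults to DEBUG per the signature above):

logger = get_logger("convert")                       # DEBUG level
quiet_logger = get_logger("check", logging.WARNING)  # only warnings and above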

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import pathlib
import sys
import onnx
from .onnx_model_utils import fix_output_shapes, make_dim_param_fixed, make_input_shape_fixed
def make_dynamic_shape_fixed_helper():
parser = argparse.ArgumentParser(
f"{os.path.basename(__file__)}:{make_dynamic_shape_fixed_helper.__name__}",
description="""
Assign a fixed value to a dim_param or input shape.
Provide either dim_param and dim_value or input_name and input_shape.""",
)
parser.add_argument(
"--dim_param", type=str, required=False, help="Symbolic parameter name. Provide dim_value if specified."
)
parser.add_argument(
"--dim_value", type=int, required=False, help="Value to replace dim_param with in the model. Must be > 0."
)
parser.add_argument(
"--input_name",
type=str,
required=False,
help="Model input name to replace shape of. Provide input_shape if specified.",
)
parser.add_argument(
"--input_shape",
type=lambda x: [int(i) for i in x.split(",")],
required=False,
help="Shape to use for input_shape. Provide comma separated list for the shape. "
"All values must be > 0. e.g. --input_shape 1,3,256,256",
)
parser.add_argument("input_model", type=pathlib.Path, help="Provide path to ONNX model to update.")
parser.add_argument("output_model", type=pathlib.Path, help="Provide path to write updated ONNX model to.")
args = parser.parse_args()
if (
(args.dim_param and args.input_name)
or (not args.dim_param and not args.input_name)
or (args.dim_param and (not args.dim_value or args.dim_value < 1))
or (args.input_name and (not args.input_shape or any([value < 1 for value in args.input_shape])))
):
print("Invalid usage.")
parser.print_help()
sys.exit(-1)
model = onnx.load(str(args.input_model.resolve(strict=True)))
if args.dim_param:
make_dim_param_fixed(model.graph, args.dim_param, args.dim_value)
else:
make_input_shape_fixed(model.graph, args.input_name, args.input_shape)
# update the output shapes to make them fixed if possible.
fix_output_shapes(model)
onnx.save(model, str(args.output_model.resolve()))
if __name__ == "__main__":
make_dynamic_shape_fixed_helper()
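The same fix can be applied programmatically with the helpers imported above (the model path and dim_param name are hypothetical; assumes the helpers ship in onnxruntime.tools.onnx_model_utils):

import onnx

from onnxruntime.tools.onnx_model_utils import fix_output_shapes, make_dim_param_fixed

model = onnx.load("model.onnx")
make_dim_param_fixed(model.graph, "batch_size", 1)  # pin the symbolic batch dimension to 1
fix_output_shapes(model)                            # propagate the now-fixed shapes to the outputs
onnx.save(model, "model.fixed.onnx")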

View File

@@ -0,0 +1,301 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Helper script that will check if the types and operators used in an ONNX model
# are supported by the pre-built ORT Mobile package.
import argparse
import logging
import pathlib
import sys
import onnx
from ..onnx_model_utils import ModelProtoWithShapeInfo, get_opsets_imported
from ..reduced_build_config_parser import parse_config
cpp_to_tensorproto_type = {
"float": 1,
"uint8_t": 2,
"int8_t": 3,
"uint16_t": 4,
"int16_t": 5,
"int32_t": 6,
"int64_t": 7,
"std::string": 8,
"bool": 9,
"MLFloat16": 10,
"double": 11,
"uint32_t": 12,
"uint64_t": 13,
"Complex64": 14, # not supported by ORT
"Complex128": 15, # not supported by ORT
"BFloat16": 16,
}
tensorproto_type_to_cpp = {v: k for k, v in cpp_to_tensorproto_type.items()}
def check_graph(graph, opsets, required_ops, global_types, special_types, unsupported_ops, logger):
"""
Check the graph and any subgraphs for usage of types or operators which we know are not supported.
:param graph: Graph to process.
:param opsets: Map of domain to opset version that the model imports.
:param required_ops: Operators that are included in the pre-built package.
:param global_types: Types globally enabled in the pre-built package.
:param special_types: Types that are always enabled for a subset of operators and are _usually_ supported but are
not guaranteed to be. We would need to add a lot of infrastructure to know for sure so
currently we treat them as supported.
:param unsupported_ops: Set of unsupported operators that were found.
:param logger: Logger for diagnostic output.
:return: Returns whether the graph uses unsupported operators or types.
"""
has_unsupported_types = False
value_info_map = {vi.name: vi for vi in graph.value_info}
def _is_type_supported(value_info, description):
is_supported = True
type_name = value_info.type.WhichOneof("value")
if type_name == "tensor_type":
t = value_info.type.tensor_type.elem_type
if t not in global_types and t not in special_types:
cpp_type = tensorproto_type_to_cpp[t]
logger.debug(f"Element type {cpp_type} of {description} is not supported.")
is_supported = False
else:
# we don't support sequences, map, sparse tensors, or optional types in the pre-built package
logger.debug(f"Data type {type_name} of {description} is not supported.")
is_supported = False
return is_supported
def _input_output_is_supported(value_info, input_output):
return _is_type_supported(value_info, f"graph {input_output} {value_info.name}")
# node outputs are simpler to check.
# node inputs have a much wider mix of types, some of which come from initializers and most likely are always
# enabled as we generally do type reduction on the user data input to the operator and not the weights/etc. which
# come from initializers.
def _node_output_is_supported(name):
is_supported = True
if name in value_info_map:
vi = value_info_map[name]
is_supported = _is_type_supported(vi, f"node output {name}")
else:
# we don't have type info so ignore
pass
return is_supported
for i in graph.input:
if not _input_output_is_supported(i, "input"):
has_unsupported_types = True
for o in graph.output:
if not _input_output_is_supported(o, "output"):
has_unsupported_types = True
for node in graph.node:
# required_ops are map of [domain][opset] to set of op_type names. '' == ai.onnx
domain = node.domain or "ai.onnx"
# special case Constant as we will convert to an initializer during model load
if domain == "ai.onnx" and node.op_type == "Constant":
continue
# some models don't have complete imports. use 1 as a default as that's valid for custom domains and should
# result in an error for any others. not sure why ONNX or ORT validation allows this though.
opset = opsets.get(domain, 1)
if (
domain not in required_ops
or opset not in required_ops[domain]
or node.op_type not in required_ops[domain][opset]
):
unsupported_ops.add(f"{domain}:{opset}:{node.op_type}")
for output_name in node.output:
if not _node_output_is_supported(output_name):
has_unsupported_types = True
# recurse into subgraph for control flow nodes (Scan/Loop/If)
for attr in node.attribute:
if attr.HasField("g"):
check_graph(attr.g, opsets, required_ops, global_types, special_types, unsupported_ops, logger)
return has_unsupported_types or bool(unsupported_ops)
def _get_global_tensorproto_types(op_type_impl_filter, logger: logging.Logger):
"""
Map the globally supported types (C++) to onnx.TensorProto.DataType values used in the model
See https://github.com/onnx/onnx/blob/1faae95520649c93ae8d0b403816938a190f4fa7/onnx/onnx.proto#L485
Additionally return a set of types we special case as being able to generally be considered as supported.
:param op_type_impl_filter: type filter from reduced build configuration parser
:param logger: Logger
:return: tuple of globally enabled types and special cased types
"""
global_cpp_types = op_type_impl_filter.global_type_list()
global_onnx_tensorproto_types = set()
for t in global_cpp_types:
if t in cpp_to_tensorproto_type:
global_onnx_tensorproto_types.add(cpp_to_tensorproto_type[t])
else:
logger.error(f"Error: Unexpected data type of {t} in package build config's globally enabled types.")
sys.exit(-1)
# a subset of operators require int32 and int64 to always be enabled, as those types are used for dimensions in
# shapes and indices.
# additionally we have a number of operators (e.g. Not, Where) that always require the use of bool.
# this _may_ mean values involving these types can be processed, but without adding a lot more code we don't know
# for sure.
special_types = [
cpp_to_tensorproto_type["int32_t"],
cpp_to_tensorproto_type["int64_t"],
cpp_to_tensorproto_type["bool"],
]
return global_onnx_tensorproto_types, special_types
def get_default_config_path():
# get default path to config that was used to create the pre-built package.
script_dir = pathlib.Path(__file__).parent
local_config = script_dir / "mobile_package.required_operators.config"
# if we're running in the ORT python package the file should be local. otherwise assume we're running from the
# ORT repo
if local_config.exists():
default_config_path = local_config
else:
ort_root = script_dir.parents[3]
default_config_path = (
ort_root / "tools" / "ci_build" / "github" / "android" / "mobile_package.required_operators.config"
)
return default_config_path
def run_check_with_model(
model_with_type_info: onnx.ModelProto, mobile_pkg_build_config: pathlib.Path, logger: logging.Logger
):
"""
Check if an ONNX model can be used with the ORT Mobile pre-built package.
:param model_with_type_info: ONNX model that has had ONNX shape inferencing run on to add type/shape information.
:param mobile_pkg_build_config: Configuration file used to build the ORT Mobile package.
:param logger: Logger for output
:return: True if supported
"""
if not mobile_pkg_build_config:
mobile_pkg_build_config = get_default_config_path()
enable_type_reduction = True
config_path = str(mobile_pkg_build_config.resolve(strict=True))
required_ops, op_type_impl_filter = parse_config(config_path, enable_type_reduction)
global_onnx_tensorproto_types, special_types = _get_global_tensorproto_types(op_type_impl_filter, logger)
# get the opset imports
opsets = get_opsets_imported(model_with_type_info)
# If the ONNX opset of the model is not supported we can recommend using our tools to update that first.
supported_onnx_opsets = set(required_ops["ai.onnx"].keys())
# we have a contrib op that is erroneously in the ai.onnx domain with opset 1. manually remove that incorrect value
supported_onnx_opsets.remove(1)
onnx_opset_model_uses = opsets["ai.onnx"]
if onnx_opset_model_uses not in supported_onnx_opsets:
logger.info(f"Model uses ONNX opset {onnx_opset_model_uses}.")
logger.info(f"The pre-built package only supports ONNX opsets {sorted(supported_onnx_opsets)}.")
logger.info(
"Please try updating the ONNX model opset to a supported version using "
"python -m onnxruntime.tools.onnx_model_utils.update_onnx_opset ..."
)
return False
unsupported_ops = set()
logger.debug(
"Checking if the data types and operators used in the model are supported in the pre-built ORT package..."
)
unsupported = check_graph(
model_with_type_info.graph,
opsets,
required_ops,
global_onnx_tensorproto_types,
special_types,
unsupported_ops,
logger,
)
if unsupported_ops:
logger.info("Unsupported operators:")
for entry in sorted(unsupported_ops):
logger.info(" " + entry) # noqa: G003
if unsupported:
logger.info("\nModel is not supported by the pre-built package due to unsupported types and/or operators.")
logger.info(
"Please see https://onnxruntime.ai/docs/install/#install-on-web-and-mobile for information "
"on what is supported in the pre-built package."
)
logger.info(
"The 'full' ORT package for Android (onnxruntime-android) or iOS (onnxruntime-{objc|c}) could be used, "
"or a custom build of ONNX Runtime will be required if binary size is critical. Please see "
"https://onnxruntime.ai/docs/build/custom.html for details on performing that."
)
else:
logger.info("Model should work with the pre-built package.")
logger.info("---------------\n")
return not unsupported
def run_check(model_path: pathlib.Path, mobile_pkg_build_config: pathlib.Path, logger: logging.Logger):
"""
Check if an ONNX model will be able to be used with the ORT Mobile pre-built package.
:param model_path: Path to ONNX model.
:param mobile_pkg_build_config: Configuration file used to build the ORT Mobile package.
:param logger: Logger for output
:return: True if supported
"""
logger.info(
f"Checking if pre-built ORT Mobile package can be used with {model_path} once model is "
"converted from ONNX to ORT format using onnxruntime.tools.convert_onnx_models_to_ort..."
)
model_file = model_path.resolve(strict=True)
# we need to run shape inferencing to populate that type info for node outputs.
# we will get warnings if the model uses ORT contrib ops (ONNX does not have shape inferencing for those),
# and shape inferencing will be lost downstream of those.
# TODO: add support for checking ORT format model as it will have full type/shape info for all nodes
model_wrapper = ModelProtoWithShapeInfo(model_file)
return run_check_with_model(model_wrapper.model_with_shape_info, mobile_pkg_build_config, logger)
def main():
parser = argparse.ArgumentParser(
description="Check if model can be run using the ONNX Runtime Mobile Pre-Built Package",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--config_path",
help="Path to required operators and types configuration used to build the pre-built ORT mobile package.",
required=False,
type=pathlib.Path,
default=get_default_config_path(),
)
parser.add_argument("model_path", help="Path to ONNX model to check", type=pathlib.Path)
args = parser.parse_args()
logger = logging.getLogger("default")
logger.setLevel(logging.INFO)
run_check(args.model_path, args.config_path, logger)
if __name__ == "__main__":
main()
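A minimal programmatic sketch of the same check (the model path is hypothetical; assumes this file is the check_model_can_use_ort_mobile_pkg module under onnxruntime.tools.mobile_helpers):

import logging
import pathlib

from onnxruntime.tools.mobile_helpers import check_model_can_use_ort_mobile_pkg as checker

logger = logging.getLogger("default")
logger.setLevel(logging.INFO)
is_supported = checker.run_check(pathlib.Path("model.onnx"), checker.get_default_config_path(), logger)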

View File

@@ -0,0 +1,31 @@
<!--
Keep in sync with doco generated from /docs/execution-providers/CoreML-ExecutionProvider.md on the gh_pages branch
-->
|Operator|Note|
|--------|------|
|ai.onnx:Add||
|ai.onnx:AveragePool|Only 2D Pool is supported currently. 3D and 5D support can be added if needed.|
|ai.onnx:Clip||
|ai.onnx:Concat||
|ai.onnx:Conv|Only 1D/2D Conv is supported.<br/>Bias if provided must be constant.|
|ai.onnx:ConvTranspose|Weight and bias must be constant.<br/>padding_type of SAME_UPPER/SAME_LOWER is not supported.<br/>kernel_shape must have default values.<br/>output_shape is not supported.<br/>output_padding must have default values.|
|ai.onnx:DepthToSpace|If 'mode' is 'CRD' the input must have a fixed shape.|
|ai.onnx:Div||
|ai.onnx:Gemm|Input B must be constant.|
|ai.onnx:GlobalAveragePool|Only 2D Pool is supported currently. 3D and 5D support can be added if needed.|
|ai.onnx:GlobalMaxPool|Only 2D Pool is supported currently. 3D and 5D support can be added if needed.|
|ai.onnx:GridSample|4D input.<br/>'mode' of 'linear' or 'zeros'.<br/>(mode==linear && padding_mode==reflection && align_corners==0) is not supported.|
|ai.onnx:LeakyRelu||
|ai.onnx:MatMul|Only support for transA == 0, alpha == 1.0 and beta == 1.0 is currently implemented.|
|ai.onnx:MaxPool|Only 2D Pool is supported currently. 3D and 5D support can be added if needed.|
|ai.onnx:Mul||
|ai.onnx:Pow|Only supports cases when both inputs are fp32.|
|ai.onnx:Relu||
|ai.onnx:Reshape||
|ai.onnx:Resize|See [resize_op_builder.cc](https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/providers/coreml/builders/impl/resize_op_builder.cc) implementation. There are too many permutations to describe the valid combinations.|
|ai.onnx:Slice|starts/ends/axes/steps must be constant initializers.|
|ai.onnx:Split||
|ai.onnx:Sub||
|ai.onnx:Sigmoid||
|ai.onnx:Tanh||
|ai.onnx:Transpose||

View File

@@ -0,0 +1,43 @@
<!--
Keep in sync with doco generated from /docs/execution-providers/CoreML-ExecutionProvider.md on the gh_pages branch
-->
|Operator|Note|
|--------|------|
|ai.onnx:Add||
|ai.onnx:ArgMax||
|ai.onnx:AveragePool|Only 2D Pool is supported.|
|ai.onnx:BatchNormalization||
|ai.onnx:Cast||
|ai.onnx:Clip||
|ai.onnx:Concat||
|ai.onnx:Conv|Only 1D/2D Conv is supported.<br/>Weights and bias should be constant.|
|ai.onnx:DepthToSpace|Only DCR mode DepthToSpace is supported.|
|ai.onnx:Div||
|ai.onnx:Flatten||
|ai.onnx:Gather|Input `indices` with scalar value is not supported.|
|ai.onnx:Gemm|Input B should be constant.|
|ai.onnx:GlobalAveragePool|Only 2D Pool is supported.|
|ai.onnx:GlobalMaxPool|Only 2D Pool is supported.|
|ai.onnx:LeakyRelu||
|ai.onnx:LRN||
|ai.onnx:MatMul|Input B should be constant.|
|ai.onnx:MaxPool|Only 2D Pool is supported.|
|ai.onnx:Mul||
|ai.onnx:Pad|Only constant mode and last two dim padding is supported.<br/>Input pads and constant_value should be constant.<br/>If provided, axes should be constant.|
|ai.onnx:Pow|Only supports cases when both inputs are fp32.|
|ai.onnx:PRelu|Input slope should be constant.<br/>Input slope should either have shape [C, 1, 1] or have 1 element.|
|ai.onnx:Reciprocal||
|ai.onnx:ReduceSum||
|ai.onnx:Relu||
|ai.onnx:Reshape||
|ai.onnx:Resize|4D input.<br/>`coordinate_transformation_mode` == `asymmetric`.<br/>`mode` == `linear` or `nearest`.<br/>`nearest_mode` == `floor`.<br/>`exclude_outside` == false<br/>`scales` or `sizes` must be constant.|
|ai.onnx:Shape|Attribute `start` with non-default value is not supported.<br/>Attribute `end` is not supported.|
|ai.onnx:Sigmoid||
|ai.onnx:Slice|Inputs `starts`, `ends`, `axes`, and `steps` should be constant. Empty slice is not supported.|
|ai.onnx:Softmax||
|ai.onnx:Split|If provided, `splits` should be constant.<br/>The number of outputs must be at least 2.|
|ai.onnx:Squeeze||
|ai.onnx:Sqrt||
|ai.onnx:Sub||
|ai.onnx:Tanh||
|ai.onnx:Transpose||

View File

@@ -0,0 +1,46 @@
# Android package for ORT Mobile operator and type reduction configuration
#
# The list of operators was generated from:
# - the ONNX operators used by the tf2onnx tflite converter
# - the operators used in a set of tflite models from tfhub, the tflite examples, and the mlperf mobile models
# - models were optimized with optimizations set to 'basic', 'extended' and 'all'
# - see the readme file for full details
# allow float, int8, uint8. operators that manipulate shapes or indices have int32 and int64 enabled internally.
!globally_allowed_types;float,int8_t,uint8_t
# ops used by the tf2onnx tflite converter.
ai.onnx;12,13,14,15;Abs,Add,And,ArgMax,ArgMin,AveragePool,Cast,Ceil,Clip,Concat,ConstantOfShape,Conv,ConvTranspose,Cos,CumSum,DepthToSpace,DequantizeLinear,Div,DynamicQuantizeLinear,Elu,Equal,Exp,Expand,Flatten,Floor,Gather,GatherND,Gemm,Greater,GreaterOrEqual,Identity,If,LRN,LeakyRelu,Less,LessOrEqual,Log,LogSoftmax,Loop,MatMul,Max,MaxPool,Mean,Min,Mul,Neg,NonMaxSuppression,NonZero,Not,Or,PRelu,Pad,Pow,QuantizeLinear,Range,Reciprocal,ReduceMax,ReduceMean,ReduceMin,ReduceProd,ReduceSum,Relu,Reshape,Resize,ReverseSequence,Round,ScatterND,Shape,Sigmoid,Sin,Size,Slice,Softmax,SpaceToDepth,Split,Sqrt,Squeeze,Sub,Sum,Tanh,ThresholdedRelu,Tile,TopK,Transpose,Unique,Unsqueeze,Where
# other ops found in test models
ai.onnx;12,13,14,15;Erf,GlobalAveragePool,InstanceNormalization,HardSigmoid,MatMulInteger,QLinearConv,QLinearMatMul
# Control flow ops
# - If and Loop are covered by the tflite converter list
# - Scan tends to be used in speech models (it's more efficient than Loop) so include it for support of those
ai.onnx;12,13,14,15;Scan
# Changed ONNX ops by opset version for the above ops. This list is to provide context as to how much was added
# for each additional opset we support.
#
# opset 13
# Abs,Add,ArgMax,ArgMin,Cast,Ceil,Clip,Concat,DepthToSpace,DequantizeLinear,Div,Equal,Erf,Exp,Expand,Flatten,Floor,
# Gather,GatherND,Gemm,Greater,Identity,If,LRN,Less,Log,LogSoftmax,Loop,MatMul,Max,Mean,Min,Mul,Neg,NonZero,Pad,
# Pow,QuantizeLinear,Reciprocal,ReduceMax,ReduceMean,ReduceMin,ReduceProd,ReduceSum,Relu,Reshape,Resize,
# ScatterND,Shape,Sigmoid,Size,Slice,Softmax,SpaceToDepth,Split,Sqrt,Squeeze,Sub,Sum,Tanh,Tile,Transpose,Unsqueeze
# opset 14
# Add,CumSum,Div,Identity,Mul,Relu,Reshape,Sub
# opset 15
# Pow,Shape
# internal ops added by optimizers
# Note: LayerNormalization is an internal op even though it is (incorrectly) registered in the ONNX domain.
ai.onnx;1;LayerNormalization
com.microsoft;1;DynamicQuantizeMatMul,FusedConv,FusedGemm,FusedMatMul,Gelu,MatMulIntegerToFloat,NhwcMaxPool,QLinearAdd,QLinearAveragePool,QLinearConv,QLinearGlobalAveragePool,QLinearMul,QLinearSigmoid
# NHWC transformer also uses this, so assuming it's valuable enough to include
com.microsoft;1;QLinearLeakyRelu
# Quantized contrib ops that are registered but no usage was found. Excluding for now.
# com.microsoft;1;DynamicQuantizeLSTM,QAttention
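# For reference, each enabled-ops line above uses the reduced build configuration format:
#   <domain>;<comma-separated opset versions>;<comma-separated op types>
# e.g. "ai.onnx;12,13;Add,Mul" would enable Add and Mul for ai.onnx opsets 12 and 13.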

View File

@@ -0,0 +1,58 @@
<!--
Keep in sync with doco generated from /docs/execution-providers/NNAPI-ExecutionProvider.md on the gh_pages branch
-->
|Operator|Note|
|--------|------|
|ai.onnx:Abs||
|ai.onnx:Add||
|ai.onnx:AveragePool|Only 2D Pool is supported.|
|ai.onnx:BatchNormalization||
|ai.onnx:Cast||
|ai.onnx:Clip||
|ai.onnx:Concat||
|ai.onnx:Conv|Only 2D Conv is supported.<br/>Weights and bias should be constant.|
|ai.onnx:DepthToSpace|Only DCR mode DepthToSpace is supported.|
|ai.onnx:DequantizeLinear|All quantization scales and zero points should be constant.|
|ai.onnx:Div||
|ai.onnx:Elu||
|ai.onnx:Exp||
|ai.onnx:Flatten||
|ai.onnx:Floor||
|ai.onnx:Gather|Input indices should be constant if not int32 type.|
|ai.onnx:Gemm|If input B is not constant, transB should be 1.|
|ai.onnx:GlobalAveragePool|Only 2D Pool is supported.|
|ai.onnx:GlobalMaxPool|Only 2D Pool is supported.|
|ai.onnx:Identity||
|ai.onnx:LeakyRelu||
|ai.onnx:Log||
|ai.onnx:LRN||
|ai.onnx:MatMul||
|ai.onnx:MaxPool|Only 2D Pool is supported.|
|ai.onnx:Max||
|ai.onnx:Min||
|ai.onnx:Mul||
|ai.onnx:Neg||
|ai.onnx:Pad|Only constant mode Pad is supported.<br/>Input pads and constant_value should be constant.<br/>Input pads values should be non-negative.|
|ai.onnx:Pow||
|ai.onnx:PRelu||
|ai.onnx:QLinearConv|Only 2D Conv is supported.<br/>Weights and bias should be constant.<br/>All quantization scales and zero points should be constant.|
|ai.onnx:QLinearMatMul|All quantization scales and zero points should be constant.|
|ai.onnx:QuantizeLinear|All quantization scales and zero points should be constant.|
|ai.onnx:ReduceMean||
|ai.onnx:Relu||
|ai.onnx:Reshape||
|ai.onnx:Resize|Only 2D Resize is supported.|
|ai.onnx:Sigmoid||
|ai.onnx:Sin||
|ai.onnx:Slice||
|ai.onnx:Softmax||
|ai.onnx:Split|Number of splits must evenly divide split axis size. Input split should be constant if provided.|
|ai.onnx:Sqrt||
|ai.onnx:Squeeze|Input axes should be constant.|
|ai.onnx:Sub||
|ai.onnx:Tanh||
|ai.onnx:Transpose||
|ai.onnx:Unsqueeze|Input axes should be constant.|
|com.microsoft:QLinearAdd|All quantization scales and zero points should be constant.|
|com.microsoft:QLinearAveragePool|Only 2D Pool is supported.<br/>All quantization scales and zero points should be constant.|
|com.microsoft:QLinearSigmoid|All quantization scales and zero points should be constant.|

View File

@@ -0,0 +1,739 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import annotations
import argparse
import logging
import os
import pathlib
import tempfile
from collections import deque
from enum import IntEnum
import onnx
from ..onnx_model_utils import ModelProtoWithShapeInfo, get_producer_consumer_maps, is_fixed_size_tensor, optimize_model
class _SupportedOpsChecker:
"""
Class to process the md file with list of supported ops and caveats for an execution provider.
e.g. /tools/ci_build/github/android/nnapi_supported_ops.md
/tools/ci_build/github/apple/coreml_supported_mlprogram_ops.md
/tools/ci_build/github/apple/coreml_supported_neuralnetwork_ops.md
"""
def __init__(self, filename):
self._filename = filename
self._ops = {} # op to caveats
self._ops_seen = set()
with open(filename) as f:
for line in f:
# we're looking for a markdown table with 2 columns. first is op name. second is caveats
# op name is domain:op
if line.startswith("|"):
pieces = line.strip().split("|")
if len(pieces) == 4:  # ['' before first '|', op, caveat, '' after last '|']
domain_op = pieces[1]
caveat = pieces[2]
caveat = caveat.replace("<br/>", " ") # remove some HTML tags
# skip lines that don't have the ':' which separates the domain and op
# e.g. the table header will fail this check
if ":" in domain_op:
self._ops[domain_op] = caveat
def is_op_supported(self, node):
domain = node.domain if node.domain else "ai.onnx"
domain_op = domain + ":" + node.op_type
is_supported = domain_op in self._ops
if is_supported:
self._ops_seen.add(domain_op)
return is_supported
def get_caveats(self):
caveats = []
for op in sorted(self._ops_seen):
caveat = self._ops[op]
if caveat:
caveats.append(f"{op}:{caveat}")
return caveats
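# Hedged usage sketch (config file name taken from the class docstring above):
#   checker = _SupportedOpsChecker("nnapi_supported_ops.md")
#   for node in model.graph.node:
#       if not checker.is_op_supported(node):
#           print(f"unsupported: {node.domain or 'ai.onnx'}:{node.op_type}")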
class PartitioningInfo:
class TryWithEP(IntEnum):
NO = 0
MAYBE = 1
YES = 2
def __init__(
self,
num_nodes: int,
num_supported_nodes: int,
num_partitions: int,
supported_ops_checker: _SupportedOpsChecker,
supported_groups: list[onnx.NodeProto],
unsupported_ops: set[str],
nodes_unsupported_due_to_op: int,
nodes_unsupported_due_to_dynamic_input: int,
num_unsupported_nodes_due_to_rank: int,
ops_with_unsupported_rank: set[str],
):
self.num_nodes = num_nodes
self.num_supported_nodes = num_supported_nodes
self.num_partitions = num_partitions
self.supported_ops_checker = supported_ops_checker
self.supported_groups = supported_groups
self.unsupported_ops = unsupported_ops
self.nodes_unsupported_due_to_op = nodes_unsupported_due_to_op
self.nodes_unsupported_due_to_dynamic_input = nodes_unsupported_due_to_dynamic_input
self.num_unsupported_nodes_due_to_rank = num_unsupported_nodes_due_to_rank
self.ops_with_unsupported_rank = ops_with_unsupported_rank
self.num_subgraphs = 0
self.num_nodes_in_subgraphs = 0
def merge(self, other: PartitioningInfo):
"""
Merge the information from another PartitioningInfo instance into this one.
"""
self.num_nodes += other.num_nodes
self.num_supported_nodes += other.num_supported_nodes
self.num_partitions += other.num_partitions
self.supported_groups.extend(other.supported_groups)
self.unsupported_ops.update(other.unsupported_ops)
self.nodes_unsupported_due_to_op += other.nodes_unsupported_due_to_op
self.nodes_unsupported_due_to_dynamic_input += other.nodes_unsupported_due_to_dynamic_input
self.num_unsupported_nodes_due_to_rank += other.num_unsupported_nodes_due_to_rank
self.ops_with_unsupported_rank.update(other.ops_with_unsupported_rank)
# hard assumption that we merge into the main graph partitioning info
self.num_subgraphs += 1
self.num_nodes_in_subgraphs += other.num_nodes
def suitability(self):
# semi-arbitrary choices that err on the side of MAYBE.
# having 1 partition is always preferred, but if that is small it may not be useful.
# having 2 partitions may be okay if they cover most nodes
# with more than 2 partitions the device copy cost is almost guaranteed to outweigh the benefit of using the NPU
# NOTE: This assumes the EP is not CPU based and there is device copy overhead to consider
pct_supported = self.num_supported_nodes / self.num_nodes * 100
if self.num_partitions == 1:
if pct_supported > 75:
return PartitioningInfo.TryWithEP.YES
elif pct_supported > 50:
return PartitioningInfo.TryWithEP.MAYBE
else:
return PartitioningInfo.TryWithEP.NO
if self.num_partitions == 2:
if pct_supported > 75:
return PartitioningInfo.TryWithEP.MAYBE
else:
return PartitioningInfo.TryWithEP.NO
return PartitioningInfo.TryWithEP.NO
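# Worked examples of the thresholds above:
#   40/50 nodes (80%) supported, one partition        -> YES
#   40/50 nodes (80%) supported, two partitions       -> MAYBE
#   three or more partitions (regardless of coverage) -> NO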
def print_analysis(self, logger: logging.Logger, ep_name: str):
"""
Analyze the partitioning information and log the analysis
:param logger: Logger to use
:param ep_name: Execution provider name to use in the log messages
"""
logger.info(
f"{self.num_partitions} partitions with a total of {self.num_supported_nodes}/{self.num_nodes} "
f"nodes can be handled by the {ep_name} EP."
)
if self.supported_groups:
logger.info(
f'\tPartition sizes: [{", ".join([str(len(partition)) for partition in self.supported_groups])}]'
)
# dump full groups if debug output is enabled
for group in self.supported_groups:
logger.debug(f'Nodes in group: {",".join([f"{node.op_type}:{node.name}" for node in group])}')
logger.info(f"Unsupported nodes due to operator={self.nodes_unsupported_due_to_op}")
if self.unsupported_ops:
logger.info(f'\tUnsupported ops: {",".join(sorted(self.unsupported_ops))}')
caveats = self.supported_ops_checker.get_caveats()
if caveats:
indent = " " * 5
logger.info(
"\tCaveats that have not been checked and may result in a node not actually being supported: "
f'{"".join([os.linesep + indent + caveat for caveat in caveats])}'
)
if self.nodes_unsupported_due_to_dynamic_input:
logger.info(
"Unsupported nodes due to input having a dynamic shape=%d",
self.nodes_unsupported_due_to_dynamic_input,
)
if self.num_unsupported_nodes_due_to_rank:
logger.info(f"Unsupported nodes due to rank of input data={self.num_unsupported_nodes_due_to_rank}")
logger.info(f"\tOps with unsupported rank: {','.join(sorted(self.ops_with_unsupported_rank))}")
if self.num_subgraphs > 0:
# TODO: CoreML has a flag. NNAPI doesn't. Either should be able to support a subgraph when treated as a
# separate graph (only extra detail would be making sure implicit inputs are handled).
# Merging the subgraph into the parent graph would be more complex.
# e.g. for CoreML we could potentially convert Loop to while_loop and If to cond if the subgraphs in the
# control flow node are fully supported.
# NNAPI also has While and If.
# It most likely will be necessary to support merging in If nodes with fully supported subgraphs,
# as the subgraphs in those are often very simple, so the performance cost of going to the CPU EP and back
# is high.
logger.info(
f"{self.num_nodes_in_subgraphs} nodes are in {self.num_subgraphs} subgraphs. "
"Check EP as to whether subgraphs are supported."
)
pct_nodes_using_ep = self.num_supported_nodes / self.num_nodes * 100
if self.num_partitions == 0:
logger.info(f"{ep_name} cannot run any nodes in this model.")
elif self.num_partitions == 1:
if pct_nodes_using_ep > 75:
logger.info(
f"{ep_name} should work well for this model as there is one partition "
f"covering {pct_nodes_using_ep:.1f}% of the nodes in the model."
)
elif pct_nodes_using_ep > 50:
logger.info(
f"{ep_name} may work well for this model, however only {pct_nodes_using_ep:.1f}% of nodes "
"will use it. Performance testing is required to validate."
)
else:
logger.info(
f"{ep_name} will probably not work will for this model as only {pct_nodes_using_ep:.2f}% "
"of nodes will use it."
)
elif self.num_partitions == 2 and pct_nodes_using_ep > 75:
logger.info(
f"{ep_name} can be considered for this model as there are two partitions "
f"covering {pct_nodes_using_ep:.1f}% of the nodes. "
"Performance testing is required to validate."
)
else:
logger.info(
f"{ep_name} is not recommended with this model as there are {self.num_partitions} partitions "
f"covering {pct_nodes_using_ep:.1f}% of the nodes in the model. "
"This will most likely result in worse performance than just using the CPU EP."
)
def _check_partitioning_for_graph(
graph: onnx.GraphProto,
node_to_producers: dict[onnx.NodeProto, set[onnx.NodeProto]],
node_to_consumers: dict[onnx.NodeProto, set[onnx.NodeProto]],
supported_ops_checker: _SupportedOpsChecker,
outer_scope_initializers: set[str],
require_fixed_input_sizes: bool,
value_info: dict[str, onnx.ValueInfoProto],
max_rank: int = 999, # max rank if EP has a limitation
):
# initializers have fixed sizes.
initializers = [i.name for i in graph.initializer]
def _is_fixed_shape_value(value):
if value in value_info:
return is_fixed_size_tensor(value_info[value])
if value in initializers or value in outer_scope_initializers:
return True
# if something has an unknown shape (e.g. something downstream of a Reshape with dynamic input for the shape)
# it won't have an entry in value_info
return False
#
# Replicate logic from /onnxruntime/core/providers/partitioning_utils.cc:CreateSupportedPartitionNodeGroups
# to roughly estimate number of partitions for nodes that is_node_supported_fn returns true for.
#
# We keep the structure and variable names as close as possible to the C++ implementation to simplify keeping them
# in sync if future updates are needed.
#
# NOTE: CreateSupportedPartitionNodeGroups was recently updated to be QDQ aware so that partitions did not split
# QDQ node groups. This code does not need to be QDQ aware as splitting a QDQ node group does not affect the total
# number of partitions or supported nodes.
#
# we don't currently support a callback for additional group closure checks in the python implementation
on_group_closed_fn = None
supported_groups = []
# number of inputs from unprocessed nodes (in-degree) per node
in_degree = {}
# nodes that are ready to process
nodes_to_process = deque() # deque of Node instances
# nodes that will be processed when considering the next partition node group
nodes_to_process_with_next_group = deque()
# initialize in-degrees and find root nodes
for node in graph.node:
node_input_edge_count = len(node_to_producers[node]) if node in node_to_producers else 0
in_degree[node] = node_input_edge_count
if node_input_edge_count == 0:
# node is only dependent on graph input or initializers
nodes_to_process.append(node)
supported_group = []
# the partition node group's border is the aggregate of its nodes' output nodes
supported_group_border = set()
num_supported_nodes = 0
num_unsupported_nodes_due_to_op = 0
num_unsupported_nodes_due_to_dynamic_input = 0
num_unsupported_nodes_due_to_rank = 0
unsupported_ops = set()
ops_with_unsupported_rank = set()
def close_group():
if supported_group:
keep_partition = not on_group_closed_fn or on_group_closed_fn(supported_group)
if keep_partition:
supported_groups.append(supported_group.copy())
supported_group.clear()
supported_group_border.clear()
while nodes_to_process or nodes_to_process_with_next_group:
if not nodes_to_process:
close_group()
nodes_to_process = nodes_to_process_with_next_group
nodes_to_process_with_next_group = deque()
continue
node = nodes_to_process.popleft()
is_op_supported = supported_ops_checker.is_op_supported(node)
is_input_shape_supported = not require_fixed_input_sizes or all(_is_fixed_shape_value(i) for i in node.input)
is_rank_supported = True
if value_info:
for node_input in node.input:
if node_input and node_input in value_info and value_info[node_input].type.HasField("tensor_type"):
input_rank = len(value_info[node_input].type.tensor_type.shape.dim)
if input_rank > max_rank:
is_rank_supported = False
break
# special-case if we can infer the rank from the length of the 'perms' Transpose attribute
# e.g. this works with SegmentAnything where dynamic Reshape operators result in no shape info.
if node.op_type == "Transpose" and len(node.attribute[0].ints) > max_rank:
is_rank_supported = False
is_node_supported = is_op_supported and is_input_shape_supported and is_rank_supported
if not is_node_supported:
if node in supported_group_border:
# an unsupported node on the border will be processed after the current partition node group
# so skip any additional processing/counting here
nodes_to_process_with_next_group.append(node)
continue
if not is_op_supported:
unsupported_ops.add(f'{node.domain if node.domain else "ai.onnx"}:{node.op_type}')
num_unsupported_nodes_due_to_op += 1
if not is_input_shape_supported:
num_unsupported_nodes_due_to_dynamic_input += 1
if not is_rank_supported:
num_unsupported_nodes_due_to_rank += 1
ops_with_unsupported_rank.add(f'{node.domain if node.domain else "ai.onnx"}:{node.op_type}')
if is_node_supported:
num_supported_nodes += 1
# add node to the partition node group
supported_group.append(node)
# remove node from the border and add its outputs to the border
if node in supported_group_border:
supported_group_border.remove(node)
# for each consumer node add to supported_group_border
if node in node_to_consumers:
for consumer in node_to_consumers[node]:
supported_group_border.add(consumer)
# adjust in-degrees of the node outputs and add any new nodes to process
if node in node_to_consumers:
for consumer in node_to_consumers[node]:
consumer_node_in_degree = in_degree[consumer]
consumer_node_in_degree -= 1
if consumer_node_in_degree == 0:
nodes_to_process.append(consumer)
in_degree[consumer] = consumer_node_in_degree
close_group()
num_nodes = len(graph.node)
num_partitions = len(supported_groups)
info = PartitioningInfo(
num_nodes,
num_supported_nodes,
num_partitions,
supported_ops_checker,
supported_groups,
unsupported_ops,
num_unsupported_nodes_due_to_op,
num_unsupported_nodes_due_to_dynamic_input,
num_unsupported_nodes_due_to_rank,
ops_with_unsupported_rank,
)
return info
def check_partitioning(
main_graph: onnx.GraphProto,
supported_ops_checker: _SupportedOpsChecker,
require_fixed_input_sizes: bool,
max_rank: int = 999,
) -> PartitioningInfo:
"""
Estimate the partitions the graph will be split into for nodes that is_node_supported_fn returns true for.
The check on whether a node is supported is purely based on the operator type. Additional limitations
(e.g. NNAPI EP only supports 2D Conv) are not checked, so partitions may not be 100% accurate. The limitations
for operators in the partitions are printed so the user can manually check.
:param main_graph: Graph to process
:param supported_ops_checker: Checker with info on supported ops.
:param require_fixed_input_sizes: If True, require that the inputs to a potentially supported node are fixed size
tensors for it to be considered as supported. This requires
onnx.shape_inference.infer_shapes to have been run on the model to populate the
shape information.
If False, shapes are ignored during the check.
:param max_rank: Set if EP has a limitation on the rank of tensors it supports.
:return PartitioningInfo instance with details
"""
if require_fixed_input_sizes and len(main_graph.value_info) == 0 and len(main_graph.node) > 1:
raise ValueError("Run onnx.shape_inference.infer_shapes on the model to populate the shape information.")
# create lookup map from ValueInfo for efficiency
def _update_value_info(graph: onnx.GraphProto, value_to_shape: dict[str, onnx.ValueInfoProto]):
for v in graph.input:
value_to_shape[v.name] = v
for v in graph.output:
value_to_shape[v.name] = v
for v in graph.value_info:
value_to_shape[v.name] = v
# the producer/consumer maps are for the entire model
node_to_producers, node_to_consumers = get_producer_consumer_maps(main_graph)
def _check_graph(
graph: onnx.GraphProto,
outer_scope_value_info: dict[str, onnx.ValueInfoProto] | None,
outer_scope_initializers: set[str] | None = None,
partitioning_info: PartitioningInfo | None = None,
) -> PartitioningInfo:
if outer_scope_value_info is not None:
# extend value info if we're using it. we replace any value shadowed with a local one
value_info = outer_scope_value_info.copy()
_update_value_info(graph, value_info)
else:
value_info = {}
if outer_scope_initializers is None:
outer_scope_initializers = set()
info = _check_partitioning_for_graph(
graph,
node_to_producers,
node_to_consumers,
supported_ops_checker,
outer_scope_initializers,
require_fixed_input_sizes,
value_info,
max_rank,
)
if partitioning_info:
# merge in subgraph info
partitioning_info.merge(info)
else:
# main graph info
partitioning_info = info
# setup outer scope initializers. we copy the input set as a model may have multiple subgraphs
# on multiple levels, so we need to keep the set for each descent separate
subgraph_outer_scope_initializers = set(outer_scope_initializers)
for initializer in graph.initializer:
subgraph_outer_scope_initializers.add(initializer.name)
for node in graph.node:
# recurse into nodes with subgraphs
for attr in node.attribute:
if attr.HasField("g"):
subgraph = attr.g
partitioning_info = _check_graph(
subgraph, value_info, subgraph_outer_scope_initializers, partitioning_info
)
return partitioning_info
aggregated_partitioning_info = _check_graph(main_graph, {} if require_fixed_input_sizes else None)
return aggregated_partitioning_info
def _check_ep_partitioning(
model: onnx.ModelProto, supported_ops_config: pathlib.Path, require_fixed_input_sizes: bool, max_rank: int = 999
):
supported_ops = _SupportedOpsChecker(supported_ops_config)
partition_info = check_partitioning(model.graph, supported_ops, require_fixed_input_sizes, max_rank)
return partition_info
def check_nnapi_partitions(model: onnx.ModelProto, require_fixed_input_sizes: bool):
# if we're running in the ORT python package the file should be local. otherwise assume we're running from the
# ORT repo
script_dir = pathlib.Path(__file__).parent
local_config = script_dir / "nnapi_supported_ops.md"
if local_config.exists():
config_path = local_config
else:
ort_root = script_dir.parents[3]
config_path = ort_root / "tools" / "ci_build" / "github" / "android" / "nnapi_supported_ops.md"
return _check_ep_partitioning(model, config_path, require_fixed_input_sizes)
def check_coreml_partitions(model: onnx.ModelProto, require_fixed_input_sizes: bool, config_filename: str):
# if we're running in the ORT python package the file should be local. otherwise assume we're running from the
# ORT repo
script_dir = pathlib.Path(__file__).parent
local_config = script_dir / config_filename
if local_config.exists():
config_path = local_config
else:
ort_root = script_dir.parents[3]
config_path = ort_root / "tools" / "ci_build" / "github" / "apple" / config_filename
max_rank = 5
return _check_ep_partitioning(model, config_path, require_fixed_input_sizes, max_rank)
def check_shapes(graph: onnx.GraphProto, logger: logging.Logger | None = None):
"""
Check the shapes of graph inputs, values and graph outputs to determine if they have static or dynamic sizes.
NNAPI does not support dynamically sized values. CoreML does, but dynamic sizes will most likely cost performance.
:param graph: Graph to check. If shape inferencing has been run the checks on values will be meaningful.
:param logger: Optional logger for diagnostic information.
:return: Tuple of List of inputs with dynamic shapes, Number of dynamic values found
"""
# it's OK if the input is dynamically sized and we do a Resize early to a fixed size.
# it's not good if lots of ops have dynamic inputs
num_fixed_values = 0
num_dynamic_values = 0
dynamic_inputs = []
for i in graph.input:
if not is_fixed_size_tensor(i):
dynamic_inputs.append(i)
# split/join to remove repeated whitespace and newlines from str(i)
if logger:
logger.info(f"Input is not a fixed size tensor: {' '.join(str(i).split())}")
num_dynamic_values += 1
else:
num_fixed_values += 1
dynamic_outputs = []
for o in graph.output:
if not is_fixed_size_tensor(o):
dynamic_outputs.append(o)
if logger:
logger.info(f"Output is not a fixed size tensor: {' '.join(str(o).split())}")
num_dynamic_values += 1
else:
num_fixed_values += 1
# check we have value info.
# special case some test graphs with a single node which only have graph input and output values, and
# a model where all inputs are dynamic (results in no value_info)
if logger and not graph.value_info and not (len(graph.node) == 1 or len(dynamic_inputs) == len(graph.input)):
logger.warning(
"Unable to check shapes within model. "
"ONNX shape inferencing should be run on the model prior to checking."
)
for vi in graph.value_info:
if is_fixed_size_tensor(vi):
num_fixed_values += 1
else:
num_dynamic_values += 1
if logger:
logger.info(
f"Num values with fixed shape={num_fixed_values}. Num values with dynamic shape={num_dynamic_values}"
)
if dynamic_inputs and logger:
if dynamic_outputs:
logger.info(
"Model has dynamic inputs and outputs. Consider re-exporting model with fixed sizes "
"if NNAPI or CoreML can be used with this model."
)
else:
logger.info(
"""Model has dynamically sized inputs but fixed sized outputs.
If the sizes become fixed early in the model (e.g. pre-processing of a dynamic input size
results in a fixed input size for the majority of the model) performance with NNAPI and CoreML,
if applicable, should not be significantly impacted."""
)
return dynamic_inputs, num_dynamic_values
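# Example (illustrative sketch): checking a hypothetical model for dynamically shaped values
# after populating the shape information.
#
#   model = onnx.shape_inference.infer_shapes(onnx.load("model.onnx"))
#   dynamic_inputs, num_dynamic_values = check_shapes(model.graph, logging.getLogger("shapes"))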
def checker(model_path: pathlib.Path, logger: logging.Logger):
model_with_shape_info_wrapper = ModelProtoWithShapeInfo(model_path)
model_with_shape_info = model_with_shape_info_wrapper.model_with_shape_info
dynamic_inputs, num_dynamic_values = check_shapes(model_with_shape_info.graph)
def check_ep(ep_name, checker_func):
logger.info(f"Checking {ep_name}")
# check with shape info first so the set of supported nodes takes into account values with dynamic shapes
require_fixed_input_sizes = True
partition_info = checker_func(model_with_shape_info, require_fixed_input_sizes)
if logger.getEffectiveLevel() <= logging.INFO:
partition_info.print_analysis(logger, ep_name)
suitability = partition_info.suitability()
logger.info(f"Model should perform well with {ep_name} as is: {suitability.name}")
if suitability != PartitioningInfo.TryWithEP.YES and dynamic_inputs:
logger.info("--------")
logger.info("Checking if model will perform better if the dynamic shapes are fixed...")
require_fixed_input_sizes = False
partition_info_with_fixed_shapes = checker_func(model_with_shape_info, require_fixed_input_sizes)
if logger.getEffectiveLevel() <= logging.INFO:
# analyze and log detailed info
logger.info("Partition information if the model was updated to make the shapes fixed:")
partition_info_with_fixed_shapes.print_analysis(logger, ep_name)
fixed_shape_suitability = partition_info_with_fixed_shapes.suitability()
logger.info(
f"Model should perform well with {ep_name} if modified to have fixed input shapes: "
f"{fixed_shape_suitability.name}"
)
if fixed_shape_suitability != PartitioningInfo.TryWithEP.NO:
logger.info("Shapes can be altered using python -m onnxruntime.tools.make_dynamic_shape_fixed")
if fixed_shape_suitability.value > suitability.value:
suitability = fixed_shape_suitability
logger.info("================")
logger.info("")
return suitability
nnapi_suitability = check_ep("NNAPI", check_nnapi_partitions)
# Check for NeuralNetwork CoreML model
def check_nn_coreml(model: onnx.ModelProto, require_fixed_input_sizes):
return check_coreml_partitions(model, require_fixed_input_sizes, "coreml_supported_neuralnetwork_ops.md")
# Check for MLProgram CoreML model
def check_mlprogram_coreml(model: onnx.ModelProto, require_fixed_input_sizes):
return check_coreml_partitions(model, require_fixed_input_sizes, "coreml_supported_mlprogram_ops.md")
coreml_nn_suitability = check_ep("CoreML NeuralNetwork", check_nn_coreml)
coreml_mlprogram_suitability = check_ep("CoreML MLProgram", check_mlprogram_coreml)
if (
nnapi_suitability != PartitioningInfo.TryWithEP.YES
or coreml_nn_suitability != PartitioningInfo.TryWithEP.YES
or coreml_mlprogram_suitability != PartitioningInfo.TryWithEP.YES
) and logger.getEffectiveLevel() > logging.INFO:
logger.info("Re-run with log level of INFO for more details on the NNAPI/CoreML issues.")
return (
nnapi_suitability != PartitioningInfo.TryWithEP.NO
or coreml_nn_suitability != PartitioningInfo.TryWithEP.NO
or coreml_mlprogram_suitability != PartitioningInfo.TryWithEP.NO
)
def analyze_model(model_path: pathlib.Path, skip_optimize: bool = False, logger: logging.Logger | None = None):
"""
Analyze the provided model to determine if it's likely to work well with the NNAPI or CoreML Execution Providers
:param model_path: Model to analyze.
:param skip_optimize: Skip optimizing to BASIC level before checking. When exporting to ORT format we will do this
optimization.
:param logger: Logger for output
:return: True if either the NNAPI or CoreML Execution Providers may work well with this model.
"""
if not logger:
logger = logging.getLogger("usability_checker")
logger.setLevel(logging.INFO)
logger.info(f"Checking {model_path} for usability with ORT Mobile.")
with tempfile.TemporaryDirectory() as tmp:
if not skip_optimize:
tmp_path = pathlib.Path(tmp) / model_path.name
optimize_model(model_path, tmp_path, use_external_initializers=True)
model_path = tmp_path
try_eps = checker(model_path.resolve(strict=True), logger)
return try_eps
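# Example (illustrative sketch): calling analyze_model programmatically. The path is hypothetical.
#
#   if analyze_model(pathlib.Path("model.onnx")):
#       print("Consider benchmarking with the NNAPI and CoreML EPs.")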
def parse_args():
parser = argparse.ArgumentParser(
os.path.basename(__file__), description="""Analyze an ONNX model for usage with ORT Mobile."""
)
parser.add_argument("--log_level", choices=["debug", "info"], default="info", help="Logging level")
parser.add_argument(
"--skip_optimize",
action="store_true",
help="Don't optimize the model to BASIC level prior to analyzing. "
"Optimization will occur when exporting the model to ORT format, so in general "
"should not be skipped unless you have a specific reason to do so.",
)
parser.add_argument("model_path", type=pathlib.Path, help="Provide path to ONNX model")
return parser.parse_args()
def run_analyze_model():
args = parse_args()
logger = logging.getLogger("default")
if args.log_level == "debug":
logger.setLevel(logging.DEBUG)
elif args.log_level == "info":
logger.setLevel(logging.INFO)
elif args.log_level == "warning":
logger.setLevel(logging.WARNING)
else:
logger.setLevel(logging.ERROR)
model_path = args.model_path.resolve()
analyze_model(model_path, args.skip_optimize, logger)
if __name__ == "__main__":
run_analyze_model()

View File

@ -0,0 +1,169 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import copy
import json
import sys
from collections import OrderedDict
from pprint import pprint
from typing import Any, Dict, List
import onnx
TuningResults = Dict[str, Any]
_TUNING_RESULTS_KEY = "tuning_results"
def _find_tuning_results_in_props(metadata_props):
for idx, prop in enumerate(metadata_props):
if prop.key == _TUNING_RESULTS_KEY:
return idx
return -1
def extract(model: onnx.ModelProto):
idx = _find_tuning_results_in_props(model.metadata_props)
if idx < 0:
return None
tuning_results_prop = model.metadata_props[idx]
return json.loads(tuning_results_prop.value)
def embed(model: onnx.ModelProto, tuning_results: List[TuningResults], overwrite=False):
idx = _find_tuning_results_in_props(model.metadata_props)
assert overwrite or idx < 0, "the supplied onnx file already has tuning results embedded!"
if idx >= 0:
model.metadata_props.pop(idx)
entry = model.metadata_props.add()
entry.key = _TUNING_RESULTS_KEY
entry.value = json.dumps(tuning_results)
return model
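# Example (illustrative sketch): copying tuning results between two hypothetical models.
#
#   results = extract(onnx.load_model("tuned.onnx"))  # None if nothing is embedded
#   if results is not None:
#       updated = embed(onnx.load_model("fresh.onnx"), results)
#       onnx.save_model(updated, "fresh.tuned.onnx")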
class Merger:
class EpAndValidators:
def __init__(self, ep: str, validators: Dict[str, str]):
self.ep = ep
self.validators = copy.deepcopy(validators)
self.key = (ep, tuple(sorted(validators.items())))
def __hash__(self):
return hash(self.key)
def __eq__(self, other):
return self.ep == other.ep and self.key == other.key
def __init__(self):
self.ev_to_results = OrderedDict()
def merge(self, tuning_results: List[TuningResults]):
for trs in tuning_results:
self._merge_one(trs)
def get_merged(self):
tuning_results = []
for ev, flat_results in self.ev_to_results.items():
results = {}
trs = {
"ep": ev.ep,
"validators": ev.validators,
"results": results,
}
for (op_sig, params_sig), kernel_id in flat_results.items():
kernel_map = results.setdefault(op_sig, {})
kernel_map[params_sig] = kernel_id
tuning_results.append(trs)
return tuning_results
def _merge_one(self, trs: TuningResults):
ev = Merger.EpAndValidators(trs["ep"], trs["validators"])
flat_results = self.ev_to_results.setdefault(ev, {})
for op_sig, kernel_map in trs["results"].items():
for params_sig, kernel_id in kernel_map.items():
if (op_sig, params_sig) not in flat_results:
flat_results[(op_sig, params_sig)] = kernel_id
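# Example (illustrative sketch): merging tuning results from two hypothetical JSON files
# into a single de-duplicated list.
#
#   merger = Merger()
#   with open("a.json") as f:
#       merger.merge(json.load(f))
#   with open("b.json") as f:
#       merger.merge(json.load(f))
#   combined = merger.get_merged()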
def parse_args():
parser = argparse.ArgumentParser()
sub_parsers = parser.add_subparsers(help="Command to execute", dest="cmd")
extract_parser = sub_parsers.add_parser("extract", help="Extract embedded tuning results from an onnx file.")
extract_parser.add_argument("input_onnx")
extract_parser.add_argument("output_json")
embed_parser = sub_parsers.add_parser("embed", help="Embed the tuning results into an onnx file.")
embed_parser.add_argument("--force", "-f", action="store_true", help="Overwrite the tuning results if it existed.")
embed_parser.add_argument("output_onnx", help="Path of the output onnx file.")
embed_parser.add_argument("input_onnx", help="Path of the input onnx file.")
embed_parser.add_argument("input_json", nargs="+", help="Path(s) of the tuning results file(s) to be embedded.")
merge_parser = sub_parsers.add_parser("merge", help="Merge multiple tuning results files as a single one.")
merge_parser.add_argument("output_json", help="Path of the output tuning results file.")
merge_parser.add_argument("input_json", nargs="+", help="Paths of the tuning results files to be merged.")
pprint_parser = sub_parsers.add_parser("pprint", help="Pretty print the tuning results.")
pprint_parser.add_argument("json_or_onnx", help="A tuning results json file or an onnx file.")
args = parser.parse_args()
if args.cmd is None:
parser.print_help()
sys.exit(-1)
return args
def main():
args = parse_args()
if args.cmd == "extract":
tuning_results = extract(onnx.load_model(args.input_onnx))
if tuning_results is None:
sys.stderr.write(f"{args.input_onnx} does not have tuning results embedded!\n")
sys.exit(-1)
json.dump(tuning_results, open(args.output_json, "w")) # noqa: SIM115
elif args.cmd == "embed":
model = onnx.load_model(args.input_onnx)
merger = Merger()
for tuning_results in [json.load(open(f)) for f in args.input_json]: # noqa: SIM115
merger.merge(tuning_results)
model = embed(model, merger.get_merged(), args.force)
onnx.save_model(model, args.output_onnx)
elif args.cmd == "merge":
merger = Merger()
for tuning_results in [json.load(open(f)) for f in args.input_json]: # noqa: SIM115
merger.merge(tuning_results)
json.dump(merger.get_merged(), open(args.output_json, "w")) # noqa: SIM115
elif args.cmd == "pprint":
tuning_results = None
try: # noqa: SIM105
tuning_results = json.load(open(args.json_or_onnx)) # noqa: SIM115
except Exception:
# it might be an onnx file instead; try that later
pass
if tuning_results is None:
try:
model = onnx.load_model(args.json_or_onnx)
tuning_results = extract(model)
if tuning_results is None:
sys.stderr.write(f"{args.input_onnx} does not have tuning results embedded!\n")
sys.exit(-1)
except Exception:
pass
if tuning_results is None:
sys.stderr.write(f"{args.json_or_onnx} is not a valid tuning results file or onnx file!")
sys.exit(-1)
pprint(tuning_results)
else:
# invalid choice will be handled by the parser
pass
if __name__ == "__main__":
main()

View File

@ -0,0 +1,413 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import pathlib
from typing import List, Optional
import onnx
from onnx import version_converter
import onnxruntime as ort
def iterate_graph_per_node_func(graph, per_node_func, **func_args):
"""
Iterate the graph including subgraphs calling the per_node_func for each node.
:param graph: Graph to iterate
:param per_node_func: Function to call for each node. Signature is fn(node: onnx.NodeProto, **kwargs)
:param func_args: The keyword args to pass through.
"""
for node in graph.node:
per_node_func(node, **func_args)
# recurse into subgraph for control flow nodes (Scan/Loop/If)
for attr in node.attribute:
if attr.HasField("g"):
iterate_graph_per_node_func(attr.g, per_node_func, **func_args)
def iterate_graph_per_graph_func(graph, per_graph_func, **func_args):
"""
Iterate the graph including subgraphs calling the per_graph_func for each Graph.
:param graph: Graph to iterate
:param per_graph_func: Function to call for each graph. Signature is fn(graph: onnx.GraphProto, **kwargs)
:param func_args: The keyword args to pass through.
"""
per_graph_func(graph, **func_args)
for node in graph.node:
# recurse into subgraph for control flow nodes (Scan/Loop/If)
for attr in node.attribute:
if attr.HasField("g"):
iterate_graph_per_graph_func(attr.g, per_graph_func, **func_args)
def get_opsets_imported(model: onnx.ModelProto):
"""
Get the opsets imported by the model
:param model: Model to check.
:return: Map of domain to opset.
"""
opsets = {}
for entry in model.opset_import:
# if empty it's ai.onnx
domain = entry.domain or "ai.onnx"
opsets[domain] = entry.version
return opsets
def update_onnx_opset(
model_path: pathlib.Path,
opset: int,
out_path: Optional[pathlib.Path] = None,
logger: Optional[logging.Logger] = None,
):
"""
Helper to update the opset of a model using onnx version_converter. Target opset must be greater than current opset.
:param model_path: Path to model to update
:param opset: Opset to update model to
:param out_path: Optional output path for updated model to be saved to.
:param logger: Optional logger for diagnostic output
:returns: Updated onnx.ModelProto
"""
model_path_str = str(model_path.resolve(strict=True))
if logger:
logger.info("Updating %s to opset %d", model_path_str, opset)
model = onnx.load(model_path_str)
new_model = version_converter.convert_version(model, opset)
if out_path:
onnx.save(new_model, str(out_path))
if logger:
logger.info("Saved updated model to %s", out_path)
return new_model
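# Example (illustrative sketch): updating a hypothetical model to opset 15 and saving the result.
#
#   update_onnx_opset(pathlib.Path("model.onnx"), 15, out_path=pathlib.Path("model.opset15.onnx"))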
def optimize_model(
model_path: pathlib.Path,
output_path: pathlib.Path,
level: ort.GraphOptimizationLevel = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC,
log_level: int = 3,
use_external_initializers: bool = False,
):
"""
Optimize an ONNX model using ONNX Runtime to the specified level
:param model_path: Path to ONNX model
:param output_path: Path to save optimized model to.
:param level: onnxruntime.GraphOptimizationLevel to use. Default is ORT_ENABLE_BASIC.
:param log_level: Log level. Defaults to Error (3) so we don't get output about unused initializers being removed.
Warning (2) or Info (1) may be desirable in some scenarios.
:param use_external_initializers: Set flag to write initializers to an external file. Required if model > 2GB.
Requires onnxruntime 1.17+
"""
so = ort.SessionOptions()
so.optimized_model_filepath = str(output_path.resolve())
so.graph_optimization_level = level
so.log_severity_level = log_level
# save using external initializers so models > 2 GB are handled
if use_external_initializers:
major, minor, rest = ort.__version__.split(".", 2)
if (int(major), int(minor)) >= (1, 17):
so.add_session_config_entry("session.optimized_model_external_initializers_file_name", "external_data.pb")
else:
raise ValueError(
"ONNX Runtime 1.17 or higher required to save initializers as external data when optimizing model. "
f"Current ONNX Runtime version is {ort.__version__}"
)
# create session to optimize. this will write the updated model to output_path
_ = ort.InferenceSession(str(model_path.resolve(strict=True)), so, providers=["CPUExecutionProvider"])
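# Example (illustrative sketch): optimizing a hypothetical model to the default BASIC level.
#
#   optimize_model(pathlib.Path("model.onnx"), pathlib.Path("model.basic.onnx"))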
def _replace_symbolic_dim_value(graph: onnx.GraphProto, **kwargs):
param_to_replace = kwargs["dim_param"]
value = kwargs["value"]
def update_dim_values(value_infos):
for vi in value_infos:
if vi.type.HasField("tensor_type"):
shape = vi.type.tensor_type.shape
if shape:
for dim in shape.dim:
if dim.HasField("dim_param") and dim.dim_param == param_to_replace:
dim.Clear()
dim.dim_value = value
update_dim_values(graph.input)
update_dim_values(graph.output)
update_dim_values(graph.value_info)
def _remove_invalid_dim_values_impl(graph: onnx.GraphProto):
def clear_invalid_values(value):
if value.type.HasField("tensor_type"):
shape = value.type.tensor_type.shape
if shape:
for dim in shape.dim:
if dim.HasField("dim_value") and dim.dim_value < 1:
dim.Clear()
for i in graph.input:
clear_invalid_values(i)
for o in graph.output:
clear_invalid_values(o)
for vi in graph.value_info:
clear_invalid_values(vi)
def remove_invalid_dim_values(graph: onnx.GraphProto):
"""
Iterate the graph and subgraphs, unsetting any dim_value entries that have a value of less than 1.
These are typically erroneously inserted by a converter to represent a dynamic dimension.
:param graph: GraphProto to update
"""
iterate_graph_per_graph_func(graph, _remove_invalid_dim_values_impl)
def make_dim_param_fixed(graph: onnx.GraphProto, param_name: str, value: int):
"""
Iterate all values in the graph, replacing dim_param in a tensor shape with the provided value.
:param graph: GraphProto to update
:param param_name: dim_param to set
:param value: value to use
"""
iterate_graph_per_graph_func(graph, _replace_symbolic_dim_value, dim_param=param_name, value=value)
def make_input_shape_fixed(graph: onnx.GraphProto, input_name: str, fixed_shape: List[int]):
"""
Update the named graph input to set shape to the provided value. This can be used to set unknown dims as well
as to replace dim values.
If setting the input shape replaces a dim_param, update any other values in the graph that use the dim_param.
:param graph: Graph to update
:param input_name: Name of graph input to update.
:param fixed_shape: Shape to use.
"""
# remove any invalid dim values first. typically this is a dim_value of -1.
remove_invalid_dim_values(graph)
for i in graph.input:
if i.name == input_name:
if not i.type.HasField("tensor_type"):
raise ValueError(f"Input {input_name} is not a tensor")
# graph inputs are required to have a shape to provide the rank
shape = i.type.tensor_type.shape
if len(shape.dim) != len(fixed_shape):
raise ValueError(f"Rank mismatch. Existing:{len(shape.dim)} Replacement:{len(fixed_shape)}")
for idx, dim in enumerate(shape.dim):
# check any existing fixed dims match
if dim.HasField("dim_value"):
if dim.dim_value != fixed_shape[idx]:
raise ValueError(
f"Can't replace existing fixed size of {dim.dim_value} with {fixed_shape[idx]} "
f"for dimension {idx + 1}"
)
elif dim.HasField("dim_param"):
# replacing a dim_param so have to do that through the entire graph
make_dim_param_fixed(graph, dim.dim_param, fixed_shape[idx])
else:
# replacing an unknown dim
dim.Clear()
dim.dim_value = fixed_shape[idx]
return
raise ValueError(
f"Input {input_name} was not found in graph inputs. "
f'Valid input names are: {",".join([i.name for i in graph.input])}'
)
def fix_output_shapes(model: onnx.ModelProto):
"""
Update the output shapes of a model where the input shape/s were made fixed, if possible.
This is mainly to make the model usage clearer if the output shapes can be inferred from the new input shapes.
:param model: Model that had input shapes fixed.
"""
# get a version of the model with shape inferencing info in it. this will provide fixed output shapes if possible.
m2 = onnx.shape_inference.infer_shapes(model)
onnx.checker.check_model(m2)
for idx, o in enumerate(model.graph.output):
if not is_fixed_size_tensor(o):
new_o = m2.graph.output[idx]
if is_fixed_size_tensor(new_o):
o.type.tensor_type.shape.CopyFrom(new_o.type.tensor_type.shape)
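# Example (illustrative sketch): making a hypothetical input shape fixed and propagating the
# now-fixed sizes to the graph outputs.
#
#   model = onnx.load("model.onnx")
#   make_input_shape_fixed(model.graph, "input_0", [1, 3, 224, 224])
#   fix_output_shapes(model)
#   onnx.save(model, "model.fixed.onnx")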
def _create_producer_consumer_link(
node_to_producers: dict, node_to_consumers: dict, producer: onnx.NodeProto, consumer: onnx.NodeProto
):
"""
Create links between two nodes for a value produced by one and consumed by the other.
:param node_to_producers: Map of NodeProto to set of nodes that produce values the node consumes as inputs.
:param node_to_consumers: Map of NodeProto to set of nodes that consume values the node produces as outputs.
:param producer: Producer node
:param consumer: Consumer node
"""
if consumer not in node_to_producers:
node_to_producers[consumer] = set()
if producer not in node_to_consumers:
node_to_consumers[producer] = set()
# add entry mapping this node to the producer of this input
node_to_producers[consumer].add(producer)
node_to_consumers[producer].add(consumer)
def _map_node_dependencies(graph: onnx.GraphProto, node_to_producers: dict, node_to_consumers: dict):
graph_inputs = {i.name for i in graph.input}
initializers = {i.name for i in graph.initializer}
# map of value name to node that creates it. copy parent values but override if values get shadowed
producers = {}
implicit_inputs = set()
def is_local_value(value):
return value in producers or value in initializers or value in graph_inputs
for node in graph.node:
inputs = list(node.input)
for attr in node.attribute:
if attr.HasField("g"):
subgraph_implicit_inputs = _map_node_dependencies(attr.g, node_to_producers, node_to_consumers)
inputs += subgraph_implicit_inputs
for i in inputs:
if not i:
# missing optional input
continue
if is_local_value(i):
if i in producers:
producer = producers[i]
_create_producer_consumer_link(node_to_producers, node_to_consumers, producer, node)
else:
implicit_inputs.add(i)
for o in node.output:
producers[o] = node
return implicit_inputs
def get_producer_consumer_maps(graph: onnx.GraphProto):
"""
Get maps for connections between the node that produces each value and the nodes that consume the value.
Processing includes subgraphs. As the map key is a Node instance from the Graph there should be no ambiguity.
:param graph: Graph to process.
:return: Tuple with two maps.
First is node_to_producers map of a node to set of all nodes producing input it consumes.
Second is node_to_consumers map of a node to set of all nodes consuming output it creates.
e.g. NodeA and NodeB provide inputs to NodeC. NodeC provides input to NodeD
node_to_consumers[NodeA] = set([NodeC])
node_to_consumers[NodeB] = set([NodeC])
node_to_producers[NodeC] = set([NodeA, NodeB])
node_to_consumers[NodeC] = set([NodeD])
node_to_producers[NodeD] = set([NodeC])
"""
# use a hash of the object id for NodeProto.
# we need this for the partitioning checker where we keep maps with nodes as the key.
onnx.NodeProto.__hash__ = lambda self: id(self)
node_to_producers = {} # map of node instance to nodes producing input values it consumes
node_to_consumers = {} # map of node instance to nodes consuming output values it produces
implicit_inputs = _map_node_dependencies(graph, node_to_producers, node_to_consumers)
# top level graph should have no implicit inputs
if implicit_inputs:
raise ValueError(
f'This appears to be an invalid model with missing inputs of {",".join(sorted(implicit_inputs))}'
)
return node_to_producers, node_to_consumers
def is_fixed_size_tensor(value: onnx.ValueInfoProto):
"""
Check if value is a tensor with a fixed shape.
:param value: onnx.ValueInfoProto to check
:return: True if value is a tensor, with a shape, where all dimensions have fixed values.
"""
is_fixed = False
if value.type.HasField("tensor_type"):
shape = value.type.tensor_type.shape
if shape:
is_fixed = True # scalar has no dims so set to True and unset if we hit a dim without a valid value
for dim in shape.dim:
if dim.HasField("dim_value") and dim.dim_value > 0:
continue
# anything else means it's a dynamic value
is_fixed = False
break
return is_fixed
def get_optimization_level(level):
"""Convert string to GraphOptimizationLevel."""
if level == "disable":
return ort.GraphOptimizationLevel.ORT_DISABLE_ALL
if level == "basic":
# Constant folding and other optimizations that only use ONNX operators
return ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
if level == "extended":
# Optimizations using custom operators, excluding NCHWc and NHWC layout optimizers
return ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
if level == "all":
return ort.GraphOptimizationLevel.ORT_ENABLE_ALL
raise ValueError("Invalid optimization level of " + level)
class ModelProtoWithShapeInfo:
"""
Class to load an ONNX model and run shape inferencing on it to populate the ValueInfo.
The model_with_shape_info property will contain the updated model.
If the model is > 2GB and uses external data a temporary file is required to run shape inferencing successfully.
This helper class handles automatic removal of the temporary file.
"""
def __init__(self, model_path: pathlib.Path):
"""
:param model_path: Path to ONNX model to load and run shape inferencing on.
"""
self.model_path = model_path
model = onnx.load(str(model_path))
self.model_with_shape_info = onnx.shape_inference.infer_shapes(model, strict_mode=True)
# ONNX has a silent failure from the call to infer_shapes when the model is > 2GB.
# We detect that by checking the nodes in the returned model.
self._tmp_model_path = None
if len(model.graph.node) > 0 and len(self.model_with_shape_info.graph.node) == 0:
self._tmp_model_path = pathlib.Path(model_path).with_suffix(".temp_with_shapeinf.onnx")
onnx.shape_inference.infer_shapes_path(str(model_path), str(self._tmp_model_path), strict_mode=True)
self.model_with_shape_info = onnx.load(str(self._tmp_model_path))
def __del__(self):
if self._tmp_model_path:
self._tmp_model_path.unlink(missing_ok=True)

View File

@ -0,0 +1,85 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
# An offline standalone script to declassify an ONNX model by randomizing the tensor data in its initializers.
# ORT performance may change, especially on generative models.
import argparse
from pathlib import Path
import numpy as np
from onnx import load_model, numpy_helper, onnx_pb, save_model
# An experimental small value for differentiating shape data from weights.
# Tensor data larger than this size is assumed to be weights rather than shape data.
# Users may adjust this value as needed.
SIZE_THRESHOLD = 10
def graph_iterator(model, func):
graph_queue = [model.graph]
while graph_queue:
graph = graph_queue.pop(0)
func(graph)
for node in graph.node:
for attr in node.attribute:
if attr.type == onnx_pb.AttributeProto.AttributeType.GRAPH:
assert isinstance(attr.g, onnx_pb.GraphProto)
graph_queue.append(attr.g)
if attr.type == onnx_pb.AttributeProto.AttributeType.GRAPHS:
for g in attr.graphs:
assert isinstance(g, onnx_pb.GraphProto)
graph_queue.append(g)
def randomize_graph_initializer(graph):
for i_tensor in graph.initializer:
array = numpy_helper.to_array(i_tensor)
# TODO: need to find a better way to differentiate shape data and weights.
if array.size > SIZE_THRESHOLD:
random_array = np.random.uniform(array.min(), array.max(), size=array.shape).astype(array.dtype)
o_tensor = numpy_helper.from_array(random_array, i_tensor.name)
i_tensor.CopyFrom(o_tensor)
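# Example (illustrative sketch): randomizing the weights of a model loaded from a hypothetical path.
#
#   model = load_model("model.onnx")
#   graph_iterator(model, randomize_graph_initializer)
#   save_model(model, "model.randomized.onnx")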
def main():
parser = argparse.ArgumentParser(description="Randomize the weights of an ONNX model")
parser.add_argument("-m", type=str, required=True, help="input onnx model path")
parser.add_argument("-o", type=str, required=True, help="output onnx model path")
parser.add_argument(
"--use_external_data_format",
required=False,
action="store_true",
help="Store or Save in external data format",
)
parser.add_argument(
"--all_tensors_to_one_file",
required=False,
action="store_true",
help="Save all tensors to one file",
)
args = parser.parse_args()
data_path = None
if args.use_external_data_format:
if Path(args.m).parent == Path(args.o).parent:
raise RuntimeError("Please specify output directory with different parent path to input directory.")
if args.all_tensors_to_one_file:
data_path = Path(args.o).name + ".data"
Path(args.o).parent.mkdir(parents=True, exist_ok=True)
onnx_model = load_model(args.m, load_external_data=args.use_external_data_format)
graph_iterator(onnx_model, randomize_graph_initializer)
save_model(
onnx_model,
args.o,
save_as_external_data=args.use_external_data_format,
all_tensors_to_one_file=args.all_tensors_to_one_file,
location=data_path,
)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,164 @@
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from __future__ import annotations
import argparse
import os
import sys
from timeit import default_timer as timer
import numpy as np
import onnxruntime as onnxrt
float_dict = {
"tensor(float16)": "float16",
"tensor(float)": "float32",
"tensor(double)": "float64",
}
integer_dict = {
"tensor(int32)": "int32",
"tensor(int8)": "int8",
"tensor(uint8)": "uint8",
"tensor(int16)": "int16",
"tensor(uint16)": "uint16",
"tensor(int64)": "int64",
"tensor(uint64)": "uint64",
}
def generate_feeds(sess, symbolic_dims: dict | None = None):
feeds = {}
symbolic_dims = symbolic_dims or {}
for input_meta in sess.get_inputs():
# replace any symbolic dimensions
shape = []
for dim in input_meta.shape:
if not dim:
# unknown dim
shape.append(1)
elif isinstance(dim, str):
# symbolic dim. see if we have a value otherwise use 1
if dim in symbolic_dims:
shape.append(int(symbolic_dims[dim]))
else:
shape.append(1)
else:
shape.append(dim)
if input_meta.type in float_dict:
feeds[input_meta.name] = np.random.rand(*shape).astype(float_dict[input_meta.type])
elif input_meta.type in integer_dict:
feeds[input_meta.name] = np.random.uniform(high=1000, size=tuple(shape)).astype(
integer_dict[input_meta.type]
)
elif input_meta.type == "tensor(bool)":
feeds[input_meta.name] = np.random.randint(2, size=tuple(shape)).astype("bool")
else:
print(f"unsupported input type {input_meta.type} for input {input_meta.name}")
sys.exit(-1)
return feeds
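# Example (illustrative sketch): generating random feeds for a session, supplying a value for a
# hypothetical symbolic dimension named "batch".
#
#   sess = onnxrt.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
#   feeds = generate_feeds(sess, {"batch": 4})
#   outputs = sess.run([], feeds)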
# simple test program for loading onnx model, feeding all inputs and running the model num_iters times.
def run_model(
model_path,
num_iters=1,
debug=None,
profile=None,
symbolic_dims=None,
feeds=None,
override_initializers=True,
):
symbolic_dims = symbolic_dims or {}
if debug:
print(f"Pausing execution ready for debugger to attach to pid: {os.getpid()}")
print("Press key to continue.")
sys.stdin.read(1)
sess_options = None
if profile:
sess_options = onnxrt.SessionOptions()
sess_options.enable_profiling = True
sess_options.profile_file_prefix = os.path.basename(model_path)
sess = onnxrt.InferenceSession(
model_path,
sess_options=sess_options,
providers=onnxrt.get_available_providers(),
)
meta = sess.get_modelmeta()
if not feeds:
feeds = generate_feeds(sess, symbolic_dims)
if override_initializers:
# Starting with IR version 4, some initializers provide default values
# and can be overridden. For models with IR version < 4
# the list will be empty.
for initializer in sess.get_overridable_initializers():
shape = [dim if dim else 1 for dim in initializer.shape]
if initializer.type in float_dict:
feeds[initializer.name] = np.random.rand(*shape).astype(float_dict[initializer.type])
elif initializer.type in integer_dict:
feeds[initializer.name] = np.random.uniform(high=1000, size=tuple(shape)).astype(
integer_dict[initializer.type]
)
elif initializer.type == "tensor(bool)":
feeds[initializer.name] = np.random.randint(2, size=tuple(shape)).astype("bool")
else:
print(f"unsupported initializer type {initializer.type} for initializer {initializer.name}")
sys.exit(-1)
start = timer()
for _i in range(num_iters):
outputs = sess.run([], feeds) # fetch all outputs
end = timer()
print(f"model: {meta.graph_name}")
print(f"version: {meta.version}")
print(f"iterations: {num_iters}")
print(f"avg latency: {((end - start) * 1000) / num_iters} ms")
if profile:
trace_file = sess.end_profiling()
print(f"trace file written to: {trace_file}")
return 0, feeds, num_iters > 0 and outputs
def main():
parser = argparse.ArgumentParser(description="Simple ONNX Runtime Test Tool.")
parser.add_argument("model_path", help="model path")
parser.add_argument(
"num_iters",
nargs="?",
type=int,
default=1000,
help="model run iterations. default=1000",
)
parser.add_argument(
"--debug",
action="store_true",
help="pause execution to allow attaching a debugger.",
)
parser.add_argument("--profile", action="store_true", help="enable chrome timeline trace profiling.")
parser.add_argument(
"--symbolic_dims",
default={},
type=lambda s: dict(x.split("=") for x in s.split(",")),
help="Comma separated name=value pairs for any symbolic dimensions in the model input. "
"e.g. --symbolic_dims batch=1,seqlen=5. "
"If not provided, the value of 1 will be used for all symbolic dimensions.",
)
args = parser.parse_args()
exit_code, _, _ = run_model(args.model_path, args.num_iters, args.debug, args.profile, args.symbolic_dims)
sys.exit(exit_code)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,55 @@
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import pathlib
from .onnx_model_utils import get_optimization_level, optimize_model
def optimize_model_helper():
parser = argparse.ArgumentParser(
f"{os.path.basename(__file__)}:{optimize_model_helper.__name__}",
description="""
Optimize an ONNX model using ONNX Runtime to the specified level.
See https://onnxruntime.ai/docs/performance/model-optimizations/graph-optimizations.html for more
details of the optimization levels.""",
)
parser.add_argument(
"--opt_level",
default="basic",
choices=["disable", "basic", "extended", "all"],
help="Optimization level to use.",
)
parser.add_argument(
"--log_level",
choices=["debug", "info", "warning", "error"],
type=str,
required=False,
default="error",
help="Log level. Defaults to Error so we don't get output about unused initializers "
"being removed. Warning or Info may be desirable in some scenarios.",
)
parser.add_argument("input_model", type=pathlib.Path, help="Provide path to ONNX model to update.")
parser.add_argument("output_model", type=pathlib.Path, help="Provide path to write optimized ONNX model to.")
args = parser.parse_args()
if args.log_level == "error":
log_level = 3
elif args.log_level == "debug":
log_level = 0 # ORT verbose level
elif args.log_level == "info":
log_level = 1
elif args.log_level == "warning":
log_level = 2
optimize_model(args.input_model, args.output_model, get_optimization_level(args.opt_level), log_level)
if __name__ == "__main__":
optimize_model_helper()

View File

@ -0,0 +1,25 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
# need to add the path to the ORT flatbuffers python module before we import anything else here.
# we also auto-magically adjust to whether we're running from the ORT repo, or from within the ORT python package
script_dir = os.path.dirname(os.path.realpath(__file__))
fbs_py_schema_dirname = "ort_flatbuffers_py"
if os.path.isdir(os.path.join(script_dir, fbs_py_schema_dirname)):
# fbs bindings are in this directory, so we're running in the ORT python package
ort_fbs_py_parent_dir = script_dir
else:
# running directly from ORT repo, so fbs bindings are under onnxruntime/core/flatbuffers
ort_root = os.path.abspath(os.path.join(script_dir, "..", "..", "..", ".."))
ort_fbs_py_parent_dir = os.path.join(ort_root, "onnxruntime", "core", "flatbuffers")
sys.path.append(ort_fbs_py_parent_dir)
from .operator_type_usage_processors import GloballyAllowedTypesOpTypeImplFilter # noqa: E402, F401
from .operator_type_usage_processors import OperatorTypeUsageManager # noqa: E402, F401
from .operator_type_usage_processors import OpTypeImplFilterInterface # noqa: E402, F401
from .ort_model_processor import OrtFormatModelProcessor # noqa: E402, F401
from .utils import create_config_from_models # noqa: E402, F401

View File

@ -0,0 +1,663 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
import typing
from abc import ABC, abstractmethod
import ort_flatbuffers_py.fbs as fbs
from .types import FbsTypeInfo, value_name_to_typestr
def _create_op_key(domain: str, optype: str):
return f"{domain}:{optype}"
def _ort_constant_for_domain(domain: str):
"""
Map a string domain value to the internal ONNX Runtime constant for that domain.
:param domain: Domain string to map.
:return: Internal ONNX Runtime constant
"""
# constants are defined in <ORT root>/include/onnxruntime/core/graph/constants.h
# This list is limited to just the domains we have processors for
domain_to_constant_map = {"ai.onnx": "kOnnxDomain", "ai.onnx.ml": "kMLDomain", "com.microsoft": "kMSDomain"}
if domain not in domain_to_constant_map:
raise ValueError(f"Domain {domain} not found in map to ONNX Runtime constant. Please update map.")
return domain_to_constant_map[domain]
def _reg_type_to_cpp_type(reg_type: str):
if reg_type == "string":
return "std::string"
return reg_type
def _split_reg_types(reg_types_str: str):
"""
Split on underscores, re-joining a trailing "t" token to the previous element so types like "int64_t" stay intact.
"""
tokens = reg_types_str.split("_")
reg_types = []
for token in tokens:
if token == "t" and len(reg_types) > 0:
reg_types[-1] += "_t"
else:
reg_types += [token]
return reg_types
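# e.g. _split_reg_types("float_int64_t_int64_t") -> ["float", "int64_t", "int64_t"]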
class TypeUsageProcessor(ABC):
"""
Abstract base class for processors which implement operator specific logic to determine the type or types required.
"""
def __init__(self, domain: str, optype: str):
self.domain = domain
self.optype = optype
self.name = _create_op_key(domain, optype)
@abstractmethod
def process_node(self, node: fbs.Node, value_name_to_typeinfo: dict):
pass
def is_typed_registration_needed(
self, type_in_registration: str, globally_allowed_types: typing.Optional[typing.Set[str]]
):
"""
Given the string from a kernel registration, determine if the registration is required or not.
:param type_in_registration: Type string from kernel registration
:param globally_allowed_types: Optional set of globally allowed types. If provided, these types take precedence
in determining the required types.
:return: True if required. False if not.
"""
# Not all operators have typed registrations, so this is optionally implemented by derived classes
raise RuntimeError(f"Did not expect processor for {self.name} to have typed registrations.")
def get_cpp_entry(self):
"""
Get the C++ code that specifies this operator's required types.
:return: List with any applicable C++ code for this operator's required types. One line per entry.
"""
# Not applicable for some ops, so return no lines by default.
return []
@abstractmethod
def to_config_entry(self):
"""
Generate a configuration file entry in JSON format with the required types for the operator.
:return: JSON string with required type information.
"""
@abstractmethod
def from_config_entry(self, entry: str):
"""
Re-create the types required from a configuration file entry created with to_config_entry.
NOTE: Any existing type information should be cleared prior to re-creating from a config file entry.
:param entry: Configuration file entry
"""
class DefaultTypeUsageProcessor(TypeUsageProcessor):
"""
Operator processor which tracks the types used for selected input/s and/or output/s.
"""
def __init__(
self,
domain: str,
optype: str,
inputs: typing.List[int] = [0],  # noqa: B006
outputs: typing.List[int] = [],  # noqa: B006
required_input_types: typing.Dict[int, typing.Set[str]] = {}, # noqa: B006
required_output_types: typing.Dict[int, typing.Set[str]] = {}, # noqa: B006
):
"""
Create DefaultTypeUsageProcessor. Types for one or more inputs and/or outputs can be tracked by the processor.
The default is to track the types required for input 0, as this is the most common use case in ONNX.
Required input and output types may be specified. These are only applicable to is_typed_registration_needed().
If a registration type matches a required type, the typed registration is needed.
There is a separate mechanism for specifying required types from C++ for kernels with untyped registration.
:param domain: Operator domain.
:param optype: Operator name.
:param inputs: Inputs to track. Zero based index. May be empty.
:param outputs: Outputs to track. Zero based index. May be empty.
:param required_input_types: Required input types. May be empty.
:param required_output_types: Required output types. May be empty.
"""
super().__init__(domain, optype)
self._input_types = {}
self._output_types = {}
for i in inputs:
self._input_types[i] = set()
for o in outputs:
self._output_types[o] = set()
if not inputs and not outputs:
raise ValueError("At least one input or output must be tracked")
self._required_input_types = required_input_types
self._required_output_types = required_output_types
def _is_type_enabled(self, reg_type, index, required_types, allowed_type_set):
cpp_type = _reg_type_to_cpp_type(reg_type)
return cpp_type in required_types.get(index, set()) or cpp_type in allowed_type_set
def is_input_type_enabled(self, reg_type, index, allowed_type_set=None):
"""Whether input type is enabled based on required and allowed types."""
if allowed_type_set is None:
allowed_type_set = self._input_types[index]
return self._is_type_enabled(reg_type, index, self._required_input_types, allowed_type_set)
def is_output_type_enabled(self, reg_type, index, allowed_type_set=None):
"""Whether output type is enabled based on required and allowed types."""
if allowed_type_set is None:
allowed_type_set = self._output_types[index]
return self._is_type_enabled(reg_type, index, self._required_output_types, allowed_type_set)
def process_node(self, node: fbs.Node, value_name_to_typeinfo: dict):
for i in self._input_types:
if i >= node.InputsLength():
# Some operators have fewer inputs in earlier versions, where data that was provided as an attribute
# became an input in a later version to allow it to be dynamically provided. Allow for that.
# e.g. Slice-1 had attributes for the indices, and Slice-10 moved those to be inputs
# raise RuntimeError('Node has {} outputs. Tracker for {} incorrectly configured as it requires {}.'
# .format(node.OutputsLength(), self.name, o))
pass
else:
type_str = value_name_to_typestr(node.Inputs(i), value_name_to_typeinfo)
self._input_types[i].add(type_str)
for o in self._output_types:
# Don't know of any ops where the number of outputs changed across versions, so require a valid length
if o >= node.OutputsLength():
raise RuntimeError(
f"Node has {node.OutputsLength()} outputs. Tracker for {self.name} incorrectly configured as it requires {o}."
)
type_str = value_name_to_typestr(node.Outputs(o), value_name_to_typeinfo)
self._output_types[o].add(type_str)
def is_typed_registration_needed(
self, type_in_registration: str, globally_allowed_types: typing.Optional[typing.Set[str]]
):
if 0 not in self._input_types:
# currently all standard typed registrations are for input 0.
# custom registrations can be handled by operator specific processors (e.g. OneHotProcessor below).
raise RuntimeError(f"Expected typed registration to use type from input 0. Node:{self.name}")
return self.is_input_type_enabled(type_in_registration, 0, globally_allowed_types)
def get_cpp_entry(self):
entries = []
domain = _ort_constant_for_domain(self.domain)
for i in sorted(self._input_types.keys()):
if self._input_types[i]:
entries.append(
"ORT_SPECIFY_OP_KERNEL_ARG_ALLOWED_TYPES({}, {}, Input, {}, {});".format(
domain, self.optype, i, ", ".join(sorted(self._input_types[i]))
)
)
for o in sorted(self._output_types.keys()):
if self._output_types[o]:
entries.append(
"ORT_SPECIFY_OP_KERNEL_ARG_ALLOWED_TYPES({}, {}, Output, {}, {});".format(
domain, self.optype, o, ", ".join(sorted(self._output_types[o]))
)
)
return entries
def to_config_entry(self):
# convert the sets of types to lists so they can easily be written out using the json module
aggregate_info = {"inputs": {}, "outputs": {}}
# filter out empty entries and sort the types
for i in sorted(self._input_types.keys()):
if self._input_types[i]:
aggregate_info["inputs"][i] = sorted(self._input_types[i])
for o in sorted(self._output_types.keys()):
if self._output_types[o]:
aggregate_info["outputs"][o] = sorted(self._output_types[o])
# remove any empty keys
if not aggregate_info["inputs"]:
aggregate_info.pop("inputs")
if not aggregate_info["outputs"]:
aggregate_info.pop("outputs")
entry = json.dumps(aggregate_info) if aggregate_info else None
return entry
def from_config_entry(self, entry: str):
self._input_types.clear()
self._output_types.clear()
aggregate_info = json.loads(entry)
if "inputs" in aggregate_info:
for i_str, values in aggregate_info["inputs"].items():
self._input_types[int(i_str)] = set(values)
if "outputs" in aggregate_info:
for o_str, values in aggregate_info["outputs"].items():
self._output_types[int(o_str)] = set(values)
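# Example: a processor tracking input 0 that has seen float and int64_t data would round-trip
# the config entry '{"inputs": {"0": ["float", "int64_t"]}}' through to_config_entry/from_config_entry.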
class Input1TypedRegistrationProcessor(DefaultTypeUsageProcessor):
"""
Processor for operators where the second input type is used in a typed kernel registration.
"""
def __init__(self, domain: str, optype: str):
# init with tracking of input 1 only.
super().__init__(domain, optype, inputs=[1], outputs=[])
def is_typed_registration_needed(
self, type_in_registration: str, globally_allowed_types: typing.Optional[typing.Set[str]]
):
return self.is_input_type_enabled(type_in_registration, 1, globally_allowed_types)
class Output0TypedRegistrationProcessor(DefaultTypeUsageProcessor):
"""
Processor for operators where the first output type is used in a typed kernel registration.
"""
def __init__(self, domain: str, optype: str):
# init with tracking of output 0 only.
super().__init__(domain, optype, inputs=[], outputs=[0])
def is_typed_registration_needed(
self, type_in_registration: str, globally_allowed_types: typing.Optional[typing.Set[str]]
):
return self.is_output_type_enabled(type_in_registration, 0, globally_allowed_types)
class OneHotProcessor(TypeUsageProcessor):
"""
Processor for the OneHot operator, which requires custom logic as the type registration key is a concatenation of
the three types involved instead of a single type name.
"""
def __init__(self):
super().__init__("ai.onnx", "OneHot")
self._triples = set()
def process_node(self, node: fbs.Node, value_name_to_typeinfo: dict):
type0 = value_name_to_typestr(node.Inputs(0), value_name_to_typeinfo)
type1 = value_name_to_typestr(node.Inputs(1), value_name_to_typeinfo)
type2 = value_name_to_typestr(node.Inputs(2), value_name_to_typeinfo)
# types in kernel registration are ordered this way: input (T1), output (T3), depth (T2)
key = (type0, type2, type1)
self._triples.add(key)
def is_typed_registration_needed(
self, type_in_registration: str, globally_allowed_types: typing.Optional[typing.Set[str]]
):
# the OneHot registration involves a concatenation of the 3 types involved
reg_types = tuple([_reg_type_to_cpp_type(reg_type) for reg_type in _split_reg_types(type_in_registration)])
if globally_allowed_types is not None:
return all(reg_type in globally_allowed_types for reg_type in reg_types)
else:
return reg_types in self._triples
def to_config_entry(self):
if not self._triples:
return None
aggregate_info = {"custom": sorted(self._triples)}
entry = json.dumps(aggregate_info)
return entry
def from_config_entry(self, entry: str):
self._triples.clear()
aggregate_info = json.loads(entry)
if "custom" in aggregate_info:
self._triples = {tuple(triple) for triple in aggregate_info["custom"]}
def _create_operator_type_usage_processors():
"""
Create a set of processors that determine the required types for all enabled operators.
:return: Dictionary of operator key to processor. Key is 'domain:operator (e.g. ai.onnx:Cast)'.
"""
operator_processors = {}
def add(processor):
if processor.name in operator_processors:
raise RuntimeError("Duplicate processor for " + processor.name)
operator_processors[processor.name] = processor
# Starting with ops from:
# - Priority 1P models
# - Mobilenet + SSD Mobilenet + MobileBert
# - some known large kernels
#
# Ops we are ignoring currently so as not to produce meaningless/unused output:
# - Implementation is type agnostic:
# ai.onnx: If, Loop, Reshape, Scan, Shape, Squeeze, Tile, Unsqueeze
# com.microsoft: DynamicQuantizeMatMul, MatMulIntegerToFloat
# - Only one type supported in the ORT implementation:
# ai.onnx: NonMaxSuppression
# com.microsoft: FusedConv, FusedGemm, FusedMatMul
# - Implementation does not have any significant type specific code:
# ai.onnx: Concat, Flatten, Not, Reshape, Shape, Squeeze, Unsqueeze
#
default_processor_onnx_ops = [
"Abs",
"ArgMax",
"ArgMin",
"AveragePool",
"BatchNormalization",
"BitShift",
"Ceil",
"Clip",
"Conv",
"CumSum",
"Exp",
"Expand",
"Floor",
"Gemm",
"IsNaN",
"Log",
"LogSoftmax",
"LpNormalization",
"MatMul",
"Max",
"MaxPool",
"Mean",
"Min",
"NonZero",
"Pad",
"QLinearConv",
"QLinearMatMul",
"Range",
"Reciprocal",
"ReduceL1",
"ReduceL2",
"ReduceLogSum",
"ReduceLogSumExp",
"ReduceMax",
"ReduceMean",
"ReduceMin",
"ReduceProd",
"ReduceSum",
"ReduceSumSquare",
"Relu",
"Resize",
"ReverseSequence",
"RoiAlign",
"Round",
"Scatter",
"ScatterElements",
"ScatterND",
"Shrink",
"Sigmoid",
"Sign",
"Sin",
"Softmax",
"Split",
"SplitToSequence",
"Sqrt",
"Sum",
"Tanh",
"TopK",
"Transpose",
"Unique",
]
# ops that are used to manipulate shapes or indices so require int32_t and int64_t to be available
default_processor_onnx_ops_requiring_ints_for_input_0 = [
"Add",
"Concat",
"Div",
"Equal",
"Greater",
"Less",
"Mul",
"Neg", # used in tflite TransposeConv conversion
"Sub",
]
# NOTE: QLinearConv has ONNX and internal implementations
internal_ops = ["QLinearAdd", "QLinearMul", "QLinearConv"]
# TODO - review and add ML ops as needed
# ML Op notes.
# CastMap: Switch on value type of input map type, and output type
# DictVectorizer: Templatized on key+value of input so need to handle like OneHot with custom processor
# LabelEncoder: Implementation switches on input and output types (only supports string and int64 in T1 and T2)
# LinearClassifier: Internal switch on input type and also switch on output type
# SVMClassifier: ditto
# TreeEnsembleClassifier: Templatized on input type and also switch on output type
# ZipMap: Switch on output type (derived from attributes)
default_processor_onnxml_ops = []
[add(DefaultTypeUsageProcessor("ai.onnx", op)) for op in default_processor_onnx_ops]
[
add(DefaultTypeUsageProcessor("ai.onnx", op, required_input_types={0: {"int32_t", "int64_t"}}))
for op in default_processor_onnx_ops_requiring_ints_for_input_0
]
[add(DefaultTypeUsageProcessor("ai.onnx.ml", op)) for op in default_processor_onnxml_ops]
[add(DefaultTypeUsageProcessor("com.microsoft", op)) for op in internal_ops]
#
# Operators that require custom handling
#
# Cast switches on types of input 0 and output 0
add(DefaultTypeUsageProcessor("ai.onnx", "Cast", inputs=[0], outputs=[0]))
# Operators that switch on the type of input 0 and 1
add(DefaultTypeUsageProcessor("ai.onnx", "Gather", inputs=[0, 1]))
add(DefaultTypeUsageProcessor("ai.onnx", "GatherElements", inputs=[0, 1]))
add(DefaultTypeUsageProcessor("ai.onnx", "Pow", inputs=[0, 1]))
add(DefaultTypeUsageProcessor("ai.onnx", "Slice", inputs=[0, 1]))
# Operators that switch on output type
add(DefaultTypeUsageProcessor("ai.onnx", "ConstantOfShape", inputs=[], outputs=[0]))
# Random generator ops produce new data so we track the output type
onnx_random_ops = ["RandomNormal", "RandomNormalLike", "RandomUniform", "RandomUniformLike", "Multinomial"]
[add(DefaultTypeUsageProcessor("ai.onnx", op, inputs=[], outputs=[0])) for op in onnx_random_ops]
# Where always has a boolean first input so track the second input type for typed registration
add(Input1TypedRegistrationProcessor("ai.onnx", "Where"))
# we only support 'float' as input for [Dynamic]QuantizeLinear so just track the output type
# as that's what is used in the typed registration
add(Output0TypedRegistrationProcessor("ai.onnx", "QuantizeLinear"))
add(Output0TypedRegistrationProcessor("ai.onnx", "DynamicQuantizeLinear"))
# make sure all the dequantize types are enabled. we use int32_t for parts of GEMM and Conv so just
# enabling int8 and uint8 is not enough.
# TODO: Only apply required types to the global type list and ignore if it's model based per-op type reduction
add(
DefaultTypeUsageProcessor(
"ai.onnx", "DequantizeLinear", inputs=[0], required_input_types={0: {"int8_t", "uint8_t", "int32_t"}}
)
)
# OneHot concatenates type strings into a triple in the typed registration
# e.g. float_int64_t_int64_t
add(OneHotProcessor())
return operator_processors
class OpTypeImplFilterInterface(ABC):
"""
Class that filters operator implementations based on type.
"""
@abstractmethod
def is_typed_registration_needed(self, domain: str, optype: str, type_registration_str: str):
"""
Given the string from a kernel registration, determine if the registration is required or not.
:param domain: Operator domain.
:param optype: Operator type.
:param type_registration_str: Type string from kernel registration
:return: True if required. False if not.
"""
@abstractmethod
def get_cpp_entries(self):
"""
Get the C++ code that specifies the operator types to enable.
:return: List of strings. One line of C++ code per entry.
"""
class OperatorTypeUsageManager:
"""
Class to manage the operator type usage processors.
TODO: Currently the type tracking is not specific to a version of the operator.
It's unclear how/where version specific logic could/should be added, and it would add significant complexity
to track types on a per-version basis. Not clear there's enough benefit from doing so either.
"""
def __init__(self):
self._all_operator_processors = _create_operator_type_usage_processors() # all possible processors
self._operator_processors = {} # processors we have actually used so we can limit output to be meaningful
def _get_op_processor(self, key):
"Add the processor to _operator_processors as it is about to be used."
processor = None
if key in self._all_operator_processors:
if key not in self._operator_processors:
self._operator_processors[key] = self._all_operator_processors[key]
processor = self._operator_processors[key]
return processor
def process_node(self, node: fbs.Node, value_name_to_typeinfo: dict):
"""
Process a Node and record info on the types used.
:param node: Node from ORT format model
:param value_name_to_typeinfo: Map of value names to TypeInfo instances
"""
optype = node.OpType().decode()
domain = node.Domain().decode() or "ai.onnx" # empty domain defaults to ai.onnx
key = _create_op_key(domain, optype)
op_processor = self._get_op_processor(key)
if op_processor:
op_processor.process_node(node, value_name_to_typeinfo)
def get_config_entry(self, domain: str, optype: str):
"""
Get the config entry specifying the types for this operator.
:param domain: Operator domain.
:param optype: Operator type.
:return: JSON string with type info if available, else None
"""
key = _create_op_key(domain, optype)
config_str = None
if key in self._operator_processors:
config_str = self._operator_processors[key].to_config_entry()
return config_str
def restore_from_config_entry(self, domain: str, optype: str, config_entry: str):
"""
Restore the per-operator type information from a configuration file entry.
:param domain: Operator domain.
:param optype: Operator type.
:param config_entry: JSON string with type info as created by get_config_entry
"""
key = _create_op_key(domain, optype)
op_processor = self._get_op_processor(key)
if op_processor:
op_processor.from_config_entry(config_entry)
def debug_dump(self):
print("C++ code that will be emitted:")
[print(cpp_line) for cpp_line in self.get_cpp_entries()]
print("Config file type information that will be returned by get_config_entry:")
for key in sorted(self._operator_processors.keys()):
entry = self._operator_processors[key].to_config_entry()
if entry:
print(f"{key} -> {entry}")
# roundtrip test to validate that we can initialize the processor from the entry and get the
# same values back
self._operator_processors[key].from_config_entry(entry)
assert entry == self._operator_processors[key].to_config_entry()
class _OpTypeImplFilter(OpTypeImplFilterInterface):
def __init__(self, manager):
self._manager = manager
def is_typed_registration_needed(self, domain: str, optype: str, type_registration_str: str):
needed = True # we keep the registration unless the per-operator processor says not to
key = _create_op_key(domain, optype)
if key in self._manager._operator_processors:
needed = self._manager._operator_processors[key].is_typed_registration_needed(
type_in_registration=type_registration_str, globally_allowed_types=None
)
return needed
def get_cpp_entries(self):
entries = []
for key in sorted(self._manager._operator_processors.keys()):
entries.extend(self._manager._operator_processors[key].get_cpp_entry())
return entries
def make_op_type_impl_filter(self):
"""
Creates an OpTypeImplFilterInterface instance from this manager.
Filtering uses the manager's operator type usage processor state.
"""
return OperatorTypeUsageManager._OpTypeImplFilter(self)
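# Typical flow, as a hedged sketch (assumes `graph` is an fbs.Graph from an ORT
# format model and `value_name_to_typeinfo` maps value names to fbs.TypeInfo):
#
#   manager = OperatorTypeUsageManager()
#   for i in range(graph.NodesLength()):
#       manager.process_node(graph.Nodes(i), value_name_to_typeinfo)
#   entry = manager.get_config_entry("ai.onnx", "Cast")  # JSON string, or None if unused
#   impl_filter = manager.make_op_type_impl_filter()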
class GloballyAllowedTypesOpTypeImplFilter(OpTypeImplFilterInterface):
"""
Operator implementation filter which uses globally allowed types.
"""
_valid_allowed_types = set(FbsTypeInfo.tensordatatype_to_string.values()) # noqa: RUF012
def __init__(self, globally_allowed_types: typing.Set[str]):
self._operator_processors = _create_operator_type_usage_processors()
if not globally_allowed_types.issubset(self._valid_allowed_types):
raise ValueError(
f"Globally allowed types must all be valid. Invalid types: {sorted(globally_allowed_types - self._valid_allowed_types)}"
)
self._globally_allowed_types = globally_allowed_types
def is_typed_registration_needed(self, domain: str, optype: str, type_registration_str: str):
key = _create_op_key(domain, optype)
if key in self._operator_processors:
needed = self._operator_processors[key].is_typed_registration_needed(
type_in_registration=type_registration_str, globally_allowed_types=self._globally_allowed_types
)
else:
needed = _reg_type_to_cpp_type(type_registration_str) in self._globally_allowed_types
return needed
def get_cpp_entries(self):
return [
"ORT_SPECIFY_OP_KERNEL_GLOBAL_ALLOWED_TYPES({});".format(", ".join(sorted(self._globally_allowed_types)))
]
def global_type_list(self):
return self._globally_allowed_types
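# Hedged usage sketch: restrict kernel registrations to a global set of types.
# The type names must come from FbsTypeInfo.tensordatatype_to_string values.
#
#   impl_filter = GloballyAllowedTypesOpTypeImplFilter({"float", "int64_t"})
#   impl_filter.is_typed_registration_needed("ai.onnx", "Add", "double")  # -> False
#   impl_filter.get_cpp_entries()
#   # -> ['ORT_SPECIFY_OP_KERNEL_GLOBAL_ALLOWED_TYPES(float, int64_t);']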

View File

@ -0,0 +1,7 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
class ArgType(object):
INPUT = 0
OUTPUT = 1

View File

@ -0,0 +1,67 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ArgTypeAndIndex(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ArgTypeAndIndex()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsArgTypeAndIndex(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ArgTypeAndIndexBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# ArgTypeAndIndex
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ArgTypeAndIndex
def ArgType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# ArgTypeAndIndex
def Index(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
def ArgTypeAndIndexStart(builder):
builder.StartObject(2)
def Start(builder):
ArgTypeAndIndexStart(builder)
def ArgTypeAndIndexAddArgType(builder, argType):
builder.PrependInt8Slot(0, argType, 0)
def AddArgType(builder, argType):
ArgTypeAndIndexAddArgType(builder, argType)
def ArgTypeAndIndexAddIndex(builder, index):
builder.PrependUint32Slot(1, index, 0)
def AddIndex(builder, index):
ArgTypeAndIndexAddIndex(builder, index)
def ArgTypeAndIndexEnd(builder):
return builder.EndObject()
def End(builder):
return ArgTypeAndIndexEnd(builder)
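# Illustrative round-trip (not part of the generated file): build and read an
# ArgTypeAndIndex table with the helpers above. Assumes the `flatbuffers` pip
# package and the generated `ort_flatbuffers_py` package are importable.
#
#   import flatbuffers
#   from ort_flatbuffers_py.fbs import ArgTypeAndIndex as ATI
#   builder = flatbuffers.Builder(0)
#   ATI.Start(builder)
#   ATI.AddArgType(builder, 1)  # fbs.ArgType.OUTPUT
#   ATI.AddIndex(builder, 0)
#   builder.Finish(ATI.End(builder))
#   entry = ATI.ArgTypeAndIndex.GetRootAs(builder.Output(), 0)
#   assert entry.ArgType() == 1 and entry.Index() == 0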

View File

@ -0,0 +1,337 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Attribute(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Attribute()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsAttribute(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def AttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# Attribute
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Attribute
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Attribute
def DocString(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Attribute
def Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Attribute
def F(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# Attribute
def I(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Attribute
def S(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Attribute
def T(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.Tensor import Tensor
obj = Tensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Attribute
def G(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.Graph import Graph
obj = Graph()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Attribute
def Floats(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# Attribute
def FloatsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# Attribute
def FloatsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Attribute
def FloatsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
return o == 0
# Attribute
def Ints(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Attribute
def IntsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
return 0
# Attribute
def IntsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Attribute
def IntsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
return o == 0
# Attribute
def Strings(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Attribute
def StringsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Attribute
def StringsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
return o == 0
# Attribute
def Tensors(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Tensor import Tensor
obj = Tensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Attribute
def TensorsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Attribute
def TensorsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
return o == 0
# Attribute
def Graphs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Graph import Graph
obj = Graph()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Attribute
def GraphsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Attribute
def GraphsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
return o == 0
def AttributeStart(builder):
builder.StartObject(13)
def Start(builder):
AttributeStart(builder)
def AttributeAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
AttributeAddName(builder, name)
def AttributeAddDocString(builder, docString):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(docString), 0)
def AddDocString(builder, docString):
AttributeAddDocString(builder, docString)
def AttributeAddType(builder, type):
builder.PrependInt32Slot(2, type, 0)
def AddType(builder, type):
AttributeAddType(builder, type)
def AttributeAddF(builder, f):
builder.PrependFloat32Slot(3, f, 0.0)
def AddF(builder, f):
AttributeAddF(builder, f)
def AttributeAddI(builder, i):
builder.PrependInt64Slot(4, i, 0)
def AddI(builder, i):
AttributeAddI(builder, i)
def AttributeAddS(builder, s):
builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(s), 0)
def AddS(builder, s):
AttributeAddS(builder, s)
def AttributeAddT(builder, t):
builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(t), 0)
def AddT(builder, t):
AttributeAddT(builder, t)
def AttributeAddG(builder, g):
builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(g), 0)
def AddG(builder, g):
AttributeAddG(builder, g)
def AttributeAddFloats(builder, floats):
builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(floats), 0)
def AddFloats(builder, floats):
AttributeAddFloats(builder, floats)
def AttributeStartFloatsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartFloatsVector(builder, numElems: int) -> int:
return AttributeStartFloatsVector(builder, numElems)
def AttributeAddInts(builder, ints):
builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(ints), 0)
def AddInts(builder, ints):
AttributeAddInts(builder, ints)
def AttributeStartIntsVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def StartIntsVector(builder, numElems: int) -> int:
return AttributeStartIntsVector(builder, numElems)
def AttributeAddStrings(builder, strings):
builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(strings), 0)
def AddStrings(builder, strings):
AttributeAddStrings(builder, strings)
def AttributeStartStringsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartStringsVector(builder, numElems: int) -> int:
return AttributeStartStringsVector(builder, numElems)
def AttributeAddTensors(builder, tensors):
builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)
def AddTensors(builder, tensors):
AttributeAddTensors(builder, tensors)
def AttributeStartTensorsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartTensorsVector(builder, numElems: int) -> int:
return AttributeStartTensorsVector(builder, numElems)
def AttributeAddGraphs(builder, graphs):
builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(graphs), 0)
def AddGraphs(builder, graphs):
AttributeAddGraphs(builder, graphs)
def AttributeStartGraphsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartGraphsVector(builder, numElems: int) -> int:
return AttributeStartGraphsVector(builder, numElems)
def AttributeEnd(builder):
return builder.EndObject()
def End(builder):
return AttributeEnd(builder)
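# Hedged reading sketch (not part of the generated file): vector-valued fields
# come with accessor, length, and IsNone helpers. Assumes `attr` is an
# initialized fbs.Attribute instance.
#
#   if not attr.FloatsIsNone():
#       values = [attr.Floats(i) for i in range(attr.FloatsLength())]
#       # or attr.FloatsAsNumpy() when numpy is available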

View File

@ -0,0 +1,18 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
class AttributeType(object):
UNDEFINED = 0
FLOAT = 1
INT = 2
STRING = 3
TENSOR = 4
GRAPH = 5
FLOATS = 6
INTS = 7
STRINGS = 8
TENSORS = 9
GRAPHS = 10
SPARSE_TENSOR = 11
SPARSE_TENSORS = 12

View File

@ -0,0 +1,125 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Checkpoint(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Checkpoint()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsCheckpoint(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def CheckpointBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x44\x54\x43", size_prefixed=size_prefixed)
# Checkpoint
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Checkpoint
def Version(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Checkpoint
def ModuleState(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.ModuleState import ModuleState
obj = ModuleState()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Checkpoint
def OptimizerGroups(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.OptimizerGroup import OptimizerGroup
obj = OptimizerGroup()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Checkpoint
def OptimizerGroupsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Checkpoint
def OptimizerGroupsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
# Checkpoint
def PropertyBag(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.PropertyBag import PropertyBag
obj = PropertyBag()
obj.Init(self._tab.Bytes, x)
return obj
return None
def CheckpointStart(builder):
builder.StartObject(4)
def Start(builder):
CheckpointStart(builder)
def CheckpointAddVersion(builder, version):
builder.PrependInt32Slot(0, version, 0)
def AddVersion(builder, version):
CheckpointAddVersion(builder, version)
def CheckpointAddModuleState(builder, moduleState):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(moduleState), 0)
def AddModuleState(builder, moduleState):
CheckpointAddModuleState(builder, moduleState)
def CheckpointAddOptimizerGroups(builder, optimizerGroups):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(optimizerGroups), 0)
def AddOptimizerGroups(builder, optimizerGroups):
CheckpointAddOptimizerGroups(builder, optimizerGroups)
def CheckpointStartOptimizerGroupsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartOptimizerGroupsVector(builder, numElems: int) -> int:
return CheckpointStartOptimizerGroupsVector(builder, numElems)
def CheckpointAddPropertyBag(builder, propertyBag):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(propertyBag), 0)
def AddPropertyBag(builder, propertyBag):
CheckpointAddPropertyBag(builder, propertyBag)
def CheckpointEnd(builder):
return builder.EndObject()
def End(builder):
return CheckpointEnd(builder)

View File

@ -0,0 +1,120 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# deprecated: no longer using kernel def hashes
class DeprecatedKernelCreateInfos(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DeprecatedKernelCreateInfos()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsDeprecatedKernelCreateInfos(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def DeprecatedKernelCreateInfosBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# DeprecatedKernelCreateInfos
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DeprecatedKernelCreateInfos
def NodeIndices(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# DeprecatedKernelCreateInfos
def NodeIndicesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
return 0
# DeprecatedKernelCreateInfos
def NodeIndicesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# DeprecatedKernelCreateInfos
def NodeIndicesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# DeprecatedKernelCreateInfos
def KernelDefHashes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# DeprecatedKernelCreateInfos
def KernelDefHashesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
return 0
# DeprecatedKernelCreateInfos
def KernelDefHashesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# DeprecatedKernelCreateInfos
def KernelDefHashesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
def DeprecatedKernelCreateInfosStart(builder):
builder.StartObject(2)
def Start(builder):
DeprecatedKernelCreateInfosStart(builder)
def DeprecatedKernelCreateInfosAddNodeIndices(builder, nodeIndices):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(nodeIndices), 0)
def AddNodeIndices(builder, nodeIndices):
DeprecatedKernelCreateInfosAddNodeIndices(builder, nodeIndices)
def DeprecatedKernelCreateInfosStartNodeIndicesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartNodeIndicesVector(builder, numElems: int) -> int:
return DeprecatedKernelCreateInfosStartNodeIndicesVector(builder, numElems)
def DeprecatedKernelCreateInfosAddKernelDefHashes(builder, kernelDefHashes):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernelDefHashes), 0)
def AddKernelDefHashes(builder, kernelDefHashes):
DeprecatedKernelCreateInfosAddKernelDefHashes(builder, kernelDefHashes)
def DeprecatedKernelCreateInfosStartKernelDefHashesVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def StartKernelDefHashesVector(builder, numElems: int) -> int:
return DeprecatedKernelCreateInfosStartKernelDefHashesVector(builder, numElems)
def DeprecatedKernelCreateInfosEnd(builder):
return builder.EndObject()
def End(builder):
return DeprecatedKernelCreateInfosEnd(builder)

View File

@ -0,0 +1,68 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# deprecated: no longer using kernel def hashes
class DeprecatedNodeIndexAndKernelDefHash(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DeprecatedNodeIndexAndKernelDefHash()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsDeprecatedNodeIndexAndKernelDefHash(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def DeprecatedNodeIndexAndKernelDefHashBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# DeprecatedNodeIndexAndKernelDefHash
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DeprecatedNodeIndexAndKernelDefHash
def NodeIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# DeprecatedNodeIndexAndKernelDefHash
def KernelDefHash(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
def DeprecatedNodeIndexAndKernelDefHashStart(builder):
builder.StartObject(2)
def Start(builder):
DeprecatedNodeIndexAndKernelDefHashStart(builder)
def DeprecatedNodeIndexAndKernelDefHashAddNodeIndex(builder, nodeIndex):
builder.PrependUint32Slot(0, nodeIndex, 0)
def AddNodeIndex(builder, nodeIndex):
DeprecatedNodeIndexAndKernelDefHashAddNodeIndex(builder, nodeIndex)
def DeprecatedNodeIndexAndKernelDefHashAddKernelDefHash(builder, kernelDefHash):
builder.PrependUint64Slot(1, kernelDefHash, 0)
def AddKernelDefHash(builder, kernelDefHash):
DeprecatedNodeIndexAndKernelDefHashAddKernelDefHash(builder, kernelDefHash)
def DeprecatedNodeIndexAndKernelDefHashEnd(builder):
return builder.EndObject()
def End(builder):
return DeprecatedNodeIndexAndKernelDefHashEnd(builder)

View File

@ -0,0 +1,96 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# deprecated: no longer using kernel def hashes
class DeprecatedSessionState(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DeprecatedSessionState()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsDeprecatedSessionState(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def DeprecatedSessionStateBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# DeprecatedSessionState
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DeprecatedSessionState
def Kernels(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.DeprecatedKernelCreateInfos import DeprecatedKernelCreateInfos
obj = DeprecatedKernelCreateInfos()
obj.Init(self._tab.Bytes, x)
return obj
return None
# DeprecatedSessionState
def SubGraphSessionStates(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.DeprecatedSubGraphSessionState import DeprecatedSubGraphSessionState
obj = DeprecatedSubGraphSessionState()
obj.Init(self._tab.Bytes, x)
return obj
return None
# DeprecatedSessionState
def SubGraphSessionStatesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# DeprecatedSessionState
def SubGraphSessionStatesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
def DeprecatedSessionStateStart(builder):
builder.StartObject(2)
def Start(builder):
DeprecatedSessionStateStart(builder)
def DeprecatedSessionStateAddKernels(builder, kernels):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(kernels), 0)
def AddKernels(builder, kernels):
DeprecatedSessionStateAddKernels(builder, kernels)
def DeprecatedSessionStateAddSubGraphSessionStates(builder, subGraphSessionStates):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(subGraphSessionStates), 0)
def AddSubGraphSessionStates(builder, subGraphSessionStates):
DeprecatedSessionStateAddSubGraphSessionStates(builder, subGraphSessionStates)
def DeprecatedSessionStateStartSubGraphSessionStatesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartSubGraphSessionStatesVector(builder, numElems: int) -> int:
return DeprecatedSessionStateStartSubGraphSessionStatesVector(builder, numElems)
def DeprecatedSessionStateEnd(builder):
return builder.EndObject()
def End(builder):
return DeprecatedSessionStateEnd(builder)

View File

@ -0,0 +1,72 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# deprecated: no longer using kernel def hashes
class DeprecatedSubGraphSessionState(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DeprecatedSubGraphSessionState()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsDeprecatedSubGraphSessionState(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def DeprecatedSubGraphSessionStateBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# DeprecatedSubGraphSessionState
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DeprecatedSubGraphSessionState
def GraphId(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# DeprecatedSubGraphSessionState
def SessionState(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.DeprecatedSessionState import DeprecatedSessionState
obj = DeprecatedSessionState()
obj.Init(self._tab.Bytes, x)
return obj
return None
def DeprecatedSubGraphSessionStateStart(builder):
builder.StartObject(2)
def Start(builder):
DeprecatedSubGraphSessionStateStart(builder)
def DeprecatedSubGraphSessionStateAddGraphId(builder, graphId):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(graphId), 0)
def AddGraphId(builder, graphId):
DeprecatedSubGraphSessionStateAddGraphId(builder, graphId)
def DeprecatedSubGraphSessionStateAddSessionState(builder, sessionState):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(sessionState), 0)
def AddSessionState(builder, sessionState):
DeprecatedSubGraphSessionStateAddSessionState(builder, sessionState)
def DeprecatedSubGraphSessionStateEnd(builder):
return builder.EndObject()
def End(builder):
return DeprecatedSubGraphSessionStateEnd(builder)

View File

@ -0,0 +1,71 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Dimension(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Dimension()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsDimension(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def DimensionBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# Dimension
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Dimension
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.DimensionValue import DimensionValue
obj = DimensionValue()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Dimension
def Denotation(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def DimensionStart(builder):
builder.StartObject(2)
def Start(builder):
DimensionStart(builder)
def DimensionAddValue(builder, value):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def AddValue(builder, value):
DimensionAddValue(builder, value)
def DimensionAddDenotation(builder, denotation):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(denotation), 0)
def AddDenotation(builder, denotation):
DimensionAddDenotation(builder, denotation)
def DimensionEnd(builder):
return builder.EndObject()
def End(builder):
return DimensionEnd(builder)

View File

@ -0,0 +1,80 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class DimensionValue(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DimensionValue()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsDimensionValue(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def DimensionValueBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# DimensionValue
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DimensionValue
def DimType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# DimensionValue
def DimValue(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# DimensionValue
def DimParam(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def DimensionValueStart(builder):
builder.StartObject(3)
def Start(builder):
DimensionValueStart(builder)
def DimensionValueAddDimType(builder, dimType):
builder.PrependInt8Slot(0, dimType, 0)
def AddDimType(builder, dimType):
DimensionValueAddDimType(builder, dimType)
def DimensionValueAddDimValue(builder, dimValue):
builder.PrependInt64Slot(1, dimValue, 0)
def AddDimValue(builder, dimValue):
DimensionValueAddDimValue(builder, dimValue)
def DimensionValueAddDimParam(builder, dimParam):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dimParam), 0)
def AddDimParam(builder, dimParam):
DimensionValueAddDimParam(builder, dimParam)
def DimensionValueEnd(builder):
return builder.EndObject()
def End(builder):
return DimensionValueEnd(builder)

View File

@ -0,0 +1,8 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
class DimensionValueType(object):
UNKNOWN = 0
VALUE = 1
PARAM = 2

View File

@ -0,0 +1,32 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class EdgeEnd(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 12
# EdgeEnd
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# EdgeEnd
def NodeIndex(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# EdgeEnd
def SrcArgIndex(self): return self._tab.Get(flatbuffers.number_types.Int32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
# EdgeEnd
def DstArgIndex(self): return self._tab.Get(flatbuffers.number_types.Int32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8))
def CreateEdgeEnd(builder, nodeIndex, srcArgIndex, dstArgIndex):
builder.Prep(4, 12)
builder.PrependInt32(dstArgIndex)
builder.PrependInt32(srcArgIndex)
builder.PrependUint32(nodeIndex)
return builder.Offset()
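# Note (illustrative, not generated): EdgeEnd is a fixed-size 12-byte struct, so
# there is no Start/End pair. CreateEdgeEnd writes the struct inline at the
# builder's current position, typically while a parent vector of structs is
# being built. Sketch, assuming `edges` is a list of
# (node_index, src_arg_index, dst_arg_index) tuples and a recent flatbuffers
# package where EndVector takes no arguments:
#
#   builder.StartVector(12, len(edges), 4)
#   for node_index, src_arg_index, dst_arg_index in reversed(edges):
#       CreateEdgeEnd(builder, node_index, src_arg_index, dst_arg_index)
#   edges_vector = builder.EndVector()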

View File

@ -0,0 +1,67 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class FloatProperty(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = FloatProperty()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsFloatProperty(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def FloatPropertyBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x44\x54\x43", size_prefixed=size_prefixed)
# FloatProperty
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# FloatProperty
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# FloatProperty
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
def FloatPropertyStart(builder):
builder.StartObject(2)
def Start(builder):
FloatPropertyStart(builder)
def FloatPropertyAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
FloatPropertyAddName(builder, name)
def FloatPropertyAddValue(builder, value):
builder.PrependFloat32Slot(1, value, 0.0)
def AddValue(builder, value):
FloatPropertyAddValue(builder, value)
def FloatPropertyEnd(builder):
return builder.EndObject()
def End(builder):
return FloatPropertyEnd(builder)

View File

@ -0,0 +1,320 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Graph(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Graph()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsGraph(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def GraphBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# Graph
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Graph
def Initializers(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Tensor import Tensor
obj = Tensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Graph
def InitializersLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Graph
def InitializersIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# Graph
def NodeArgs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.ValueInfo import ValueInfo
obj = ValueInfo()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Graph
def NodeArgsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Graph
def NodeArgsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# Graph
def Nodes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Node import Node
obj = Node()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Graph
def NodesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Graph
def NodesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
# Graph
def MaxNodeIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Graph
def NodeEdges(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.NodeEdge import NodeEdge
obj = NodeEdge()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Graph
def NodeEdgesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Graph
def NodeEdgesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
return o == 0
# Graph
def Inputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Graph
def InputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Graph
def InputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
return o == 0
# Graph
def Outputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Graph
def OutputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Graph
def OutputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
return o == 0
# Graph
def SparseInitializers(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.SparseTensor import SparseTensor
obj = SparseTensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Graph
def SparseInitializersLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Graph
def SparseInitializersIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
return o == 0
# Graph
def RuntimeOptimizations(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.RuntimeOptimizations import RuntimeOptimizations
obj = RuntimeOptimizations()
obj.Init(self._tab.Bytes, x)
return obj
return None
def GraphStart(builder):
builder.StartObject(9)
def Start(builder):
GraphStart(builder)
def GraphAddInitializers(builder, initializers):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(initializers), 0)
def AddInitializers(builder, initializers):
GraphAddInitializers(builder, initializers)
def GraphStartInitializersVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartInitializersVector(builder, numElems: int) -> int:
return GraphStartInitializersVector(builder, numElems)
def GraphAddNodeArgs(builder, nodeArgs):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(nodeArgs), 0)
def AddNodeArgs(builder, nodeArgs):
GraphAddNodeArgs(builder, nodeArgs)
def GraphStartNodeArgsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartNodeArgsVector(builder, numElems: int) -> int:
return GraphStartNodeArgsVector(builder, numElems)
def GraphAddNodes(builder, nodes):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(nodes), 0)
def AddNodes(builder, nodes):
GraphAddNodes(builder, nodes)
def GraphStartNodesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartNodesVector(builder, numElems: int) -> int:
return GraphStartNodesVector(builder, numElems)
def GraphAddMaxNodeIndex(builder, maxNodeIndex):
builder.PrependUint32Slot(3, maxNodeIndex, 0)
def AddMaxNodeIndex(builder, maxNodeIndex):
GraphAddMaxNodeIndex(builder, maxNodeIndex)
def GraphAddNodeEdges(builder, nodeEdges):
builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(nodeEdges), 0)
def AddNodeEdges(builder, nodeEdges):
GraphAddNodeEdges(builder, nodeEdges)
def GraphStartNodeEdgesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartNodeEdgesVector(builder, numElems: int) -> int:
return GraphStartNodeEdgesVector(builder, numElems)
def GraphAddInputs(builder, inputs):
builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
def AddInputs(builder, inputs):
GraphAddInputs(builder, inputs)
def GraphStartInputsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartInputsVector(builder, numElems: int) -> int:
return GraphStartInputsVector(builder, numElems)
def GraphAddOutputs(builder, outputs):
builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
def AddOutputs(builder, outputs):
GraphAddOutputs(builder, outputs)
def GraphStartOutputsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartOutputsVector(builder, numElems: int) -> int:
return GraphStartOutputsVector(builder, numElems)
def GraphAddSparseInitializers(builder, sparseInitializers):
builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(sparseInitializers), 0)
def AddSparseInitializers(builder, sparseInitializers):
GraphAddSparseInitializers(builder, sparseInitializers)
def GraphStartSparseInitializersVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartSparseInitializersVector(builder, numElems: int) -> int:
return GraphStartSparseInitializersVector(builder, numElems)
def GraphAddRuntimeOptimizations(builder, runtimeOptimizations):
builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(runtimeOptimizations), 0)
def AddRuntimeOptimizations(builder, runtimeOptimizations):
GraphAddRuntimeOptimizations(builder, runtimeOptimizations)
def GraphEnd(builder):
return builder.EndObject()
def End(builder):
return GraphEnd(builder)
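# Hedged traversal sketch (not part of the generated file): walking the nodes of
# an initialized fbs.Graph instance `graph`. String accessors return bytes, and
# an empty node domain means the default "ai.onnx" domain.
#
#   for i in range(graph.NodesLength()):
#       node = graph.Nodes(i)
#       print(node.OpType().decode(), node.Domain().decode() or "ai.onnx")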

View File

@ -0,0 +1,88 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class InferenceSession(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = InferenceSession()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsInferenceSession(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def InferenceSessionBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# InferenceSession
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# InferenceSession
def OrtVersion(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# InferenceSession
def Model(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.Model import Model
obj = Model()
obj.Init(self._tab.Bytes, x)
return obj
return None
# InferenceSession
def KernelTypeStrResolver(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.KernelTypeStrResolver import KernelTypeStrResolver
obj = KernelTypeStrResolver()
obj.Init(self._tab.Bytes, x)
return obj
return None
def InferenceSessionStart(builder):
builder.StartObject(4)
def Start(builder):
InferenceSessionStart(builder)
def InferenceSessionAddOrtVersion(builder, ortVersion):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(ortVersion), 0)
def AddOrtVersion(builder, ortVersion):
InferenceSessionAddOrtVersion(builder, ortVersion)
def InferenceSessionAddModel(builder, model):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(model), 0)
def AddModel(builder, model):
InferenceSessionAddModel(builder, model)
def InferenceSessionAddKernelTypeStrResolver(builder, kernelTypeStrResolver):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(kernelTypeStrResolver), 0)
def AddKernelTypeStrResolver(builder, kernelTypeStrResolver):
InferenceSessionAddKernelTypeStrResolver(builder, kernelTypeStrResolver)
def InferenceSessionEnd(builder):
return builder.EndObject()
def End(builder):
return InferenceSessionEnd(builder)

View File

@ -0,0 +1,67 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class IntProperty(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = IntProperty()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsIntProperty(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def IntPropertyBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x44\x54\x43", size_prefixed=size_prefixed)
# IntProperty
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# IntProperty
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# IntProperty
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
def IntPropertyStart(builder):
builder.StartObject(2)
def Start(builder):
IntPropertyStart(builder)
def IntPropertyAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
IntPropertyAddName(builder, name)
def IntPropertyAddValue(builder, value):
builder.PrependInt64Slot(1, value, 0)
def AddValue(builder, value):
IntPropertyAddValue(builder, value)
def IntPropertyEnd(builder):
return builder.EndObject()
def End(builder):
return IntPropertyEnd(builder)

View File

@ -0,0 +1,91 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class KernelTypeStrArgsEntry(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = KernelTypeStrArgsEntry()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsKernelTypeStrArgsEntry(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def KernelTypeStrArgsEntryBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# KernelTypeStrArgsEntry
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# KernelTypeStrArgsEntry
def KernelTypeStr(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# KernelTypeStrArgsEntry
def Args(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.ArgTypeAndIndex import ArgTypeAndIndex
obj = ArgTypeAndIndex()
obj.Init(self._tab.Bytes, x)
return obj
return None
# KernelTypeStrArgsEntry
def ArgsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# KernelTypeStrArgsEntry
def ArgsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
def KernelTypeStrArgsEntryStart(builder):
builder.StartObject(2)
def Start(builder):
KernelTypeStrArgsEntryStart(builder)
def KernelTypeStrArgsEntryAddKernelTypeStr(builder, kernelTypeStr):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(kernelTypeStr), 0)
def AddKernelTypeStr(builder, kernelTypeStr):
KernelTypeStrArgsEntryAddKernelTypeStr(builder, kernelTypeStr)
def KernelTypeStrArgsEntryAddArgs(builder, args):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(args), 0)
def AddArgs(builder, args):
KernelTypeStrArgsEntryAddArgs(builder, args)
def KernelTypeStrArgsEntryStartArgsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartArgsVector(builder, numElems: int) -> int:
return KernelTypeStrArgsEntryStartArgsVector(builder, numElems)
def KernelTypeStrArgsEntryEnd(builder):
return builder.EndObject()
def End(builder):
return KernelTypeStrArgsEntryEnd(builder)

View File

@ -0,0 +1,78 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class KernelTypeStrResolver(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = KernelTypeStrResolver()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsKernelTypeStrResolver(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def KernelTypeStrResolverBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# KernelTypeStrResolver
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# KernelTypeStrResolver
def OpKernelTypeStrArgs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.OpIdKernelTypeStrArgsEntry import OpIdKernelTypeStrArgsEntry
obj = OpIdKernelTypeStrArgsEntry()
obj.Init(self._tab.Bytes, x)
return obj
return None
# KernelTypeStrResolver
def OpKernelTypeStrArgsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# KernelTypeStrResolver
def OpKernelTypeStrArgsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
def KernelTypeStrResolverStart(builder):
builder.StartObject(1)
def Start(builder):
KernelTypeStrResolverStart(builder)
def KernelTypeStrResolverAddOpKernelTypeStrArgs(builder, opKernelTypeStrArgs):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(opKernelTypeStrArgs), 0)
def AddOpKernelTypeStrArgs(builder, opKernelTypeStrArgs):
KernelTypeStrResolverAddOpKernelTypeStrArgs(builder, opKernelTypeStrArgs)
def KernelTypeStrResolverStartOpKernelTypeStrArgsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartOpKernelTypeStrArgsVector(builder, numElems: int) -> int:
return KernelTypeStrResolverStartOpKernelTypeStrArgsVector(builder, numElems)
def KernelTypeStrResolverEnd(builder):
return builder.EndObject()
def End(builder):
return KernelTypeStrResolverEnd(builder)
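A minimal traversal sketch (not part of the generated file), assuming `buf` holds a serialized buffer whose root is a KernelTypeStrResolver: vectors of tables are read with the (accessor, Length, IsNone) triple pattern, and strings come back as bytes.

from ort_flatbuffers_py.fbs.KernelTypeStrResolver import KernelTypeStrResolver

def dump_resolver(buf: bytes) -> None:
    # Walk the op_id -> kernel-type-string args entries.
    r = KernelTypeStrResolver.GetRootAs(buf, 0)
    for i in range(r.OpKernelTypeStrArgsLength()):
        entry = r.OpKernelTypeStrArgs(i)
        op_id = (entry.OpId() or b"").decode("utf-8")
        print(op_id, entry.KernelTypeStrArgsLength())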

View File

@ -0,0 +1,71 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class MapType(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = MapType()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsMapType(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def MapTypeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# MapType
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# MapType
def KeyType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# MapType
def ValueType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.TypeInfo import TypeInfo
obj = TypeInfo()
obj.Init(self._tab.Bytes, x)
return obj
return None
def MapTypeStart(builder):
builder.StartObject(2)
def Start(builder):
MapTypeStart(builder)
def MapTypeAddKeyType(builder, keyType):
builder.PrependInt32Slot(0, keyType, 0)
def AddKeyType(builder, keyType):
MapTypeAddKeyType(builder, keyType)
def MapTypeAddValueType(builder, valueType):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(valueType), 0)
def AddValueType(builder, valueType):
MapTypeAddValueType(builder, valueType)
def MapTypeEnd(builder):
return builder.EndObject()
def End(builder):
return MapTypeEnd(builder)

View File

@ -0,0 +1,223 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Model(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Model()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsModel(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# Model
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Model
def IrVersion(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Model
def OpsetImport(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.OperatorSetId import OperatorSetId
obj = OperatorSetId()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Model
def OpsetImportLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Model
def OpsetImportIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# Model
def ProducerName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Model
def ProducerVersion(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Model
def Domain(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Model
def ModelVersion(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# Model
def DocString(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Model
def Graph(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.Graph import Graph
obj = Graph()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Model
def GraphDocString(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Model
def MetadataProps(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.StringStringEntry import StringStringEntry
obj = StringStringEntry()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Model
def MetadataPropsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Model
def MetadataPropsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
return o == 0
def ModelStart(builder):
builder.StartObject(10)
def Start(builder):
ModelStart(builder)
def ModelAddIrVersion(builder, irVersion):
builder.PrependInt64Slot(0, irVersion, 0)
def AddIrVersion(builder, irVersion):
ModelAddIrVersion(builder, irVersion)
def ModelAddOpsetImport(builder, opsetImport):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(opsetImport), 0)
def AddOpsetImport(builder, opsetImport):
ModelAddOpsetImport(builder, opsetImport)
def ModelStartOpsetImportVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartOpsetImportVector(builder, numElems: int) -> int:
return ModelStartOpsetImportVector(builder, numElems)
def ModelAddProducerName(builder, producerName):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(producerName), 0)
def AddProducerName(builder, producerName):
ModelAddProducerName(builder, producerName)
def ModelAddProducerVersion(builder, producerVersion):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(producerVersion), 0)
def AddProducerVersion(builder, producerVersion):
ModelAddProducerVersion(builder, producerVersion)
def ModelAddDomain(builder, domain):
builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(domain), 0)
def AddDomain(builder, domain):
ModelAddDomain(builder, domain)
def ModelAddModelVersion(builder, modelVersion):
builder.PrependInt64Slot(5, modelVersion, 0)
def AddModelVersion(builder, modelVersion):
ModelAddModelVersion(builder, modelVersion)
def ModelAddDocString(builder, docString):
builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(docString), 0)
def AddDocString(builder, docString):
ModelAddDocString(builder, docString)
def ModelAddGraph(builder, graph):
builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(graph), 0)
def AddGraph(builder, graph):
ModelAddGraph(builder, graph)
def ModelAddGraphDocString(builder, graphDocString):
builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(graphDocString), 0)
def AddGraphDocString(builder, graphDocString):
ModelAddGraphDocString(builder, graphDocString)
def ModelAddMetadataProps(builder, metadataProps):
builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(metadataProps), 0)
def AddMetadataProps(builder, metadataProps):
ModelAddMetadataProps(builder, metadataProps)
def ModelStartMetadataPropsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartMetadataPropsVector(builder, numElems: int) -> int:
return ModelStartMetadataPropsVector(builder, numElems)
def ModelEnd(builder):
return builder.EndObject()
def End(builder):
return ModelEnd(builder)
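A hedged read sketch: the generated GetRootAs plus the "ORTM" identifier check can be used to inspect a buffer whose root is a Model table. In an actual .ort file the Model sits inside the top-level session table, so treating `buf` as a Model-rooted buffer here is an illustrative assumption.

from ort_flatbuffers_py.fbs.Model import Model

def describe(buf: bytes) -> None:
    # b"\x4F\x52\x54\x4D" is ASCII "ORTM", the file identifier checked above.
    if not Model.ModelBufferHasIdentifier(buf, 0):
        raise ValueError("buffer does not carry the ORTM identifier")
    m = Model.GetRootAs(buf, 0)
    print("ir_version:", m.IrVersion(), "model_version:", m.ModelVersion())
    for i in range(m.OpsetImportLength()):
        o = m.OpsetImport(i)
        print("opset:", (o.Domain() or b"").decode("utf-8"), o.Version())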

View File

@ -0,0 +1,141 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ModuleState(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ModuleState()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsModuleState(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ModuleStateBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x44\x54\x43", size_prefixed=size_prefixed)
# ModuleState
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ModuleState
def RequiresGradParams(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Tensor import Tensor
obj = Tensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ModuleState
def RequiresGradParamsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ModuleState
def RequiresGradParamsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# ModuleState
def FrozenParams(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Tensor import Tensor
obj = Tensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ModuleState
def FrozenParamsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ModuleState
def FrozenParamsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# ModuleState
def IsNominalState(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# ModuleState
def HasExternalData(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def ModuleStateStart(builder):
builder.StartObject(4)
def Start(builder):
ModuleStateStart(builder)
def ModuleStateAddRequiresGradParams(builder, requiresGradParams):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(requiresGradParams), 0)
def AddRequiresGradParams(builder, requiresGradParams):
ModuleStateAddRequiresGradParams(builder, requiresGradParams)
def ModuleStateStartRequiresGradParamsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartRequiresGradParamsVector(builder, numElems: int) -> int:
return ModuleStateStartRequiresGradParamsVector(builder, numElems)
def ModuleStateAddFrozenParams(builder, frozenParams):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(frozenParams), 0)
def AddFrozenParams(builder, frozenParams):
ModuleStateAddFrozenParams(builder, frozenParams)
def ModuleStateStartFrozenParamsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartFrozenParamsVector(builder, numElems: int) -> int:
return ModuleStateStartFrozenParamsVector(builder, numElems)
def ModuleStateAddIsNominalState(builder, isNominalState):
builder.PrependBoolSlot(2, isNominalState, 0)
def AddIsNominalState(builder, isNominalState):
ModuleStateAddIsNominalState(builder, isNominalState)
def ModuleStateAddHasExternalData(builder, hasExternalData):
builder.PrependBoolSlot(3, hasExternalData, 0)
def AddHasExternalData(builder, hasExternalData):
ModuleStateAddHasExternalData(builder, hasExternalData)
def ModuleStateEnd(builder):
return builder.EndObject()
def End(builder):
return ModuleStateEnd(builder)
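A small summary sketch using only the accessors generated above, assuming `ms` is a ModuleState table obtained via ModuleState.GetRootAs(...):

def summarize_module_state(ms) -> dict:
    # Scalar and bool slots fall back to their schema defaults (0 / False)
    # when absent, so no IsNone checks are needed for them.
    return {
        "requires_grad_params": ms.RequiresGradParamsLength(),
        "frozen_params": ms.FrozenParamsLength(),
        "is_nominal_state": ms.IsNominalState(),
        "has_external_data": ms.HasExternalData(),
    }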

View File

@ -0,0 +1,317 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Node(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Node()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsNode(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def NodeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# Node
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Node
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Node
def DocString(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Node
def Domain(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Node
def SinceVersion(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Node
def Index(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Node
def OpType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Node
def Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Node
def ExecutionProviderType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Node
def Inputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Node
def InputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Node
def InputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
return o == 0
# Node
def Outputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Node
def OutputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Node
def OutputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
return o == 0
# Node
def Attributes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Attribute import Attribute
obj = Attribute()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Node
def AttributesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Node
def AttributesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
return o == 0
# Node
def InputArgCounts(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# Node
def InputArgCountsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# Node
def InputArgCountsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Node
def InputArgCountsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
return o == 0
# Node
def ImplicitInputs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Node
def ImplicitInputsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Node
def ImplicitInputsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
return o == 0
def NodeStart(builder):
builder.StartObject(13)
def Start(builder):
NodeStart(builder)
def NodeAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
NodeAddName(builder, name)
def NodeAddDocString(builder, docString):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(docString), 0)
def AddDocString(builder, docString):
NodeAddDocString(builder, docString)
def NodeAddDomain(builder, domain):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(domain), 0)
def AddDomain(builder, domain):
NodeAddDomain(builder, domain)
def NodeAddSinceVersion(builder, sinceVersion):
builder.PrependInt32Slot(3, sinceVersion, 0)
def AddSinceVersion(builder, sinceVersion):
NodeAddSinceVersion(builder, sinceVersion)
def NodeAddIndex(builder, index):
builder.PrependUint32Slot(4, index, 0)
def AddIndex(builder, index):
NodeAddIndex(builder, index)
def NodeAddOpType(builder, opType):
builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(opType), 0)
def AddOpType(builder, opType):
NodeAddOpType(builder, opType)
def NodeAddType(builder, type):
builder.PrependInt32Slot(6, type, 0)
def AddType(builder, type):
NodeAddType(builder, type)
def NodeAddExecutionProviderType(builder, executionProviderType):
builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(executionProviderType), 0)
def AddExecutionProviderType(builder, executionProviderType):
NodeAddExecutionProviderType(builder, executionProviderType)
def NodeAddInputs(builder, inputs):
builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
def AddInputs(builder, inputs):
NodeAddInputs(builder, inputs)
def NodeStartInputsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartInputsVector(builder, numElems: int) -> int:
return NodeStartInputsVector(builder, numElems)
def NodeAddOutputs(builder, outputs):
builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
def AddOutputs(builder, outputs):
NodeAddOutputs(builder, outputs)
def NodeStartOutputsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartOutputsVector(builder, numElems: int) -> int:
return NodeStartOutputsVector(builder, numElems)
def NodeAddAttributes(builder, attributes):
builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(attributes), 0)
def AddAttributes(builder, attributes):
NodeAddAttributes(builder, attributes)
def NodeStartAttributesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartAttributesVector(builder, numElems: int) -> int:
return NodeStartAttributesVector(builder, numElems)
def NodeAddInputArgCounts(builder, inputArgCounts):
builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(inputArgCounts), 0)
def AddInputArgCounts(builder, inputArgCounts):
NodeAddInputArgCounts(builder, inputArgCounts)
def NodeStartInputArgCountsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartInputArgCountsVector(builder, numElems: int) -> int:
return NodeStartInputArgCountsVector(builder, numElems)
def NodeAddImplicitInputs(builder, implicitInputs):
builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(implicitInputs), 0)
def AddImplicitInputs(builder, implicitInputs):
NodeAddImplicitInputs(builder, implicitInputs)
def NodeStartImplicitInputsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartImplicitInputsVector(builder, numElems: int) -> int:
return NodeStartImplicitInputsVector(builder, numElems)
def NodeEnd(builder):
return builder.EndObject()
def End(builder):
return NodeEnd(builder)
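A read sketch for the string-vector accessors above: elements are returned as bytes by the FlatBuffers Python runtime, so they are decoded for display. Guarding with the Length accessor avoids indexing an absent vector.

def node_io(node) -> tuple:
    # Inputs()/Outputs() index into string vectors; each element is bytes.
    ins = [node.Inputs(i).decode("utf-8") for i in range(node.InputsLength())]
    outs = [node.Outputs(i).decode("utf-8") for i in range(node.OutputsLength())]
    return ins, outs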

View File

@ -0,0 +1,126 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class NodeEdge(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = NodeEdge()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsNodeEdge(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def NodeEdgeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# NodeEdge
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# NodeEdge
def NodeIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# NodeEdge
def InputEdges(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 12
from ort_flatbuffers_py.fbs.EdgeEnd import EdgeEnd
obj = EdgeEnd()
obj.Init(self._tab.Bytes, x)
return obj
return None
# NodeEdge
def InputEdgesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# NodeEdge
def InputEdgesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# NodeEdge
def OutputEdges(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 12
from ort_flatbuffers_py.fbs.EdgeEnd import EdgeEnd
obj = EdgeEnd()
obj.Init(self._tab.Bytes, x)
return obj
return None
# NodeEdge
def OutputEdgesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# NodeEdge
def OutputEdgesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
def NodeEdgeStart(builder):
builder.StartObject(3)
def Start(builder):
NodeEdgeStart(builder)
def NodeEdgeAddNodeIndex(builder, nodeIndex):
builder.PrependUint32Slot(0, nodeIndex, 0)
def AddNodeIndex(builder, nodeIndex):
NodeEdgeAddNodeIndex(builder, nodeIndex)
def NodeEdgeAddInputEdges(builder, inputEdges):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputEdges), 0)
def AddInputEdges(builder, inputEdges):
NodeEdgeAddInputEdges(builder, inputEdges)
def NodeEdgeStartInputEdgesVector(builder, numElems):
return builder.StartVector(12, numElems, 4)
def StartInputEdgesVector(builder, numElems: int) -> int:
return NodeEdgeStartInputEdgesVector(builder, numElems)
def NodeEdgeAddOutputEdges(builder, outputEdges):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputEdges), 0)
def AddOutputEdges(builder, outputEdges):
NodeEdgeAddOutputEdges(builder, outputEdges)
def NodeEdgeStartOutputEdgesVector(builder, numElems):
return builder.StartVector(12, numElems, 4)
def StartOutputEdgesVector(builder, numElems: int) -> int:
return NodeEdgeStartOutputEdgesVector(builder, numElems)
def NodeEdgeEnd(builder):
return builder.EndObject()
def End(builder):
return NodeEdgeEnd(builder)
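A note on the 12-byte stride above, with a collection sketch: EdgeEnd is a fixed-size struct stored inline in the vector, so elements are addressed by stride rather than through an offset indirection.

def edges(node_edge) -> tuple:
    # InputEdges/OutputEdges advance j * 12 bytes and Init the struct in
    # place; there is no Indirect call because EdgeEnd is not a table.
    ins = [node_edge.InputEdges(i) for i in range(node_edge.InputEdgesLength())]
    outs = [node_edge.OutputEdges(i) for i in range(node_edge.OutputEdgesLength())]
    return ins, outs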

View File

@ -0,0 +1,7 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
class NodeType(object):
Primitive = 0
Fused = 1

View File

@ -0,0 +1,160 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# nodes to consider for a runtime optimization
# see corresponding type in onnxruntime/core/graph/runtime_optimization_record.h
class NodesToOptimizeIndices(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = NodesToOptimizeIndices()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsNodesToOptimizeIndices(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def NodesToOptimizeIndicesBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# NodesToOptimizeIndices
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# NodesToOptimizeIndices
def NodeIndices(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# NodesToOptimizeIndices
def NodeIndicesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
return 0
# NodesToOptimizeIndices
def NodeIndicesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# NodesToOptimizeIndices
def NodeIndicesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# NodesToOptimizeIndices
def NumInputs(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# NodesToOptimizeIndices
def NumOutputs(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# NodesToOptimizeIndices
def HasVariadicInput(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# NodesToOptimizeIndices
def HasVariadicOutput(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# NodesToOptimizeIndices
def NumVariadicInputs(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# NodesToOptimizeIndices
def NumVariadicOutputs(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
def NodesToOptimizeIndicesStart(builder):
builder.StartObject(7)
def Start(builder):
NodesToOptimizeIndicesStart(builder)
def NodesToOptimizeIndicesAddNodeIndices(builder, nodeIndices):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(nodeIndices), 0)
def AddNodeIndices(builder, nodeIndices):
NodesToOptimizeIndicesAddNodeIndices(builder, nodeIndices)
def NodesToOptimizeIndicesStartNodeIndicesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartNodeIndicesVector(builder, numElems: int) -> int:
return NodesToOptimizeIndicesStartNodeIndicesVector(builder, numElems)
def NodesToOptimizeIndicesAddNumInputs(builder, numInputs):
builder.PrependUint32Slot(1, numInputs, 0)
def AddNumInputs(builder, numInputs):
NodesToOptimizeIndicesAddNumInputs(builder, numInputs)
def NodesToOptimizeIndicesAddNumOutputs(builder, numOutputs):
builder.PrependUint32Slot(2, numOutputs, 0)
def AddNumOutputs(builder, numOutputs):
NodesToOptimizeIndicesAddNumOutputs(builder, numOutputs)
def NodesToOptimizeIndicesAddHasVariadicInput(builder, hasVariadicInput):
builder.PrependBoolSlot(3, hasVariadicInput, 0)
def AddHasVariadicInput(builder, hasVariadicInput):
NodesToOptimizeIndicesAddHasVariadicInput(builder, hasVariadicInput)
def NodesToOptimizeIndicesAddHasVariadicOutput(builder, hasVariadicOutput):
builder.PrependBoolSlot(4, hasVariadicOutput, 0)
def AddHasVariadicOutput(builder, hasVariadicOutput):
NodesToOptimizeIndicesAddHasVariadicOutput(builder, hasVariadicOutput)
def NodesToOptimizeIndicesAddNumVariadicInputs(builder, numVariadicInputs):
builder.PrependUint32Slot(5, numVariadicInputs, 0)
def AddNumVariadicInputs(builder, numVariadicInputs):
NodesToOptimizeIndicesAddNumVariadicInputs(builder, numVariadicInputs)
def NodesToOptimizeIndicesAddNumVariadicOutputs(builder, numVariadicOutputs):
builder.PrependUint32Slot(6, numVariadicOutputs, 0)
def AddNumVariadicOutputs(builder, numVariadicOutputs):
NodesToOptimizeIndicesAddNumVariadicOutputs(builder, numVariadicOutputs)
def NodesToOptimizeIndicesEnd(builder):
return builder.EndObject()
def End(builder):
return NodesToOptimizeIndicesEnd(builder)
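A sketch of the scalar-vector accessors above: NodeIndicesAsNumpy() returns a view over the buffer when numpy is available (via import_numpy()), but returns 0, not an array, when the field is absent, so the IsNone check comes first.

def node_indices(n) -> list:
    if n.NodeIndicesIsNone():
        return []
    return n.NodeIndicesAsNumpy().tolist()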

View File

@ -0,0 +1,91 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class OpIdKernelTypeStrArgsEntry(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = OpIdKernelTypeStrArgsEntry()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsOpIdKernelTypeStrArgsEntry(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def OpIdKernelTypeStrArgsEntryBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# OpIdKernelTypeStrArgsEntry
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# OpIdKernelTypeStrArgsEntry
def OpId(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# OpIdKernelTypeStrArgsEntry
def KernelTypeStrArgs(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.KernelTypeStrArgsEntry import KernelTypeStrArgsEntry
obj = KernelTypeStrArgsEntry()
obj.Init(self._tab.Bytes, x)
return obj
return None
# OpIdKernelTypeStrArgsEntry
def KernelTypeStrArgsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# OpIdKernelTypeStrArgsEntry
def KernelTypeStrArgsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
def OpIdKernelTypeStrArgsEntryStart(builder):
builder.StartObject(2)
def Start(builder):
OpIdKernelTypeStrArgsEntryStart(builder)
def OpIdKernelTypeStrArgsEntryAddOpId(builder, opId):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(opId), 0)
def AddOpId(builder, opId):
OpIdKernelTypeStrArgsEntryAddOpId(builder, opId)
def OpIdKernelTypeStrArgsEntryAddKernelTypeStrArgs(builder, kernelTypeStrArgs):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernelTypeStrArgs), 0)
def AddKernelTypeStrArgs(builder, kernelTypeStrArgs):
OpIdKernelTypeStrArgsEntryAddKernelTypeStrArgs(builder, kernelTypeStrArgs)
def OpIdKernelTypeStrArgsEntryStartKernelTypeStrArgsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartKernelTypeStrArgsVector(builder, numElems: int) -> int:
return OpIdKernelTypeStrArgsEntryStartKernelTypeStrArgsVector(builder, numElems)
def OpIdKernelTypeStrArgsEntryEnd(builder):
return builder.EndObject()
def End(builder):
return OpIdKernelTypeStrArgsEntryEnd(builder)

View File

@ -0,0 +1,67 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class OperatorSetId(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = OperatorSetId()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsOperatorSetId(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def OperatorSetIdBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# OperatorSetId
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# OperatorSetId
def Domain(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# OperatorSetId
def Version(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
def OperatorSetIdStart(builder):
builder.StartObject(2)
def Start(builder):
OperatorSetIdStart(builder)
def OperatorSetIdAddDomain(builder, domain):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(domain), 0)
def AddDomain(builder, domain):
OperatorSetIdAddDomain(builder, domain)
def OperatorSetIdAddVersion(builder, version):
builder.PrependInt64Slot(1, version, 0)
def AddVersion(builder, version):
OperatorSetIdAddVersion(builder, version)
def OperatorSetIdEnd(builder):
return builder.EndObject()
def End(builder):
return OperatorSetIdEnd(builder)
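A minimal builder round-trip sketch for this table (not part of the generated file), showing the usual FlatBuffers ordering constraint:

import flatbuffers
from ort_flatbuffers_py.fbs import OperatorSetId

b = flatbuffers.Builder(64)
domain = b.CreateString("ai.onnx")   # strings must be created before StartObject
OperatorSetId.OperatorSetIdStart(b)
OperatorSetId.OperatorSetIdAddDomain(b, domain)
OperatorSetId.OperatorSetIdAddVersion(b, 17)
b.Finish(OperatorSetId.OperatorSetIdEnd(b))

opset = OperatorSetId.OperatorSetId.GetRootAs(b.Output(), 0)
assert opset.Domain() == b"ai.onnx" and opset.Version() == 17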

View File

@ -0,0 +1,117 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class OptimizerGroup(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = OptimizerGroup()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsOptimizerGroup(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def OptimizerGroupBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x44\x54\x43", size_prefixed=size_prefixed)
# OptimizerGroup
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# OptimizerGroup
def GroupName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# OptimizerGroup
def Step(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# OptimizerGroup
def InitialLearningRate(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# OptimizerGroup
def OptimizerStates(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.ParameterOptimizerState import ParameterOptimizerState
obj = ParameterOptimizerState()
obj.Init(self._tab.Bytes, x)
return obj
return None
# OptimizerGroup
def OptimizerStatesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# OptimizerGroup
def OptimizerStatesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
return o == 0
def OptimizerGroupStart(builder):
builder.StartObject(4)
def Start(builder):
OptimizerGroupStart(builder)
def OptimizerGroupAddGroupName(builder, groupName):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(groupName), 0)
def AddGroupName(builder, groupName):
OptimizerGroupAddGroupName(builder, groupName)
def OptimizerGroupAddStep(builder, step):
builder.PrependInt64Slot(1, step, 0)
def AddStep(builder, step):
OptimizerGroupAddStep(builder, step)
def OptimizerGroupAddInitialLearningRate(builder, initialLearningRate):
builder.PrependFloat32Slot(2, initialLearningRate, 0.0)
def AddInitialLearningRate(builder, initialLearningRate):
OptimizerGroupAddInitialLearningRate(builder, initialLearningRate)
def OptimizerGroupAddOptimizerStates(builder, optimizerStates):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(optimizerStates), 0)
def AddOptimizerStates(builder, optimizerStates):
OptimizerGroupAddOptimizerStates(builder, optimizerStates)
def OptimizerGroupStartOptimizerStatesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartOptimizerStatesVector(builder, numElems: int) -> int:
return OptimizerGroupStartOptimizerStatesVector(builder, numElems)
def OptimizerGroupEnd(builder):
return builder.EndObject()
def End(builder):
return OptimizerGroupEnd(builder)

View File

@ -0,0 +1,91 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ParameterOptimizerState(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ParameterOptimizerState()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsParameterOptimizerState(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ParameterOptimizerStateBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x44\x54\x43", size_prefixed=size_prefixed)
# ParameterOptimizerState
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ParameterOptimizerState
def ParamName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ParameterOptimizerState
def Momentums(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Tensor import Tensor
obj = Tensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# ParameterOptimizerState
def MomentumsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ParameterOptimizerState
def MomentumsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
def ParameterOptimizerStateStart(builder):
builder.StartObject(2)
def Start(builder):
ParameterOptimizerStateStart(builder)
def ParameterOptimizerStateAddParamName(builder, paramName):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(paramName), 0)
def AddParamName(builder, paramName):
ParameterOptimizerStateAddParamName(builder, paramName)
def ParameterOptimizerStateAddMomentums(builder, momentums):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(momentums), 0)
def AddMomentums(builder, momentums):
ParameterOptimizerStateAddMomentums(builder, momentums)
def ParameterOptimizerStateStartMomentumsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartMomentumsVector(builder, numElems: int) -> int:
return ParameterOptimizerStateStartMomentumsVector(builder, numElems)
def ParameterOptimizerStateEnd(builder):
return builder.EndObject()
def End(builder):
return ParameterOptimizerStateEnd(builder)

View File

@ -0,0 +1,152 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class PropertyBag(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = PropertyBag()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsPropertyBag(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def PropertyBagBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x44\x54\x43", size_prefixed=size_prefixed)
# PropertyBag
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# PropertyBag
def Ints(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.IntProperty import IntProperty
obj = IntProperty()
obj.Init(self._tab.Bytes, x)
return obj
return None
# PropertyBag
def IntsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PropertyBag
def IntsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# PropertyBag
def Floats(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.FloatProperty import FloatProperty
obj = FloatProperty()
obj.Init(self._tab.Bytes, x)
return obj
return None
# PropertyBag
def FloatsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PropertyBag
def FloatsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# PropertyBag
def Strings(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.StringProperty import StringProperty
obj = StringProperty()
obj.Init(self._tab.Bytes, x)
return obj
return None
# PropertyBag
def StringsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PropertyBag
def StringsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
def PropertyBagStart(builder):
builder.StartObject(3)
def Start(builder):
PropertyBagStart(builder)
def PropertyBagAddInts(builder, ints):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(ints), 0)
def AddInts(builder, ints):
PropertyBagAddInts(builder, ints)
def PropertyBagStartIntsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartIntsVector(builder, numElems: int) -> int:
return PropertyBagStartIntsVector(builder, numElems)
def PropertyBagAddFloats(builder, floats):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(floats), 0)
def AddFloats(builder, floats):
PropertyBagAddFloats(builder, floats)
def PropertyBagStartFloatsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartFloatsVector(builder, numElems: int) -> int:
return PropertyBagStartFloatsVector(builder, numElems)
def PropertyBagAddStrings(builder, strings):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(strings), 0)
def AddStrings(builder, strings):
PropertyBagAddStrings(builder, strings)
def PropertyBagStartStringsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartStringsVector(builder, numElems: int) -> int:
return PropertyBagStartStringsVector(builder, numElems)
def PropertyBagEnd(builder):
return builder.EndObject()
def End(builder):
return PropertyBagEnd(builder)
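A flattening sketch, under an explicit assumption: the IntProperty/FloatProperty/StringProperty entry tables are taken to expose Name() and Value() accessors in the same generated style, since those files are not shown here.

def property_bag_to_dict(bag) -> dict:
    # Assumes Name()/Value() accessors on the property entry tables.
    out = {}
    for i in range(bag.IntsLength()):
        p = bag.Ints(i)
        out[p.Name().decode("utf-8")] = p.Value()
    for i in range(bag.FloatsLength()):
        p = bag.Floats(i)
        out[p.Name().decode("utf-8")] = p.Value()
    for i in range(bag.StringsLength()):
        p = bag.Strings(i)
        out[p.Name().decode("utf-8")] = p.Value().decode("utf-8")
    return out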

View File

@ -0,0 +1,105 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# a single runtime optimization
# see corresponding type in onnxruntime/core/graph/runtime_optimization_record.h
class RuntimeOptimizationRecord(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = RuntimeOptimizationRecord()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsRuntimeOptimizationRecord(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def RuntimeOptimizationRecordBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# RuntimeOptimizationRecord
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# RuntimeOptimizationRecord
def ActionId(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# RuntimeOptimizationRecord
def NodesToOptimizeIndices(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.NodesToOptimizeIndices import NodesToOptimizeIndices
obj = NodesToOptimizeIndices()
obj.Init(self._tab.Bytes, x)
return obj
return None
# RuntimeOptimizationRecord
def ProducedOpIds(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# RuntimeOptimizationRecord
def ProducedOpIdsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# RuntimeOptimizationRecord
def ProducedOpIdsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
return o == 0
def RuntimeOptimizationRecordStart(builder):
builder.StartObject(4)
def Start(builder):
RuntimeOptimizationRecordStart(builder)
def RuntimeOptimizationRecordAddActionId(builder, actionId):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(actionId), 0)
def AddActionId(builder, actionId):
RuntimeOptimizationRecordAddActionId(builder, actionId)
def RuntimeOptimizationRecordAddNodesToOptimizeIndices(builder, nodesToOptimizeIndices):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(nodesToOptimizeIndices), 0)
def AddNodesToOptimizeIndices(builder, nodesToOptimizeIndices):
RuntimeOptimizationRecordAddNodesToOptimizeIndices(builder, nodesToOptimizeIndices)
def RuntimeOptimizationRecordAddProducedOpIds(builder, producedOpIds):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(producedOpIds), 0)
def AddProducedOpIds(builder, producedOpIds):
RuntimeOptimizationRecordAddProducedOpIds(builder, producedOpIds)
def RuntimeOptimizationRecordStartProducedOpIdsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartProducedOpIdsVector(builder, numElems: int) -> int:
return RuntimeOptimizationRecordStartProducedOpIdsVector(builder, numElems)
def RuntimeOptimizationRecordEnd(builder):
return builder.EndObject()
def End(builder):
return RuntimeOptimizationRecordEnd(builder)
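A note on the slot arithmetic above, with a read sketch: ProducedOpIds reads vtable offset 10, which is slot 3 (offset = 4 + 2*slot), while Start reserves 4 slots and nothing reads offset 8; slot 2 appears to be a retired field kept for buffer compatibility.

def produced_op_ids(rec) -> list:
    # rec is a RuntimeOptimizationRecord table.
    return [rec.ProducedOpIds(i).decode("utf-8")
            for i in range(rec.ProducedOpIdsLength())]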

View File

@ -0,0 +1,91 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class RuntimeOptimizationRecordContainerEntry(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = RuntimeOptimizationRecordContainerEntry()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsRuntimeOptimizationRecordContainerEntry(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def RuntimeOptimizationRecordContainerEntryBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# RuntimeOptimizationRecordContainerEntry
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# RuntimeOptimizationRecordContainerEntry
def OptimizerName(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# RuntimeOptimizationRecordContainerEntry
def RuntimeOptimizationRecords(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.RuntimeOptimizationRecord import RuntimeOptimizationRecord
obj = RuntimeOptimizationRecord()
obj.Init(self._tab.Bytes, x)
return obj
return None
# RuntimeOptimizationRecordContainerEntry
def RuntimeOptimizationRecordsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# RuntimeOptimizationRecordContainerEntry
def RuntimeOptimizationRecordsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
def RuntimeOptimizationRecordContainerEntryStart(builder):
builder.StartObject(2)
def Start(builder):
RuntimeOptimizationRecordContainerEntryStart(builder)
def RuntimeOptimizationRecordContainerEntryAddOptimizerName(builder, optimizerName):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(optimizerName), 0)
def AddOptimizerName(builder, optimizerName):
RuntimeOptimizationRecordContainerEntryAddOptimizerName(builder, optimizerName)
def RuntimeOptimizationRecordContainerEntryAddRuntimeOptimizationRecords(builder, runtimeOptimizationRecords):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(runtimeOptimizationRecords), 0)
def AddRuntimeOptimizationRecords(builder, runtimeOptimizationRecords):
RuntimeOptimizationRecordContainerEntryAddRuntimeOptimizationRecords(builder, runtimeOptimizationRecords)
def RuntimeOptimizationRecordContainerEntryStartRuntimeOptimizationRecordsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartRuntimeOptimizationRecordsVector(builder, numElems: int) -> int:
return RuntimeOptimizationRecordContainerEntryStartRuntimeOptimizationRecordsVector(builder, numElems)
def RuntimeOptimizationRecordContainerEntryEnd(builder):
return builder.EndObject()
def End(builder):
return RuntimeOptimizationRecordContainerEntryEnd(builder)

View File

@ -0,0 +1,79 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class RuntimeOptimizations(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = RuntimeOptimizations()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsRuntimeOptimizations(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def RuntimeOptimizationsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# RuntimeOptimizations
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# mapping from optimizer name to [RuntimeOptimizationRecord]
# RuntimeOptimizations
def Records(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.RuntimeOptimizationRecordContainerEntry import RuntimeOptimizationRecordContainerEntry
obj = RuntimeOptimizationRecordContainerEntry()
obj.Init(self._tab.Bytes, x)
return obj
return None
# RuntimeOptimizations
def RecordsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# RuntimeOptimizations
def RecordsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
def RuntimeOptimizationsStart(builder):
builder.StartObject(1)
def Start(builder):
RuntimeOptimizationsStart(builder)
def RuntimeOptimizationsAddRecords(builder, records):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(records), 0)
def AddRecords(builder, records):
RuntimeOptimizationsAddRecords(builder, records)
def RuntimeOptimizationsStartRecordsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartRecordsVector(builder, numElems: int) -> int:
return RuntimeOptimizationsStartRecordsVector(builder, numElems)
def RuntimeOptimizationsEnd(builder):
return builder.EndObject()
def End(builder):
return RuntimeOptimizationsEnd(builder)
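A sketch materializing the optimizer-name -> [RuntimeOptimizationRecord] mapping described in the Records comment above, assuming `rt` is a RuntimeOptimizations table:

def records_by_optimizer(rt) -> dict:
    out = {}
    for i in range(rt.RecordsLength()):
        entry = rt.Records(i)
        name = (entry.OptimizerName() or b"").decode("utf-8")
        out[name] = [entry.RuntimeOptimizationRecords(j)
                     for j in range(entry.RuntimeOptimizationRecordsLength())]
    return out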

View File

@ -0,0 +1,58 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SequenceType(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SequenceType()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSequenceType(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def SequenceTypeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# SequenceType
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SequenceType
def ElemType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.TypeInfo import TypeInfo
obj = TypeInfo()
obj.Init(self._tab.Bytes, x)
return obj
return None
def SequenceTypeStart(builder):
builder.StartObject(1)
def Start(builder):
SequenceTypeStart(builder)
def SequenceTypeAddElemType(builder, elemType):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(elemType), 0)
def AddElemType(builder, elemType):
SequenceTypeAddElemType(builder, elemType)
def SequenceTypeEnd(builder):
return builder.EndObject()
def End(builder):
return SequenceTypeEnd(builder)

View File

@ -0,0 +1,78 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Shape(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Shape()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsShape(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ShapeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# Shape
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Shape
def Dim(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from ort_flatbuffers_py.fbs.Dimension import Dimension
obj = Dimension()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Shape
def DimLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Shape
def DimIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
def ShapeStart(builder):
builder.StartObject(1)
def Start(builder):
ShapeStart(builder)
def ShapeAddDim(builder, dim):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(dim), 0)
def AddDim(builder, dim):
ShapeAddDim(builder, dim)
def ShapeStartDimVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartDimVector(builder, numElems: int) -> int:
return ShapeStartDimVector(builder, numElems)
def ShapeEnd(builder):
return builder.EndObject()
def End(builder):
return ShapeEnd(builder)

View File

@ -0,0 +1,114 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SparseTensor(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SparseTensor()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSparseTensor(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def SparseTensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# SparseTensor
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SparseTensor
def Values(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.Tensor import Tensor
obj = Tensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SparseTensor
def Indices(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.Tensor import Tensor
obj = Tensor()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SparseTensor
def Dims(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# SparseTensor
def DimsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
return 0
# SparseTensor
def DimsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SparseTensor
def DimsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
def SparseTensorStart(builder):
builder.StartObject(3)
def Start(builder):
SparseTensorStart(builder)
def SparseTensorAddValues(builder, values):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
def AddValues(builder, values):
SparseTensorAddValues(builder, values)
def SparseTensorAddIndices(builder, indices):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(indices), 0)
def AddIndices(builder, indices):
SparseTensorAddIndices(builder, indices)
def SparseTensorAddDims(builder, dims):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dims), 0)
def AddDims(builder, dims):
SparseTensorAddDims(builder, dims)
def SparseTensorStartDimsVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def StartDimsVector(builder, numElems: int) -> int:
return SparseTensorStartDimsVector(builder, numElems)
def SparseTensorEnd(builder):
return builder.EndObject()
def End(builder):
return SparseTensorEnd(builder)
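
A hedged read-side sketch for SparseTensor, assuming `sp` was already obtained from a parent table: the values/indices pair are nested Tensor tables, and DimsAsNumpy returns a zero-copy int64 view over the underlying buffer.

def sparse_tensor_summary(sp) -> dict:
    # DimsIsNone guards the case where the dims vector was never written
    dims = [] if sp.DimsIsNone() else list(sp.DimsAsNumpy())
    return {
        "dims": dims,
        "has_values": sp.Values() is not None,    # nested fbs.Tensor table
        "has_indices": sp.Indices() is not None,  # nested fbs.Tensor table
    }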

View File

@ -0,0 +1,67 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class StringProperty(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = StringProperty()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsStringProperty(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def StringPropertyBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x44\x54\x43", size_prefixed=size_prefixed)
# StringProperty
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# StringProperty
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# StringProperty
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def StringPropertyStart(builder):
builder.StartObject(2)
def Start(builder):
StringPropertyStart(builder)
def StringPropertyAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
StringPropertyAddName(builder, name)
def StringPropertyAddValue(builder, value):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def AddValue(builder, value):
StringPropertyAddValue(builder, value)
def StringPropertyEnd(builder):
return builder.EndObject()
def End(builder):
return StringPropertyEnd(builder)
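
Worth noting: StringProperty is the only table in this batch whose generated identifier check expects b"ODTC" (bytes 4F 44 54 43) rather than b"ORTM", so it is rooted in a different top-level buffer format than the model tables. A minimal sniffing helper built on the generated classmethod:

from ort_flatbuffers_py.fbs.StringProperty import StringProperty

def looks_like_string_property_buffer(buf: bytes) -> bool:
    # True only if the 4-byte file identifier after the root offset is "ODTC"
    return StringProperty.StringPropertyBufferHasIdentifier(buf, 0)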

View File

@ -0,0 +1,67 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class StringStringEntry(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = StringStringEntry()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsStringStringEntry(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def StringStringEntryBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# StringStringEntry
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# StringStringEntry
def Key(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# StringStringEntry
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def StringStringEntryStart(builder):
builder.StartObject(2)
def Start(builder):
StringStringEntryStart(builder)
def StringStringEntryAddKey(builder, key):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(key), 0)
def AddKey(builder, key):
StringStringEntryAddKey(builder, key)
def StringStringEntryAddValue(builder, value):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def AddValue(builder, value):
StringStringEntryAddValue(builder, value)
def StringStringEntryEnd(builder):
return builder.EndObject()
def End(builder):
return StringStringEntryEnd(builder)
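
The generated Start/Add.../End functions follow the usual FlatBuffers build protocol: create all strings and vectors first, then open the table, append its slots, and close it. A minimal standalone sketch (the function name is illustrative):

import flatbuffers
import ort_flatbuffers_py.fbs.StringStringEntry as fbs_sse

def build_string_string_entry(key: str, value: str) -> bytes:
    builder = flatbuffers.Builder(64)
    # strings must be created before StartObject opens the table
    key_off = builder.CreateString(key)
    value_off = builder.CreateString(value)
    fbs_sse.Start(builder)
    fbs_sse.AddKey(builder, key_off)
    fbs_sse.AddValue(builder, value_off)
    builder.Finish(fbs_sse.End(builder))
    return bytes(builder.Output())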

View File

@ -0,0 +1,203 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Tensor(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Tensor()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsTensor(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# Tensor
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Tensor
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Tensor
def DocString(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Tensor
def Dims(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# Tensor
def DimsAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
return 0
# Tensor
def DimsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Tensor
def DimsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
# Tensor
def DataType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Tensor
def RawData(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Tensor
def RawDataAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# Tensor
def RawDataLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Tensor
def RawDataIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
return o == 0
# Tensor
def StringData(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Tensor
def StringDataLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Tensor
def StringDataIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
return o == 0
# Tensor
def ExternalDataOffset(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return -1
def TensorStart(builder):
builder.StartObject(7)
def Start(builder):
TensorStart(builder)
def TensorAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
TensorAddName(builder, name)
def TensorAddDocString(builder, docString):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(docString), 0)
def AddDocString(builder, docString):
TensorAddDocString(builder, docString)
def TensorAddDims(builder, dims):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dims), 0)
def AddDims(builder, dims):
TensorAddDims(builder, dims)
def TensorStartDimsVector(builder, numElems):
return builder.StartVector(8, numElems, 8)
def StartDimsVector(builder, numElems: int) -> int:
return TensorStartDimsVector(builder, numElems)
def TensorAddDataType(builder, dataType):
builder.PrependInt32Slot(3, dataType, 0)
def AddDataType(builder, dataType):
TensorAddDataType(builder, dataType)
def TensorAddRawData(builder, rawData):
builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(rawData), 0)
def AddRawData(builder, rawData):
TensorAddRawData(builder, rawData)
def TensorStartRawDataVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def StartRawDataVector(builder, numElems: int) -> int:
return TensorStartRawDataVector(builder, numElems)
def TensorAddStringData(builder, stringData):
builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(stringData), 0)
def AddStringData(builder, stringData):
TensorAddStringData(builder, stringData)
def TensorStartStringDataVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartStringDataVector(builder, numElems: int) -> int:
return TensorStartStringDataVector(builder, numElems)
def TensorAddExternalDataOffset(builder, externalDataOffset):
builder.PrependInt64Slot(6, externalDataOffset, -1)
def AddExternalDataOffset(builder, externalDataOffset):
TensorAddExternalDataOffset(builder, externalDataOffset)
def TensorEnd(builder):
return builder.EndObject()
def End(builder):
return TensorEnd(builder)

View File

@ -0,0 +1,26 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
class TensorDataType(object):
UNDEFINED = 0
FLOAT = 1
UINT8 = 2
INT8 = 3
UINT16 = 4
INT16 = 5
INT32 = 6
INT64 = 7
STRING = 8
BOOL = 9
FLOAT16 = 10
DOUBLE = 11
UINT32 = 12
UINT64 = 13
COMPLEX64 = 14
COMPLEX128 = 15
BFLOAT16 = 16
FLOAT8E4M3FN = 17
FLOAT8E4M3FNUZ = 18
FLOAT8E5M2 = 19
FLOAT8E5M2FNUZ = 20
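
Combining the Tensor builder functions above with the TensorDataType enum, a hedged sketch of serializing a numpy array as a standalone Tensor table. This is illustrative only, not ORT's own serialization path, and it assumes float32 input.

import flatbuffers
import numpy as np
import ort_flatbuffers_py.fbs.Tensor as fbs_tensor
from ort_flatbuffers_py.fbs.TensorDataType import TensorDataType

def build_float_tensor(name: str, array: np.ndarray) -> bytes:
    assert array.dtype == np.float32  # maps to TensorDataType.FLOAT (1)
    builder = flatbuffers.Builder(1024)
    name_off = builder.CreateString(name)
    raw_off = builder.CreateByteVector(array.tobytes())
    dims_off = builder.CreateNumpyVector(np.asarray(array.shape, dtype=np.int64))
    fbs_tensor.Start(builder)
    fbs_tensor.AddName(builder, name_off)
    fbs_tensor.AddDims(builder, dims_off)
    fbs_tensor.AddDataType(builder, TensorDataType.FLOAT)
    fbs_tensor.AddRawData(builder, raw_off)
    # ExternalDataOffset is left at its default (-1): the data is inline here
    builder.Finish(fbs_tensor.End(builder))
    return bytes(builder.Output())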

View File

@ -0,0 +1,71 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TensorTypeAndShape(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TensorTypeAndShape()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsTensorTypeAndShape(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def TensorTypeAndShapeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# TensorTypeAndShape
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TensorTypeAndShape
def ElemType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# TensorTypeAndShape
def Shape(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.Shape import Shape
obj = Shape()
obj.Init(self._tab.Bytes, x)
return obj
return None
def TensorTypeAndShapeStart(builder):
builder.StartObject(2)
def Start(builder):
TensorTypeAndShapeStart(builder)
def TensorTypeAndShapeAddElemType(builder, elemType):
builder.PrependInt32Slot(0, elemType, 0)
def AddElemType(builder, elemType):
TensorTypeAndShapeAddElemType(builder, elemType)
def TensorTypeAndShapeAddShape(builder, shape):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
def AddShape(builder, shape):
TensorTypeAndShapeAddShape(builder, shape)
def TensorTypeAndShapeEnd(builder):
return builder.EndObject()
def End(builder):
return TensorTypeAndShapeEnd(builder)

View File

@ -0,0 +1,83 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TypeInfo(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TypeInfo()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsTypeInfo(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def TypeInfoBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# TypeInfo
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TypeInfo
def Denotation(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# TypeInfo
def ValueType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# TypeInfo
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
def TypeInfoStart(builder):
builder.StartObject(3)
def Start(builder):
TypeInfoStart(builder)
def TypeInfoAddDenotation(builder, denotation):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(denotation), 0)
def AddDenotation(builder, denotation):
TypeInfoAddDenotation(builder, denotation)
def TypeInfoAddValueType(builder, valueType):
builder.PrependUint8Slot(1, valueType, 0)
def AddValueType(builder, valueType):
TypeInfoAddValueType(builder, valueType)
def TypeInfoAddValue(builder, value):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def AddValue(builder, value):
TypeInfoAddValue(builder, value)
def TypeInfoEnd(builder):
return builder.EndObject()
def End(builder):
return TypeInfoEnd(builder)

View File

@ -0,0 +1,9 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
class TypeInfoValue(object):
NONE = 0
tensor_type = 1
sequence_type = 2
map_type = 3
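
TypeInfo.Value() returns a generic flatbuffers Table; the TypeInfoValue tag above says which concrete table to wrap it in. A minimal sketch of the union-resolution idiom for the tensor_type case:

from ort_flatbuffers_py.fbs.TypeInfoValue import TypeInfoValue
from ort_flatbuffers_py.fbs.TensorTypeAndShape import TensorTypeAndShape

def as_tensor_type_and_shape(type_info):
    # only reinterpret the union when the tag says tensor_type
    if type_info.ValueType() != TypeInfoValue.tensor_type:
        return None
    union_table = type_info.Value()  # generic flatbuffers.table.Table
    concrete = TensorTypeAndShape()
    concrete.Init(union_table.Bytes, union_table.Pos)
    return concrete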

View File

@ -0,0 +1,84 @@
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: fbs
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ValueInfo(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ValueInfo()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsValueInfo(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ValueInfoBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4F\x52\x54\x4D", size_prefixed=size_prefixed)
# ValueInfo
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ValueInfo
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ValueInfo
def DocString(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# ValueInfo
def Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from ort_flatbuffers_py.fbs.TypeInfo import TypeInfo
obj = TypeInfo()
obj.Init(self._tab.Bytes, x)
return obj
return None
def ValueInfoStart(builder):
builder.StartObject(3)
def Start(builder):
ValueInfoStart(builder)
def ValueInfoAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
ValueInfoAddName(builder, name)
def ValueInfoAddDocString(builder, docString):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(docString), 0)
def AddDocString(builder, docString):
ValueInfoAddDocString(builder, docString)
def ValueInfoAddType(builder, type):
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(type), 0)
def AddType(builder, type):
ValueInfoAddType(builder, type)
def ValueInfoEnd(builder):
return builder.EndObject()
def End(builder):
return ValueInfoEnd(builder)
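
Putting the pieces together, a read-side sketch that walks a ValueInfo through the TypeInfo union down to element type and rank. `value_info` is assumed to come from a parent graph table, and Dimension's accessors are not part of this diff, so only the rank is reported.

from ort_flatbuffers_py.fbs.TypeInfoValue import TypeInfoValue
from ort_flatbuffers_py.fbs.TensorTypeAndShape import TensorTypeAndShape

def describe_value_info(value_info) -> str:
    # generated String() accessors return bytes, hence the decode
    name = value_info.Name().decode() if value_info.Name() else "<unnamed>"
    type_info = value_info.Type()
    if type_info is None or type_info.ValueType() != TypeInfoValue.tensor_type:
        return f"{name}: non-tensor or untyped"
    union_table = type_info.Value()
    tts = TensorTypeAndShape()
    tts.Init(union_table.Bytes, union_table.Pos)
    shape = tts.Shape()
    rank = shape.DimLength() if shape is not None else 0
    return f"{name}: elem_type={tts.ElemType()} rank={rank}"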

View File

@ -0,0 +1,9 @@
# Dynamically collect every generated module in this package so the
# star-import below tracks the FlatBuffers schema without a hand-maintained list.
from os.path import dirname, basename, isfile, join, splitext
import glob

modules = glob.glob(join(dirname(__file__), "*.py"))
__all__ = [splitext(basename(f))[0] for f in modules if isfile(f) and not f.endswith('__init__.py')]
# import the modules named in __all__ into the package namespace
from . import *  # noqa: F401,F403

Some files were not shown because too many files have changed in this diff.