I am done

This commit is contained in:
2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

View File

@ -0,0 +1,68 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Interface for Op Version Adapters
#pragma once
#include <functional>
#include <memory>
#include <string>
#include "onnx/onnx_pb.h"
#include "onnx/version_converter/helper.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Abstract base class for op version adapters. Each concrete adapter rewrites
// one operator (`name_`) one opset step, from `initial_version_` to
// `target_version_`.
class Adapter {
 private:
  std::string name_;         // Operator name this adapter handles (e.g. "Gemm")
  OpSetID initial_version_;  // Opset version the node is converted from
  OpSetID target_version_;   // Opset version the node is converted to

 public:
  virtual ~Adapter() noexcept = default;

  // name: operator type to match; initial_version/target_version: the
  // single-step conversion this adapter implements.
  explicit Adapter(const std::string& name, const OpSetID& initial_version, const OpSetID& target_version)
      : name_(name), initial_version_(initial_version), target_version_(target_version) {}

  // This will almost always return its own node argument after modifying it in place.
  // The only exception are adapters for deprecated operators: in this case the input
  // node must be destroyed and a new one must be created and returned. See e.g.
  // upsample_9_10.h
  virtual Node* adapt(std::shared_ptr<Graph> /*graph*/, Node* node) const = 0;

  // Accessors describing the conversion this adapter performs.
  const std::string& name() const {
    return name_;
  }
  const OpSetID& initial_version() const {
    return initial_version_;
  }
  const OpSetID& target_version() const {
    return target_version_;
  }
};
// Signature of the callback that performs the per-node conversion for
// GenericAdapter.
using NodeTransformerFunction = std::function<Node*(std::shared_ptr<Graph>, Node* node)>;

// Adapter whose conversion logic is supplied as a callable, so simple
// conversions can be registered without declaring a dedicated subclass.
class GenericAdapter final : public Adapter {
 public:
  // op: operator name; from/to: opset versions; transformer: callback that
  // adapt() delegates to.
  GenericAdapter(const char* op, int64_t from, int64_t to, NodeTransformerFunction transformer)
      : Adapter(op, OpSetID(from), OpSetID(to)), transformer_(transformer) {}

  // Forwards the node to the stored callback.
  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    return transformer_(graph, node);
  }

 private:
  NodeTransformerFunction transformer_;
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,50 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter that converts a statically-known `axes` attribute into an `axes` input
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Converts a statically-known `axes` attribute into an `axes` constant input
// (for ops whose axes moved from attribute to input in a newer opset).
class AxesAttributeToInput : public Adapter {
 public:
  explicit AxesAttributeToInput(const std::string& op_name, const OpSetID& initial, const OpSetID& target)
      : Adapter(op_name, initial, target) {}

  // Builds a 1-D INT64 Constant holding `axes`, inserts it before `node`, and
  // appends its output as a new input of `node`.
  // `axes` is only read, so take it by const reference instead of by value
  // (the previous signature copied the caller's vector on every call).
  void attrToInput(std::shared_ptr<Graph> graph, Node* node, const std::vector<int64_t>& axes) const {
    Tensor t;
    t.elem_type() = TensorProto_DataType_INT64;
    t.sizes() = std::vector<int64_t>{static_cast<int64_t>(axes.size())};
    auto& data = t.int64s();
    data.reserve(axes.size());  // one allocation instead of growth per element
    for (auto a : axes) {
      data.emplace_back(a);
    }
    Node* constant = graph->create(kConstant);
    constant->insertBefore(node);
    constant->t_(kvalue, t);
    node->addInput(constant->output());
  }

  // Moves the `axes` attribute (when present) into a constant input.
  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    if (node->hasAttribute(kaxes)) {
      attrToInput(graph, node, node->is(kaxes));
      node->removeAttribute(kaxes);
    }
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,71 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter that converts a statically-determined `axes` input back into an `axes` attribute
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Converts a statically-determined `axes` input (a Constant node or a graph
// initializer) back into an `axes` attribute. Fails if axes cannot be
// determined statically.
class AxesInputToAttribute : public Adapter {
 public:
  explicit AxesInputToAttribute(const std::string& op_name, const OpSetID& initial, const OpSetID& target)
      : Adapter(op_name, initial, target) {}

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    // Identify if axes is statically determined; if so, feed as attribute
    const ArrayRef<Value*>& inputs = node->inputs();
    // Get axes from initializer or constant operator
    // Identify whether we have a Constant Op or an Initializer
    Value* const_val = inputs[1];
    Node* node_ptr = const_val->node();
    if (node_ptr->kind() == kConstant) {
      // Get value attribute of kConstant
      const std::vector<int64_t>& int64s = node_ptr->t(kvalue).int64s();
      if (int64s.empty()) {
        // The Constant may carry its payload in raw (byte) form instead
        std::string raw_data = node_ptr->t(kvalue).raw();
        ONNX_ASSERTM(
            raw_data.size() != 0 && raw_data.size() % 8 == 0,
            "Raw Data must be non-empty and size must be a multiple of 8");
        // The data is only read: reinterpret_cast on const bytes instead of
        // the previous C-style cast through const_cast.
        const int64_t* raw = reinterpret_cast<const int64_t*>(raw_data.c_str());
        node->is_(kaxes, std::vector<int64_t>(raw, raw + node_ptr->t(kvalue).size_from_dim(0)));
      } else {
        node->is_(kaxes, std::forward<const std::vector<int64_t>>(int64s));
      }
      // If Constant node isn't used anywhere else, remove it
      node->removeInput(1);
      if (const_val->uses().size() < 1) {
        node_ptr->destroy();
      }
    } else {
      // Get Value name, find Initializer with same name
      for (const auto& initializer : graph->initializers()) {
        if (initializer.name() == inputs[1]->uniqueName()) {
          const std::vector<int64_t>& init_int64s = initializer.int64s();
          if (init_int64s.empty()) {
            // Initializers can also carry raw (byte) data; mirror the
            // Constant path instead of silently setting an empty axes list.
            const std::string& raw_data = initializer.raw();
            ONNX_ASSERTM(
                raw_data.size() != 0 && raw_data.size() % 8 == 0,
                "Raw Data must be non-empty and size must be a multiple of 8");
            const int64_t* raw = reinterpret_cast<const int64_t*>(raw_data.c_str());
            node->is_(kaxes, std::vector<int64_t>(raw, raw + raw_data.size() / 8));
          } else {
            node->is_(kaxes, std::forward<const std::vector<int64_t>>(init_int64s));
          }
          node->removeInput(1);
          // Remove initializer if this was its only remaining use
          if (const_val->uses().size() < 1)
            graph->eraseInitializerAndInput(const_val);
          break;
        }
      }
    }
    ONNX_ASSERTM(node->hasAttribute(kaxes), "No initializer or constant input to node found");
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,74 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Converts an `axis` attribute into a scalar constant input at position
// `axis_index`, padding any skipped optional input slots with Undefined nodes.
class AxisAttributeToInput : public Adapter {
 public:
  // axis_index: input slot the axis constant must occupy.
  // default_axis: value used when the node carries no axis attribute.
  AxisAttributeToInput(
      const std::string& op_name,
      const OpSetID& initial,
      const OpSetID& target,
      size_t axis_index,
      int64_t default_axis)
      : Adapter(op_name, initial, target), axis_index(axis_index), default_axis(default_axis) {}

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    if (node->hasAttribute(kaxis)) {
      AttrToInput(graph, node, node->i(kaxis), this->axis_index);
      node->removeAttribute(kaxis);
      return node;
    }
    // Fill in the default value for axis
    AttrToInput(graph, node, default_axis, this->axis_index);
    return node;
  }

 private:
  size_t axis_index;
  int64_t default_axis;

  // Appends a scalar axis Constant so that it lands at input slot
  // `axis_index`, creating Undefined placeholders for missing optional inputs.
  void AttrToInput(std::shared_ptr<Graph> graph, Node* node, int64_t axis, size_t axis_index) const {
    const ArrayRef<Value*>& inputs = node->inputs();
    // Add the optional inputs if they don't exist
    for (size_t i = inputs.size(); i < axis_index; ++i) {
      Node* empty_input = graph->create(kUndefined);
      empty_input->insertBefore(node);
      node->addInput(empty_input->output());
    }
    // Add the axis input
    Node* constant = CreateAxisInput(graph, node, axis);
    node->addInput(constant->output());
  }

  // Builds a rank-0 (scalar) INT64 Constant holding `axis`, inserted before `node`.
  Node* CreateAxisInput(std::shared_ptr<Graph> graph, Node* node, int64_t axis) const {
    Tensor t;
    t.elem_type() = TensorProto_DataType_INT64;
    t.sizes() = std::vector<int64_t>{};  // empty sizes: scalar tensor
    auto& data = t.int64s();
    data.emplace_back(axis);
    Node* constant = graph->create(kConstant);
    constant->insertBefore(node);
    constant->t_(kvalue, t);
    return constant;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,99 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Converts a statically-determined scalar `axis` input (Constant node or graph
// initializer) into an `axis` attribute, falling back to `default_axis` when
// the input is absent or Undefined.
class AxisInputToAttribute : public Adapter {
 public:
  // axis_index: position of the axis input on the node.
  // default_axis: value to use when the axis input is not provided.
  explicit AxisInputToAttribute(
      const std::string& op_name,
      const OpSetID& initial,
      const OpSetID& target,
      size_t axis_index,
      int64_t default_axis)
      : Adapter(op_name, initial, target), axis_index(axis_index), default_axis(default_axis) {}

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    if (!HasAxisInput(node)) {
      node->i_(kaxis, this->default_axis);
      return EnsureAndReturnNode(node);
    }
    const ArrayRef<Value*>& inputs = node->inputs();
    Value* axis_val = inputs[this->axis_index];
    Node* axis_node = axis_val->node();
    if (axis_node->kind() == kConstant) {
      HandleConstantNode(node, axis_node, axis_val);
      return EnsureAndReturnNode(node);
    }
    if (graph->is_constant_initializer(axis_val)) {
      HandleInitializerNode(graph, node, axis_val);
      return EnsureAndReturnNode(node);
    }
    ONNX_ASSERTM(false, "Axis input must be a constant or initializer for promotion to attribute.");
    return node;  // unreachable; silences missing-return warnings on the assert path
  }

 private:
  size_t axis_index;
  int64_t default_axis;

  // True when the axis input slot exists and is not an Undefined placeholder.
  bool HasAxisInput(const Node* node) const {
    const ArrayRef<const Value*>& inputs = node->inputs();
    return inputs.size() > this->axis_index && inputs[this->axis_index]->node()->kind() != kUndefined;
  }

  // Reads the scalar axis out of a Constant node, sets the attribute, and
  // deletes the Constant when this was its only use.
  void HandleConstantNode(Node* node, Node* axis_node, Value* axis_val) const {
    const std::vector<int64_t>& int64s = axis_node->t(kvalue).int64s();
    if (int64s.empty()) {
      // The Constant may carry its payload in raw (byte) form instead
      std::string raw_data = axis_node->t(kvalue).raw();
      ONNX_ASSERTM(
          raw_data.size() != 0 && raw_data.size() % 8 == 0,
          "Raw Data must be non-empty and size must be a multiple of 8");
      const int64_t* raw = reinterpret_cast<const int64_t*>(raw_data.c_str());
      node->i_(kaxis, raw[0]);
    } else {
      node->i_(kaxis, int64s.at(0));
    }
    node->removeInput(this->axis_index);
    if (axis_val->uses().size() < 1) {
      axis_node->destroy();
    }
  }

  // Reads the scalar axis out of a graph initializer, sets the attribute, and
  // erases the initializer when this was its only use.
  void HandleInitializerNode(std::shared_ptr<Graph> graph, Node* node, Value* axis_val) const {
    const std::string initializer_name = axis_val->uniqueName();
    for (const auto& initializer : graph->initializers()) {
      if (initializer.name() == initializer_name) {
        const std::vector<int64_t>& int64s = initializer.int64s();
        if (int64s.empty()) {
          // Initializers can also carry raw (byte) data; previously this path
          // called int64s().at(0) unconditionally and crashed on raw tensors.
          const std::string& raw_data = initializer.raw();
          ONNX_ASSERTM(
              raw_data.size() != 0 && raw_data.size() % 8 == 0,
              "Raw Data must be non-empty and size must be a multiple of 8");
          node->i_(kaxis, reinterpret_cast<const int64_t*>(raw_data.c_str())[0]);
        } else {
          node->i_(kaxis, int64s.at(0));
        }
        node->removeInput(this->axis_index);
        // Remove initializer
        if (axis_val->uses().size() < 1)
          graph->eraseInitializer(initializer_name);
        break;
      }
    }
  }

  // Sanity check: every successful path must have produced the attribute.
  inline Node* EnsureAndReturnNode(Node* node) const {
    ONNX_ASSERTM(node->hasAttribute(kaxis), "Axis attribute not created. This may be a bug.");
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,34 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for BatchNormalization in default domain from version 13 to 14
#pragma once
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for BatchNormalization in default domain from version 13 to 14.
class BatchNormalization_13_14 final : public Adapter {
 public:
  explicit BatchNormalization_13_14() : Adapter("BatchNormalization", OpSetID(13), OpSetID(14)) {}

  // Opset 14 supports at most 3 outputs; a node exposing the training-only
  // outputs 4/5 cannot be converted, so fail loudly.
  void adapt_batch_normalization_13_14(Node* node) const {
    ONNX_ASSERTM(
        node->outputs().size() < 4,
        "BatchNormalization outputs 4 and 5 are not "
        "supported in Opset 14.");
  }

  // Validation only; the node itself is returned unchanged.
  Node* adapt(std::shared_ptr<Graph>, Node* node) const override {
    adapt_batch_normalization_13_14(node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,60 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for broadcasting ops in default domain from version 7 to 6
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for broadcasting ops converting backward (e.g. opset 7 -> 6):
// verifies the broadcast fits the older suffix-matching rules and, when a
// broadcast actually occurs, records it in the legacy `broadcast` attribute.
class BroadcastBackwardCompatibility final : public Adapter {
 public:
  explicit BroadcastBackwardCompatibility(const std::string& op_name, const OpSetID& initial, const OpSetID& target)
      : Adapter(op_name, initial, target) {}

  void adapt_broadcast_backward_compatibility(std::shared_ptr<Graph>, Node* node) const {
    // Verify that broadcasts are allowed in limited spec of opset version 6
    // Multidirectional broadcasting, as defined in Broadcasting.md
    // MathDocGenerator provides differences
    // Main change: encode broadcasting commands as explicit attribute
    const ArrayRef<Value*>& inputs = node->inputs();
    assertInputsAvailable(inputs, name().c_str(), 2);
    const std::vector<Dimension>& A_sizes = inputs[0]->sizes();
    const std::vector<Dimension>& B_sizes = inputs[1]->sizes();
    // Ensure that first input is larger than or equal to the second
    // numpy_unibroadcastable here is considered to be equivalent to opset1_broadcastable
    // This is because backwards conversion does not allow for an axis that is not
    // suffix matching
    // req_broadcast: -1 = not unibroadcastable, 1 = broadcast required (helper.h)
    int req_broadcast = check_numpy_unibroadcastable_and_require_broadcast(A_sizes, B_sizes);
    ONNX_ASSERTM(
        req_broadcast != -1,
        "%s being converted from %d to %d does "
        "not have broadcastable inputs.",
        name().c_str(),
        initial_version().version(),
        target_version().version());
    if (req_broadcast == 1) {
      // If conditional is not fulfilled, we have a default broadcast
      // Add broadcast attribute
      node->i_(kbroadcast, 1);
    }
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_broadcast_backward_compatibility(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,87 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for broadcasting ops in default domain from version 6 to 7
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for broadcasting ops converting forward (e.g. opset 6 -> 7):
// removes the legacy `axis`/`broadcast` attributes, inserting an Unsqueeze on
// input B when the declared axis implies non-suffix-matched broadcasting.
class BroadcastForwardCompatibility final : public Adapter {
 public:
  explicit BroadcastForwardCompatibility(const std::string& op_name, const OpSetID& initial, const OpSetID& target)
      : Adapter(op_name, initial, target) {}

  void adapt_broadcast_forward_compatibility(std::shared_ptr<Graph> graph, Node* node) const {
    // Remove axis and broadcast attributes
    // Assess whether axis requires reshaping
    if (node->hasAttribute(kbroadcast)) {
      const ArrayRef<Value*>& inputs = node->inputs();
      assertInputsAvailable(inputs, name().c_str(), 2);
      const std::vector<Dimension>& A_sizes = inputs[0]->sizes();
      const std::vector<Dimension>& B_sizes = inputs[1]->sizes();
      // Also assert that broadcasting syntax are correct if axis is not present
      if (node->hasAttribute(kaxis)) {
        if (node->i(kaxis) != (int)(A_sizes.size() - B_sizes.size())) {
          // Add a Reshape node before input B: pad B with trailing 1-dims so
          // multidirectional broadcasting lines up with the old axis semantics
          Node* n = graph->create(kUnsqueeze);
          n->addInput(inputs[1]);
          std::vector<int64_t> axes;
          std::vector<Dimension> new_sizes = B_sizes;
          auto size = A_sizes.size() > B_sizes.size() ? A_sizes.size() - B_sizes.size() : 0;
          axes.reserve(size);
          new_sizes.reserve(new_sizes.size() + size);
          for (size_t i = 0; i < size; i++) {
            axes.emplace_back(B_sizes.size() + i);
            new_sizes.emplace_back(Dimension(1));
          }
          if (target_version().version() >= 13) { // Unsqueeze takes 'axes' input
            Tensor t;
            t.elem_type() = TensorProto_DataType_INT64;
            t.sizes() = std::vector<int64_t>{static_cast<int64_t>(axes.size())};
            auto& data = t.int64s();
            for (auto a : axes) {
              data.emplace_back(a);
            }
            Node* constant = graph->create(kConstant);
            constant->insertBefore(node);
            constant->t_(kvalue, t);
            // BUG FIX: the axes constant is the second input of the Unsqueeze
            // node `n`, not of the broadcast op itself. Previously this was
            // `node->addInput(...)`, which left the Unsqueeze without its
            // required axes input and gave the broadcast op a spurious input.
            n->addInput(constant->output());
          } else { // Unsqueeze takes 'axes' attribute
            n->is_(kaxes, std::forward<const std::vector<int64_t>>(axes));
          }
          // Move n before node
          n->insertBefore(node);
          // Set 2nd input to node to 1st of n and output of n to 2nd input to node
          n->output()->setSizes(new_sizes);
          node->replaceInput(1, n->output());
        }
      }
      node->removeAttribute(kbroadcast);
    }
    if (node->hasAttribute(kaxis))
      node->removeAttribute(kaxis);
    // Assert multi_broadcastable on inputs
    const ArrayRef<Value*>& inputs = node->inputs();
    assert_numpy_multibroadcastable(inputs[0]->sizes(), inputs[1]->sizes());
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_broadcast_forward_compatibility(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,34 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Cast in default domain from version 9 to 8
#pragma once
#include <memory>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Cast in default domain from version 9 to 8: the STRING data
// type cannot be cast in opset 8, so such conversions must fail.
class Cast_9_8 final : public Adapter {
 public:
  explicit Cast_9_8() : Adapter("Cast", OpSetID(9), OpSetID(8)) {}

  // Rejects casts whose source or destination element type is STRING.
  void adapt_cast_9_8(std::shared_ptr<Graph>, Node* node) const {
    if (node->inputs()[0]->elemType() == TensorProto_DataType_STRING || node->i(kto) == TensorProto_DataType_STRING)
      ONNX_ASSERTM(false, "Casting From/To STRING data type is not supported"); // fixed: statement was missing ';'
  }

  // Validation only; the node itself is returned unchanged.
  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_cast_9_8(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,56 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Clip in default domain from version 10 to 11
#pragma once
#include <limits>
#include <memory>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Clip in default domain from version 10 to 11: opset 11 Clip
// takes min/max as optional inputs instead of attributes.
class Clip_10_11 final : public Adapter {
 public:
  explicit Clip_10_11() : Adapter("Clip", OpSetID(10), OpSetID(11)) {}

  void adapt_clip_10_11(std::shared_ptr<Graph> graph, Node* node) const {
    bool has_min = node->hasAttribute(kmin);
    bool has_max = node->hasAttribute(kmax);
    // Turn min/max attributes into tensor (if present) and add value as input
    if (has_min) {
      attrToInput(graph, node, node->f(kmin));
      node->removeAttribute(kmin);
    }
    if (has_max) {
      if (!has_min) {
        // Inputs are positional: a max input requires a min input before it,
        // so insert the most permissive minimum (lowest finite float).
        attrToInput(graph, node, std::numeric_limits<float>::lowest());
      }
      attrToInput(graph, node, node->f(kmax));
      node->removeAttribute(kmax);
    }
  }

  // Appends a scalar FLOAT Constant holding `val` as the next input of `node`.
  void attrToInput(std::shared_ptr<Graph> graph, Node* node, float val) const {
    Tensor t;
    t.elem_type() = TensorProto_DataType_FLOAT;
    auto& data = t.floats();
    data.emplace_back(val);
    Node* constant = graph->create(kConstant);
    constant->insertBefore(node);
    constant->t_(kvalue, t);
    node->addInput(constant->output());
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_clip_10_11(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,30 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter indicating compatibility of op between opsets with separate
// definitions
#pragma once
#include <memory>
#include <string>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// No-op adapter used when an op has distinct definitions in the two opsets
// but the existing node is already valid in the target opset.
struct CompatibleAdapter final : public Adapter {
  explicit CompatibleAdapter(const std::string& op_name, const OpSetID& initial, const OpSetID& target)
      : Adapter(op_name, initial, target) {}

  // Nothing to rewrite: return the node unchanged.
  Node* adapt(std::shared_ptr<Graph>, Node* node) const override {
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,46 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Dropout in default domain from version 11 to 12
#pragma once
#include <memory>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
class Dropout_11_12 final : public Adapter {
public:
explicit Dropout_11_12() : Adapter("Dropout", OpSetID(11), OpSetID(12)) {}
void adapt_dropout_11_12(std::shared_ptr<Graph> graph, Node* node) const {
float ratio;
if (node->hasAttribute(kratio)) {
ratio = node->f(kratio);
node->removeAttribute(kratio);
} else {
ratio = 0.5;
}
Tensor t_ratio;
t_ratio.elem_type() = TensorProto_DataType_FLOAT;
auto& data_ratio = t_ratio.floats();
data_ratio.emplace_back(ratio);
Node* constant = graph->create(kConstant);
constant->insertBefore(node);
constant->t_(kvalue, t_ratio);
node->addInput(constant->output());
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
adapt_dropout_11_12(graph, node);
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,106 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter indicating compatibility of op between opsets with separate
// definitions
#pragma once
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter that widens ops restricted to float types in the older opset by
// wrapping them in Cast nodes: inputs are cast to FLOAT before the node and
// the output is cast back to its original element type after it.
struct ExtendSupportedTypes final : public Adapter {
  explicit ExtendSupportedTypes(const std::string& op_name, const OpSetID& initial, const OpSetID& target)
      : Adapter(op_name, initial, target) {}

  // Builds a Cast node over `inputs` converting to `to_type`, with the given
  // output name and shape annotated. The node is NOT inserted into the graph.
  Node* create_cast_op(
      std::shared_ptr<Graph> graph,
      ArrayRef<Value*> inputs,
      const int to_type,
      const std::vector<Dimension>& output_shape,
      const std::string& name) const {
    Node* node = graph->create(kCast, inputs);
    node->i_(kto, to_type);
    node->output()->setUniqueName(name);
    node->output()->setSizes(output_shape);
    node->output()->setElemType(to_type);
    return node;
  }

  void adapt_type_extension(std::shared_ptr<Graph> graph, Node* node) const {
    const ArrayRef<Value*>& inputs = node->inputs();
    const ArrayRef<Value*>& outputs = node->outputs();
    const std::string original_output_name = node->output()->uniqueName();
    // -1 sentinel when the node has no inputs (e.g. Constant)
    const int input_type = inputs.size() > 0 ? inputs[0]->elemType() : -1;
    const int output_type = outputs[0]->elemType();
    // Element types the older (version 8) definition already supports natively
    const std::unordered_set<int>& supported_version8_types = {
        TensorProto_DataType::TensorProto_DataType_FLOAT,
        TensorProto_DataType::TensorProto_DataType_FLOAT16,
        TensorProto_DataType::TensorProto_DataType_DOUBLE,
    };
    // Element types that cannot be round-tripped through a float cast at all
    const std::unordered_set<int>& unsupported_version9_types = {
        TensorProto_DataType::TensorProto_DataType_COMPLEX128,
        TensorProto_DataType::TensorProto_DataType_COMPLEX64,
        TensorProto_DataType::TensorProto_DataType_STRING,
    };
    ONNX_ASSERTM(
        unsupported_version9_types.find(input_type) == unsupported_version9_types.end(), "Unsupported Input Type");
    ONNX_ASSERTM(
        unsupported_version9_types.find(output_type) == unsupported_version9_types.end(), "Unsupported Output Type");
    // Constant has no data inputs to cast; Greater/Less keep their (boolean)
    // output type, so only their inputs need casting.
    bool castInput = (node->kind() != kConstant);
    bool castOutput = (node->kind() != kGreater && node->kind() != kLess);
    if (castInput && supported_version8_types.find(input_type) == supported_version8_types.end()) {
      // Cast every input to FLOAT and rewire the node to the cast outputs
      for (size_t i = 0; i < inputs.size(); i++) {
        Node* pre_cast = create_cast_op(
            graph,
            inputs[i],
            TensorProto_DataType::TensorProto_DataType_FLOAT,
            inputs[i]->sizes(),
            "pre_cast_" + ONNX_NAMESPACE::to_string(i));
        pre_cast->insertBefore(node);
        node->replaceInput(i, pre_cast->output());
      }
    }
    if (castOutput && supported_version8_types.find(output_type) == supported_version8_types.end()) {
      // Snapshot the uses before rewiring: replaceInputWith mutates the use list
      const use_list original_uses(node->output()->uses());
      node->output()->setElemType(TensorProto_DataType::TensorProto_DataType_FLOAT);
      node->output()->setUniqueName(original_output_name + "_intermediate_output");
      // Post-cast takes over the original output name so consumers see the
      // original type under the original name
      Node* post_cast = create_cast_op(graph, outputs[0], output_type, outputs[0]->sizes(), original_output_name);
      post_cast->insertAfter(node);
      for (Use u : original_uses) {
        u.user->replaceInputWith(node->output(), post_cast->output());
      }
      // Repoint any graph outputs that referenced this node at the post-cast
      for (size_t i = 0; i < graph->outputs().size(); i++) {
        if (graph->outputs()[i]->uniqueName() == node->output()->uniqueName()) {
          graph->return_node()->replaceInput(i, post_cast->output());
        }
      }
    }
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_type_extension(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,57 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Gemm in default domain from version 6 to 7
#pragma once
#include <memory>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Gemm in default domain from version 6 to 7: opset 7 drops the
// legacy `broadcast` attribute, so verify C is unidirectionally broadcastable
// against (M, N) before removing it.
class Gemm_6_7 final : public Adapter {
 public:
  explicit Gemm_6_7() : Adapter("Gemm", OpSetID(6), OpSetID(7)) {}

  void adapt_gemm_6_7(std::shared_ptr<Graph>, Node* node) const {
    const ArrayRef<Value*>& inputs = node->inputs();
    assertInputsAvailable(inputs, name().c_str(), 3);
    const auto& A_shape = inputs[0]->sizes();
    const auto& B_shape = inputs[1]->sizes();
    // Determine if C is broadcastable
    const auto& C_shape = inputs[2]->sizes();
    // Create (M, N) to input to numpy_unibroadcastable
    // (respecting transA/transB, which swap the contributing dimensions)
    std::vector<Dimension> MN;
    if (node->hasAttribute(ktransA) && node->i(ktransA) == 1) {
      MN.emplace_back(A_shape[1]);
    } else {
      MN.emplace_back(A_shape[0]);
    }
    if (node->hasAttribute(ktransB) && node->i(ktransB) == 1) {
      MN.emplace_back(B_shape[0]);
    } else {
      MN.emplace_back(B_shape[1]);
    }
    ONNX_ASSERTM(
        check_numpy_unibroadcastable_and_require_broadcast(MN, C_shape) != -1,
        "Gemm being converted from 6 to 7 does not have "
        "broadcastable inputs.");
    if (node->hasAttribute(kbroadcast))
      node->removeAttribute(kbroadcast);
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_gemm_6_7(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,63 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Gemm in default domain from version 7 to 6
#pragma once
#include <memory>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Gemm in default domain from version 7 to 6: verifies C fits the
// older broadcasting rules and, when a broadcast occurs, re-adds the legacy
// `broadcast` attribute.
class Gemm_7_6 final : public Adapter {
 public:
  explicit Gemm_7_6() : Adapter("Gemm", OpSetID(7), OpSetID(6)) {}

  void adapt_gemm_7_6(std::shared_ptr<Graph>, Node* node) const {
    const ArrayRef<Value*>& inputs = node->inputs();
    assertInputsAvailable(inputs, name().c_str(), 3);
    const auto& A_shape = inputs[0]->sizes();
    const auto& B_shape = inputs[1]->sizes();
    // Determine if C is broadcastable
    const auto& C_shape = inputs[2]->sizes();
    // Create (M, N) to input to numpy_unibroadcastable
    // TODO: Reconcile fact that shapes aren't determined for 1st 2 inputs
    // (respecting transA/transB, which swap the contributing dimensions)
    std::vector<Dimension> MN;
    if (node->hasAttribute(ktransA) && node->i(ktransA) == 1) {
      MN.emplace_back(A_shape[1]);
    } else {
      MN.emplace_back(A_shape[0]);
    }
    if (node->hasAttribute(ktransB) && node->i(ktransB) == 1) {
      MN.emplace_back(B_shape[0]);
    } else {
      MN.emplace_back(B_shape[1]);
    }
    // req_broadcast: -1 = not unibroadcastable, 1 = broadcast required (helper.h)
    int req_broadcast = check_numpy_unibroadcastable_and_require_broadcast(MN, C_shape);
    ONNX_ASSERTM(
        req_broadcast != -1,
        "%s being converted from %d to %d does "
        "not have broadcastable inputs.",
        name().c_str(),
        initial_version().version(),
        target_version().version());
    if (req_broadcast == 1) {
      node->i_(kbroadcast, 1);
    }
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_gemm_7_6(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,36 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for GridSample in default domain from version 19 to 20
#pragma once
#include <memory>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
class GridSample_19_20 final : public Adapter {
public:
explicit GridSample_19_20() : Adapter("GridSample", OpSetID(19), OpSetID(20)) {}
void adapt_gridsample_19_20(std::shared_ptr<Graph>, Node* node) const {
if (node->hasAttribute(kmode) && (node->s(kmode) == "bilinear")) {
node->s_(kmode, "linear");
}
if (node->hasAttribute(kmode) && (node->s(kmode) == "bicubic")) {
node->s_(kmode, "cubic");
}
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
adapt_gridsample_19_20(graph, node);
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,128 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for GroupNormalization in default domain from version 20 to 21
#pragma once
#include <memory>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for GroupNormalization in default domain from version 20 to 21:
// opset 21 expects per-channel scale/bias, so the per-group values are
// repeated channels-per-group times via Reshape/Expand/Reshape subgraphs.
class GroupNormalization_20_21 final : public Adapter {
 public:
  explicit GroupNormalization_20_21() : Adapter("GroupNormalization", OpSetID(20), OpSetID(21)) {}

  // Replaces node input `input_id` with Reshape -> Expand -> Reshape of the
  // original value (the bottom row of the diagram in
  // adapt_group_normalization_20_21).
  void transform_input(
      std::shared_ptr<Graph> graph,
      Node* node,
      int64_t input_id,
      Value* reshape0_shape,
      Value* reshape1_shape,
      Value* expand_shape) const {
    Node* reshape0 = graph->create(kReshape);
    reshape0->addInput(node->inputs()[input_id]);
    reshape0->addInput(reshape0_shape);
    reshape0->insertBefore(node);

    Node* expand = graph->create(kExpand);
    expand->addInput(reshape0->output());
    expand->addInput(expand_shape);
    expand->insertBefore(node);

    Node* reshape1 = graph->create(kReshape);
    reshape1->addInput(expand->output());
    reshape1->addInput(reshape1_shape);
    reshape1->insertBefore(node);

    node->replaceInput(input_id, reshape1->output());
  }

  void adapt_group_normalization_20_21(std::shared_ptr<Graph> graph, Node* node) const {
    // Perform following sequence of ops on scale/bias, effect is similar to numpy.repeat()
    //
    // Shape<start=1,end=2>(input0) -- Div(Shape_out (C), num_groups)
    // |
    // Reshape(input1/2, [-1, 1]) ----------- Expand(Reshape_out, [1, Div_out]) -- Reshape(Expand_out, [-1])
    //
    // The helper function transform_input() implements the bottom row of the diagram

    // Get number of channels: C (Shape restricted to dim 1 via start/end)
    Symbol kShape("Shape");
    Node* C = graph->create(kShape);
    C->i_(kstart, 1);
    C->i_(kend, 2);
    C->addInput(node->inputs()[0]);
    C->insertBefore(node);

    // Get number of channels per group: C / num_groups
    Tensor tensor_num_groups;
    tensor_num_groups.elem_type() = TensorProto_DataType_INT64;
    int64_t num_groups = node->i(knum_groups);
    tensor_num_groups.sizes() = {1};
    tensor_num_groups.int64s() = {num_groups};
    Node* constant_num_groups = graph->create(kConstant);
    constant_num_groups->t_(kvalue, tensor_num_groups);
    constant_num_groups->insertBefore(node);
    Node* div = graph->create(kDiv);
    div->addInput(C->output());
    div->addInput(constant_num_groups->output());
    div->insertBefore(node);

    // Get Expand shape: [1, Div_out]
    Tensor tensor_one;
    tensor_one.elem_type() = TensorProto_DataType_INT64;
    tensor_one.sizes() = {1};
    tensor_one.int64s() = {1};
    Node* constant_one = graph->create(kConstant);
    constant_one->t_(kvalue, tensor_one);
    constant_one->insertBefore(node);
    Node* concat = graph->create(kConcat);
    concat->i_(kaxis, 0);
    concat->addInput(constant_one->output());
    concat->addInput(div->output());
    concat->insertBefore(node);

    // Get shape of first reshape: [-1, 1]
    Tensor tensor_reshape0_shape;
    tensor_reshape0_shape.elem_type() = TensorProto_DataType_INT64;
    tensor_reshape0_shape.sizes() = {2};
    tensor_reshape0_shape.int64s() = {-1, 1};
    Node* constant_reshape0_shape = graph->create(kConstant);
    constant_reshape0_shape->t_(kvalue, tensor_reshape0_shape);
    constant_reshape0_shape->insertBefore(node);

    // Get shape of last reshape: [-1]
    Tensor tensor_reshape1_shape;
    tensor_reshape1_shape.elem_type() = TensorProto_DataType_INT64;
    tensor_reshape1_shape.sizes() = {1};
    tensor_reshape1_shape.int64s() = {-1};
    Node* constant_reshape1_shape = graph->create(kConstant);
    constant_reshape1_shape->t_(kvalue, tensor_reshape1_shape);
    constant_reshape1_shape->insertBefore(node);

    // transform scale and bias (inputs 1 and 2)
    transform_input(
        graph, node, 1, constant_reshape0_shape->output(), constant_reshape1_shape->output(), concat->output());
    transform_input(
        graph, node, 2, constant_reshape0_shape->output(), constant_reshape1_shape->output(), concat->output());

    // Set stash_type to match the input's element type
    node->i_(kstash_type, node->inputs()[0]->elemType());
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_group_normalization_20_21(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,36 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for MaxPool in default domain from version 8 to 7
#pragma once
#include <memory>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for MaxPool in default domain from version 8 to 7.
class MaxPool_8_7 final : public Adapter {
 public:
  explicit MaxPool_8_7() : Adapter("MaxPool", OpSetID(8), OpSetID(7)) {}

  // Opset 7 MaxPool has no Indices output and no storage_order attribute:
  // fail on the former, silently drop the latter.
  void adapt_maxpool_8_7(std::shared_ptr<Graph>, Node* node) const {
    const ArrayRef<Value*>& outputs = node->outputs();
    ONNX_ASSERTM(outputs.size() != 2, "Opset version 7 of MaxPool cannot include Indices output");
    if (node->hasAttribute(kstorage_order))
      node->removeAttribute(kstorage_order);
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_maxpool_8_7(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,32 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter indicating lack of a previous version of some op before a given
// opset version.
#pragma once
#include <memory>
#include <string>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Placeholder adapter for operators that simply did not exist before the
// target opset: any attempt to convert through it aborts.
class NoPreviousVersionAdapter final : public Adapter {
 public:
  explicit NoPreviousVersionAdapter(const std::string& op_name, const OpSetID& initial, const OpSetID& target)
      : Adapter(op_name, initial, target) {}

  // Always fails: there is no earlier form of this operator to convert to.
  Node* adapt(std::shared_ptr<Graph>, Node* node) const override {
    ONNX_ASSERTM(false, "No Previous Version of %s exists", name().c_str());
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,56 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Pad in default domain from version 10 to 11
#pragma once
#include <memory>
#include <vector>
namespace ONNX_NAMESPACE {
namespace version_conversion {
class Pad_10_11 final : public Adapter {
public:
explicit Pad_10_11() : Adapter("Pad", OpSetID(10), OpSetID(11)) {}
void adapt_pad_10_11(std::shared_ptr<Graph> graph, Node* node) const {
// Turn pads attribute into input
Tensor t_pads;
t_pads.elem_type() = TensorProto_DataType_INT64;
auto& data_pads = t_pads.int64s();
for (int64_t shape : node->is(kpads)) {
data_pads.emplace_back(shape);
}
t_pads.sizes() = std::vector<int64_t>{(int64_t)data_pads.size()};
Value* v_pads = graph->addInitializerAndCreateValue(t_pads);
node->addInput(v_pads);
node->removeAttribute(kpads);
// Turn value attribute into input
if (!node->hasAttribute(kmode) || node->s(kmode) == "constant") {
if (!node->hasAttribute(kvalue))
node->f_(kvalue, 0.);
Tensor t_value;
t_value.elem_type() = TensorProto_DataType_FLOAT;
auto& data_value = t_value.floats();
data_value.emplace_back(node->f(kvalue));
Node* constant = graph->create(kConstant);
constant->insertBefore(node);
constant->t_(kvalue, t_value);
node->addInput(constant->output());
node->removeAttribute(kvalue);
}
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
adapt_pad_10_11(graph, node);
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,77 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Cast in default domain from version 9 to 8
#pragma once
#include <memory>
#include <vector>
#include "onnx/version_converter/adapters/type_restriction.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Element types used with QuantizeLinear/DequantizeLinear in opset 21 that
// the opset-20 schemas do not accept; conversions touching these types are
// rejected by the TypeRestriction base below.
static const std::vector<TensorProto_DataType> q_dq_20_unallowed_types = {
    TensorProto_DataType_UINT16,
    TensorProto_DataType_INT16,
    TensorProto_DataType_UINT4,
    TensorProto_DataType_INT4};
// QuantizeLinear: downgrade from opset 21 to 20.
class QuantizeLinear_21_20 final : public TypeRestriction {
 public:
  explicit QuantizeLinear_21_20()
      : TypeRestriction("QuantizeLinear", OpSetID(21), OpSetID(20), q_dq_20_unallowed_types) {}

  // Opset 20 has neither block_size nor output_dtype; drop both attributes,
  // rejecting nodes whose semantics would change without them.
  void adapt_quantize_linear_21_20(std::shared_ptr<Graph>, Node* node) const {
    if (node->hasAttribute(kblock_size)) {
      ONNX_ASSERTM(
          node->i(kblock_size) == 0,
          "Blocked quantization is not supported for Opset Version %d.",
          target_version().version());
      node->removeAttribute(kblock_size);
    }
    if (node->hasAttribute(koutput_dtype)) {
      // Acceptable only when the dtype is the uint8 default or a zero-point
      // input (3rd input) is present to carry the type instead.
      ONNX_ASSERTM(
          node->i(koutput_dtype) == TensorProto_DataType_UINT8 || node->inputs().size() >= 3,
          "Attribute output_dtype is not supported for Opset Version %d, supply a zero-point tensor instead",
          target_version().version());
      node->removeAttribute(koutput_dtype);
    }
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_type_restriction(graph, node);
    adapt_quantize_linear_21_20(graph, node);
    return node;
  }
};
// DequantizeLinear: downgrade from opset 21 to 20.
class DequantizeLinear_21_20 final : public TypeRestriction {
 public:
  explicit DequantizeLinear_21_20()
      : TypeRestriction("DequantizeLinear", OpSetID(21), OpSetID(20), q_dq_20_unallowed_types) {}

  // Opset 20 has no block_size attribute; only the non-blocked (0) form can
  // be downgraded.
  void adapt_dequantize_linear_21_20(std::shared_ptr<Graph>, Node* node) const {
    if (!node->hasAttribute(kblock_size)) {
      return;
    }
    ONNX_ASSERTM(
        node->i(kblock_size) == 0,
        "Blocked quantization is not supported for Opset Version %d.",
        target_version().version());
    node->removeAttribute(kblock_size);
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_type_restriction(graph, node);
    adapt_dequantize_linear_21_20(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,32 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for all ops that remove consumed_inputs
#pragma once
#include <memory>
#include <string>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Shared adapter for every op whose only opset change is dropping the
// legacy 'consumed_inputs' attribute.
class RemoveConsumedInputs : public Adapter {
 public:
  explicit RemoveConsumedInputs(const std::string& op_name, const OpSetID& initial, const OpSetID& target)
      : Adapter(op_name, initial, target) {}

  Node* adapt(std::shared_ptr<Graph>, Node* node) const override {
    // The attribute is optional, so remove it only when present.
    if (node->hasAttribute(kconsumed_inputs)) {
      node->removeAttribute(kconsumed_inputs);
    }
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,49 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Reshape in default domain from version 4 to 5
#pragma once
#include <memory>
#include "onnx/version_converter/adapters/remove_consumed_inputs.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
class Reshape_4_5 final : public RemoveConsumedInputs {
public:
explicit Reshape_4_5() : RemoveConsumedInputs("Reshape", OpSetID(4), OpSetID(5)) {}
void adapt_reshape_4_5(std::shared_ptr<Graph> graph, Node* node) const {
// Create Input from Attribute - add as Initializer
// Create tensor for value attribute
Tensor t;
t.elem_type() = TensorProto_DataType_INT64;
auto& data = t.int64s();
// Turn shapes attribute into tensor
for (int64_t shape : node->is(kshape)) {
data.emplace_back(shape);
}
// Add value as input to node
Node* constant = graph->create(kConstant);
constant->insertBefore(node);
constant->t_(kvalue, t);
node->addInput(constant->output());
// Remove kshape attribute
node->removeAttribute(kshape);
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
RemoveConsumedInputs::adapt(graph, node);
adapt_reshape_4_5(graph, node);
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,73 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Reshape in default domain from version 5 to 4
#pragma once
#include <memory>
#include <string>
#include <utility>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Reshape in default domain from version 5 to 4.
// Converts the opset-5 'shape' input back into the opset-4 'shape'
// attribute. Only a statically known shape (a Constant node or a graph
// initializer) can be converted; anything else aborts.
class Reshape_5_4 final : public Adapter {
 public:
  explicit Reshape_5_4() : Adapter("Reshape", OpSetID(5), OpSetID(4)) {}

  void adapt_reshape_5_4(std::shared_ptr<Graph> graph, Node* node) const {
    // Identify if shape is statically determined; if so, feed as attribute
    const ArrayRef<Value*>& inputs = node->inputs();
    // Get shape from initializer or constant operator, not actual shape
    // Identify whether we have a Constant Op or an Initializer
    Value* const_val = inputs[1];
    Node* node_ptr = const_val->node();
    if (node_ptr->kind() == kConstant) {
      // Get value attribute of kConstant
      const std::vector<int64_t>& int64s = node_ptr->t(kvalue).int64s();
      if (int64s.empty()) {
        // Also handle raw data: reinterpret the byte blob as int64s.
        // NOTE(review): assumes little-endian int64 host layout -- TODO
        // confirm this matches how the Tensor's raw field is produced.
        std::string raw_data = node_ptr->t(kvalue).raw();
        ONNX_ASSERTM(
            raw_data.size() != 0 && raw_data.size() % 8 == 0,
            "Raw Data must be non-empty and size must be a multiple of 8");
        int64_t* raw = (int64_t*)const_cast<char*>(raw_data.c_str());
        node->is_(kshape, std::vector<int64_t>(raw, raw + node_ptr->t(kvalue).size_from_dim(0)));
      } else {
        node->is_(kshape, std::forward<const std::vector<int64_t>>(int64s));
      }
      // If Constant node isn't used anywhere else, remove it
      node->removeInput(1);
      if (const_val->uses().size() < 1) {
        node_ptr->destroy();
      }
    } else {
      // Get Value name, find Initializer with same name
      for (const auto& initializer : graph->initializers()) {
        if (initializer.name() == inputs[1]->uniqueName()) {
          node->is_(kshape, std::forward<const std::vector<int64_t>>(initializer.int64s()));
          node->removeInput(1);
          // Remove initializer (only when nothing else still reads it)
          if (const_val->uses().size() < 1)
            graph->eraseInitializerAndInput(const_val);
          break;
        }
      }
    }
    // If neither branch produced the attribute, the shape was dynamic.
    ONNX_ASSERTM(node->hasAttribute(kshape), "No initializer or constant input to Reshape node found");
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_reshape_5_4(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,50 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Resize in default domain from version 10 to 11
#pragma once
#include <memory>
#include <vector>
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Resize in default domain from version 10 to 11.
// Opset 11 Resize has an extra 'roi' input between X and scales; insert a
// constant roi and shift the original scales input one slot to the right.
class Resize_10_11 final : public Adapter {
 public:
  explicit Resize_10_11() : Adapter("Resize", OpSetID(10), OpSetID(11)) {}

  void adapt_resize_10_11(std::shared_ptr<Graph> graph, Node* node) const {
    int input_rank = node->inputs()[0]->sizes().size();
    // Re-append the old scales value so it ends up as input #2...
    Value* scales_input = node->inputs()[1];
    node->addInput(scales_input);
    // Build a 2*rank float tensor of rank zeros followed by rank ones --
    // presumably the identity roi (per-axis starts then ends); confirm
    // against the Resize-11 schema.
    Tensor t;
    t.sizes() = std::vector<int64_t>{2 * input_rank};
    t.elem_type() = TensorProto_DataType_FLOAT;
    auto& data = t.floats();
    for (int i = 0; i < input_rank; i++)
      data.emplace_back(0);
    for (int i = 0; i < input_rank; i++)
      data.emplace_back(1);
    Node* constant = graph->create(kConstant);
    constant->insertBefore(node);
    constant->t_(kvalue, t);
    // ...and overwrite slot #1 with the roi constant.
    node->replaceInput(1, constant->output());
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_resize_10_11(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,64 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Scan in default domain from version 8 to 9
#pragma once
#include <memory>
#include <utility>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Scan in default domain from version 8 to 9.
// Renames 'directions', drops the first (empty-named, i.e. absent optional)
// input, and strips the leading dimension from input/output shapes.
struct Scan_8_9 final : public Adapter {
  explicit Scan_8_9() : Adapter("Scan", OpSetID(8), OpSetID(9)) {}

  void adapt_scan_8_9(std::shared_ptr<Graph>, Node* node) const {
    // Snapshot inputs/outputs before rewiring below.
    const std::vector<Value*> inputs(node->inputs().vec());
    const std::vector<Value*> outputs(node->outputs().vec());
    // Handling Attribute Changes: 'directions' -> 'scan_input_directions'.
    Symbol dirs = Symbol("directions");
    if (node->hasAttribute(dirs)) {
      // NOTE(review): the vector is const, so std::move degrades to a copy.
      const std::vector<int64_t> directions(node->is(dirs));
      node->removeAttribute(dirs);
      node->is_(Symbol("scan_input_directions"), std::move(directions));
    }
    // Handling Input and Output Changes
    node->removeAllInputs();
    // The first opset-8 input must have an empty name (i.e. the optional
    // input was omitted) -- presumably a supplied sequence_lens cannot be
    // expressed in opset 9; confirm against the Scan-8 schema.
    ONNX_ASSERTM(inputs[0]->uniqueName() == "", "Unsupported conversion to opset 9");
    for (Value* input : inputs) {
      // NOTE(review): inputs with no recorded shape are dropped from the
      // node entirely, not just left unmodified.
      if (!input->sizes().empty()) {
        // Strip the leading dimension from each remaining input.
        std::vector<Dimension> new_sizes(input->sizes().begin() + 1, input->sizes().end());
        input->setSizes(new_sizes);
        node->addInput(input);
      }
    }
    for (Value* output : outputs) {
      // Same shape fixup for outputs; their wiring is unchanged.
      if (!output->sizes().empty()) {
        std::vector<Dimension> new_sizes(output->sizes().begin() + 1, output->sizes().end());
        output->setSizes(new_sizes);
      }
    }
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_scan_8_9(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,93 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Scan in default domain from version 9 to 8
#pragma once
#include <memory>
#include <utility>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Scan in default domain from version 9 to 8.
// Renames 'scan_input_directions' back to 'directions', rejects any
// non-default axes/output-direction attributes, re-inserts the first
// (empty-named optional) input slot, and prepends a dimension of 1 to every
// input/output shape.
struct Scan_9_8 final : public Adapter {
  explicit Scan_9_8() : Adapter("Scan", OpSetID(9), OpSetID(8)) {}

  void adapt_scan_9_8(std::shared_ptr<Graph>, Node* node) const {
    // Snapshot inputs/outputs before rewiring below.
    const std::vector<Value*> inputs(node->inputs().vec());
    const std::vector<Value*> outputs(node->outputs().vec());
    // Handling Attribute Changes: rename scan_input_directions.
    Symbol input_dirs = Symbol("scan_input_directions");
    if (node->hasAttribute(input_dirs)) {
      // NOTE(review): the vector is const, so std::move degrades to a copy.
      const std::vector<int64_t> scan_input_directions(node->is(input_dirs));
      node->removeAttribute(input_dirs);
      node->is_(Symbol("directions"), std::move(scan_input_directions));
    }
    // Only all-zero (default) output directions are representable in opset 8.
    Symbol output_dirs = Symbol("scan_output_directions");
    if (node->hasAttribute(output_dirs)) {
      const std::vector<int64_t> scan_output_directions(node->is(output_dirs));
      for (int64_t x : scan_output_directions) {
        ONNX_ASSERTM(x == 0, "Unsupported output direction for Version 8");
      }
      node->removeAttribute(output_dirs);
    }
    // Likewise only default (0) scan input axes...
    Symbol input_axes = Symbol("scan_input_axes");
    if (node->hasAttribute(input_axes)) {
      const std::vector<int64_t> scan_input_axes(node->is(input_axes));
      for (int64_t x : scan_input_axes) {
        ONNX_ASSERTM(x == 0, "Unsupported input axes for Version 8");
      }
      node->removeAttribute(input_axes);
    }
    // ...and default (0) scan output axes.
    Symbol output_axes = Symbol("scan_output_axes");
    if (node->hasAttribute(output_axes)) {
      const std::vector<int64_t> scan_output_axes(node->is(output_axes));
      for (int64_t x : scan_output_axes) {
        ONNX_ASSERTM(x == 0, "Unsupported output axes for Version 8");
      }
      node->removeAttribute(output_axes);
    }
    // Handling Input and Output Changes
    node->removeAllInputs();
    // Create an empty-named placeholder Value for the first opset-8 input
    // slot (marking the optional input as absent).
    // NOTE(review): raw `new` -- presumably the Value is owned by the IR
    // once wired in via addInput; confirm against ir.h ownership rules.
    Value* v = new Value(node, 0);
    v->setUniqueName("");
    v->setElemType(TensorProto_DataType::TensorProto_DataType_INT32);
    node->addInput(v);
    // Prepend a dimension of 1 to every input and output shape.
    for (Value* input : inputs) {
      std::vector<Dimension> new_sizes{Dimension(1)};
      new_sizes.insert(new_sizes.end(), input->sizes().begin(), input->sizes().end());
      input->setSizes(new_sizes);
      node->addInput(input);
    }
    for (Value* output : outputs) {
      std::vector<Dimension> new_sizes{Dimension(1)};
      new_sizes.insert(new_sizes.end(), output->sizes().begin(), output->sizes().end());
      output->setSizes(new_sizes);
    }
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_scan_9_8(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,43 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Scatter in default domain from version 10 to 11
#pragma once
#include <memory>
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Scatter in default domain from version 10 to 11.
// Scatter is deprecated in opset 11; rebuild the node as ScatterElements.
class Scatter_10_11 final : public Adapter {
 public:
  explicit Scatter_10_11() : Adapter("Scatter", OpSetID(10), OpSetID(11)) {}

  // Returns the freshly created replacement node; the original is destroyed.
  Node* adapt_scatter_10_11(std::shared_ptr<Graph> graph, Node* node) const {
    // ScatterElements is a drop-in replacement: same three inputs, same axis
    // (defaulting to 0 when the attribute is absent).
    Node* replacement = graph->create(kScatterElements);
    replacement->i_(kaxis, node->hasAttribute(kaxis) ? node->i(kaxis) : 0);
    replacement->addInput(node->inputs()[0]);
    replacement->addInput(node->inputs()[1]);
    replacement->addInput(node->inputs()[2]);
    node->replaceAllUsesWith(replacement);
    replacement->insertBefore(node);
    node->destroy();
    return replacement;
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    return adapt_scatter_10_11(graph, node);
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,54 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Slice in default domain from version 9 to 10
#pragma once
#include <memory>
#include <vector>
namespace ONNX_NAMESPACE {
namespace version_conversion {
class Slice_9_10 final : public Adapter {
public:
explicit Slice_9_10() : Adapter("Slice", OpSetID(9), OpSetID(10)) {}
void attrToInput(std::shared_ptr<Graph> graph, Node* node, const std::vector<int64_t>& attr) const {
Tensor t;
t.elem_type() = TensorProto_DataType_INT64;
t.sizes() = std::vector<int64_t>{static_cast<int64_t>(attr.size())};
auto& data = t.int64s();
for (auto a : attr) {
data.emplace_back(a);
}
Node* constant = graph->create(kConstant);
constant->insertBefore(node);
constant->t_(kvalue, t);
node->addInput(constant->output());
}
void adapt_slice_9_10(std::shared_ptr<Graph> graph, Node* node) const {
attrToInput(graph, node, node->is(kstarts));
node->removeAttribute(kstarts);
attrToInput(graph, node, node->is(kends));
node->removeAttribute(kends);
if (node->hasAttribute(kaxes)) {
attrToInput(graph, node, node->is(kaxes));
node->removeAttribute(kaxes);
}
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
adapt_slice_9_10(graph, node);
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,87 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Softmax and LogSoftmax in default domain from version 12 to 13
#pragma once
#include <memory>
#include <string>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Softmax and LogSoftmax in default domain from version 12 to 13.
// When the old axis is not the last axis, emulates the opset-12 behavior by
// flattening to 2-D, applying the op along the last axis, and reshaping the
// result back to the original shape.
class Softmax_12_13 final : public Adapter {
 public:
  explicit Softmax_12_13(const std::string& op_name) : Adapter(op_name, OpSetID(12), OpSetID(13)) {}

  void adapt_softmax_12_13(std::shared_ptr<Graph> graph, Node* node) const {
    // Opset-12 default axis is 1; normalize negative axes against the rank.
    int old_axis = node->hasAttribute(kaxis) ? node->i(kaxis) : 1;
    int input_rank = node->inputs()[0]->sizes().size();
    if (old_axis < 0)
      old_axis = input_rank + old_axis;
    if (old_axis == input_rank - 1)
      // Last axis: old and new semantics coincide; just pin axis to -1.
      node->i_(kaxis, -1);
    else {
      // -- shape ------------------
      // / |
      // ----- flatten -- softmax -- reshape
      // get original softmax's input shape
      Symbol kShape("Shape");
      Node* shape = graph->create(kShape);
      shape->addInput(node->inputs()[0]);
      shape->insertBefore(node);
      // Insert Flatten node before softmax
      Node* flatten = graph->create(kFlatten);
      flatten->addInput(node->inputs()[0]);
      flatten->insertBefore(node);
      flatten->i_(kaxis, old_axis);
      node->replaceInput(0, flatten->output());
      // Softmax along the last axis of the flattened 2D tensor
      node->i_(kaxis, -1);
      // Insert Reshape node after softmax; it takes over the node's
      // original output name so downstream references stay valid.
      const std::string original_output_name = node->output()->uniqueName();
      const use_list original_uses(node->output()->uses());
      node->output()->setUniqueName(original_output_name + "_intermediate");
      Node* reshape = graph->create(kReshape);
      reshape->addInput(node->outputs()[0]);
      reshape->addInput(shape->output());
      reshape->output()->setUniqueName(original_output_name);
      reshape->insertAfter(node);
      // Fix outputs & wiring: the reshape inherits the old shape and type,
      // and every former consumer is repointed at it.
      if (node->output()->sizes().size() != 0) {
        reshape->output()->setSizes(node->output()->sizes());
      }
      reshape->output()->setElemType(node->output()->elemType());
      node->output()->wipeSizes();
      for (Use u : original_uses) {
        u.user->replaceInputWith(node->output(), reshape->output());
      }
      // Graph-level outputs must also be rewired to the reshape's output.
      for (size_t i = 0; i < graph->outputs().size(); i++) {
        if (graph->outputs()[i]->uniqueName() == original_output_name) {
          graph->return_node()->replaceInput(i, reshape->output());
        }
      }
    }
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_softmax_12_13(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,47 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Split in default domain from version 12 to 13
#pragma once
#include <memory>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
class Split_12_13 : public Adapter {
public:
explicit Split_12_13() : Adapter("Split", OpSetID(12), OpSetID(13)) {}
void attrToInput(std::shared_ptr<Graph> graph, Node* node, std::vector<int64_t> axes) const {
Tensor t;
t.elem_type() = TensorProto_DataType_INT64;
t.sizes() = std::vector<int64_t>{static_cast<int64_t>(axes.size())};
auto& data = t.int64s();
for (auto a : axes) {
data.emplace_back(a);
}
Node* constant = graph->create(kConstant);
constant->insertBefore(node);
constant->t_(kvalue, t);
node->addInput(constant->output());
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
if (node->hasAttribute(ksplit)) {
attrToInput(graph, node, node->is(ksplit));
node->removeAttribute(ksplit);
}
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,70 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Split in default domain from version 13 to 12
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Split in default domain from version 13 to 12:
// converts the 'split' input back into the 'split' attribute. Only a
// statically known split (Constant node or graph initializer) converts;
// anything else aborts.
class Split_13_12 : public Adapter {
 public:
  explicit Split_13_12() : Adapter("Split", OpSetID(13), OpSetID(12)) {}
  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    // Identify if 'split' is statically determined; if so, feed as attribute
    const ArrayRef<Value*>& inputs = node->inputs();
    // Get 'split' from initializer or constant operator
    // Identify whether we have a Constant Op or an Initializer
    // NOTE(review): inputs[1] is read unconditionally -- presumably this
    // adapter is only invoked when the optional input exists; confirm.
    Value* const_val = inputs[1];
    Node* node_ptr = const_val->node();
    if (node_ptr->kind() == kConstant) {
      // Get value attribute of kConstant
      const std::vector<int64_t>& int64s = node_ptr->t(kvalue).int64s();
      if (int64s.empty()) {
        // Also handle raw data: reinterpret the byte blob as int64s.
        // NOTE(review): assumes little-endian int64 host layout.
        std::string raw_data = node_ptr->t(kvalue).raw();
        ONNX_ASSERTM(
            raw_data.size() != 0 && raw_data.size() % 8 == 0,
            "Raw Data must be non-empty and size must be a multiple of 8");
        int64_t* raw = (int64_t*)const_cast<char*>(raw_data.c_str());
        node->is_(ksplit, std::vector<int64_t>(raw, raw + node_ptr->t(kvalue).size_from_dim(0)));
      } else {
        node->is_(ksplit, std::forward<const std::vector<int64_t>>(int64s));
      }
      // If Constant node isn't used anywhere else, remove it
      node->removeInput(1);
      if (const_val->uses().size() < 1) {
        node_ptr->destroy();
      }
    } else {
      // Get Value name, find Initializer with same name
      for (const auto& initializer : graph->initializers()) {
        if (initializer.name() == inputs[1]->uniqueName()) {
          node->is_(ksplit, std::forward<const std::vector<int64_t>>(initializer.int64s()));
          node->removeInput(1);
          // Remove initializer (only when nothing else still reads it)
          if (const_val->uses().size() < 1)
            graph->eraseInitializerAndInput(const_val);
          break;
        }
      }
    }
    // If neither branch produced the attribute, the split was dynamic.
    ONNX_ASSERTM(node->hasAttribute(ksplit), "No initializer or constant input to node found");
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,38 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Split in default domain from version 17 to 18
#pragma once
#include <memory>
#include "onnx/version_converter/adapters/adapter.h"
#include "onnx/version_converter/adapters/transformers.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Split in default domain from version 17 to 18.
class Split_17_18 : public Adapter {
 public:
  explicit Split_17_18() : Adapter("Split", OpSetID(17), OpSetID(18)) {}

  // Derive num_outputs from the node's actual output count.
  void adapt_split_17_18(std::shared_ptr<Graph>, Node* node) const {
    node->i_(knum_outputs, static_cast<int64_t>(node->outputs().size()));
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    // Fill in num_outputs only when the node has neither that attribute
    // nor an explicit 'split' second input.
    const bool has_split_input = node->inputs().size() == 2;
    if (!node->hasAttribute(knum_outputs) && !has_split_input) {
      adapt_split_17_18(graph, node);
    }
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,41 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Sum in default domain from version 8 to 7
#pragma once
#include <memory>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Sum in default domain from version 8 to 7.
class Sum_8_7 final : public Adapter {
 public:
  explicit Sum_8_7() : Adapter("Sum", OpSetID(8), OpSetID(7)) {}

  // Checks every adjacent pair of input shapes via
  // assert_numpy_multibroadcastable, aborting on shapes the opset-7 form
  // cannot represent; the node itself is left untouched.
  void adapt_sum_8_7(std::shared_ptr<Graph>, Node* node) const {
    const ArrayRef<Value*>& inputs = node->inputs();
    for (size_t idx = 1; idx < inputs.size(); ++idx) {
      std::vector<Dimension> lhs_sizes = inputs[idx - 1]->sizes();
      std::vector<Dimension> rhs_sizes = inputs[idx]->sizes();
      assert_numpy_multibroadcastable(lhs_sizes, rhs_sizes);
    }
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_sum_8_7(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,43 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for TopK in default domain from version 9 to 10
#pragma once
#include <memory>
#include <vector>
namespace ONNX_NAMESPACE {
namespace version_conversion {
class TopK_9_10 final : public Adapter {
public:
explicit TopK_9_10() : Adapter("TopK", OpSetID(9), OpSetID(10)) {}
void adapt_topk_9_10(std::shared_ptr<Graph> graph, Node* node) const {
Tensor t;
t.elem_type() = TensorProto_DataType_INT64;
t.sizes() = std::vector<int64_t>{1};
auto& data = t.int64s();
data.emplace_back(node->i(kk));
Node* constant = graph->create(kConstant);
constant->insertBefore(node);
constant->t_(kvalue, t);
node->addInput(constant->output());
node->removeAttribute(kk);
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
adapt_topk_9_10(graph, node);
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,84 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <cinttypes>
#include <string>
#include <utility>
#include <vector>
// Node transformers commonly used in version-adapters:
// Capture context by copying values; the graph is unused by these transformers.
#define NODE_TRANSFORMER(node) [=](std::shared_ptr<Graph>, Node * node)
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Returns a transformer that deletes attribute `attr` from a node when the
// node carries it; nodes without the attribute pass through untouched.
inline NodeTransformerFunction RemoveAttribute(Symbol attr) {
  return NODE_TRANSFORMER(node) {
    const bool present = node->hasAttribute(attr);
    if (present) {
      node->removeAttribute(attr);
    }
    return node;
  };
}
// Returns a transformer that deletes attribute `attr`, asserting that its
// value equals `value` whenever the attribute is present.
inline NodeTransformerFunction RemoveAttribute(Symbol attr, int64_t value) {
  return NODE_TRANSFORMER(node) {
    if (!node->hasAttribute(attr)) {
      return node;
    }
    ONNX_ASSERTM(node->i(attr) == value, "Attribute %s must have value %" PRId64, attr.toString(), value);
    node->removeAttribute(attr);
    return node;
  };
}
// Returns a transformer that deletes attribute `attr`, asserting that its
// value differs from `value` whenever the attribute is present.
inline NodeTransformerFunction RemoveAttributeNotEq(Symbol attr, int64_t value) {
  return NODE_TRANSFORMER(node) {
    if (!node->hasAttribute(attr)) {
      return node;
    }
    ONNX_ASSERTM(node->i(attr) != value, "Attribute %s must not have value %" PRId64, attr.toString(), value);
    node->removeAttribute(attr);
    return node;
  };
}
// Returns a transformer that unconditionally sets integer attribute `attr`
// to `value` on the node.
inline NodeTransformerFunction SetAttribute(Symbol attr, int64_t value) {
  return NODE_TRANSFORMER(node) {
    node->i_(attr, value);
    return node;
  };
}
// Returns a transformer that unconditionally sets string attribute `attr`
// to `value` on the node (the string is captured by copy into the lambda).
inline NodeTransformerFunction SetAttribute(Symbol attr, const std::string& value) {
  return NODE_TRANSFORMER(node) {
    node->s_(attr, value);
    return node;
  };
}
// Returns a transformer that unconditionally sets int-list attribute `attr`
// to `value`. The captured vector is const inside the non-mutable lambda,
// so a fresh copy is made on every application before it is moved in.
inline NodeTransformerFunction SetAttribute(Symbol attr, std::vector<int64_t> value) {
  return NODE_TRANSFORMER(node) {
    node->is_(attr, std::vector<int64_t>(value));
    return node;
  };
}
// Returns a transformer that sets integer attribute `attr` to `value` only
// when the node does not already define it (i.e. supplies a default).
inline NodeTransformerFunction SetAttributeIfAbsent(Symbol attr, int64_t value) {
  return NODE_TRANSFORMER(node) {
    if (!node->hasAttribute(attr)) {
      node->i_(attr, value);
    }
    return node;
  };
}
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,61 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Add in default domain from version 6 to 5
#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
class TypeRestriction : public Adapter {
public:
explicit TypeRestriction(
const std::string& op_name,
const OpSetID& initial,
const OpSetID& target,
const std::vector<TensorProto_DataType>& unallowed_types)
: Adapter(op_name, initial, target), unallowed_types_(unallowed_types) {}
void adapt_type_restriction(std::shared_ptr<Graph>, Node* node) const {
// Since consumed_inputs is optional, no need to add it (as in batchnorm)
// Iterate over all inputs and outputs
for (Value* input : node->inputs()) {
isUnallowed(input);
}
for (Value* output : node->outputs()) {
isUnallowed(output);
}
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
adapt_type_restriction(graph, node);
return node;
}
private:
std::vector<TensorProto_DataType> unallowed_types_;
void isUnallowed(Value* val) const {
ONNX_ASSERTM(
std::find(std::begin(unallowed_types_), std::end(unallowed_types_), val->elemType()) ==
std::end(unallowed_types_),
"DataType (%d) of Input or Output"
" of operator '%s' is unallowed for Opset Version %d.",
val->elemType(),
name().c_str(),
target_version().version());
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,49 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Upsample in default domain from version 6 to 7
#pragma once
#include <memory>
#include <utility>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
struct Upsample_6_7 final : public Adapter {
explicit Upsample_6_7() : Adapter("Upsample", OpSetID(6), OpSetID(7)) {}
void adapt_upsample_6_7(std::shared_ptr<Graph>, Node* node) const {
Symbol width_scale_symbol = Symbol("width_scale");
Symbol height_scale_symbol = Symbol("height_scale");
ONNX_ASSERTM(
node->hasAttribute(width_scale_symbol) && node->hasAttribute(height_scale_symbol),
"Upsample in opset 1 needs to have width_scale and height_scale attributes");
auto width_scale = node->f(width_scale_symbol);
auto height_scale = node->f(height_scale_symbol);
auto input_shape = node->inputs()[0]->sizes();
ONNX_ASSERTM(input_shape.size() == 4, "Upsample in opset 1 supports only 4D input tensor");
std::vector<double> scales = {1.0, 1.0, height_scale, width_scale};
node->fs_(kscales, std::move(scales));
node->removeAttribute(width_scale_symbol);
node->removeAttribute(height_scale_symbol);
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
adapt_upsample_6_7(graph, node);
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,50 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Upsample in default domain from version 8 to 9
#pragma once
#include <memory>
#include <vector>
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
struct Upsample_8_9 final : public Adapter {
explicit Upsample_8_9() : Adapter("Upsample", OpSetID(8), OpSetID(9)) {}
void adapt_upsample_8_9(std::shared_ptr<Graph> graph, Node* node) const {
Symbol input_dirs = Symbol("scales");
int dim = (int)(node->fs(kscales).size());
Tensor t;
t.elem_type() = TensorProto_DataType_FLOAT;
t.sizes() = std::vector<int64_t>{dim};
auto& data = t.floats();
if (node->hasAttribute(input_dirs)) {
for (double scale : node->fs(kscales)) {
data.emplace_back((float)scale);
}
Node* constant = graph->create(kConstant);
constant->insertBefore(node);
constant->t_(kvalue, t);
node->addInput(constant->output());
node->removeAttribute(kscales);
}
}
Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
adapt_upsample_8_9(graph, node);
return node;
}
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,42 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Upsample in default domain from version 9 to 10
#pragma once
#include <memory>
#include <string>
namespace ONNX_NAMESPACE {
namespace version_conversion {
// Adapter for Upsample in default domain from version 9 to 10.
// Upsample is deprecated in opset 10; rebuild the node as a Resize.
class Upsample_9_10 final : public Adapter {
 public:
  explicit Upsample_9_10() : Adapter("Upsample", OpSetID(9), OpSetID(10)) {}

  // Returns the freshly created replacement node; the original is destroyed.
  Node* adapt_upsample_9_10(std::shared_ptr<Graph> graph, Node* node) const {
    // Resize takes the same (X, scales) inputs and the same mode
    // (defaulting to "nearest" when the attribute is absent).
    Node* resize = graph->create(kResize);
    resize->s_(kmode, node->hasAttribute(kmode) ? node->s(kmode) : "nearest");
    resize->addInput(node->inputs()[0]);
    resize->addInput(node->inputs()[1]);
    node->replaceAllUsesWith(resize);
    resize->insertBefore(node);
    node->destroy();
    return resize;
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    return adapt_upsample_9_10(graph, node);
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE

View File

@ -0,0 +1,79 @@
// Copyright (c) ONNX Project Contributors
/*
* SPDX-License-Identifier: Apache-2.0
*/
// Adapter for Upsample in default domain from version 9 to 8
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "onnx/defs/tensor_proto_util.h"
#include "onnx/defs/tensor_util.h"
#include "onnx/version_converter/adapters/adapter.h"
namespace ONNX_NAMESPACE {
namespace version_conversion {
struct Upsample_9_8 final : public Adapter {
  // Adapts Upsample from opset 9 (scales as a tensor input) back to opset 8
  // (scales as a float attribute). The scales input must be statically
  // known, i.e. come from a graph initializer or from a Constant node;
  // otherwise the conversion fails with an assertion.
  explicit Upsample_9_8() : Adapter("Upsample", OpSetID(9), OpSetID(8)) {}

  // Widens FLOAT tensor data to the double vector expected by fs_().
  static std::vector<double> to_doubles(const std::vector<float>& values) {
    return std::vector<double>(values.begin(), values.end());
  }

  void adapt_upsample_9_8(std::shared_ptr<Graph> graph, Node* node) const {
    const ArrayRef<Value*>& inputs = node->inputs();
    ONNX_ASSERTM(inputs.size() == 2, "Upsample in opset 9 needs to have 2 inputs.");
    // Capture the name up front: the input list shrinks once removeInput(1)
    // runs below.
    const std::string scale_input_name = inputs[1]->uniqueName();

    // Case 1: the scales input is a graph initializer.
    const std::vector<Tensor>& initializers = graph->initializers();
    for (size_t i = 0; i < initializers.size(); i++) {
      if (initializers[i].name() != scale_input_name)
        continue;
      node->fs_(kscales, to_doubles(ParseData<float>(&initializers[i])));
      node->removeInput(1);
      // Use the captured copy, not initializers[i].name(): the argument must
      // not reference into the vector eraseInitializer is about to mutate.
      graph->eraseInitializer(scale_input_name);
      // Initializers may also be listed as graph inputs; drop the matching
      // input so the graph stays consistent.
      for (size_t j = 0; j < graph->inputs().size(); j++) {
        if (graph->inputs()[j]->uniqueName() == scale_input_name) {
          graph->eraseInput(j);
          break;
        }
      }
      return;
    }

    // Case 2: the scales input is produced by a Constant node.
    for (Node* op : graph->nodes()) {
      if (op->kind() == kConstant && op->outputs()[0]->uniqueName() == scale_input_name) {
        node->fs_(kscales, to_doubles(ParseData<float>(&op->t(kvalue))));
        node->removeInput(1);
        // Safe: we return immediately, so the node iteration is not resumed
        // after the destroy.
        op->destroy();
        return;
      }
    }
    ONNX_ASSERTM(false, "Unsupported conversion due to unavailable input: scale");
  }

  Node* adapt(std::shared_ptr<Graph> graph, Node* node) const override {
    adapt_upsample_9_8(graph, node);
    return node;
  }
};
} // namespace version_conversion
} // namespace ONNX_NAMESPACE