I am done

2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

@@ -0,0 +1,15 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.convert import convert
from torch.ao.quantization.fx.fuse import fuse
# Omitting files that are unlikely to be used right now, for example
# the newly added lower_to_fbgemm, etc.
from torch.ao.quantization.fx.prepare import prepare
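
A minimal sketch of what this shim guarantees for downstream code. The file path is not shown in this diff, so the legacy location below is an assumption based on the docstring; the point is that the legacy name and the canonical `torch.ao` name bind the same object, so existing callers keep working during the migration.

# Hypothetical usage, assuming this shim is torch/quantization/fx/__init__.py.
from torch.ao.quantization.fx.prepare import prepare as canonical_prepare
from torch.quantization.fx import prepare as legacy_prepare

# The shim only re-exports, so both names refer to the same function object.
assert legacy_prepare is canonical_prepare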

@@ -0,0 +1,38 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx._equalize import (
_convert_equalization_ref,
_InputEqualizationObserver,
_WeightEqualizationObserver,
calculate_equalization_scale,
clear_weight_quant_obs_node,
convert_eq_obs,
CUSTOM_MODULE_SUPP_LIST,
custom_module_supports_equalization,
default_equalization_qconfig,
EqualizationQConfig,
fused_module_supports_equalization,
get_equalization_qconfig_dict,
get_layer_sqnr_dict,
get_op_node_and_weight_eq_obs,
input_equalization_observer,
is_equalization_observer,
maybe_get_next_equalization_scale,
maybe_get_next_input_eq_obs,
maybe_get_weight_eq_obs_node,
nn_module_supports_equalization,
node_supports_equalization,
remove_node,
reshape_scale,
scale_input_observer,
scale_weight_functional,
scale_weight_node,
update_obs_for_equalization,
weight_equalization_observer,
)
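
These helpers implement input-weight equalization. A hedged sketch (not the PyTorch implementation) of the core idea behind `calculate_equalization_scale`: for each channel, choose a scale that balances the dynamic range of the activation against that of the weight, so both quantize with less error.

import torch

def equalization_scale_sketch(input_range: torch.Tensor,
                              weight_range: torch.Tensor) -> torch.Tensor:
    # scale_c = sqrt(weight_range_c / input_range_c) per channel c
    return torch.sqrt(weight_range / input_range)

print(equalization_scale_sketch(torch.tensor([2.0, 8.0]),
                                torch.tensor([8.0, 2.0])))
# tensor([2.0000, 0.5000])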

@@ -0,0 +1,9 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.convert import convert

@@ -0,0 +1,9 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.fuse import fuse

@@ -0,0 +1,9 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.fuse_handler import DefaultFuseHandler, FuseHandler

@@ -0,0 +1,17 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.graph_module import (
_is_observed_module,
_is_observed_standalone_module,
FusedGraphModule,
GraphModule,
ObservedGraphModule,
ObservedStandaloneGraphModule,
QuantizedGraphModule,
)

@@ -0,0 +1,14 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.match_utils import (
_find_matches,
_is_match,
_MatchResult,
MatchAllNode,
)

@@ -0,0 +1,35 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.pattern_utils import (
_register_fusion_pattern,
_register_quant_pattern,
get_default_fusion_patterns,
get_default_output_activation_post_process_map,
get_default_quant_patterns,
QuantizeHandler,
)
# QuantizeHandler.__module__ = _NAMESPACE
_register_fusion_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils"
get_default_fusion_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils"
_register_quant_pattern.__module__ = "torch.ao.quantization.fx.pattern_utils"
get_default_quant_patterns.__module__ = "torch.ao.quantization.fx.pattern_utils"
get_default_output_activation_post_process_map.__module__ = (
"torch.ao.quantization.fx.pattern_utils"
)
# __all__ = [
# "QuantizeHandler",
# "_register_fusion_pattern",
# "get_default_fusion_patterns",
# "_register_quant_pattern",
# "get_default_quant_patterns",
# "get_default_output_activation_post_process_map",
# ]
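
A small, self-contained sketch (not PyTorch code) of why the `__module__` reassignments above matter: introspection, documentation tools, and pickling resolve a callable's origin through `__module__`, so pointing it at the canonical namespace makes the re-exported objects present themselves as living there rather than in this shim.

def helper():
    """Stand-in for a function re-exported by this shim."""

print(helper.__module__)  # "__main__" when run as a script
helper.__module__ = "torch.ao.quantization.fx.pattern_utils"
print(helper.__module__)  # now reports the canonical namespace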

@@ -0,0 +1,9 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.prepare import prepare

@@ -0,0 +1,48 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.quantize_handler import (
BatchNormQuantizeHandler,
BinaryOpQuantizeHandler,
CatQuantizeHandler,
ConvReluQuantizeHandler,
CopyNodeQuantizeHandler,
CustomModuleQuantizeHandler,
DefaultNodeQuantizeHandler,
EmbeddingQuantizeHandler,
FixedQParamsOpQuantizeHandler,
GeneralTensorShapeOpQuantizeHandler,
LinearReLUQuantizeHandler,
QuantizeHandler,
RNNDynamicQuantizeHandler,
StandaloneModuleQuantizeHandler,
)
QuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
BinaryOpQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
CatQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
ConvReluQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
LinearReLUQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
BatchNormQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
EmbeddingQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
RNNDynamicQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
DefaultNodeQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
FixedQParamsOpQuantizeHandler.__module__ = (
"torch.ao.quantization.fx.quantization_patterns"
)
CopyNodeQuantizeHandler.__module__ = "torch.ao.quantization.fx.quantization_patterns"
CustomModuleQuantizeHandler.__module__ = (
"torch.ao.quantization.fx.quantization_patterns"
)
GeneralTensorShapeOpQuantizeHandler.__module__ = (
"torch.ao.quantization.fx.quantization_patterns"
)
StandaloneModuleQuantizeHandler.__module__ = (
"torch.ao.quantization.fx.quantization_patterns"
)

@@ -0,0 +1,9 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.utils import Pattern, QuantizerCls

@@ -0,0 +1,20 @@
# flake8: noqa: F401
r"""
This file is in the process of being migrated to `torch/ao/quantization`, and
is kept here for compatibility while the migration is ongoing.
If you are adding a new entry or functionality, please add it to the
appropriate file under `torch/ao/quantization/fx/`, and add an import
statement here.
"""
from torch.ao.quantization.fx.utils import (
all_node_args_have_no_tensors,
assert_and_get_unique_device,
create_getattr_from_value,
get_custom_module_class_keys,
get_linear_prepack_op_for_dtype,
get_new_attr_name_with_prefix,
get_non_observable_arg_indexes_and_types,
get_qconv_prepack_op,
graph_module_from_producer_nodes,
maybe_get_next_module,
)
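
As one example of these utilities, a hypothetical sketch of the naming pattern suggested by `get_new_attr_name_with_prefix` (its signature is assumed, not taken from this diff): given a prefix, produce a helper that returns an attribute name not yet used on a module, so new submodules such as observers can be attached without clobbering existing state.

import torch.nn as nn

def new_attr_name_sketch(prefix: str):
    # Hypothetical re-implementation for illustration only.
    def get_name(module: nn.Module) -> str:
        i = 0
        while hasattr(module, f"{prefix}{i}"):
            i += 1
        return f"{prefix}{i}"
    return get_name

m = nn.Module()
namer = new_attr_name_sketch("activation_post_process_")
print(namer(m))  # "activation_post_process_0"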