I am done

commit 40e2a747cf
parent 720dc28c09
Date: 2024-10-30 22:14:35 +01:00
36901 changed files with 5011519 additions and 0 deletions

@@ -0,0 +1,36 @@
from torch.ao.nn.intrinsic import (
BNReLU2d,
BNReLU3d,
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
LinearBn1d,
LinearReLU,
)
from torch.ao.nn.intrinsic.modules.fused import _FusedModule # noqa: F401
# Include the subpackages in case a user imports from them directly
from torch.nn.intrinsic import modules, qat, quantized # noqa: F401
__all__ = [
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"LinearReLU",
"BNReLU2d",
"BNReLU3d",
"LinearBn1d",
]
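This first shim is purely an alias layer: every public name is re-imported from torch.ao.nn.intrinsic, so the legacy path and the ao path hand back the same class objects, and the subpackage import keeps torch.nn.intrinsic.modules, .qat, and .quantized importable. A minimal sketch of that guarantee (assuming a torch build that still ships these compatibility shims):

# Both namespaces expose the same class objects (aliases, not copies), so
# isinstance() and type() checks agree regardless of which path a caller uses.
import torch.nn.intrinsic as legacy
import torch.ao.nn.intrinsic as ao

assert legacy.ConvBnReLU2d is ao.ConvBnReLU2d
assert legacy.LinearReLU is ao.LinearReLU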

@@ -0,0 +1,33 @@
from torch.nn.intrinsic.modules.fused import (
_FusedModule,
BNReLU2d,
BNReLU3d,
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
LinearBn1d,
LinearReLU,
)
__all__ = [
"BNReLU2d",
"BNReLU3d",
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"LinearBn1d",
"LinearReLU",
]

@@ -0,0 +1,33 @@
from torch.ao.nn.intrinsic import (
BNReLU2d,
BNReLU3d,
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
LinearBn1d,
LinearReLU,
)
from torch.ao.nn.intrinsic.modules.fused import _FusedModule # noqa: F401
__all__ = [
"BNReLU2d",
"BNReLU3d",
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"LinearBn1d",
"LinearReLU",
]

@@ -0,0 +1 @@
from torch.nn.intrinsic.qat.modules import * # noqa: F403

@@ -0,0 +1,32 @@
from torch.nn.intrinsic.qat.modules.conv_fused import (
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
freeze_bn_stats,
update_bn_stats,
)
from torch.nn.intrinsic.qat.modules.linear_fused import LinearBn1d
from torch.nn.intrinsic.qat.modules.linear_relu import LinearReLU
__all__ = [
"LinearReLU",
"LinearBn1d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"ConvBn1d",
"ConvBn2d",
"ConvBn3d",
"ConvBnReLU1d",
"ConvBnReLU2d",
"ConvBnReLU3d",
"update_bn_stats",
"freeze_bn_stats",
]
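update_bn_stats and freeze_bn_stats are helpers that toggle BatchNorm statistics tracking on the fused QAT modules; a common pattern late in QAT training is to freeze running stats across the whole model with Module.apply. A hedged usage sketch (freeze_all_bn_stats is an illustrative helper, not part of this diff):

from torch import nn
import torch.ao.nn.intrinsic.qat as nniqat

def freeze_all_bn_stats(model: nn.Module) -> None:
    # Module.apply() visits every submodule; freeze_bn_stats is a no-op on
    # anything that is not a fused Conv+BN(+ReLU) QAT module.
    model.apply(nniqat.freeze_bn_stats)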

@@ -0,0 +1,40 @@
# flake8: noqa: F401
r"""Intrinsic QAT Modules.
This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""
from torch.ao.nn.intrinsic.qat import (
ConvBn1d,
ConvBn2d,
ConvBn3d,
ConvBnReLU1d,
ConvBnReLU2d,
ConvBnReLU3d,
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
freeze_bn_stats,
update_bn_stats,
)
__all__ = [
# Modules
"ConvBn1d",
"ConvBnReLU1d",
"ConvReLU1d",
"ConvBn2d",
"ConvBnReLU2d",
"ConvReLU2d",
"ConvBn3d",
"ConvBnReLU3d",
"ConvReLU3d",
# Utilities
"freeze_bn_stats",
"update_bn_stats",
]
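For context on what these fused modules are for: in eager-mode QAT, a Conv2d + BatchNorm2d + ReLU chain is first fused into a single float intrinsic module, which prepare_qat then swaps for the qat ConvBnReLU2d re-exported above. A hedged sketch of the fusion step (assumes the eager-mode torch.ao.quantization workflow; module-swap details vary by torch version):

from torch import nn
from torch.ao.quantization import fuse_modules_qat
import torch.ao.nn.intrinsic as nni

m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU()).train()
fused = fuse_modules_qat(m, [["0", "1", "2"]])

# The three modules collapse into one float intrinsic module; prepare_qat
# would later replace it with the qat variant from this file.
assert isinstance(fused[0], nni.ConvBnReLU2d)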

@@ -0,0 +1,16 @@
# flake8: noqa: F401
r"""Intrinsic QAT Modules.
This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""
from torch.ao.nn.intrinsic.qat import LinearBn1d
__all__ = [
"LinearBn1d",
]

@@ -0,0 +1,16 @@
# flake8: noqa: F401
r"""Intrinsic QAT Modules.
This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""
from torch.ao.nn.intrinsic.qat import LinearReLU
__all__ = [
"LinearReLU",
]

@@ -0,0 +1,14 @@
# Import the subpackages so users can reach the modules below
# without importing them directly
from torch.nn.intrinsic.quantized import dynamic, modules # noqa: F401
from torch.nn.intrinsic.quantized.modules import * # noqa: F403
__all__ = [
"BNReLU2d",
"BNReLU3d",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"LinearReLU",
]
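The comment above is the point of the explicit dynamic/modules import: binding the subpackages at import time means one parent import is enough to reach the nested namespaces. A small sketch (assuming the shim tree from this diff is installed):

import torch.nn.intrinsic.quantized as nniq

# .dynamic was bound by the subpackage import above, so no separate
# "import torch.nn.intrinsic.quantized.dynamic" is needed.
print(nniq.dynamic.LinearReLU)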

@@ -0,0 +1 @@
from torch.nn.intrinsic.quantized.dynamic.modules import * # noqa: F403

@@ -0,0 +1,6 @@
from torch.nn.intrinsic.quantized.dynamic.modules.linear_relu import LinearReLU
__all__ = [
"LinearReLU",
]

@@ -0,0 +1,6 @@
from torch.ao.nn.intrinsic.quantized.dynamic import LinearReLU
__all__ = [
"LinearReLU",
]

@@ -0,0 +1,17 @@
from torch.nn.intrinsic.quantized.modules.bn_relu import BNReLU2d, BNReLU3d
from torch.nn.intrinsic.quantized.modules.conv_relu import (
ConvReLU1d,
ConvReLU2d,
ConvReLU3d,
)
from torch.nn.intrinsic.quantized.modules.linear_relu import LinearReLU
__all__ = [
"LinearReLU",
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
"BNReLU2d",
"BNReLU3d",
]

@@ -0,0 +1,7 @@
from torch.ao.nn.intrinsic.quantized import BNReLU2d, BNReLU3d
__all__ = [
"BNReLU2d",
"BNReLU3d",
]

@@ -0,0 +1,8 @@
from torch.ao.nn.intrinsic.quantized import ConvReLU1d, ConvReLU2d, ConvReLU3d
__all__ = [
"ConvReLU1d",
"ConvReLU2d",
"ConvReLU3d",
]

@@ -0,0 +1,6 @@
from torch.ao.nn.intrinsic.quantized import LinearReLU
__all__ = [
"LinearReLU",
]
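Finally, a hedged end-to-end sketch of where the quantized LinearReLU re-exported here comes from in the eager static-quantization flow; the exact module swaps depend on torch version and qconfig, so treat this as illustrative rather than definitive:

from torch import nn
from torch.ao.quantization import fuse_modules
import torch.ao.nn.intrinsic as nni

m = nn.Sequential(nn.Linear(4, 4), nn.ReLU()).eval()
fused = fuse_modules(m, [["0", "1"]])

# Linear + ReLU collapse into the float intrinsic LinearReLU; after
# prepare(), calibration, and convert(), that module becomes the quantized
# LinearReLU this file re-exports.
assert isinstance(fused[0], nni.LinearReLU)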