I am done

2024-10-30 22:14:35 +01:00
parent 720dc28c09
commit 40e2a747cf
36901 changed files with 5011519 additions and 0 deletions

@@ -0,0 +1,97 @@
r"""Quantized Modules.
Note::
The `torch.nn.quantized` namespace is in the process of being deprecated.
Please, use `torch.ao.nn.quantized` instead.
"""
# The following imports are needed in case the user decides
# to import the submodule files directly,
# e.g. `from torch.nn.quantized.modules.conv import ...`.
# There is no need to add them to `__all__`.
from torch.ao.nn.quantized.modules import (
activation,
batchnorm,
conv,
DeQuantize,
dropout,
embedding_ops,
functional_modules,
linear,
MaxPool2d,
normalization,
Quantize,
rnn,
utils,
)
from torch.ao.nn.quantized.modules.activation import (
ELU,
Hardswish,
LeakyReLU,
MultiheadAttention,
PReLU,
ReLU6,
Sigmoid,
Softmax,
)
from torch.ao.nn.quantized.modules.batchnorm import BatchNorm2d, BatchNorm3d
from torch.ao.nn.quantized.modules.conv import (
Conv1d,
Conv2d,
Conv3d,
ConvTranspose1d,
ConvTranspose2d,
ConvTranspose3d,
)
from torch.ao.nn.quantized.modules.dropout import Dropout
from torch.ao.nn.quantized.modules.embedding_ops import Embedding, EmbeddingBag
from torch.ao.nn.quantized.modules.functional_modules import (
FloatFunctional,
FXFloatFunctional,
QFunctional,
)
from torch.ao.nn.quantized.modules.linear import Linear
from torch.ao.nn.quantized.modules.normalization import (
GroupNorm,
InstanceNorm1d,
InstanceNorm2d,
InstanceNorm3d,
LayerNorm,
)
from torch.ao.nn.quantized.modules.rnn import LSTM
__all__ = [
"BatchNorm2d",
"BatchNorm3d",
"Conv1d",
"Conv2d",
"Conv3d",
"ConvTranspose1d",
"ConvTranspose2d",
"ConvTranspose3d",
"DeQuantize",
"ELU",
"Embedding",
"EmbeddingBag",
"GroupNorm",
"Hardswish",
"InstanceNorm1d",
"InstanceNorm2d",
"InstanceNorm3d",
"LayerNorm",
"LeakyReLU",
"Linear",
"LSTM",
"MultiheadAttention",
"Quantize",
"ReLU6",
"Sigmoid",
"Softmax",
"Dropout",
"PReLU",
# Wrapper modules
"FloatFunctional",
"FXFloatFunctional",
"QFunctional",
]
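
Because every public name above is re-exported from `torch.ao.nn.quantized.modules`, the deprecated path and the new path resolve to the very same objects. A minimal sketch of what the shim guarantees (assuming a PyTorch build that still ships the deprecated namespace):

import torch.ao.nn.quantized.modules as ao_modules
import torch.nn.quantized.modules as legacy_modules  # deprecated alias

# The shim only re-exports, so both paths yield the identical class object.
assert legacy_modules.Linear is ao_modules.Linear
assert legacy_modules.Conv2d is ao_modules.Conv2d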

@@ -0,0 +1,20 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.activation import (
ELU,
Hardswish,
LeakyReLU,
MultiheadAttention,
PReLU,
ReLU6,
Sigmoid,
Softmax,
)
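
The workflow described in the docstring above applies to every shim file in this commit: the implementation lives under `torch/ao/nn/quantized/modules`, and the legacy file only gains a re-export. A hypothetical sketch of such an addition (`MyNewActivation` is an invented name, used purely for illustration):

# Step 1: implement the module in torch/ao/nn/quantized/modules/activation.py.
# Step 2: re-export it from this legacy shim by appending to the import above:
from torch.ao.nn.quantized.modules.activation import MyNewActivation  # hypothetical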

@@ -0,0 +1,11 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.batchnorm import BatchNorm2d, BatchNorm3d

@@ -0,0 +1,29 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.conv import (
_reverse_repeat_padding,
Conv1d,
Conv2d,
Conv3d,
ConvTranspose1d,
ConvTranspose2d,
ConvTranspose3d,
)
__all__ = [
"Conv1d",
"Conv2d",
"Conv3d",
"ConvTranspose1d",
"ConvTranspose2d",
"ConvTranspose3d",
]

@@ -0,0 +1,14 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.dropout import Dropout
__all__ = ["Dropout"]

@@ -0,0 +1,18 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.embedding_ops import (
Embedding,
EmbeddingBag,
EmbeddingPackedParams,
)
__all__ = ["EmbeddingPackedParams", "Embedding", "EmbeddingBag"]

@@ -0,0 +1,18 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.functional_modules import (
FloatFunctional,
FXFloatFunctional,
QFunctional,
)
__all__ = ["FloatFunctional", "FXFloatFunctional", "QFunctional"]

@@ -0,0 +1,14 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.linear import Linear, LinearPackedParams
__all__ = ["LinearPackedParams", "Linear"]

@@ -0,0 +1,26 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.normalization import (
GroupNorm,
InstanceNorm1d,
InstanceNorm2d,
InstanceNorm3d,
LayerNorm,
)
__all__ = [
"LayerNorm",
"GroupNorm",
"InstanceNorm1d",
"InstanceNorm2d",
"InstanceNorm3d",
]

@@ -0,0 +1,11 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.rnn import LSTM

@@ -0,0 +1,17 @@
# flake8: noqa: F401
r"""Quantized Modules.
This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/quantized/modules`,
while adding an import statement here.
"""
from torch.ao.nn.quantized.modules.utils import (
_hide_packed_params_repr,
_ntuple_from_first,
_pair_from_first,
_quantize_weight,
WeightedQuantizedModule,
)