# flake8: noqa: F401
r"""Quantized Modules.

This file is in the process of migration to `torch/ao/nn/quantized`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/quantized/modules`, while adding an
import statement here.
"""

from torch.ao.nn.quantized.modules.normalization import (
    GroupNorm,
    InstanceNorm1d,
    InstanceNorm2d,
    InstanceNorm3d,
    LayerNorm,
)


__all__ = [
    "LayerNorm",
    "GroupNorm",
    "InstanceNorm1d",
    "InstanceNorm2d",
    "InstanceNorm3d",
]
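
# Usage note (a brief sketch, not an addition to the public API): because this
# module only re-imports classes from ``torch.ao.nn.quantized.modules``, the
# legacy import path resolves to the exact same class objects as the new
# ``torch.ao`` path. The legacy module path shown below assumes this file keeps
# its current location under ``torch/nn/quantized/modules``; adjust it if the
# file lives elsewhere.
#
#     from torch.ao.nn.quantized.modules.normalization import LayerNorm as AoLayerNorm
#     from torch.nn.quantized.modules.normalization import LayerNorm
#
#     # Both names refer to the same class, so isinstance checks and
#     # attribute access behave identically regardless of import path.
#     assert LayerNorm is AoLayerNorm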