# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""

# Re-export the migrated FX quantization entry points so existing callers
# of this legacy module keep working.
from torch.ao.quantization.fx.convert import convert
from torch.ao.quantization.fx.fuse import fuse

# omitting files that are unlikely to be used right now, for example
# the newly added lower_to_fbgemm etc.
from torch.ao.quantization.fx.prepare import prepare