Searched refs:PerChannelMinMaxObserver (Results 1 – 24 of 24) sorted by relevance

/external/executorch/backends/mediatek/quantizer/
qconfig.py:14 from torch.ao.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
110 observer=PerChannelMinMaxObserver, eps=1e-6
113 observer_or_fake_quant = PerChannelMinMaxObserver.with_args(eps=1e-6)
/external/pytorch/torch/ao/quantization/quantizer/
embedding_quantizer.py:9 from torch.ao.quantization.observer import PerChannelMinMaxObserver
33 observer_or_fake_quant_ctr=PerChannelMinMaxObserver.with_args(eps=2**-12),
xnnpack_quantizer.py:20 PerChannelMinMaxObserver,
149 weight_observer_or_fake_quant_ctr = PerChannelMinMaxObserver
x86_inductor_quantizer.py:32 PerChannelMinMaxObserver,
357 FusedMovingAvgObsFakeQuantize if is_qat else PerChannelMinMaxObserver
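
The quantizer hits above all pass PerChannelMinMaxObserver (usually through .with_args) as the observer constructor of a per-channel weight QuantizationSpec. A minimal sketch of that pattern, assuming the torch.ao.quantization.quantizer.QuantizationSpec API; the quant_min/quant_max values are illustrative, and the eps value mirrors the embedding_quantizer hit:

    import torch
    from torch.ao.quantization.observer import PerChannelMinMaxObserver
    from torch.ao.quantization.quantizer import QuantizationSpec

    # Per-channel symmetric int8 weight spec; the observer class is supplied
    # as a constructor via .with_args rather than as an instance.
    weight_spec = QuantizationSpec(
        dtype=torch.int8,
        quant_min=-127,          # illustrative range
        quant_max=127,
        qscheme=torch.per_channel_symmetric,
        ch_axis=0,               # quantize along the output-channel axis
        is_dynamic=False,
        observer_or_fake_quant_ctr=PerChannelMinMaxObserver.with_args(eps=2**-12),
    )
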
/external/executorch/backends/vulkan/quantizer/
vulkan_quantizer.py:15 from torch.ao.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
44 PerChannelMinMaxObserver if is_per_channel else MinMaxObserver
/external/pytorch/torch/quantization/
observer.py:33 PerChannelMinMaxObserver,
/external/executorch/backends/arm/quantizer/
arm_quantizer.py:44 PerChannelMinMaxObserver,
144 weight_observer_or_fake_quant_ctr = PerChannelMinMaxObserver
/external/pytorch/benchmarks/operator_benchmark/pt/
qobserver_test.py:89 ["PerChannelMinMaxObserver", obs.PerChannelMinMaxObserver],
/external/pytorch/test/quantization/core/
test_workflow_module.py:7 PerChannelMinMaxObserver,
162 ObserverList = [PerChannelMinMaxObserver(reduce_range=reduce_range,
254 …loaded_obs = PerChannelMinMaxObserver(reduce_range=reduce_range, ch_axis=ch_axis, dtype=qdtype, qs…
292 PerChannelMinMaxObserver,
420 PerChannelMinMaxObserver(),
434 PerChannelMinMaxObserver,
467 observer_list = [PerChannelMinMaxObserver, MovingAveragePerChannelMinMaxObserver]
906 torch.ao.quantization.PerChannelMinMaxObserver.with_args(dtype=torch.qint8),
test_workflow_ops.py:708 obs = torch.ao.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)
868 obs = torch.ao.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)
test_quantized_module.py:15 PerChannelMinMaxObserver,
1286 …obs = PerChannelMinMaxObserver(dtype=qdtype, qscheme=torch.per_channel_affine_float_qparams, ch_ax…
test_quantized_op.py:34 from torch.ao.quantization import PerChannelMinMaxObserver
4530 …obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams,…
4677 …obs = PerChannelMinMaxObserver(dtype=qdtype, qscheme=torch.per_channel_affine_float_qparams, ch_ax…
4783 …obs = PerChannelMinMaxObserver(dtype=dtype, qscheme=torch.per_channel_affine_float_qparams, ch_axi…
4818 …obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams,…
4850 …obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams,…
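
Several of the test hits above build the observer with qscheme=torch.per_channel_affine_float_qparams, the scheme used for quantized embedding weights. A minimal sketch of that usage, assuming a random weight tensor as input:

    import torch
    from torch.ao.quantization.observer import PerChannelMinMaxObserver

    # Float-qparams per-channel observer, as in the embedding tests above;
    # this qscheme produces floating-point zero points rather than integers.
    obs = PerChannelMinMaxObserver(
        ch_axis=0,
        dtype=torch.quint8,
        qscheme=torch.per_channel_affine_float_qparams,
    )
    weights = torch.randn(10, 16)   # e.g. an embedding table, rows = channels
    obs(weights)                    # record per-row min/max
    scales, zero_points = obs.calculate_qparams()
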
/external/pytorch/torch/ao/quantization/
observer.py:658 class PerChannelMinMaxObserver(UniformQuantizationObserverBase):
866 class MovingAveragePerChannelMinMaxObserver(PerChannelMinMaxObserver):
1702 default_per_channel_weight_observer = PerChannelMinMaxObserver.with_args(
1710 per_channel_weight_observer_range_neg_127_to_127 = PerChannelMinMaxObserver.with_args(
1731 default_float_qparams_observer = PerChannelMinMaxObserver.with_args(
1738 default_float_qparams_observer_4bit = PerChannelMinMaxObserver.with_args(
qconfig.py:556 torch.ao.quantization.PerChannelMinMaxObserver,
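
observer.py defines the class itself plus ready-made constructors such as default_per_channel_weight_observer, and qconfig.py wires them into qconfigs. A minimal sketch of the eager-mode pairing, assuming the standard QConfig namedtuple from torch.ao.quantization:

    import torch
    from torch.ao.quantization import QConfig, MinMaxObserver
    from torch.ao.quantization.observer import default_per_channel_weight_observer

    # Per-tensor activation observer paired with the per-channel weight
    # observer constructor defined in observer.py.
    qconfig = QConfig(
        activation=MinMaxObserver.with_args(dtype=torch.quint8),
        weight=default_per_channel_weight_observer,
    )
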
/external/pytorch/torch/ao/quantization/fx/
_equalize.py:15 PerChannelMinMaxObserver,
80 self.input_obs = PerChannelMinMaxObserver(
181 self.weight_col_obs = PerChannelMinMaxObserver(
/external/pytorch/torch/distributed/algorithms/ddp_comm_hooks/
quantization_hooks.py:165 myPerChannelObserver = torch.ao.quantization.PerChannelMinMaxObserver().cuda(
/external/executorch/backends/qualcomm/quantizer/
qconfig.py:14 PerChannelMinMaxObserver,
258 observer_or_fake_quant_ctr=PerChannelMinMaxObserver.with_args(**extra_args),
README.md:94 observer_or_fake_quant_ctr=PerChannelMinMaxObserver.with_args(**extra_args),
108 …etter converage of IO activation and apply rules to `weight` w/`PerChannelMinMaxObserver`, `bias` …
/external/pytorch/test/quantization/bc/
test_backward_compatibility.py:15 from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver
539 obs = PerChannelMinMaxObserver()
/external/pytorch/test/quantization/fx/
test_equalize_fx.py:9 from torch.ao.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
180 …weight_quant_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=weight_qdtype, qscheme=weight_qscheme)
test_quantize_fx.py:68 PerChannelMinMaxObserver,
6800 ns.call_module(torch.ao.quantization.PerChannelMinMaxObserver): 1,
7058 ns.call_module(torch.ao.quantization.PerChannelMinMaxObserver): 1,
8687 float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,
/external/pytorch/docs/source/
quantization-support.rst:237 PerChannelMinMaxObserver
/external/pytorch/test/quantization/eager/
test_quantize_eager_ptq.py:23 PerChannelMinMaxObserver,
823 float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,
/external/pytorch/torch/testing/_internal/
common_quantization.py:40 get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, quantize, \
1161 float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,