
Searched refs:input_qspec_map (Results 1 – 25 of 30) sorted by relevance


/external/executorch/backends/qualcomm/quantizer/
custom_annotation.py
31 input_qspec_map = {}
34 input_qspec_map[input_act] = input_spec
38 input_qspec_map[input_act1] = input_spec1
41 input_qspec_map=input_qspec_map,
50 input_qspec_map = {}
51 input_qspec_map[first_input_node] = quantization_config.input_activation
57 if input_node not in input_qspec_map:
58 input_qspec_map[input_node] = share_qparams_with_input_act0_qspec
61 input_qspec_map=input_qspec_map,
70 input_qspec_map = {}
[all …]
annotators.py
105 input_qspec_map={
119 input_qspec_map = {}
122 input_qspec_map[input_act] = quantization_config.input_activation
126 input_qspec_map=input_qspec_map,
149 input_qspec_map = {}
152 input_qspec_map[input_act0] = input_act_qspec
156 input_qspec_map[input_act1] = input_act_qspec
159 input_qspec_map=input_qspec_map,
235 input_qspec_map = {}
238 input_qspec_map[input_act0] = input_act_qspec
[all …]
README.md
129 input_qspec_map = {}
134 input_qspec_map[input_act] = input_spec
138 input_qspec_map[kernel] = quantization_config.weight
143 input_qspec_map[bias] = quantization_config.bias(node)
145 …We first check if current graph node has been annotated. If not, an `input_qspec_map` dictionary r…
151 input_qspec_map=input_qspec_map,
156 …After done processing `input_qspec_map`, it's required to have it in node's meta with special tag …
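Taken together, the README.md hits above describe the usual annotation flow: build an input_qspec_map keyed by the node's input edges (activation, weight, bias), then store it in the node's meta under the quantization_annotation tag. Below is a minimal sketch of that flow, assuming only the standard torch.ao.quantization.quantizer types; annotate_conv and the quantization_config object are illustrative placeholders, not the ExecuTorch code itself.

from torch.ao.quantization.quantizer import QuantizationAnnotation

def annotate_conv(conv_node, quantization_config):
    # Skip nodes that were already annotated by an earlier pass.
    if "quantization_annotation" in conv_node.meta:
        return

    # One entry per input edge: activation, weight, and (optionally) bias.
    input_qspec_map = {}
    input_act = conv_node.args[0]
    input_qspec_map[input_act] = quantization_config.input_activation

    weight = conv_node.args[1]
    input_qspec_map[weight] = quantization_config.weight

    if len(conv_node.args) > 2 and conv_node.args[2] is not None:
        bias = conv_node.args[2]
        input_qspec_map[bias] = quantization_config.bias

    # The pt2e prepare step later reads this tag from node.meta.
    conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
        input_qspec_map=input_qspec_map,
        output_qspec=quantization_config.output_activation,
        _annotated=True,
    )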
/external/pytorch/torch/ao/quantization/quantizer/
xnnpack_quantizer_utils.py
250 input_qspec_map = {}
253 input_qspec_map[input_act] = input_act_qspec
257 input_qspec_map[weight] = weight_qspec
263 input_qspec_map[bias] = bias_qspec
273 input_qspec_map=input_qspec_map,
300 input_qspec_map = {}
303 input_qspec_map[input_act] = get_input_act_qspec(quantization_config)
307 input_qspec_map[weight] = get_weight_qspec(quantization_config)
314 input_qspec_map[bias] = get_bias_qspec(quantization_config)
324 input_qspec_map=input_qspec_map,
[all …]
x86_inductor_quantizer.py
624 input_qspec_map = {}
627 input_qspec_map[input_node] = get_input_act_qspec(quantization_config)
630 input_qspec_map[weight_node] = get_weight_qspec(quantization_config)
633 input_qspec_map[bias_node] = get_bias_qspec(quantization_config)
636 input_qspec_map=input_qspec_map,
642 input_qspec_map=input_qspec_map,
656 input_qspec_map = {}
665 input_qspec_map[input_node] = get_input_act_qspec(quantization_config)
669 input_qspec_map[weight_node] = get_weight_qspec(quantization_config)
673 input_qspec_map[bias_node] = get_bias_qspec(quantization_config)
[all …]
utils.py
13 if quantization_annotation.input_qspec_map is None:
14 quantization_annotation.input_qspec_map = {}
15 quantization_annotation.input_qspec_map[input_node] = qspec
embedding_quantizer.py
88 input_qspec_map={
quantizer.py
120 input_qspec_map: Dict[Node, Optional[QuantizationSpecBase]] = field(  [variable in QuantizationAnnotation]
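The quantizer.py hit above is the field definition on QuantizationAnnotation, and the utils.py hit is the write-side helper that lazily creates the map before adding an entry. A minimal sketch of that helper pattern follows, assuming the standard pt2e quantizer types; the function name _annotate_input_qspec is hypothetical.

from torch.fx import Node
from torch.ao.quantization.quantizer import (
    QuantizationAnnotation,
    QuantizationSpecBase,
)

def _annotate_input_qspec(node: Node, input_node: Node, qspec: QuantizationSpecBase) -> None:
    # Reuse the node's existing annotation or create an empty one.
    annotation = node.meta.setdefault(
        "quantization_annotation", QuantizationAnnotation()
    )
    if annotation.input_qspec_map is None:
        annotation.input_qspec_map = {}
    # Keyed by the producing input node, valued by its quantization spec.
    annotation.input_qspec_map[input_node] = qspec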
/external/executorch/backends/arm/quantizer/quantization_annotation/
conv_annotator.py
35 input_qspec_map = {}
38 input_qspec_map[input_act] = quantization_config.get_input_act_qspec()
42 input_qspec_map[weight] = quantization_config.get_weight_qspec()
49 input_qspec_map[bias] = quantization_config.get_bias_qspec()
59 input_qspec_map=input_qspec_map,
mul_annotator.py
39 input_qspec_map = {}
46 input_qspec_map[input_act0] = input_act_qspec
54 input_qspec_map[input_act1] = input_act_qspec
57 input_qspec_map=input_qspec_map,
mm_annotator.py
42 input_qspec_map = {}
47 input_qspec_map[input_act0] = input_act_qspec
53 input_qspec_map[input_act1] = input_act_qspec
56 input_qspec_map=input_qspec_map,
add_annotator.py
37 input_qspec_map, output_qspec = arm_quantizer_utils.get_shared_qspec(
40 if input_qspec_map is not None:
42 input_qspec_map=input_qspec_map,
sub_annotator.py
34 input_qspec_map, output_qspec = arm_quantizer_utils.get_shared_qspec(
37 if input_qspec_map is not None:
39 input_qspec_map=input_qspec_map,
upsample_nearest2d_annotator.py
66 input_qspec_map={
adaptive_ang_pool2d_annotator.py
66 input_qspec_map={
max_pool2d_annotator.py
63 input_qspec_map={
generic_annotator.py
99 node.meta["quantization_annotation"].input_qspec_map[
/external/executorch/examples/qualcomm/oss_scripts/llama2/
llama.py
70 input_qspec_map = {}
73 input_qspec_map[input_act] = input_spec
77 input_qspec_map[input_act1] = input_spec1
80 input_qspec_map=input_qspec_map,
89 input_qspec_map = {}
90 input_qspec_map[first_input_node] = quantization_config.input_activation
96 if input_node not in input_qspec_map:
97 input_qspec_map[input_node] = share_qparams_with_input_act0_qspec
100 input_qspec_map=input_qspec_map,
109 input_qspec_map = {}
[all …]
/external/executorch/backends/arm/quantizer/
arm_quantizer_utils.py
82 input_qspec_map = {}
86 input_qspec_map[input_act0] = input_act_qspec
92 input_qspec_map[input_act1] = shared_with_input0_qspec
93 return input_qspec_map, shared_with_input0_qspec
188 input_qspec_map={
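The arm_quantizer_utils.py hits above return an input_qspec_map plus a spec shared with the first input, which the add/sub/mm/mul annotators then pass to QuantizationAnnotation. A hedged sketch of that shared-qspec idea, not the ExecuTorch helper itself; annotate_binary_op and input_act_qspec are illustrative names.

from torch.fx import Node
from torch.ao.quantization.quantizer import (
    QuantizationAnnotation,
    SharedQuantizationSpec,
)

def annotate_binary_op(node: Node, input_act_qspec) -> None:
    input_act0, input_act1 = node.args[0], node.args[1]

    # Quantize input 0 normally; tie input 1 (and, here, the output) to the
    # (input_act0 -> node) edge so they end up sharing scale/zero-point.
    shared_with_input0_qspec = SharedQuantizationSpec((input_act0, node))
    input_qspec_map = {
        input_act0: input_act_qspec,
        input_act1: shared_with_input0_qspec,
    }

    node.meta["quantization_annotation"] = QuantizationAnnotation(
        input_qspec_map=input_qspec_map,
        output_qspec=shared_with_input0_qspec,
        _annotated=True,
    )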
/external/pytorch/test/quantization/pt2e/
test_quantize_pt2e.py
98 input_qspec_map={
167 input_qspec_map={
242 input_qspec_map={
259 input_qspec_map={
364 input_qspec_map={
464 input_qspec_map={
544 input_qspec_map={
619 input_qspec_map={
641 input_qspec_map={
731 input_qspec_map={
[all …]
/external/pytorch/torch/ao/quantization/pt2e/
qat_utils.py
568 ].input_qspec_map
569 input_qspec_map = {}
575 input_qspec_map[replacement_node.args[0]] = all_configs[0][1]
577 input_qspec_map[replacement_node.args[1]] = all_configs[1][1]
580 input_qspec_map[replacement_node.args[2]] = all_configs[2][1]
581 replacement_node.meta["quantization_annotation"].input_qspec_map = input_qspec_map
627 for input_node, qspec in annotation.input_qspec_map.items():
628 annotation.input_qspec_map[input_node] = _get_new_qspec(qspec)
port_metadata_pass.py
210 input_qspec_map = node.meta["quantization_annotation"].input_qspec_map
212 for input_node, qspec in input_qspec_map.items():
utils.py
135 input_qspec_map = annotation.input_qspec_map
137 if len(input_qspec_map) == 0 and output_qspec is None:
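The pt2e hits above are the read side: after annotation, passes such as the QAT fusion and metadata-porting steps walk a node's input_qspec_map to rewrite or validate specs. A small sketch of that pattern, with rewrite_qspecs and the _get_new_qspec callable treated as placeholders.

from torch.fx import GraphModule

def rewrite_qspecs(model: GraphModule, _get_new_qspec) -> None:
    for node in model.graph.nodes:
        annotation = node.meta.get("quantization_annotation")
        if annotation is None or not annotation.input_qspec_map:
            continue
        # Replace each input edge's spec with a transformed copy.
        for input_node, qspec in annotation.input_qspec_map.items():
            annotation.input_qspec_map[input_node] = _get_new_qspec(qspec)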
/external/executorch/backends/example/example_operators/
utils.py
30 quant_annotation.input_qspec_map[input_node] = quant_spec
/external/executorch/backends/cadence/aot/quantizer/
quantizer.py
123 annotation.input_qspec_map[node.args[idx]] = (
