
Searched refs:quantization_mode (Results 1 – 9 of 9) sorted by relevance

/external/pytorch/torch/backends/_coreml/
preprocess.py
57 quantization_mode=CoreMLQuantizationMode.NONE, argument
65 quantization_mode,
93 quantization_mode,
107 if quantization_mode != CoreMLQuantizationMode.NONE:
109 mlmodel, nbits=8, quantization_mode=quantization_mode
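
The preprocess.py hits above show the CoreML backend threading a quantization_mode argument through preprocessing and, when it is not NONE, quantizing the exported model's weights to 8 bits. A minimal sketch of that pattern, assuming coremltools is installed; the enum values and the wrapper function are assumptions, only the parameter names mirror the hits:

    from enum import Enum

    import coremltools as ct
    from coremltools.models.neural_network import quantization_utils


    class CoreMLQuantizationMode(str, Enum):
        # Values assumed; the real enum is defined alongside preprocess.py.
        NONE = "none"
        LINEAR = "linear"
        LINEAR_SYMMETRIC = "linear_symmetric"


    def maybe_quantize(mlmodel: "ct.models.MLModel",
                       quantization_mode: CoreMLQuantizationMode = CoreMLQuantizationMode.NONE):
        """Quantize weights to 8 bits unless quantization is disabled (cf. preprocess.py:107-109)."""
        if quantization_mode != CoreMLQuantizationMode.NONE:
            mlmodel = quantization_utils.quantize_weights(
                mlmodel, nbits=8, quantization_mode=quantization_mode.value
            )
        return mlmodel
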
/external/swiftshader/third_party/astc-encoder/Source/
astc_block_sizes2.cpp
519 int quantization_mode; in construct_block_size_descriptor_2d() local
523 if (decode_block_mode_2d(i, &x_weights, &y_weights, &is_dual_plane, &quantization_mode)) in construct_block_size_descriptor_2d()
537 bsd->block_modes[i].quantization_mode = -1; in construct_block_size_descriptor_2d()
545 bsd->block_modes[i].quantization_mode = quantization_mode; in construct_block_size_descriptor_2d()
598 int quantization_mode; in construct_block_size_descriptor_3d() local
602 …f (decode_block_mode_3d(i, &x_weights, &y_weights, &z_weights, &is_dual_plane, &quantization_mode)) in construct_block_size_descriptor_3d()
615 bsd->block_modes[i].quantization_mode = -1; in construct_block_size_descriptor_3d()
623 bsd->block_modes[i].quantization_mode = quantization_mode; in construct_block_size_descriptor_3d()
astc_codec_internals.h
92 int8_t quantization_mode; member
astc_symbolic_physical.cpp
122 int weight_quantization_method = bsd->block_modes[block_mode].quantization_mode; in physical_to_symbolic()
astc_decompress_symbolic.cpp
213 int weight_quantization_level = bsd->block_modes[scb->block_mode].quantization_mode; in decompress_symbolic_block()
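
The astc-encoder hits follow one pattern: while building the block-size descriptor, each block mode that decodes successfully records its weight quantization_mode, and undecodable modes are marked with -1; physical_to_symbolic() and decompress_symbolic_block() later read that field back. A language-agnostic sketch of the table-building step, written in Python with hypothetical names:

    def build_quantization_mode_table(num_block_modes, decode_block_mode):
        """Record the decoded quantization mode per block mode; -1 marks invalid modes.

        decode_block_mode(i) is assumed to return the quantization mode for a
        decodable block mode and None otherwise (hypothetical signature).
        """
        table = []
        for i in range(num_block_modes):
            decoded = decode_block_mode(i)
            table.append(-1 if decoded is None else decoded)
        return table
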
/external/executorch/extension/llm/export/
quantizer_lib.py
52 quantization_mode: Optional[str] = None,
56 if quantization_mode:
146 quantization_mode: Optional[str] = None,
206 quantization_mode is None
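
In quantizer_lib.py, quantization_mode is an optional string that gates quantizer setup: nothing is configured when it is None. A minimal sketch of that gating, with a hypothetical helper name and placeholder return values (only the parameter mirrors the hits):

    from typing import List, Optional


    def get_quantizer_configs(quantization_mode: Optional[str] = None) -> List[str]:
        """Return quantizer config descriptions only when a mode is requested (illustrative)."""
        configs: List[str] = []
        if quantization_mode:
            if quantization_mode == "8da4w":
                configs.append("int8 dynamic activations + int4 grouped weights")
            else:
                raise ValueError(f"Unsupported quantization_mode: {quantization_mode}")
        return configs
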
/external/executorch/examples/models/llama/
export_llama_lib.py
534 elif args.quantization_mode in ["8da4w", "8da4w-gptq"]:
570 args.pt2e_quantize, args.quantization_mode
577 args.pt2e_quantize, args.quantization_mode
625 args.quantization_mode is not None
626 and args.quantization_mode.startswith("torchao:")
997 if args.quantization_mode:
/external/executorch/examples/models/llama/source_transformation/
quantize.py
779 qmode=args.quantization_mode,
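
Taken together, the export_llama_lib.py and quantize.py hits show the Llama example flow: args.quantization_mode selects a scheme such as "8da4w", "8da4w-gptq", or a "torchao:"-prefixed string, and is forwarded as qmode to the source-transformation quantizer. A hedged sketch of the "8da4w" branch only; the import path and default group size are assumptions and may differ between torchao releases:

    import torch

    # Assumed import path; Int8DynActInt4WeightQuantizer has moved between
    # torchao releases, so adjust for the installed version.
    from torchao.quantization.quant_api import Int8DynActInt4WeightQuantizer


    def quantize_model(model: torch.nn.Module, qmode: str, group_size: int = 128) -> torch.nn.Module:
        """Apply the scheme picked by --quantization_mode (only the 8da4w path is sketched)."""
        if qmode == "8da4w":
            # 8-bit dynamic activations, 4-bit grouped weights.
            return Int8DynActInt4WeightQuantizer(groupsize=group_size).quantize(model)
        raise NotImplementedError(f"quantization_mode {qmode!r} not covered in this sketch")
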
/external/executorch/examples/demo-apps/android/LlamaDemo/docs/delegates/
xnnpack_README.md
90 …s <path-to-your-params.json> -d fp32 -kv --use_sdpa_with_kv_cache --quantization_mode 8da4w --grou…
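
The README hit passes --quantization_mode 8da4w on the export command line alongside a group-size flag. A small argparse sketch of how such flags map onto the args.quantization_mode attribute used in export_llama_lib.py; every flag other than --quantization_mode is an assumption:

    import argparse

    parser = argparse.ArgumentParser(description="Illustrative slice of the export CLI surface.")
    parser.add_argument(
        "--quantization_mode",
        default=None,
        help='Quantization scheme, e.g. "8da4w", "8da4w-gptq", or a "torchao:..." string.',
    )
    parser.add_argument("--group_size", type=int, default=128,
                        help="Weight group size (assumed flag name; see the README for the exact spelling).")

    args = parser.parse_args(["--quantization_mode", "8da4w", "--group_size", "128"])
    assert args.quantization_mode == "8da4w"
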