Searched refs: n_bit (results 1 – 9 of 9, sorted by relevance)
/external/executorch/backends/apple/mps/test/test_mps_linear.py
  308  def get_min_max(self, n_bit: int = 4):
  309  max_int = 2 ** (n_bit - 1) - 1
  310  min_int = -(2 ** (n_bit - 1))
  316  n_bit: int = 4,
  329  min_int, max_int = self.get_min_max(n_bit)
  343  self, w, n_bit=4, groupsize=128, precision=torch.float32    argument
  361  max_int = 2 ** (n_bit - 1) - 1
  362  min_int = -(2 ** (n_bit - 1))
  376  self, w, n_bit=4, group_size=128, precision=torch.float32   argument
  379  w, n_bit, group_size, precision
  [all …]
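The matched test derives the signed n-bit integer range from n_bit and quantizes weights group-wise. A minimal sketch of that scheme in plain PyTorch, assuming a symmetric per-group scale; get_min_max matches the name in the hit above, while group_quantize is a hypothetical stand-in (the test's real helper may differ in signature and packing):

    import torch

    def get_min_max(n_bit: int = 4):
        # Signed n-bit two's-complement range, e.g. [-8, 7] for n_bit = 4.
        max_int = 2 ** (n_bit - 1) - 1
        min_int = -(2 ** (n_bit - 1))
        return min_int, max_int

    def group_quantize(w: torch.Tensor, n_bit: int = 4, group_size: int = 128,
                       precision: torch.dtype = torch.float32):
        # Symmetric per-group quantization: one scale per `group_size` input
        # features, values rounded and clamped to the signed n-bit range.
        # Assumes in_features is divisible by group_size.
        min_int, max_int = get_min_max(n_bit)
        out_features, in_features = w.shape
        w = w.to(precision).reshape(out_features, -1, group_size)
        scales = w.abs().amax(dim=-1, keepdim=True).clamp(min=1e-6) / max_int
        q = torch.clamp(torch.round(w / scales), min_int, max_int).to(torch.int8)
        return q.reshape(out_features, in_features), scales.squeeze(-1)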
/external/executorch/examples/models/llama/tests/test_pre_quantization_transforms.py
  55   n_bit = 4
  65   weight.to(torch.float32), n_bit, group_size, scales_precision
  146  n_bit = 4
  157  weight.to(torch.float32), n_bit, group_size, scales_precision
  168  n_bit,
/external/webrtc/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc
  80   bool n_bit;                   in ParseRefIndices()   local
  88   n_bit = parser.Read<bool>();  in ParseRefIndices()
  97   } while (n_bit);              in ParseRefIndices()
/external/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
  223  bool n_bit = !(i == vp9.num_ref_pics - 1);                   in WriteRefIndices()   local
  225  RETURN_FALSE_ON_ERROR(writer->WriteBits(n_bit ? 1 : 0, 1));  in WriteRefIndices()
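Both WebRTC hits use n_bit as the VP9 payload descriptor's continuation flag: the packetizer sets N on every reference index except the last, and the depacketizer keeps reading P_DIFF octets until N is 0. A rough Python sketch of that round trip, assuming the P_DIFF:7 / N:1 octet layout from the VP9 RTP payload draft (illustrative only, not the WebRTC classes):

    def parse_ref_indices(payload: bytes, offset: int = 0):
        # Each reference-index octet carries a 7-bit P_DIFF and a 1-bit N flag
        # in the least significant bit; N = 1 means another octet follows.
        p_diffs = []
        n_bit = True
        while n_bit:
            octet = payload[offset]
            offset += 1
            p_diffs.append(octet >> 1)    # P_DIFF: high 7 bits
            n_bit = bool(octet & 0x01)    # N: continuation flag
        return p_diffs, offset

    def write_ref_indices(p_diffs):
        # Mirror of rtp_format_vp9.cc: set N on every entry except the last.
        out = bytearray()
        for i, p_diff in enumerate(p_diffs):
            n_bit = i != len(p_diffs) - 1
            out.append((p_diff << 1) | int(n_bit))
        return bytes(out)

    assert parse_ref_indices(write_ref_indices([3, 1, 2])) == ([3, 1, 2], 3)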
/external/deqp/external/openglcts/modules/gles31/es31cTextureStorageMultisampleFunctionalTests.cpp
  1638  for (int n_bit = 0; n_bit < mask_bits_to_check; n_bit++)           in iterate()   local
  1651  gl.sampleMaski(0, 1 << n_bit);                                     in iterate()
  1672  gl.uniform1ui(n_bit_on_location, n_bit);                           in iterate()
  1715  << "Bit to check: [" << n_bit << "." << tcu::TestLog::EndMessage;  in iterate()
  2265  for (int n_bit = 0; n_bit < mask_bits_to_check; n_bit++)           in iterate()   local
  2271  gl.sampleMaski(0, 1 << n_bit);                                     in iterate()
  2289  gl.uniform1i(n_bit_on_location, n_bit);                            in iterate()
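The CTS test walks every sample-mask bit, enabling exactly one sample per iteration via gl.sampleMaski(0, 1 << n_bit) and telling the shader which bit should be on. The mask values themselves are just one-hot shifts; a trivial sketch of what the loop produces (illustrative only, no GL calls):

    def one_hot_sample_masks(mask_bits_to_check: int):
        # Mask for iteration n_bit has only bit n_bit set: 0b0001, 0b0010, ...
        return [1 << n_bit for n_bit in range(mask_bits_to_check)]

    assert one_hot_sample_masks(4) == [0b0001, 0b0010, 0b0100, 0b1000]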
/external/executorch/examples/models/llama/source_transformation/quantize.py
  431  n_bit = 8
  432  quant_min = -(2 ** (n_bit - 1))
  433  quant_max = 2 ** (n_bit - 1) - 1
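Here the same signed-range formula is evaluated at n_bit = 8, which comes out to the familiar int8 bounds; a quick worked check (not the file's code):

    n_bit = 8
    quant_min = -(2 ** (n_bit - 1))   # -128
    quant_max = 2 ** (n_bit - 1) - 1  # 127
    assert (quant_min, quant_max) == (-128, 127)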
/external/pytorch/torch/testing/_internal/common_quantization.py
  456  def _group_quantize_tensor(w, n_bit=4, q_group_size=16):    argument
  467  max_int = 2 ** n_bit - 1
  472  zeros = min_val + scales * (2 ** (n_bit - 1))
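The PyTorch test helper uses the unsigned variant: the full [0, 2**n_bit - 1] range with a per-group scale and a zero point derived from the group minimum. A minimal sketch of that affine scheme, mirroring the two formulas shown above; group_quantize_uint is a hypothetical stand-in, not the real _group_quantize_tensor, whose exact return format and packing differ:

    import torch

    def group_quantize_uint(w: torch.Tensor, n_bit: int = 4, q_group_size: int = 16):
        # Affine (asymmetric) per-group quantization: one scale and zero point
        # per group of q_group_size values. Assumes w.numel() is divisible by
        # q_group_size.
        to_quant = w.reshape(-1, q_group_size).to(torch.float32)
        max_val = to_quant.amax(dim=1, keepdim=True)
        min_val = to_quant.amin(dim=1, keepdim=True)
        max_int = 2 ** n_bit - 1
        scales = (max_val - min_val).clamp(min=1e-6) / max_int
        zeros = min_val + scales * (2 ** (n_bit - 1))
        q = torch.clamp(torch.round((to_quant - min_val) / scales), 0, max_int)
        return q.to(torch.uint8).reshape(w.shape), scales, zeros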
/external/pytorch/test/test_linalg.py
  6421  b_uint8, _ = _group_quantize_tensor(b, n_bit=4, q_group_size=32)
  6449  b, n_bit=4, q_group_size=q_group
  6497  b, n_bit=4, q_group_size=q_group
/external/pytorch/test/test_mps.py
  9362  b.to("cpu"), n_bit=4, q_group_size=q_group