/external/vulkan-validation-layers/layers/ |
D | unique_objects.cpp |
    289  for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {  in CreateComputePipelines() local
    290  local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);  in CreateComputePipelines()
    291  if (pCreateInfos[idx0].basePipelineHandle) {  in CreateComputePipelines()
    292  …local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineH…  in CreateComputePipelines()
    294  if (pCreateInfos[idx0].layout) {  in CreateComputePipelines()
    295  local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout);  in CreateComputePipelines()
    297  if (pCreateInfos[idx0].stage.module) {  in CreateComputePipelines()
    298  …local_pCreateInfos[idx0].stage.module = Unwrap(device_data, pCreateInfos[idx0].stage.module);  in CreateComputePipelines()
    329  for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {  in CreateGraphicsPipelines() local
    334  …device_data->renderpasses_states.find(Unwrap(device_data, pCreateInfos[idx0].renderPass));  in CreateGraphicsPipelines()
    [all …]
|
D | object_tracker_utils.cpp |
    446  for (uint32_t idx0 = 0; idx0 < descriptorCopyCount; ++idx0) {  in UpdateDescriptorSets() local
    447  if (pDescriptorCopies[idx0].dstSet) {  in UpdateDescriptorSets()
    448  …skip |= ValidateObject(device, pDescriptorCopies[idx0].dstSet, kVulkanObjectTypeDescriptorSet, fal…  in UpdateDescriptorSets()
    451  if (pDescriptorCopies[idx0].srcSet) {  in UpdateDescriptorSets()
    452  …skip |= ValidateObject(device, pDescriptorCopies[idx0].srcSet, kVulkanObjectTypeDescriptorSet, fal…  in UpdateDescriptorSets()
    477  for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {  in CreateComputePipelines() local
    478  if (pCreateInfos[idx0].basePipelineHandle) {  in CreateComputePipelines()
    479  …skip |= ValidateObject(device, pCreateInfos[idx0].basePipelineHandle, kVulkanObjectTypePipeline, t…  in CreateComputePipelines()
    482  if (pCreateInfos[idx0].layout) {  in CreateComputePipelines()
    483  …skip |= ValidateObject(device, pCreateInfos[idx0].layout, kVulkanObjectTypePipelineLayout, false,  in CreateComputePipelines()
    [all …]
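Both validation-layer hits iterate the caller's array and touch each optional handle member only when it is non-null: unique_objects.cpp rewrites the handles via Unwrap(), object_tracker_utils.cpp runs ValidateObject() on them. Below is a minimal sketch of the unwrapping variant with made-up types (not the layer's real handle types or Unwrap signature), just to show the loop shape.

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-ins; the real layer uses Vulkan handle types and a
    // per-device map from wrapped handles to driver handles.
    using Handle = std::uint64_t;

    struct CreateInfo {
        Handle basePipelineHandle = 0;  // optional members: 0 means "not set"
        Handle layout = 0;
        Handle module = 0;
    };

    Handle Unwrap(Handle wrapped) { return wrapped - 1; }  // placeholder translation

    // Same shape as the CreateComputePipelines() hits: copy each create info,
    // then rewrite every optional handle member that is actually set.
    std::vector<CreateInfo> UnwrapCreateInfos(const CreateInfo* infos, std::uint32_t count) {
        std::vector<CreateInfo> local(infos, infos + count);
        for (std::uint32_t idx0 = 0; idx0 < count; ++idx0) {
            if (infos[idx0].basePipelineHandle)
                local[idx0].basePipelineHandle = Unwrap(infos[idx0].basePipelineHandle);
            if (infos[idx0].layout)
                local[idx0].layout = Unwrap(infos[idx0].layout);
            if (infos[idx0].module)
                local[idx0].module = Unwrap(infos[idx0].module);
        }
        return local;
    }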
|
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorStriding.h |
    174  const Index idx0 = indices[0] / m_outputStrides[i];
    176  inputIndices[0] += idx0 * m_inputStrides[i];
    178  indices[0] -= idx0 * m_outputStrides[i];
    185  const Index idx0 = indices[0] / m_outputStrides[i];
    187  inputIndices[0] += idx0 * m_inputStrides[i];
    189  indices[0] -= idx0 * m_outputStrides[i];
    299  const Index idx0 = indices[0] / this->m_outputStrides[i];
    301  inputIndices[0] += idx0 * this->m_inputStrides[i];
    303  indices[0] -= idx0 * this->m_outputStrides[i];
    310  const Index idx0 = indices[0] / this->m_outputStrides[i];
    [all …]
|
D | TensorMorphing.h |
    419  const Index idx0 = indices[0] / m_fastOutputStrides[i];
    421  inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
    423  indices[0] -= idx0 * m_outputStrides[i];
    430  const Index idx0 = indices[0] / m_fastOutputStrides[i];
    432  inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
    434  indices[0] -= idx0 * m_outputStrides[i];
    569  const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
    571  inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
    573  indices[0] -= idx0 * this->m_outputStrides[i];
    580  const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
    [all …]
|
D | TensorContractionMapper.h |
    158  const Index idx0 = nocontract_val[0] / m_ij_strides[i];
    160  linidx[0] += idx0 * m_nocontract_strides[i];
    162  nocontract_val[0] -= idx0 * m_ij_strides[i];
    178  const Index idx0 = contract_val[0] / m_k_strides[i];
    180  linidx[0] += idx0 * m_contract_strides[i];
    182  contract_val[0] -= idx0 * m_k_strides[i];
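TensorStriding.h, TensorMorphing.h, and TensorContractionMapper.h all apply the same idiom around idx0: divide the running flat index by the stride of dimension i to get that dimension's coordinate, accumulate coordinate times the matching input stride, and subtract the consumed part before moving to the next dimension. A standalone sketch of that decomposition with generic strides (not Eigen's evaluator members), assuming strides are listed outermost dimension first:

    #include <array>
    #include <cstddef>

    using Index = std::ptrdiff_t;

    // Map a flat index in the output tensor to a flat index in the input tensor,
    // given precomputed output and input strides (outermost dimension first).
    template <std::size_t Rank>
    Index MapIndex(Index index,
                   const std::array<Index, Rank>& outputStrides,
                   const std::array<Index, Rank>& inputStrides) {
        static_assert(Rank >= 1, "need at least one dimension");
        Index inputIndex = 0;
        for (std::size_t i = 0; i + 1 < Rank; ++i) {
            const Index idx0 = index / outputStrides[i];  // coordinate along dimension i
            inputIndex += idx0 * inputStrides[i];         // advance in the input layout
            index -= idx0 * outputStrides[i];             // keep the remainder for inner dims
        }
        // The remainder is the coordinate along the innermost dimension.
        return inputIndex + index * inputStrides[Rank - 1];
    }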
|
/external/llvm/test/CodeGen/X86/ |
D | dag-merge-fast-accesses.ll |
    21  %idx0 = getelementptr i64, i64* %ptr, i64 0
    24  store i64 0, i64* %idx0, align 8
    45  %idx0 = getelementptr double, double* %ptr, i64 0
    48  store double %vecext0, double* %idx0, align 8
    77  %idx0 = getelementptr i64, i64* %ptr, i64 0
    80  %ld0 = load i64, i64* %idx0, align 4
|
D | vector-merge-store-fp-constants.ll |
    18  %idx0 = getelementptr float, float* %ptr, i64 0
    26  store float 0.0, float* %idx0, align 4
|
D | MergeConsecutiveStores.ll |
    476  %idx0 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
    484  store <4 x float> %shuffle0, <4 x float>* %idx0, align 16
    519  %idx0 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3
    521  store <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32>* %idx0, align 16
    536  %idx0 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 0
    541  %a0 = load i64, i64* %idx0, align 8
|
/external/swiftshader/third_party/subzero/crosstest/ |
D | test_vector_ops_ll.ll |
    3    i32 0, label %idx0
    8    idx0:
    28   i32 0, label %idx0
    33   idx0:
    57   i32 0, label %idx0
    66   idx0:
    106  i32 0, label %idx0
    123  idx0:
    195  i32 0, label %idx0
    200  idx0:
    [all …]
|
/external/eigen/test/ |
D | visitor.cpp |
    106  Index idx0 = internal::random<Index>(0,size-1);  in vectorVisitor() local
    110  v1(idx0) = v1(idx1);  in vectorVisitor()
    111  v2(idx0) = v2(idx2);  in vectorVisitor()
    114  VERIFY(eigen_minidx == (std::min)(idx0,idx1));  in vectorVisitor()
    115  VERIFY(eigen_maxidx == (std::min)(idx0,idx2));  in vectorVisitor()
|
/external/compiler-rt/lib/tsan/rtl/ |
D | tsan_sync.cc |
    208  u32 idx0 = *meta;  in GetAndLock() local
    212  u32 idx = idx0;  in GetAndLock()
    235  if (*meta != idx0) {  in GetAndLock()
    236  idx0 = *meta;  in GetAndLock()
    246  mys->next = idx0;  in GetAndLock()
    247  if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,  in GetAndLock()
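The GetAndLock() lines above are the usual lock-free publish: snapshot the current head into idx0, point the new block's next at it, and retry the compare-exchange until the head is unchanged. The same pattern with std::atomic pointers instead of TSan's 32-bit block indices (a generic sketch, not the MetaMap code, which also uses atomic_compare_exchange_strong on the meta slot):

    #include <atomic>

    struct Node {
        int value;
        Node* next;
    };

    // Push onto a Treiber-style lock-free stack: same CAS-retry shape as
    // GetAndLock(), just with pointers instead of block indices.
    void Push(std::atomic<Node*>& head, Node* n) {
        Node* old_head = head.load(std::memory_order_relaxed);   // like idx0 = *meta
        do {
            n->next = old_head;                                   // like mys->next = idx0
        } while (!head.compare_exchange_weak(old_head, n,
                                             std::memory_order_release,
                                             std::memory_order_relaxed));
    }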
|
/external/freetype/src/autofit/ |
D | afwarp.c |
    81   FT_Int idx_min, idx_max, idx0;  in af_warper_compute_line_best() local
    89   idx0 = xx1 - warper->t1;  in af_warper_compute_line_best()
    123  FT_Pos y = y0 + ( idx_min - idx0 );  in af_warper_compute_line_best()
    140  AF_WarpScore distort = base_distort + ( idx - idx0 );  in af_warper_compute_line_best()
    150  warper->best_delta = delta + ( idx - idx0 );  in af_warper_compute_line_best()
|
/external/mesa3d/src/compiler/nir/ |
D | nir_intrinsics.c |
    34  idx0, idx1, idx2, _flags) \  argument
    44  [NIR_INTRINSIC_ ## idx0] = 1, \
|
D | nir_intrinsics.h |
    301  #define SYSTEM_VALUE(name, components, num_indices, idx0, idx1, idx2) \  argument
    304  idx0, idx1, idx2, \
    392  #define LOAD(name, srcs, num_indices, idx0, idx1, idx2, flags) \  argument
    393  INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, num_indices, idx0, idx1, idx2, flags)
    427  #define STORE(name, srcs, num_indices, idx0, idx1, idx2, flags) \  argument
    428  INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, num_indices, idx0, idx1, idx2, flags)
|
/external/aac/libAACenc/src/ |
D | aacenc_tns.cpp |
    625  int i, idx0, idx1, idx2, idx3, idx4, lag;  in FDKaacEnc_MergedAutoCorrelation() local
    636  idx0 = lpcStartLine[LOFILT];  in FDKaacEnc_MergedAutoCorrelation()
    638  idx1 = idx0 + i / 4;  in FDKaacEnc_MergedAutoCorrelation()
    639  idx2 = idx0 + i / 2;  in FDKaacEnc_MergedAutoCorrelation()
    640  idx3 = idx0 + i * 3 / 4;  in FDKaacEnc_MergedAutoCorrelation()
    646  idx0 = lpcStartLine[LOFILT];  in FDKaacEnc_MergedAutoCorrelation()
    654  INT sc1 = FDKaacEnc_ScaleUpSpectrum(pSpectrum, spectrum, idx0, idx1);  in FDKaacEnc_MergedAutoCorrelation()
    661  for (nsc1 = 1; (1 << nsc1) < (idx1 - idx0); nsc1++)  in FDKaacEnc_MergedAutoCorrelation()
    672  rxx1_0 = FDKaacEnc_CalcAutoCorrValue(pSpectrum, idx0, idx1, 0, nsc1);  in FDKaacEnc_MergedAutoCorrelation()
    690  FDKaacEnc_CalcAutoCorrValue(pSpectrum, idx0, idx1, lag, nsc1);  in FDKaacEnc_MergedAutoCorrelation()
    [all …]
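FDKaacEnc_MergedAutoCorrelation() splits the spectrum at idx0..idx4 and calls FDKaacEnc_CalcAutoCorrValue() per subrange and lag; most of the surrounding code manages fixed-point scaling (sc1/nsc1). Ignoring that scaling, the per-range value being computed is plain autocorrelation, roughly as sketched below (generic code, not the FDK routine):

    #include <cstddef>

    // Autocorrelation of x over [start, stop) at a given lag:
    //   r(lag) = sum over n of x[n] * x[n + lag], for n + lag < stop
    double AutoCorr(const float* x, std::size_t start, std::size_t stop, std::size_t lag) {
        double acc = 0.0;
        for (std::size_t n = start; n + lag < stop; ++n) {
            acc += static_cast<double>(x[n]) * x[n + lag];
        }
        return acc;
    }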
|
/external/llvm/test/CodeGen/PowerPC/ |
D | vector-merge-store-fp-constants.ll |
    11  %idx0 = getelementptr float, float* %ptr, i64 0
    19  store float 0.0, float* %idx0, align 4
|
/external/compiler-rt/lib/sanitizer_common/ |
D | sanitizer_bitvector.h |
    146  uptr i0 = idx0(idx);  in setBit()
    161  uptr i0 = idx0(idx);  in clearBit()
    175  uptr i0 = idx0(idx);  in getBit()
    327  uptr idx0(uptr idx) const {  in idx0() function
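In sanitizer_bitvector.h, idx0() maps a bit index to the word (or sub-vector) that holds it, and setBit/clearBit/getBit start from that. The actual class is a two-level structure; the flat sketch below only shows the word-index/bit-offset split that idx0 performs:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Flat bitset: idx0-style word index plus bit offset within the word.
    class BitSet {
    public:
        explicit BitSet(std::size_t bits) : words_((bits + 63) / 64, 0) {}

        void set(std::size_t idx)       { words_[idx0(idx)] |=  bit(idx); }
        void clear(std::size_t idx)     { words_[idx0(idx)] &= ~bit(idx); }
        bool get(std::size_t idx) const { return (words_[idx0(idx)] & bit(idx)) != 0; }

    private:
        static std::size_t   idx0(std::size_t idx) { return idx / 64; }           // word index
        static std::uint64_t bit(std::size_t idx)  { return 1ULL << (idx % 64); } // bit mask

        std::vector<std::uint64_t> words_;
    };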
|
/external/icu/android_icu4j/src/main/java/android/icu/impl/coll/ |
D | TailoredSet.java |
    196  int idx0 = Collation.indexFromCE32(ce32);  in compare() local
    200  if (data.ce32s[idx0 + i] != baseData.ce32s[idx1 + i]) {  in compare()
    214  int idx0 = Collation.indexFromCE32(ce32);  in compare() local
    218  if (data.ces[idx0 + i] != baseData.ces[idx1 + i]) {  in compare()
|
/external/icu/icu4j/main/classes/collate/src/com/ibm/icu/impl/coll/ |
D | TailoredSet.java |
    194  int idx0 = Collation.indexFromCE32(ce32);  in compare() local
    198  if (data.ce32s[idx0 + i] != baseData.ce32s[idx1 + i]) {  in compare()
    212  int idx0 = Collation.indexFromCE32(ce32);  in compare() local
    216  if (data.ces[idx0 + i] != baseData.ces[idx1 + i]) {  in compare()
|
/external/llvm/test/CodeGen/AArch64/ |
D | merge-store.ll |
    31  %idx0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
    37  store <2 x float> %shuffle0, <2 x float>* %idx0, align 8
|
/external/tensorflow/tensorflow/python/debug/cli/ |
D | command_parser.py |
    86  idx0 = 0
    92  argument = command[idx0:start]
    99  idx0 = end
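In command_parser.py, idx0 tracks the start of the argument currently being scanned while quoted regions are handled specially. The real parser keeps quotes and handles escaping; the sketch below only illustrates the bookkeeping idea with a simplified quote-aware splitter (quotes are dropped, escaping is ignored):

    #include <string>
    #include <vector>

    // Split a command line on spaces while keeping double-quoted runs intact.
    std::vector<std::string> SplitCommand(const std::string& command) {
        std::vector<std::string> args;
        std::string current;
        bool in_quotes = false;
        for (char c : command) {
            if (c == '"') {
                in_quotes = !in_quotes;            // toggle quoted state, drop the quote
            } else if (c == ' ' && !in_quotes) {
                if (!current.empty()) { args.push_back(current); current.clear(); }
            } else {
                current.push_back(c);
            }
        }
        if (!current.empty()) args.push_back(current);
        return args;
    }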
|
D | analyzer_cli_test.py |
    177  idx0 = line.index("Size")
    179  tst.assertEqual(idx0, attr_seg[0])
    180  tst.assertEqual(idx0 + len("Size (B)"), attr_seg[1])
    187  idx0 = line.index("Op type")
    189  tst.assertEqual(idx0, attr_seg[0])
    190  tst.assertEqual(idx0 + len("Op type"), attr_seg[1])
    198  idx0 = line.index("Tensor name")
    200  tst.assertEqual(idx0, attr_seg[0])
    201  tst.assertEqual(idx0 + len("Tensor name"), attr_seg[1])
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | unique_op_test.py |
    69  y0, idx0 = gen_array_ops._unique_v2(x, axis=np.array([0], dtype))
    70  tf_y0, tf_idx0 = sess.run([y0, idx0])
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | vector-extract-insert.ll |
    33  …elt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx0, i32 %idx1) #1 {
    39  %insert = insertelement <4 x i32> %vec, i32 %val, i32 %idx0
|
D | indirect-addressing-si.ll |
    241  %idx0 = load volatile i32, i32 addrspace(1)* %gep
    242  %idx1 = add i32 %idx0, 1
    243  %val0 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx0
    299  %idx0 = load volatile i32, i32 addrspace(1)* %gep
    300  %idx1 = add i32 %idx0, 1
    302  %vec1 = insertelement <4 x i32> %vec0, i32 %live.out.val, i32 %idx0
|