/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorConversion.h |
    60    : m_impl(impl) {}
    64    return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<LoadMode>(index));
    68    const TensorEvaluator& m_impl;
    76    : m_impl(impl) {}
    82    SrcPacket src1 = m_impl.template packet<LoadMode>(index);
    83    SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
    89    const TensorEvaluator& m_impl;
    96    : m_impl(impl) {}
    102   SrcPacket src1 = m_impl.template packet<LoadMode>(index);
    103   SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
    [all …]
|
D | TensorBroadcasting.h |
    143   m_device(device), m_broadcast(op.broadcast()), m_impl(op.expression(), device)
    150   const InputDimensions& input_dims = m_impl.dimensions();
    214   m_impl.evalSubExprsIfNeeded(NULL);
    222   m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
    227   m_impl.cleanup();
    233   return m_impl.coeff(0);
    238   return m_impl.coeff(index);
    244   return m_impl.coeff(index);
    258   eigen_assert(idx < m_impl.dimensions()[i]);
    262   eigen_assert(idx % m_impl.dimensions()[i] == 0);
    [all …]
|
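The TensorBroadcasting.h hits all route through one idea: an output coefficient is fetched by folding the output index back onto the smaller input, wrapping each coordinate with a modulus, which is what the idx % m_impl.dimensions()[i] assert hints at. Below is a minimal sketch of that index mapping, assuming column-major layout and per-dimension broadcast factors; srcIndex and its signature are hypothetical illustrations, not Eigen's API.

    #include <array>
    #include <cstddef>

    // Hypothetical helper, not Eigen's code: map a flat index in the broadcast
    // output back to a flat index in the input, assuming column-major layout
    // and out_dims[i] == in_dims[i] * bcast[i], matching the asserts above.
    template <std::size_t N>
    std::size_t srcIndex(std::size_t out_index,
                         const std::array<std::size_t, N>& in_dims,
                         const std::array<std::size_t, N>& bcast) {
      std::size_t in_index = 0;
      std::size_t in_stride = 1;
      for (std::size_t i = 0; i < N; ++i) {
        const std::size_t out_dim = in_dims[i] * bcast[i];
        const std::size_t coord = out_index % out_dim;  // coordinate along dim i
        out_index /= out_dim;
        in_index += (coord % in_dims[i]) * in_stride;   // wrap back into the input
        in_stride *= in_dims[i];
      }
      return in_index;
    }

Under those assumptions the broadcasting evaluator's coeff(index) collapses to m_impl.coeff(srcIndex(index, in_dims, bcast)).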
D | TensorMorphing.h |
    146   : m_impl(op.expression(), device), m_dimensions(op.dimensions())
    150   … eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions()));
    159   m_impl.evalSubExprsIfNeededAsync(data, std::move(done));
    164   return m_impl.evalSubExprsIfNeeded(data);
    167   m_impl.cleanup();
    172   return m_impl.coeff(index);
    178   return m_impl.template packet<LoadMode>(index);
    182   return m_impl.costPerCoeff(vectorized);
    202   eigen_assert(m_impl.data() != NULL);
    211   m_impl.data() + desc.offset(), desc.dimensions());
    [all …]
|
D | TensorLayoutSwap.h |
    117   : m_impl(op.expression(), device)
    120   m_dimensions[i] = m_impl.dimensions()[NumDims-1-i];
    127   m_impl.bind(cgh);
    140   return m_impl.evalSubExprsIfNeeded(data);
    143   m_impl.cleanup();
    148   return m_impl.coeff(index);
    154   return m_impl.template packet<LoadMode>(index);
    158   return m_impl.costPerCoeff(vectorized);
    162   return constCast(m_impl.data());
    165   const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
    [all …]
|
D | TensorEvalTo.h |
    135   …: m_impl(op.expression(), device), m_buffer(device.get(op.buffer())), m_expression(op.expression()…
    142   EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
    147   return m_impl.evalSubExprsIfNeeded(m_buffer);
    156   m_impl.evalSubExprsIfNeededAsync(m_buffer, std::move(done));
    161   m_buffer[i] = m_impl.coeff(i);
    164   …internal::pstoret<CoeffReturnType, PacketReturnType, Aligned>(m_buffer + i, m_impl.template packet…
    169   return m_impl.getResourceRequirements();
    177   /*dst_strides=*/internal::strides<Layout>(m_impl.dimensions()));
    180   m_impl.block(desc, scratch, /*root_of_expr_ast=*/true);
    187   desc.dimensions(), internal::strides<Layout>(m_impl.dimensions()),
    [all …]
|
D | TensorArgMax.h |
    103   : m_impl(op.expression(), device) { }
    106   return m_impl.dimensions();
    110   m_impl.evalSubExprsIfNeeded(NULL);
    114   m_impl.cleanup();
    119   return CoeffReturnType(index, m_impl.coeff(index));
    124   return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1);
    131   m_impl.bind(cgh);
    136   TensorEvaluator<ArgType, Device> m_impl;
    245   m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
    263   return m_impl.dimensions();
    [all …]
|
D | TensorChipping.h |
    168   : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device)
    173   const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    204   m_impl.evalSubExprsIfNeeded(NULL);
    209   m_impl.cleanup();
    214   return m_impl.coeff(srcCoeff(index));
    230   values[i] = m_impl.coeff(inputIndex);
    238   return m_impl.template packet<LoadMode>(index + m_inputOffset);
    244   return m_impl.template packet<LoadMode>(inputIndex);
    277   return m_impl.costPerCoeff(vectorized) +
    286   m_impl.getResourceRequirements());
    [all …]
|
D | TensorStriding.h |
    113   : m_impl(op.expression(), device)
    115   m_dimensions = m_impl.dimensions();
    120   const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    146   m_impl.evalSubExprsIfNeeded(NULL);
    150   m_impl.cleanup();
    155   return m_impl.coeff(srcCoeff(index));
    192   PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
    197   values[0] = m_impl.coeff(inputIndices[0]);
    198   values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
    217   return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) +
    [all …]
|
D | TensorInflation.h |
    107   : m_impl(op.expression(), device), m_strides(op.strides())
    109   m_dimensions = m_impl.dimensions();
    120   const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    141   m_impl.evalSubExprsIfNeeded(NULL);
    145   m_impl.cleanup();
    191   return m_impl.coeff(inputIndex);
    218   const double input_size = m_impl.dimensions().TotalSize();
    222   return m_impl.costPerCoeff(vectorized) +
    232   m_impl.bind(cgh);
    240   TensorEvaluator<ArgType, Device> m_impl;
|
D | TensorShuffling.h |
    123   m_impl(op.expression(), device)
    125   const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    168   m_impl.evalSubExprsIfNeeded(NULL);
    176   m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
    181   m_impl.cleanup();
    187   return m_impl.coeff(index);
    189   return m_impl.coeff(srcCoeff(index));
    212   return self.m_impl.template packet<LoadMode>(index);
    258   assert(m_impl.data() != NULL);
    270   TensorBlockIOSrc src(input_strides, m_impl.data(), srcCoeff(desc.offset()));
    [all …]
|
D | TensorPatch.h |
    111   : m_impl(op.expression(), device)
    114   const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    156   m_impl.evalSubExprsIfNeeded(NULL);
    161   m_impl.cleanup();
    192   return m_impl.coeff(inputIndex);
    246   PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
    251   values[0] = m_impl.coeff(inputIndices[0]);
    252   values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
    266   return m_impl.costPerCoeff(vectorized) +
    275   m_impl.bind(cgh);
    [all …]
|
D | TensorPadding.h |
    120   …: m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value()), m_…
    128   m_dimensions = m_impl.dimensions();
    132   const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    155   m_impl.evalSubExprsIfNeeded(NULL);
    163   m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
    168   m_impl.cleanup();
    204   return m_impl.coeff(inputIndex);
    217   TensorOpCost cost = m_impl.costPerCoeff(vectorized);
    235   m_impl.getResourceRequirements());
    307   static_cast<Index>(m_impl.dimensions()[inner_dim_idx]);
    [all …]
|
D | TensorReverse.h |
    125   : m_impl(op.expression(), device),
    133   m_dimensions = m_impl.dimensions();
    153   m_impl.evalSubExprsIfNeeded(NULL);
    161   m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
    166   m_impl.cleanup();
    209   return m_impl.coeff(reverseIndex(index));
    324   block_buffer[dst] = m_impl.coeff(src);
    330   block_buffer[dst] = m_impl.coeff(src);
    364   return m_impl.costPerCoeff(vectorized) +
    373   m_impl.bind(cgh);
    [all …]
|
D | TensorReductionGpu.h |
    174   typename Self::CoeffReturnType val = input.m_impl.coeff(index); in FullReductionKernel()
    227   __halves2half2(input.m_impl.coeff(i), input.m_impl.coeff(i + 1)); in ReductionInitFullReduxKernelHalfFloat()
    231   half lastCoeff = input.m_impl.coeff(num_coeffs - 1); in ReductionInitFullReduxKernelHalfFloat()
    279   input.m_impl.coeff(num_coeffs - packet_width + 2 * i), in FullReductionKernelHalfFloat()
    280   input.m_impl.coeff(num_coeffs - packet_width + 2 * i + 1)); in FullReductionKernelHalfFloat()
    284   half last = input.m_impl.coeff(num_coeffs - 1); in FullReductionKernelHalfFloat()
    301   PacketType val = input.m_impl.template packet<Unaligned>(index); in FullReductionKernelHalfFloat()
    465   const Index num_coeffs = array_prod(self.m_impl.dimensions());
    517   const Type val = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
    526   reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val);
    [all …]
|
D | TensorForcedEval.h |
    139   : m_impl(op.expression(), device), m_op(op.expression()),
    143   EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
    146   const Index numValues = internal::array_prod(m_impl.dimensions());
    167   const Index numValues = internal::array_prod(m_impl.dimensions());
    210   return TensorBlock::materialize(m_buffer, m_impl.dimensions(), desc, scratch);
    224   m_impl.bind(cgh);
    228   TensorEvaluator<ArgType, Device> m_impl;
|
D | TensorTrace.h |
    112   : m_impl(op.expression(), device), m_traceDim(1), m_device(device)
    140   const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    215   m_impl.evalSubExprsIfNeeded(NULL);
    220   m_impl.cleanup();
    237   result += m_impl.coeff(cur_index);
    261   m_impl.bind(cgh);
    289   TensorEvaluator<ArgType, Device> m_impl;
|
D | TensorReduction.h |
    157   reducer.reduce(self.m_impl.coeff(input), accum);
    164   reducer.reduce(self.m_impl.coeff(index), accum);
    175   reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
    188   reducer.reducePacket(self.m_impl.template packet<Unaligned>(firstIndex + j), &paccum);
    192   reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
    215   reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
    255   self.m_impl.template packet<Unaligned>(firstIndex + j), &paccum);
    257   self.m_impl.template packet<Unaligned>(firstIndex + j + packetSize),
    261   reducer.reducePacket(self.m_impl.template packet<Unaligned>(
    267   reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
    [all …]
|
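The TensorReduction.h hits show two flavors of the same delegation: a scalar path calling reducer.reduce(self.m_impl.coeff(...)) and a vectorized path calling reducer.reducePacket(self.m_impl.template packet<Unaligned>(...)). Below is a minimal sketch of the scalar path, assuming a generic reducer with the reduce()/finalize() shape the hits suggest; SumReducer and reduceRange are hypothetical names, not Eigen's API.

    #include <cstddef>

    // Hypothetical reducer, not Eigen's: accumulate into *accum, finalize at the end.
    struct SumReducer {
      void reduce(float v, float* accum) const { *accum += v; }
      float finalize(float accum) const { return accum; }
    };

    template <typename Evaluator>
    float reduceRange(const Evaluator& impl, const SumReducer& reducer,
                      std::size_t firstIndex, std::size_t numValuesToReduce) {
      float accum = 0.0f;
      // A vectorized build would additionally consume PacketSize coefficients at
      // a time via reducer.reducePacket(impl.packet(firstIndex + j), &paccum) and
      // fall back to this scalar loop only for the tail, as the hits show.
      for (std::size_t j = 0; j < numValuesToReduce; ++j)
        reducer.reduce(impl.coeff(firstIndex + j), &accum);
      return reducer.finalize(accum);
    }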
D | TensorRef.h |
    51    …TensorLazyEvaluatorReadOnly(const Expr& expr, const Device& device) : m_impl(expr, device), m_dumm… in TensorLazyEvaluatorReadOnly()
    52    m_dims = m_impl.dimensions(); in TensorLazyEvaluatorReadOnly()
    53    m_impl.evalSubExprsIfNeeded(NULL); in TensorLazyEvaluatorReadOnly()
    56    m_impl.cleanup(); in ~TensorLazyEvaluatorReadOnly()
    63    return m_impl.data(); in data()
    67    return m_impl.coeff(index); in coeff()
    75    TensorEvaluator<Expr, Device> m_impl;
    94    return this->m_impl.coeffRef(index); in coeffRef()
|
D | TensorImagePatch.h |
    246   : m_device(device), m_impl(op.expression(), device)
    252   const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    393   m_impl.evalSubExprsIfNeeded(NULL);
    401   m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
    406   m_impl.cleanup();
    444   return m_impl.coeff(inputIndex);
    499   return m_impl.template packet<Unaligned>(inputIndex);
    508   …CE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
    513   m_impl.bind(cgh);
    536   return m_impl.costPerCoeff(vectorized) +
    [all …]
|
D | TensorScan.h |
    406   : m_impl(op.expression(), device),
    410   m_size(m_impl.dimensions()[op.axis()]),
    419   const Dimensions& dims = m_impl.dimensions();
    436   return m_impl.dimensions();
    460   return m_impl;
    468   m_impl.evalSubExprsIfNeeded(NULL);
    505   m_impl.cleanup();
    511   m_impl.bind(cgh);
    516   TensorEvaluator<ArgType, Device> m_impl;
|
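Every file in this directory follows the same evaluator composition: the wrapper owns a TensorEvaluator<ArgType, Device> m_impl for its child expression and forwards dimensions(), evalSubExprsIfNeeded(), cleanup(), coeff(), packet(), and costPerCoeff(), adding only its own index remapping or cost term. Below is a minimal sketch of that shared shape under a much-simplified interface; UnaryOpEvaluator is a hypothetical name, not an Eigen class.

    #include <cstddef>

    // Hypothetical class, not Eigen's: the skeleton the evaluators above share,
    // reduced to the members the hits keep showing.
    template <typename ArgEvaluator, typename Dimensions>
    class UnaryOpEvaluator {
     public:
      explicit UnaryOpEvaluator(const ArgEvaluator& impl) : m_impl(impl) {}

      // Shape and lifetime management are forwarded to the child evaluator.
      const Dimensions& dimensions() const { return m_impl.dimensions(); }
      bool evalSubExprsIfNeeded(float* data) { return m_impl.evalSubExprsIfNeeded(data); }
      void cleanup() { m_impl.cleanup(); }

      // Element access delegates too; the real ops remap the index first
      // (srcCoeff() in the chipping/striding/patch hits) or post-process the
      // value, then add their own term on top of m_impl.costPerCoeff().
      float coeff(std::size_t index) const { return m_impl.coeff(index); }

     private:
      ArgEvaluator m_impl;  // evaluator for the child expression
    };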
/external/mdnsresponder/mDNSWindows/DLL.NET/ |
D | dnssd_NET.cpp |
    63    m_impl = new ServiceRefImpl(this); in ServiceRef()
    80    check( m_impl != NULL ); in StartThread()
    82    m_impl->SetupEvents(); in StartThread()
    101   m_impl->ProcessingThread(); in ProcessingThread()
    113   check(m_impl != NULL); in Dispose()
    125   m_impl->Dispose(); in Dispose()
    126   m_impl = NULL; in Dispose()
    149   if ((m_callback != NULL) && (m_impl != NULL)) in EnumerateDomainsDispatch()
    172   if ((m_callback != NULL) && (m_impl != NULL)) in RegisterDispatch()
    196   if ((m_callback != NULL) && (m_impl != NULL)) in BrowseDispatch()
    [all …]
|
D | dnssd_NET.h |
    164   m_impl = new RecordRefImpl; in RecordRef()
    165   m_impl->m_ref = NULL; in RecordRef()
    170   delete m_impl; in ~RecordRef() local
    180   RecordRefImpl * m_impl; variable
    415   ServiceRefImpl * m_impl; variable
    455   m_impl = new TextRecordImpl(); in TextRecord()
    456   TXTRecordCreate(&m_impl->m_ref, 0, NULL); in TextRecord()
    461   TXTRecordDeallocate(&m_impl->m_ref); in ~TextRecord()
    462   delete m_impl; in ~TextRecord() local
    472   TextRecordImpl * m_impl; variable
|
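The dnssd_NET hits are the classic pimpl idiom with manual lifetime management: each wrapper news its *Impl in the constructor, null-checks m_impl before dispatching, and nulls it out in Dispose(). A minimal sketch of that discipline in plain C++, with hypothetical bodies standing in for the managed-C++ originals:

    // Hypothetical stand-in: the real class hides the raw DNSServiceRef and
    // event plumbing behind this pointer.
    class ServiceRefImpl { /* DNS-SD details elided */ };

    class ServiceRef {
     public:
      ServiceRef() : m_impl(new ServiceRefImpl()) {}
      ~ServiceRef() { Dispose(); }

      void Dispose() {
        if (m_impl != nullptr) {  // same guard the Dispose() hit shows
          delete m_impl;
          m_impl = nullptr;       // dispatch callbacks test this and bail out
        }
      }

     private:
      ServiceRef(const ServiceRef&);             // copying deliberately left out
      ServiceRef& operator=(const ServiceRef&);  // of this sketch
      ServiceRefImpl* m_impl;
    };

The (m_callback != NULL) && (m_impl != NULL) guards in the dispatch methods suggest a callback can still arrive after Dispose() has run, so every dispatch re-checks before touching the impl.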
/external/deqp/external/vulkancts/framework/vulkan/ |
D | vkPrograms.hpp |
    96    explicit Iterator (const IteratorImpl& i) : m_impl(i) {} in Iterator()
    98    Iterator& operator++ (void) { ++m_impl; return *this; } in operator ++()
    101   const std::string& getName (void) const { return m_impl->first; } in getName()
    102   const Program& getProgram (void) const { return *m_impl->second; } in getProgram()
    104   bool operator== (const Iterator& other) const { return m_impl == other.m_impl; } in operator ==()
    105   bool operator!= (const Iterator& other) const { return m_impl != other.m_impl; } in operator !=()
    109   IteratorImpl m_impl; member in vk::ProgramCollection::Iterator
|
/external/deqp/external/openglcts/modules/common/subgroups/ |
D | glcSubgroupsTestsUtils.hpp |
    83    explicit Iterator (const IteratorImpl& i) : m_impl(i) {} in Iterator()
    85    Iterator& operator++ (void) { ++m_impl; return *this; } in operator ++()
    88    const std::string& getName (void) const { return m_impl->first; } in getName()
    89    const Program& getProgram (void) const { return *m_impl->second; } in getProgram()
    91    bool operator== (const Iterator& other) const { return m_impl == other.m_impl; } in operator ==()
    92    bool operator!= (const Iterator& other) const { return m_impl != other.m_impl; } in operator !=()
    96    IteratorImpl m_impl; member in glc::ProgramCollection::Iterator
|
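Both deqp headers (vkPrograms.hpp in vk:: and glcSubgroupsTestsUtils.hpp in glc::) define the same iterator adaptor: wrap the container iterator and rename first/second into getName()/getProgram(). A minimal self-contained version, assuming IteratorImpl is a std::map<std::string, Program*>::const_iterator (the m_impl->first / m_impl->second accesses are consistent with that, but the actual typedef is not shown); Program is a placeholder type:

    #include <map>
    #include <string>

    struct Program {};  // placeholder for the real program type

    class Iterator {
      // Assumed underlying iterator type; see the caveat above.
      typedef std::map<std::string, Program*>::const_iterator IteratorImpl;

     public:
      explicit Iterator(const IteratorImpl& i) : m_impl(i) {}

      Iterator& operator++() { ++m_impl; return *this; }

      // Expose the pair members under domain vocabulary instead of first/second.
      const std::string& getName() const { return m_impl->first; }
      const Program& getProgram() const { return *m_impl->second; }

      bool operator==(const Iterator& other) const { return m_impl == other.m_impl; }
      bool operator!=(const Iterator& other) const { return m_impl != other.m_impl; }

     private:
      IteratorImpl m_impl;  // the wrapped map iterator
    };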
/external/eigen/unsupported/Eigen/ |
D | FFT |
    173   FFT( const impl_type & impl=impl_type() , Flag flags=Default ) :m_impl(impl),m_flag(flags) { }
    187   m_impl.fwd(dst,src,static_cast<int>(nfft));
    195   m_impl.fwd(dst,src,static_cast<int>(nfft));
    202   m_impl.fwd2(dst,src,n0,n1);
    272   m_impl.inv( dst,src,static_cast<int>(nfft) );
    280   m_impl.inv( dst,src,static_cast<int>(nfft) );
    362   m_impl.inv2(dst,src,n0,n1);
    369   impl_type & impl() {return m_impl;}
    397   impl_type m_impl;
|
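Eigen's FFT header is a facade over a pluggable backend: the FFT class stores an impl_type (kissfft by default, with FFTW and MKL as alternatives) and forwards fwd/inv and the 2-D fwd2/inv2 to it after normalizing sizes and types, exposing impl() as an escape hatch to backend-specific features. A minimal sketch of that delegation, with a hypothetical O(n^2) naive-DFT backend standing in for the real ones:

    #include <complex>
    #include <vector>

    // Hypothetical backend, not one of Eigen's: a direct DFT, correct but slow.
    struct NaiveDftImpl {
      void fwd(std::complex<double>* dst, const double* src, int nfft) {
        const double pi = 3.14159265358979323846;
        for (int k = 0; k < nfft; ++k) {
          std::complex<double> acc(0.0, 0.0);
          for (int n = 0; n < nfft; ++n)
            acc += src[n] * std::polar(1.0, -2.0 * pi * k * n / nfft);
          dst[k] = acc;
        }
      }
    };

    template <typename impl_type>
    class FFT {
     public:
      explicit FFT(const impl_type& impl = impl_type()) : m_impl(impl) {}

      // The facade normalizes buffer sizes; the transform itself is delegated.
      void fwd(std::vector<std::complex<double> >& dst, const std::vector<double>& src) {
        dst.resize(src.size());
        m_impl.fwd(dst.data(), src.data(), static_cast<int>(src.size()));
      }

      impl_type& impl() { return m_impl; }  // escape hatch to backend-specific API

     private:
      impl_type m_impl;
    };

Usage under these assumptions: FFT<NaiveDftImpl> fft; fft.fwd(freq, time); swapping the backend changes only the template argument, which is the point of the delegation.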