Searched refs:m_impl (Results 1 – 25 of 33) sorted by relevance

/external/v8/src/inspector/
string-16.h
45 const UChar* characters16() const { return m_impl.c_str(); } in characters16()
46 size_t length() const { return m_impl.length(); } in length()
47 bool isEmpty() const { return !m_impl.length(); } in isEmpty()
48 UChar operator[](size_t index) const { return m_impl[index]; }
50 return String16(m_impl.substr(pos, len));
53 return m_impl.find(str.m_impl, start);
56 return m_impl.rfind(str.m_impl, start);
58 size_t find(UChar c, size_t start = 0) const { return m_impl.find(c, start); }
60 return m_impl.rfind(c, start);
63 m_impl.swap(other.m_impl); in swap()
[all …]
string-16.cc
376 : m_impl(other.m_impl), hash_code(other.hash_code) {} in String16()
379 : m_impl(std::move(other.m_impl)), in String16()
383 : m_impl(characters, size) {} in String16()
385 String16::String16(const UChar* characters) : m_impl(characters) {} in String16()
391 m_impl.resize(size); in String16()
392 for (size_t i = 0; i < size; ++i) m_impl[i] = characters[i]; in String16()
395 String16::String16(const std::basic_string<UChar>& impl) : m_impl(impl) {} in String16()
398 m_impl = other.m_impl; in operator =()
404 m_impl = std::move(other.m_impl); in operator =()
576 const UChar* characters = m_impl.data(); in utf8()
[all …]
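
The v8 inspector hits above all come from String16, which stores its text in an m_impl of type std::basic_string<UChar> and forwards every accessor to the wrapped string. A minimal, self-contained sketch of that delegation pattern (char16_t stands in for UChar to keep the sketch portable; the real header typedefs UChar to uint16_t and adds hashing, UTF-8 conversion, and further constructors):

  #include <cstddef>
  #include <string>

  // char16_t keeps std::basic_string fully standard here; v8 uses uint16_t.
  using UChar = char16_t;

  class String16 {
   public:
    String16() = default;
    String16(const UChar* characters, std::size_t size) : m_impl(characters, size) {}
    explicit String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}

    // Every accessor simply forwards to the wrapped basic_string.
    const UChar* characters16() const { return m_impl.c_str(); }
    std::size_t length() const { return m_impl.length(); }
    bool isEmpty() const { return m_impl.empty(); }
    UChar operator[](std::size_t index) const { return m_impl[index]; }

    String16 substring(std::size_t pos,
                       std::size_t len = std::basic_string<UChar>::npos) const {
      return String16(m_impl.substr(pos, len));
    }
    std::size_t find(const String16& str, std::size_t start = 0) const {
      return m_impl.find(str.m_impl, start);
    }
    void swap(String16& other) { m_impl.swap(other.m_impl); }

   private:
    std::basic_string<UChar> m_impl;
  };
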
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorBroadcasting.h
116 : m_broadcast(op.broadcast()),m_impl(op.expression(), device)
122 const InputDimensions& input_dims = m_impl.dimensions();
149 m_impl.evalSubExprsIfNeeded(NULL);
154 m_impl.cleanup();
160 return m_impl.coeff(0);
177 eigen_assert(idx < m_impl.dimensions()[i]);
181 eigen_assert(idx % m_impl.dimensions()[i] == 0);
183 inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
189 eigen_assert(index < m_impl.dimensions()[0]);
193 eigen_assert(index % m_impl.dimensions()[0] == 0);
[all …]
TensorConversion.h
56 : m_impl(impl) {}
60 return internal::pcast<SrcPacket, TgtPacket>(m_impl.template packet<LoadMode>(index));
64 const TensorEvaluator& m_impl;
72 : m_impl(impl) {}
78 SrcPacket src1 = m_impl.template packet<LoadMode>(index);
79 SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
85 const TensorEvaluator& m_impl;
92 : m_impl(impl) {}
98 SrcPacket src1 = m_impl.template packet<LoadMode>(index);
99 SrcPacket src2 = m_impl.template packet<LoadMode>(index + SrcPacketSize);
[all …]
TensorMorphing.h
113 : m_impl(op.expression(), device), m_dimensions(op.dimensions())
117 … eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions()));
128 return m_impl.evalSubExprsIfNeeded(data);
131 m_impl.cleanup();
136 return m_impl.coeff(index);
142 return m_impl.template packet<LoadMode>(index);
146 return m_impl.costPerCoeff(vectorized);
149 EIGEN_DEVICE_FUNC Scalar* data() const { return const_cast<Scalar*>(m_impl.data()); }
151 EIGEN_DEVICE_FUNC const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
154 TensorEvaluator<ArgType, Device> m_impl;
[all …]
TensorLayoutSwap.h
127 : m_impl(op.expression(), device)
130 m_dimensions[i] = m_impl.dimensions()[NumDims-1-i];
141 return m_impl.evalSubExprsIfNeeded(data);
144 m_impl.cleanup();
149 return m_impl.coeff(index);
155 return m_impl.template packet<LoadMode>(index);
159 return m_impl.costPerCoeff(vectorized);
162 EIGEN_DEVICE_FUNC Scalar* data() const { return m_impl.data(); }
164 const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
167 TensorEvaluator<ArgType, Device> m_impl;
[all …]
TensorArgMax.h
96 : m_impl(op.expression(), device) { }
99 return m_impl.dimensions();
103 m_impl.evalSubExprsIfNeeded(NULL);
107 m_impl.cleanup();
112 return CoeffReturnType(index, m_impl.coeff(index));
117 return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1);
123 TensorEvaluator<ArgType, Device> m_impl;
224 m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
239 return m_impl.dimensions();
243 m_impl.evalSubExprsIfNeeded(NULL);
[all …]
TensorStriding.h
120 : m_impl(op.expression(), device)
122 m_dimensions = m_impl.dimensions();
127 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
152 m_impl.evalSubExprsIfNeeded(NULL);
156 m_impl.cleanup();
161 return m_impl.coeff(srcCoeff(index));
196 PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
201 values[0] = m_impl.coeff(inputIndices[0]);
202 values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
220 return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) +
[all …]
TensorEvalTo.h
112 : m_impl(op.expression(), device), m_device(device),
125 EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
130 return m_impl.evalSubExprsIfNeeded(m_buffer);
134 m_buffer[i] = m_impl.coeff(i);
137 …internal::pstoret<CoeffReturnType, PacketReturnType, Aligned>(m_buffer + i, m_impl.template packet…
141 m_impl.cleanup();
158 return m_impl.costPerCoeff(vectorized) +
166 const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
171 TensorEvaluator<ArgType, Device> m_impl;
TensorChipping.h
153 : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device)
158 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
189 m_impl.evalSubExprsIfNeeded(NULL);
194 m_impl.cleanup();
199 return m_impl.coeff(srcCoeff(index));
215 values[i] = m_impl.coeff(inputIndex);
224 return m_impl.template packet<LoadMode>(index + m_inputOffset);
230 return m_impl.template packet<LoadMode>(inputIndex);
262 return m_impl.costPerCoeff(vectorized) +
267 CoeffReturnType* result = const_cast<CoeffReturnType*>(m_impl.data());
[all …]
TensorInflation.h
99 : m_impl(op.expression(), device), m_strides(op.strides())
101 m_dimensions = m_impl.dimensions();
112 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
133 m_impl.evalSubExprsIfNeeded(NULL);
137 m_impl.cleanup();
181 return m_impl.coeff(inputIndex);
207 const double input_size = m_impl.dimensions().TotalSize();
211 return m_impl.costPerCoeff(vectorized) +
222 TensorEvaluator<ArgType, Device> m_impl;
TensorPatch.h
102 : m_impl(op.expression(), device)
105 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
147 m_impl.evalSubExprsIfNeeded(NULL);
152 m_impl.cleanup();
181 return m_impl.coeff(inputIndex);
233 PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
238 values[0] = m_impl.coeff(inputIndices[0]);
239 values[PacketSize-1] = m_impl.coeff(inputIndices[1]);
252 return m_impl.costPerCoeff(vectorized) +
264 TensorEvaluator<ArgType, Device> m_impl;
TensorPadding.h
104 : m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value())
112 m_dimensions = m_impl.dimensions();
116 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
139 m_impl.evalSubExprsIfNeeded(NULL);
143 m_impl.cleanup();
177 return m_impl.coeff(inputIndex);
190 TensorOpCost cost = m_impl.costPerCoeff(vectorized);
239 const double in = static_cast<double>(m_impl.dimensions()[i]);
309 return m_impl.template packet<Unaligned>(inputIndex);
367 return m_impl.template packet<Unaligned>(inputIndex);
[all …]
TensorReverse.h
122 : m_impl(op.expression(), device), m_reverse(op.reverse())
128 m_dimensions = m_impl.dimensions();
146 m_impl.evalSubExprsIfNeeded(NULL);
150 m_impl.cleanup();
191 return m_impl.coeff(reverseIndex(index));
221 return m_impl.costPerCoeff(vectorized) +
230 TensorEvaluator<ArgType, Device> m_impl;
267 return this->m_impl.coeffRef(this->reverseIndex(index));
TensorShuffling.h
120 : m_impl(op.expression(), device)
122 const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
154 m_impl.evalSubExprsIfNeeded(NULL);
158 m_impl.cleanup();
163 return m_impl.coeff(srcCoeff(index));
184 return m_impl.costPerCoeff(vectorized) +
213 TensorEvaluator<ArgType, Device> m_impl;
245 return this->m_impl.coeffRef(this->srcCoeff(index));
TensorScan.h
106 : m_impl(op.expression(), device),
110 m_size(m_impl.dimensions()[op.axis()]),
119 const Dimensions& dims = m_impl.dimensions();
132 return m_impl.dimensions();
152 return m_impl;
160 m_impl.evalSubExprsIfNeeded(NULL);
197 m_impl.cleanup();
201 TensorEvaluator<ArgType, Device> m_impl;
TensorForcedEval.h
111 : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL)
114 EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
117 const Index numValues = internal::array_prod(m_impl.dimensions());
154 const TensorEvaluator<ArgType, Device>& impl() { return m_impl; }
158 TensorEvaluator<ArgType, Device> m_impl;
TensorReduction.h
144 reducer.reduce(self.m_impl.coeff(input), accum);
151 reducer.reduce(self.m_impl.coeff(index), accum);
160 reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
173 reducer.reducePacket(self.m_impl.template packet<Unaligned>(firstIndex + j), &p);
177 reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum);
206 reducer.reducePacket(self.m_impl.template packet<Unaligned>(input), accum);
223 const typename Self::Index num_coeffs = array_prod(self.m_impl.dimensions());
253 const Index num_coeffs = array_prod(self.m_impl.dimensions());
259 self.m_impl.costPerCoeff(Vectorizable) +
412 …: m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device), m_xp…
[all …]
TensorReductionCuda.h
165 typename Self::CoeffReturnType val = input.m_impl.coeff(index); in FullReductionKernel()
195 half last = input.m_impl.coeff(num_coeffs-1); in ReductionInitFullReduxKernelHalfFloat()
228 half last = input.m_impl.coeff(num_coeffs-1); in FullReductionKernelHalfFloat()
241 half2 val = input.m_impl.template packet<Unaligned>(index); in FullReductionKernelHalfFloat()
361 const Index num_coeffs = array_prod(self.m_impl.dimensions());
413 const Type val = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
422 reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val);
491 … const half2 val1 = input.m_impl.template packet<Unaligned>(row * num_coeffs_to_reduce + col);
493 … const half2 val2 = input.m_impl.template packet<Unaligned>((row+1) * num_coeffs_to_reduce + col);
498 const half last1 = input.m_impl.coeff(row * num_coeffs_to_reduce + col);
[all …]
TensorRef.h
48 …TensorLazyEvaluatorReadOnly(const Expr& expr, const Device& device) : m_impl(expr, device), m_dumm… in TensorLazyEvaluatorReadOnly()
49 m_dims = m_impl.dimensions(); in TensorLazyEvaluatorReadOnly()
50 m_impl.evalSubExprsIfNeeded(NULL); in TensorLazyEvaluatorReadOnly()
53 m_impl.cleanup(); in ~TensorLazyEvaluatorReadOnly()
60 return m_impl.data(); in data()
64 return m_impl.coeff(index); in coeff()
72 TensorEvaluator<Expr, Device> m_impl;
89 return this->m_impl.coeffRef(index); in coeffRef()
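
The Eigen Tensor hits all follow one evaluator pattern: each wrapping op (broadcast, reshape, stride, chip, pad, reverse, shuffle, …) stores the evaluator of its argument expression as m_impl, remaps the requested output index to an input index, and forwards coeff(), evalSubExprsIfNeeded(), cleanup() and costPerCoeff() to it. A simplified, non-Eigen sketch of that structure, using a made-up one-dimensional reverse as the index remapping:

  #include <cstddef>
  #include <utility>
  #include <vector>

  // Stands in for TensorEvaluator<ArgType, Device> in the snippets above.
  struct InnerEvaluator {
    std::vector<float> values;
    bool evalSubExprsIfNeeded(float*) { return true; }  // nothing extra to materialise
    void cleanup() {}
    float coeff(std::size_t index) const { return values[index]; }
    std::size_t size() const { return values.size(); }
  };

  // Plays the role of a wrapping evaluator such as TensorReverse's: it owns
  // its argument's evaluator as m_impl and forwards everything to it after
  // translating the requested index.
  class ReverseEvaluator {
   public:
    explicit ReverseEvaluator(InnerEvaluator impl) : m_impl(std::move(impl)) {}

    bool evalSubExprsIfNeeded(float* data) { return m_impl.evalSubExprsIfNeeded(data); }
    void cleanup() { m_impl.cleanup(); }

    // Mirrors "return m_impl.coeff(srcCoeff(index));" in the hits above.
    float coeff(std::size_t index) const { return m_impl.coeff(srcCoeff(index)); }

   private:
    std::size_t srcCoeff(std::size_t index) const { return m_impl.size() - 1 - index; }

    InnerEvaluator m_impl;
  };
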
/external/mdnsresponder/mDNSWindows/DLL.NET/
dnssd_NET.cpp
63 m_impl = new ServiceRefImpl(this); in ServiceRef()
80 check( m_impl != NULL ); in StartThread()
82 m_impl->SetupEvents(); in StartThread()
101 m_impl->ProcessingThread(); in ProcessingThread()
113 check(m_impl != NULL); in Dispose()
125 m_impl->Dispose(); in Dispose()
126 m_impl = NULL; in Dispose()
149 if ((m_callback != NULL) && (m_impl != NULL)) in EnumerateDomainsDispatch()
172 if ((m_callback != NULL) && (m_impl != NULL)) in RegisterDispatch()
196 if ((m_callback != NULL) && (m_impl != NULL)) in BrowseDispatch()
[all …]
dnssd_NET.h
164 m_impl = new RecordRefImpl; in RecordRef()
165 m_impl->m_ref = NULL; in RecordRef()
170 delete m_impl; in ~RecordRef() local
180 RecordRefImpl * m_impl; variable
415 ServiceRefImpl * m_impl; variable
455 m_impl = new TextRecordImpl(); in TextRecord()
456 TXTRecordCreate(&m_impl->m_ref, 0, NULL); in TextRecord()
461 TXTRecordDeallocate(&m_impl->m_ref); in ~TextRecord()
462 delete m_impl; in ~TextRecord() local
472 TextRecordImpl * m_impl; variable
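
The mDNSResponder .NET wrappers use m_impl as a classic pimpl pointer: the public class news up a *Impl object in its constructor, checks and dereferences the pointer when forwarding calls, and deletes or nulls it on Dispose/destruction. A small plain-C++ sketch of that ownership pattern (the real classes are C++/CLI and wrap DNS-SD handles; setNativeRef below is a hypothetical stand-in for those calls):

  #include <cassert>

  // Native implementation detail hidden behind the wrapper; in the real code
  // this holds the DNS-SD handle.
  class RecordRefImpl {
   public:
    void setRef(void* ref) { m_ref = ref; }
   private:
    void* m_ref = nullptr;
  };

  class RecordRef {
   public:
    RecordRef() : m_impl(new RecordRefImpl) {}   // allocate the impl up front
    ~RecordRef() { delete m_impl; }              // release it on teardown

    void setNativeRef(void* ref) {
      assert(m_impl != nullptr);                 // mirrors check(m_impl != NULL)
      m_impl->setRef(ref);
    }

    RecordRef(const RecordRef&) = delete;        // raw owning pointer: forbid copies
    RecordRef& operator=(const RecordRef&) = delete;

   private:
    RecordRefImpl* m_impl;
  };
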
/external/deqp/external/vulkancts/framework/vulkan/
vkPrograms.hpp
88 explicit Iterator (const IteratorImpl& i) : m_impl(i) {} in Iterator()
90 Iterator& operator++ (void) { ++m_impl; return *this; } in operator ++()
93 const std::string& getName (void) const { return m_impl->first; } in getName()
94 const Program& getProgram (void) const { return *m_impl->second; } in getProgram()
96 bool operator== (const Iterator& other) const { return m_impl == other.m_impl; } in operator ==()
97 bool operator!= (const Iterator& other) const { return m_impl != other.m_impl; } in operator !=()
101 IteratorImpl m_impl; member in vk::ProgramCollection::Iterator
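
In the Vulkan CTS ProgramCollection, m_impl is the underlying container iterator that the public Iterator adapts, exposing getName()/getProgram() instead of a raw pair. A sketch of the same adapter, assuming the wrapped IteratorImpl is a std::map<std::string, Program*>::const_iterator (the actual typedef is not visible in these results):

  #include <map>
  #include <string>

  struct Program {};  // placeholder for the CTS program type

  class Iterator {
   public:
    // Assumed wrapped type; see the note above.
    using IteratorImpl = std::map<std::string, Program*>::const_iterator;

    explicit Iterator(const IteratorImpl& i) : m_impl(i) {}

    Iterator& operator++() { ++m_impl; return *this; }

    // Named accessors hide the pair layout of the wrapped map iterator.
    const std::string& getName() const { return m_impl->first; }
    const Program& getProgram() const { return *m_impl->second; }

    bool operator==(const Iterator& other) const { return m_impl == other.m_impl; }
    bool operator!=(const Iterator& other) const { return m_impl != other.m_impl; }

   private:
    IteratorImpl m_impl;
  };
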
/external/deqp/framework/platform/android/
tcuAndroidInternals.cpp
242 , m_impl (DE_NULL) in GraphicBuffer()
248 m_impl = createGraphicBuffer(m_functions, m_baseFunctions, width, height, format, usage); in GraphicBuffer()
253 if (m_impl && m_baseFunctions.decRef) in ~GraphicBuffer()
255 m_baseFunctions.decRef(getAndroidNativeBase(m_impl)); in ~GraphicBuffer()
256 m_impl = DE_NULL; in ~GraphicBuffer()
262 return m_functions.lock(m_impl, usage, vaddr); in lock()
267 return m_functions.unlock(m_impl); in unlock()
272 return m_functions.getNativeBuffer(m_impl); in getNativeBuffer()
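
In tcuAndroidInternals, m_impl is an opaque native buffer handle: it is created through a table of dynamically resolved functions, reference-decremented in the destructor, and passed back into lock/unlock/getNativeBuffer. A rough sketch of that handle-plus-function-table pattern; every name below is a hypothetical stand-in for the real Android internals:

  // Hypothetical function table standing in for the functions the real code
  // resolves from the Android libraries at runtime.
  struct NativeFunctions {
    void* (*create)(int width, int height, int format, int usage);
    void  (*decRef)(void* handle);
    int   (*lock)(void* handle, int usage, void** vaddr);
    int   (*unlock)(void* handle);
  };

  class GraphicBuffer {
   public:
    GraphicBuffer(const NativeFunctions& fn, int width, int height, int format, int usage)
        : m_functions(fn), m_impl(fn.create(width, height, format, usage)) {}

    ~GraphicBuffer() {
      if (m_impl && m_functions.decRef) {
        m_functions.decRef(m_impl);   // drop the reference taken at creation
        m_impl = nullptr;
      }
    }

    int lock(int usage, void** vaddr) { return m_functions.lock(m_impl, usage, vaddr); }
    int unlock() { return m_functions.unlock(m_impl); }

   private:
    NativeFunctions m_functions;
    void* m_impl;
  };
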
/external/tensorflow/tensorflow/core/kernels/
eigen_volume_patch.h
54 : m_impl(op.expression(), device) { in CustomTensorEvaluator()
60 m_impl.dimensions(); in CustomTensorEvaluator()
245 m_impl.evalSubExprsIfNeeded(NULL); in evalSubExprsIfNeeded()
249 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } in cleanup()
329 return m_impl.coeff(inputIndex); in coeff()
427 return m_impl.template packet<Unaligned>(inputIndex); in packet()
443 const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; } in impl()
536 return m_impl.coeff(inputCoords); in coeff()
550 return m_impl.coeff(inputIndex); in coeff()
632 TensorEvaluator<ArgType, Device> m_impl; member
