Home
last modified time | relevance | path

Searched refs: evalSubExprsIfNeeded (Results 1 – 25 of 33) sorted by relevance

1 2

/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
DTensorEvaluator.h56 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* dest) { in evalSubExprsIfNeeded() function
177 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
254 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { return true; }
319 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
320 m_argImpl.evalSubExprsIfNeeded(NULL);
396 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
397 m_leftImpl.evalSubExprsIfNeeded(NULL);
398 m_rightImpl.evalSubExprsIfNeeded(NULL);
491 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
492 m_arg1Impl.evalSubExprsIfNeeded(NULL);
[all …]
DTensorExecutor.h35 const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
57 const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
149 const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
251 const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL);
DTensorArgMax.h102 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
103 m_impl.evalSubExprsIfNeeded(NULL);
242 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
243 m_impl.evalSubExprsIfNeeded(NULL);
DTensorAssign.h121 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
123 m_leftImpl.evalSubExprsIfNeeded(NULL);
128 return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data());
DTensorConversion.h168 impl.evalSubExprsIfNeeded(NULL);
175 return impl.evalSubExprsIfNeeded(data);
208 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data)
DTensorEvalTo.h127 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(DevicePointer scalar) {
130 return m_impl.evalSubExprsIfNeeded(m_buffer);
DTensorLayoutSwap.h140 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
141 return m_impl.evalSubExprsIfNeeded(data);
DTensorInflation.h132 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
133 m_impl.evalSubExprsIfNeeded(NULL);
DTensorSyclRun.h35 const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); in run()
DTensorConcatenation.h180 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/)
182 m_leftImpl.evalSubExprsIfNeeded(NULL);
183 m_rightImpl.evalSubExprsIfNeeded(NULL);
DTensorMorphing.h127 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
128 return m_impl.evalSubExprsIfNeeded(data);
366 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
367 m_impl.evalSubExprsIfNeeded(NULL);
792 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
793 m_impl.evalSubExprsIfNeeded(NULL);
DTensorIO.h66 tensor.evalSubExprsIfNeeded(NULL);
DTensorReverse.h145 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
146 m_impl.evalSubExprsIfNeeded(NULL);
DTensorShuffling.h153 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
154 m_impl.evalSubExprsIfNeeded(NULL);
DTensorScan.h159 EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
160 m_impl.evalSubExprsIfNeeded(NULL);
DTensorPatch.h146 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
147 m_impl.evalSubExprsIfNeeded(NULL);
DTensorCustomOp.h108 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
264 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
DTensorBroadcasting.h148 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
149 m_impl.evalSubExprsIfNeeded(NULL);
DTensorStriding.h151 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
152 m_impl.evalSubExprsIfNeeded(NULL);
DTensorPadding.h138 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
139 m_impl.evalSubExprsIfNeeded(NULL);
DTensorRef.h50 m_impl.evalSubExprsIfNeeded(NULL); in TensorLazyEvaluatorReadOnly()
378 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
DTensorContraction.h330 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
331 m_leftImpl.evalSubExprsIfNeeded(NULL);
332 m_rightImpl.evalSubExprsIfNeeded(NULL);
DTensorConvolution.h377 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
378 m_inputImpl.evalSubExprsIfNeeded(NULL);
392 evalSubExprsIfNeeded(NULL);
797 EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) {
799 m_inputImpl.evalSubExprsIfNeeded(NULL);
DTensorChipping.h188 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
189 m_impl.evalSubExprsIfNeeded(NULL);
/external/tensorflow/tensorflow/core/kernels/
Dmirror_pad_op.h154 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
155 impl_.evalSubExprsIfNeeded(nullptr);

1 2