/external/tensorflow/tensorflow/lite/kernels/ |
D | transpose.cc |
      34   kGenericOptimized,  enumerator
      119  if (kernel_type == kGenericOptimized) {  in Eval()
      127  if (kernel_type == kGenericOptimized) {  in Eval()
      141  if (kernel_type == kGenericOptimized) {  in Eval()
      171  transpose::Eval<transpose::kGenericOptimized>};  in Register_TRANSPOSE_GENERIC_OPTIMIZED()
|
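Taken together, the transpose.cc hits sketch the standard TFLite kernel-variant pattern: the file declares a small KernelType enum with a kGenericOptimized enumerator (line 34), templates Eval() on that enum and branches on it (lines 119/127/141), and pins a registration function to the generic-optimized instantiation (line 171). A minimal sketch of that shape, assuming the usual lite/kernels namespace layout; the op bodies and the Prepare function are elided, so this illustrates the structure rather than the actual contents of transpose.cc:

    #include "tensorflow/lite/c/common.h"

    namespace tflite {
    namespace ops {
    namespace builtin {
    namespace transpose {

    // Which implementation the templated Eval() should dispatch to.
    enum KernelType {
      kReference,
      kGenericOptimized,  // portable optimized path, no NEON required
    };

    template <KernelType kernel_type>
    TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      if (kernel_type == kGenericOptimized) {
        // optimized_ops::Transpose(...) would be called here.
      } else {
        // reference_ops::Transpose(...) would be called here.
      }
      return kTfLiteOk;
    }

    }  // namespace transpose

    TfLiteRegistration* Register_TRANSPOSE_GENERIC_OPTIMIZED() {
      // Field order is {init, free, prepare, invoke}; init/free/prepare are
      // left null in this sketch.
      static TfLiteRegistration r = {
          nullptr, nullptr, nullptr,
          transpose::Eval<transpose::kGenericOptimized>};
      return &r;
    }

    }  // namespace builtin
    }  // namespace ops
    }  // namespace tflite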
D | floor.cc |
      34  kGenericOptimized,  enumerator
      59  if (type == kGenericOptimized) {  in Eval()
      81  floor::Eval<floor::kGenericOptimized>};  in Register_FLOOR()
|
D | l2norm.cc |
      35   kGenericOptimized,  enumerator
      111  if (kernel_type == kGenericOptimized) {  in Eval()
      126  if (kernel_type == kGenericOptimized) {  in Eval()
      160  l2norm::Eval<l2norm::kGenericOptimized>};  in Register_L2NORM_GENERIC_OPT()
|
D | pad.cc |
      38   kGenericOptimized,  enumerator
      221  } else if (kernel_type == kGenericOptimized) {  in Eval()
      245  } else if (kernel_type == kGenericOptimized) {  in Eval()
      256  } else if (kernel_type == kGenericOptimized) {  in Eval()
      279  pad::Eval<pad::kGenericOptimized>};  in Register_PAD_GENERIC_OPT()
      294  pad::Eval<pad::kGenericOptimized>};  in Register_PADV2_GENERIC_OPT()
|
D | maximum_minimum.cc |
      37   kGenericOptimized,  enumerator
      108  void TFLiteOperation<maximum_minimum::kGenericOptimized, int8, MaximumOp>(  in TFLiteOperation()
      133  void TFLiteOperation<maximum_minimum::kGenericOptimized, int8, MinimumOp>(  in TFLiteOperation()
      205  maximum_minimum::Eval<maximum_minimum::kGenericOptimized,  in Register_MAXIMUM_GENERIC_OPT()
      221  maximum_minimum::Eval<maximum_minimum::kGenericOptimized,  in Register_MINIMUM_GENERIC_OPT()
|
D | resize_bilinear.cc |
      36   kGenericOptimized,  // Neon-free  enumerator
      129  if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {  in Eval()
      136  if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {  in Eval()
      165  resize_bilinear::Eval<resize_bilinear::kGenericOptimized>};  in Register_RESIZE_BILINEAR_GENERIC_OPT()
|
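resize_bilinear.cc (and resize_nearest_neighbor.cc further down) adds a kNeonOptimized variant, and the checks at lines 129/136 treat it and the "Neon-free" kGenericOptimized identically, which suggests both variants funnel into the same optimized routine while only kReference takes a separate branch. A hedged sketch of that branch shape; the calls named in the comments are placeholders, not the real signatures:

    #include "tensorflow/lite/c/common.h"

    namespace resize_bilinear_sketch {

    enum KernelType { kReference, kGenericOptimized, kNeonOptimized };

    template <KernelType kernel_type>
    TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      // kGenericOptimized ("Neon-free") and kNeonOptimized share one
      // optimized path; only kReference branches differently.
      if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {
        // optimized_ops::ResizeBilinear(...) would be called here.
      } else {
        // reference_ops::ResizeBilinear(...) would be called here.
      }
      return kTfLiteOk;
    }

    }  // namespace resize_bilinear_sketch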
D | local_response_norm.cc |
      32   kGenericOptimized,  enumerator
      86   if (kernel_type == kGenericOptimized) {  in Eval()
      111  local_response_norm::Eval<local_response_norm::kGenericOptimized>};  in Register_LOCAL_RESPONSE_NORM_GENERIC_OPT()
|
D | conv.cc |
      57   kGenericOptimized,  // Neon-free  enumerator
      194  case kGenericOptimized:  in IsIm2ColRequired()
      620  effective_kernel_type = kGenericOptimized;  in EvalQuantized()
      651  case kGenericOptimized:  in EvalQuantized()
      697  case kGenericOptimized:  in EvalQuantizedPerChannel()
      734  case kGenericOptimized:  in EvalQuantizedPerChannel16x8()
      763  effective_kernel_type = kGenericOptimized;  in EvalFloat()
      786  case kGenericOptimized: {  in EvalFloat()
      888  case kGenericOptimized:  in EvalHybridPerChannel()
      958  case kGenericOptimized:  in EvalHybrid()
      [all …]
|
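conv.cc is the heaviest user: besides switching on the kernel type in each Eval* helper and in IsIm2ColRequired(), it overwrites an effective_kernel_type with kGenericOptimized at lines 620 and 763, i.e. a more specialized variant gets demoted to the portable optimized path when some precondition does not hold. The precondition itself is not visible in these hits, so the guard below is a hypothetical placeholder, as is the kMultithreadOptimized enumerator; only the demote-then-switch pattern is taken from the listing:

    #include "tensorflow/lite/c/common.h"

    namespace conv_sketch {

    enum KernelType { kReference, kGenericOptimized, kMultithreadOptimized };

    // Hypothetical guard: stands in for whatever precondition conv.cc
    // actually checks before allowing the specialized kernel.
    bool SpecializedPathSupported(const TfLiteNode* node) { return false; }

    template <KernelType kernel_type>
    TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node) {
      KernelType effective_kernel_type = kernel_type;
      if (effective_kernel_type == kMultithreadOptimized &&
          !SpecializedPathSupported(node)) {
        // Demote to the portable optimized path, mirroring
        // "effective_kernel_type = kGenericOptimized;" in the listing.
        effective_kernel_type = kGenericOptimized;
      }
      switch (effective_kernel_type) {
        case kGenericOptimized:
          // optimized_ops::Conv(...) would go here.
          break;
        case kMultithreadOptimized:
          // the specialized path would go here.
          break;
        case kReference:
          // reference_ops::Conv(...) would go here.
          break;
      }
      return kTfLiteOk;
    }

    }  // namespace conv_sketch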
D | resize_nearest_neighbor.cc |
      38   kGenericOptimized,  enumerator
      121  if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {  in Eval()
      160  resize_nearest_neighbor::kGenericOptimized>};  in Register_RESIZE_NEAREST_NEIGHBOR_GENERIC_OPT()
|
D | strided_slice.cc |
      41   kGenericOptimized,  enumerator
      196  if (kernel_type == kGenericOptimized) { \  in Eval()
      255  strided_slice::Eval<strided_slice::kGenericOptimized>};  in Register_STRIDED_SLICE()
|
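The trailing backslash in the strided_slice.cc hit at line 196 (and in slice.cc at line 205) indicates that the kGenericOptimized check sits inside a macro, presumably so the same generic/reference branch can be stamped out once per element type. A sketch of that macro style under that assumption; the macro name and the calls in the comments are invented for illustration:

    #include <cstdint>
    #include "tensorflow/lite/c/common.h"

    namespace strided_slice_sketch {

    enum KernelType { kReference, kGenericOptimized };

    // Invented macro name; only the shape matters: one body expanded per
    // element type, branching on the kernel_type that the enclosing
    // templated Eval() was instantiated with.
    #define TF_LITE_STRIDED_SLICE_SKETCH(data_type)          \
      do {                                                    \
        if (kernel_type == kGenericOptimized) {               \
          /* optimized_ops::StridedSlice<data_type>(...) */   \
        } else {                                              \
          /* reference_ops::StridedSlice<data_type>(...) */   \
        }                                                     \
      } while (false)

    template <KernelType kernel_type>
    TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      TfLiteType output_type = kTfLiteFloat32;  // placeholder for output->type
      switch (output_type) {
        case kTfLiteFloat32:
          TF_LITE_STRIDED_SLICE_SKETCH(float);
          break;
        case kTfLiteInt32:
          TF_LITE_STRIDED_SLICE_SKETCH(std::int32_t);
          break;
        default:
          return kTfLiteError;
      }
      return kTfLiteOk;
    }

    #undef TF_LITE_STRIDED_SLICE_SKETCH

    }  // namespace strided_slice_sketch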
D | transpose_conv.cc |
      45   kGenericOptimized,  // Neon-free  enumerator
      131  if (kernel_type == kGenericOptimized) {  in AllocateTemporaryTensorsIfRequired()
      142  if (kernel_type == kGenericOptimized) {  in AllocateTemporaryTensorsIfRequired()
      419  case kGenericOptimized: {  in EvalFloat()
      471  case kGenericOptimized: {  in EvalQuantized()
      518  case kGenericOptimized: {  in EvalQuantizedPerChannel()
      702  transpose_conv::Prepare<transpose_conv::kGenericOptimized>,  in Register_TRANSPOSECONV_GENERIC_OPT()
      703  transpose_conv::Eval<transpose_conv::kGenericOptimized>};  in Register_TRANSPOSECONV_GENERIC_OPT()
|
D | slice.cc |
      39   kGenericOptimized,  enumerator
      205  if (kernel_type == kGenericOptimized) { \  in Eval()
      258  slice::Eval<slice::kGenericOptimized>};  in Register_SLICE()
|
D | space_to_depth.cc |
      35   kGenericOptimized,  enumerator
      156  space_to_depth::Eval<space_to_depth::kGenericOptimized>};  in Register_SPACE_TO_DEPTH_GENERIC_OPT()
|
D | depth_to_space.cc |
      35   kGenericOptimized,  enumerator
      160  depth_to_space::Eval<depth_to_space::kGenericOptimized>};  in Register_DEPTH_TO_SPACE_GENERIC_OPT()
|
D | activations.cc |
      59    kGenericOptimized,  enumerator
      414   if (kernel_type == kGenericOptimized || kernel_type == kReference) {  in TanhPrepare()
      519   if (kernel_type == kGenericOptimized || kernel_type == kReference) {  in SigmoidPrepare()
      1191  if (kernel_type == kGenericOptimized) {  in LogSoftmaxEval()
      1204  if (kernel_type == kGenericOptimized) {  in LogSoftmaxEval()
      1217  if (kernel_type == kGenericOptimized) {  in LogSoftmaxEval()
      1264  if (kernel_type == kGenericOptimized) {  in PreluEval()
      1488  activations::TanhPrepare<activations::kGenericOptimized>,  in Register_TANH_GENERIC_OPT()
      1489  activations::TanhEval<activations::kGenericOptimized>};  in Register_TANH_GENERIC_OPT()
      1519  activations::SigmoidPrepare<activations::kGenericOptimized>,  in Register_LOGISTIC_GENERIC_OPT()
      [all …]
|
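activations.cc shows that Prepare can be templated on the kernel type as well: TanhPrepare and SigmoidPrepare lump kGenericOptimized together with kReference at lines 414 and 519, leaving any remaining variant to a different setup path. A sketch of that prepare-time branch; what happens inside each branch, and the kFixedPointOptimized name, are assumptions, only the condition comes from the listing:

    #include "tensorflow/lite/c/common.h"

    namespace activations_sketch {

    enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized };

    template <KernelType kernel_type>
    TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) {
      if (kernel_type == kGenericOptimized || kernel_type == kReference) {
        // In this sketch, the portable optimized path and the reference path
        // share the same prepare-time setup.
      } else {
        // A more specialized variant (kFixedPointOptimized here, as an
        // assumption) would do its own setup, e.g. building lookup tables.
      }
      return kTfLiteOk;
    }

    }  // namespace activations_sketch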
D | batch_to_space_nd.cc |
      33   kGenericOptimized,  enumerator
      189  batch_to_space_nd::Eval<batch_to_space_nd::kGenericOptimized>};  in Register_BATCH_TO_SPACE_ND_GENERIC_OPT()
|
D | space_to_batch_nd.cc |
      34   kGenericOptimized,  enumerator
      196  space_to_batch_nd::Eval<space_to_batch_nd::kGenericOptimized>};  in Register_SPACE_TO_BATCH_ND_GENERIC_OPT()
|
D | concatenation.cc |
      37   kGenericOptimized,  enumerator
      212  concatenation::Eval<concatenation::kGenericOptimized>};  in Register_CONCATENATION_GENERIC_OPT()
|
D | div.cc |
      40   kGenericOptimized,  // Neon-free  enumerator
      246  div::Eval<div::kGenericOptimized>};  in Register_DIV_GENERIC_OPT()
|
D | pooling.cc |
      43   kGenericOptimized,  enumerator
      484  pooling::AverageEval<pooling::kGenericOptimized>};  in Register_AVERAGE_POOL_GENERIC_OPT()
      491  pooling::MaxEval<pooling::kGenericOptimized>};  in Register_MAX_POOL_GENERIC_OPT()
      498  pooling::L2Eval<pooling::kGenericOptimized>};  in Register_L2_POOL_GENERIC_OPT()
|
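pooling.cc shows one KernelType enum serving several operators in a single file: Register_AVERAGE_POOL_GENERIC_OPT(), Register_MAX_POOL_GENERIC_OPT() and Register_L2_POOL_GENERIC_OPT() wrap different templated entry points (AverageEval, MaxEval, L2Eval), all instantiated with the same pooling::kGenericOptimized. A sketch of those three registrations with stubbed-out bodies and init/free/prepare omitted; the _SKETCH suffix marks these as illustrations, not the real functions:

    #include "tensorflow/lite/c/common.h"

    namespace pooling_sketch {

    enum KernelType { kReference, kGenericOptimized };

    // One templated entry point per pooling flavour; bodies stubbed out.
    template <KernelType kernel_type>
    TfLiteStatus AverageEval(TfLiteContext* c, TfLiteNode* n) { return kTfLiteOk; }
    template <KernelType kernel_type>
    TfLiteStatus MaxEval(TfLiteContext* c, TfLiteNode* n) { return kTfLiteOk; }
    template <KernelType kernel_type>
    TfLiteStatus L2Eval(TfLiteContext* c, TfLiteNode* n) { return kTfLiteOk; }

    // Three registrations, all pinned to the generic-optimized variant.
    TfLiteRegistration* Register_AVERAGE_POOL_GENERIC_OPT_SKETCH() {
      static TfLiteRegistration r = {
          nullptr, nullptr, nullptr, AverageEval<kGenericOptimized>};
      return &r;
    }
    TfLiteRegistration* Register_MAX_POOL_GENERIC_OPT_SKETCH() {
      static TfLiteRegistration r = {
          nullptr, nullptr, nullptr, MaxEval<kGenericOptimized>};
      return &r;
    }
    TfLiteRegistration* Register_L2_POOL_GENERIC_OPT_SKETCH() {
      static TfLiteRegistration r = {
          nullptr, nullptr, nullptr, L2Eval<kGenericOptimized>};
      return &r;
    }

    }  // namespace pooling_sketch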
D | dequantize.h |
      38  kGenericOptimized,  enumerator
|